async reader + test (#1464)

This commit is contained in:
Victor Sokolov
2025-07-25 11:29:53 +02:00
committed by GitHub
parent a95c0bcd66
commit 0e0714b5a1
2 changed files with 788 additions and 0 deletions

asyncbuffer/buffer.go Normal file

@@ -0,0 +1,440 @@
// Package asyncbuffer provides an asynchronous buffer that reads data from an
// io.Reader in the background.
//
// When created, AsyncBuffer starts reading from the upstream reader in the
// background. If a read error occurs, it is stored and can be checked with
// AsyncBuffer.Error().
//
// When reading through AsyncBuffer.Reader().Read(), the error is only returned
// once the reader reaches the point where the error occurred. In other words,
// errors are delayed until encountered by the reader.
//
// However, AsyncBuffer.Close() and AsyncBuffer.Error() will immediately return
// any stored error, even if the reader has not yet reached the error point.
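//
// A minimal usage sketch (upstream here stands for any io.Reader):
//
//	ab := asyncbuffer.FromReader(upstream)
//	defer ab.Close()
//
//	r := ab.Reader()
//	buf := make([]byte, 1024)
//	n, err := r.Read(buf) // upstream errors surface here only once reached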
package asyncbuffer
import (
"errors"
"io"
"sync"
"sync/atomic"
)
// ChunkSize is the size of each chunk in bytes
const ChunkSize = 4096
// byteChunk is a struct that holds a buffer and the data read from the upstream reader
// The data slice is required since a chunk read may be smaller than ChunkSize
type byteChunk struct {
buf []byte
data []byte
}
// chunkPool is a global sync.Pool that holds byteChunk objects for
// all readers
var chunkPool = sync.Pool{
New: func() any {
buf := make([]byte, ChunkSize)
return &byteChunk{
buf: buf,
data: buf[:0],
}
},
}
// AsyncBuffer is a wrapper around io.Reader that reads data in chunks in the
// background and allows reading from it synchronously.
type AsyncBuffer struct {
r io.Reader // Upstream reader
chunks []*byteChunk // References to the chunks read from the upstream reader
err atomic.Value // Error that occurred during reading
finished atomic.Bool // Indicates that the reader has finished reading
len atomic.Int64 // Total length of the data read
closed atomic.Bool // Indicates that the reader was closed
mu sync.RWMutex // Mutex on chunks slice
newChunkSignal chan struct{} // Tick-tock channel that indicates that a new chunk is ready
}
// Reader is a cursor over the AsyncBuffer data that implements io.ReadSeeker.
// Each Reader keeps its own position, so multiple Readers can read from the
// same AsyncBuffer independently.
type Reader struct {
ab *AsyncBuffer
pos int64
}
// FromReader creates a new AsyncBuffer that reads from the given io.Reader in the background
func FromReader(r io.Reader) *AsyncBuffer {
ab := &AsyncBuffer{
r: r,
newChunkSignal: make(chan struct{}),
}
go ab.readChunks()
return ab
}
// getNewChunkSignal returns the channel that is closed when a new chunk is ready.
// The lock guarantees the channel is not replaced while it is being fetched
func (ab *AsyncBuffer) getNewChunkSignal() chan struct{} {
ab.mu.RLock()
defer ab.mu.RUnlock()
return ab.newChunkSignal
}
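// The signal channel works tick-tock: waiters grab the current channel and
// block on it; addChunk closes that channel (waking all waiters) and installs
// a fresh one for the next chunk. A waiter loop therefore looks like this
// (a sketch; dataReady is a hypothetical condition):
//
//	for !dataReady() {
//		<-ab.getNewChunkSignal() // unblocks once the next chunk lands
//	}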
// addChunk adds a new chunk to the AsyncBuffer, increments len and signals that a chunk is ready
func (ab *AsyncBuffer) addChunk(chunk *byteChunk) {
ab.mu.Lock()
defer ab.mu.Unlock()
// Store the chunk and increase the total length of the data read
ab.chunks = append(ab.chunks, chunk)
ab.len.Add(int64(len(chunk.data)))
// Signal that a chunk is ready: close the current signal channel and replace it with a fresh one
currSignal := ab.newChunkSignal
ab.newChunkSignal = make(chan struct{})
close(currSignal)
}
// finish marks the reader as finished
func (ab *AsyncBuffer) finish() {
// Indicate that the reader has finished reading
ab.finished.Store(true)
// If Close() was called before all the chunks were read, the signal channel
// has already been closed, so we must not close it again
if !ab.closed.Load() {
close(ab.newChunkSignal)
}
}
// readChunks reads data from the upstream reader in the background and stores it in chunks
func (ab *AsyncBuffer) readChunks() {
defer ab.finish()
// Stop reading if the reader is finished
for !ab.finished.Load() {
// Get a chunk from the pool
// If the pool is empty, it will create a new byteChunk with ChunkSize
chunk, ok := chunkPool.Get().(*byteChunk)
if !ok {
ab.err.Store(errors.New("asyncbuffer.AsyncBuffer.readChunks: failed to get chunk from pool"))
return
}
// Read data into the chunk's buffer
n, err := io.ReadFull(ab.r, chunk.buf)
// If the error is not EOF-related, store it and stop reading
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
ab.err.Store(err)
return
}
// If no bytes were read (EOF or n == 0), return the chunk to the pool and stop
if err == io.EOF || n == 0 {
chunkPool.Put(chunk)
return
}
// Resize the chunk's data slice to the number of bytes read
chunk.data = chunk.buf[:n]
// Store the reference to the chunk in the AsyncBuffer
ab.addChunk(chunk)
// We got ErrUnexpectedEOF meaning that some bytes were read, but this is the
// end of the stream, so we can stop reading
if err == io.ErrUnexpectedEOF {
return
}
}
}
// closedError returns an error if an attempt to read on a closed reader was made.
// If the reader had an error, it returns that error instead.
func (ab *AsyncBuffer) closedError() error {
// If the reader is closed, return the stored error, or a generic closed-reader error if none was stored
if ab.closed.Load() {
err := ab.Error()
if err == nil {
err = errors.New("asyncbuffer.AsyncBuffer.ReadAt: attempt to read on closed reader")
}
return err
}
return nil
}
// offsetAvailable checks if the data at the given offset is available for reading.
// It may return io.EOF if the reader is finished reading and the offset is beyond the end of the stream.
func (ab *AsyncBuffer) offsetAvailable(off int64) (bool, error) {
// We cannot read data from a closed reader
if ab.closed.Load() {
return false, ab.closedError()
}
// If the offset falls within the chunks already read, we can return immediately,
// even if an error occurred later in the stream
if off < ab.len.Load() {
return true, nil
}
// If the reader has finished reading and we still have not reached the
// offset, return either the stored error or EOF
if ab.finished.Load() {
// If an error has occurred, return it
err := ab.Error()
if err != nil {
return false, err
}
// Otherwise, it's EOF if the offset is beyond the end of the stream
return false, io.EOF
}
// No available data
return false, nil
}
// WaitFor waits for the data at the given offset to be ready; a nil result means success.
// It guarantees that the chunk at the given offset is ready to be read.
func (ab *AsyncBuffer) WaitFor(off int64) error {
for {
ok, err := ab.offsetAvailable(off)
if ok || err != nil {
return err
}
<-ab.getNewChunkSignal()
}
}
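// For example, to block until at least the first kilobyte is buffered
// (a sketch; offsets are zero-based, hence 1023 for the 1024th byte):
//
//	if err := ab.WaitFor(1023); err != nil && err != io.EOF {
//		// the upstream read failed before reaching this offset
//	}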
// Wait waits for the reader to finish reading all data and returns
// the total length of the data read.
func (ab *AsyncBuffer) Wait() (int64, error) {
for {
// We cannot read data from a closed reader even if there were no errors
if ab.closed.Load() {
return 0, ab.closedError()
}
// In case the reader is finished reading, we can return immediately
if ab.finished.Load() {
size := ab.len.Load()
// If there was an error during reading, return it regardless of the
// position where it occurred
return size, ab.Error()
}
// Block until the next chunk is ready
<-ab.getNewChunkSignal()
}
}
// Error returns the error that occurred during reading data in background.
func (ab *AsyncBuffer) Error() error {
err := ab.err.Load()
if err == nil {
return nil
}
errCast, ok := err.(error)
if !ok {
return errors.New("asyncbuffer.AsyncBuffer.Error: failed to get error")
}
return errCast
}
// readChunkAt copies data from the chunk at the given absolute offset to the provided slice.
// Chunk must be available when this method is called.
// Returns the number of bytes copied to the slice, or 0 if the chunk has no data
// (e.g. the offset is beyond the end of the stream).
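// For example, with ChunkSize = 4096, off = 5000, and rem = 100, the chunk
// index is 5000/4096 = 1, the offset within that chunk is 5000%4096 = 904,
// and at most min(100, 4096-904, len(chunk.data)) bytes are copied.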
func (ab *AsyncBuffer) readChunkAt(p []byte, off, rem int64) int {
// If the offset is beyond the data read so far, return 0
if off >= ab.len.Load() {
return 0
}
ind := off / ChunkSize // chunk index
chunk := ab.chunks[ind]
startOffset := off % ChunkSize // starting offset in the chunk
// If the offset within the current chunk is beyond the data it holds, return 0
if startOffset >= int64(len(chunk.data)) {
return 0
}
// How many bytes we can read from this chunk. No more than:
// - the number of bytes left to read in total
// - the chunk size minus the start offset
// - the number of bytes the chunk actually holds
size := min(rem, ChunkSize-startOffset, int64(len(chunk.data)))
if size == 0 {
return 0
}
return copy(p, chunk.data[startOffset:startOffset+size])
}
// readAt reads data from the AsyncBuffer at the given offset.
//
// It blocks until the chunk at the given offset is available (which may
// result in io.EOF if the offset is beyond the end of the stream), then
// copies as much already-buffered data as possible without waiting for the
// rest of the stream.
func (ab *AsyncBuffer) readAt(p []byte, off int64) (int, error) {
size := int64(len(p)) // total size of the data to read
if off < 0 {
return 0, errors.New("asyncbuffer.AsyncBuffer.readAt: negative offset")
}
// Wait for the offset to be available.
// It may return io.EOF if the offset is beyond the end of the stream.
err := ab.WaitFor(off)
if err != nil {
return 0, err
}
ab.mu.RLock()
defer ab.mu.RUnlock()
// If the reader is closed, we return an error
if ab.closed.Load() {
return 0, ab.closedError()
}
// Read data from the first chunk
n := ab.readChunkAt(p, off, size)
if n == 0 {
return 0, io.EOF // Failed to read any data: means we tried to read beyond the end of the stream
}
size -= int64(n)
off += int64(n) // Here and beyond off always points to the last read byte + 1
// Now, let's try to read the rest of the data from next chunks while they are available
for size > 0 {
// If data is not available at the given offset, we can return data read so far.
ok, err := ab.offsetAvailable(off)
if !ok || err != nil {
return n, err
}
// Read data from the next chunk
nX := ab.readChunkAt(p[n:], off, size)
n += nX
size -= int64(nX)
off += int64(nX)
// If we read fewer bytes than expected (a short chunk), we have reached
// the end of the stream, so return io.EOF
if int64(nX) < min(size, int64(ChunkSize)) {
return n, io.EOF
}
}
return n, nil
}
// Close closes the AsyncBuffer and releases all resources.
// If the buffer was already closed, it returns the error that occurred
// during background reading, if any, even if no reader has reached the
// position where the error occurred.
func (ab *AsyncBuffer) Close() error {
ab.mu.Lock()
defer ab.mu.Unlock()
// If the buffer is already closed, return the stored error (or nil) immediately
if ab.closed.Load() {
return ab.Error()
}
ab.closed.Store(true)
// If the reader is still running, we need to signal that it should stop and close the channel
if !ab.finished.Load() {
ab.finished.Store(true)
close(ab.newChunkSignal)
}
// Return all chunks to the pool
for _, chunk := range ab.chunks {
chunkPool.Put(chunk)
}
return nil
}
// Reader returns a new Reader, an io.ReadSeeker over the AsyncBuffer's data
func (ab *AsyncBuffer) Reader() *Reader {
return &Reader{ab: ab, pos: 0}
}
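// Multiple Readers may be created over the same AsyncBuffer; each keeps its
// own position, so they can read the shared data independently:
//
//	r1 := ab.Reader()
//	r2 := ab.Reader()
//	// reads through r1 do not advance r2's position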
// Read reads data from the AsyncBuffer at the current position and advances the position by the number of bytes read.
func (r *Reader) Read(p []byte) (int, error) {
n, err := r.ab.readAt(p, r.pos)
if err != nil {
return n, err
}
r.pos += int64(n)
return n, nil
}
// Seek sets the position of the reader to the given offset and returns the new position
func (r *Reader) Seek(offset int64, whence int) (int64, error) {
switch whence {
case io.SeekStart:
r.pos = offset
case io.SeekCurrent:
r.pos += offset
case io.SeekEnd:
size, err := r.ab.Wait()
if err != nil {
return 0, err
}
r.pos = size + offset
default:
return 0, errors.New("asyncbuffer.Reader.Seek: invalid whence")
}
if r.pos < 0 {
return 0, errors.New("asyncbuffer.Reader.Seek: negative position")
}
return r.pos, nil
}

asyncbuffer/buffer_test.go Normal file

@@ -0,0 +1,348 @@
package asyncbuffer
import (
"bytes"
"crypto/rand"
"errors"
"io"
"sync"
"sync/atomic"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
const (
halfChunkSize = ChunkSize / 2
quarterChunkSize = ChunkSize / 4
)
// erraticReader is a test reader that fails once it has read a certain number of bytes
type erraticReader struct {
reader bytes.Reader
failAt int64 // if set, will return an error after reading this many bytes
}
// Read reads data from the underlying reader and returns an error once the failAt offset is reached
func (r *erraticReader) Read(p []byte) (n int, err error) {
cur, _ := r.reader.Seek(0, io.SeekCurrent)
if r.failAt > 0 && r.failAt < cur+int64(len(p)) {
return 0, errors.New("simulated read failure")
}
return r.reader.Read(p)
}
// blockingReader is a test reader that releases data one chunk at a time
type blockingReader struct {
reader bytes.Reader
mu sync.Mutex // while the mutex is locked, Read blocks
unlocking atomic.Bool // if true, will proceed without locking each chunk
}
// newBlockingReader creates a new partialReader in locked state
func newBlockingReader(reader bytes.Reader) *blockingReader {
r := &blockingReader{
reader: reader,
}
r.mu.Lock()
return r
}
// flushNextChunk unlocks the reader, allowing it to return the next chunk of data
func (r *blockingReader) flushNextChunk() {
r.mu.Unlock()
}
// flush unlocks the reader, allowing it to return all data as usual
func (r *blockingReader) flush() {
r.unlocking.Store(true) // allow reading data without blocking
r.mu.Unlock() // and continue
}
// Read blocks until the next chunk is flushed, then reads data from the underlying reader
func (r *blockingReader) Read(p []byte) (n int, err error) {
if !r.unlocking.Load() {
r.mu.Lock()
}
n, err = r.reader.Read(p)
return n, err
}
// generateSourceData generates a random byte slice of the given size and a bytes.Reader over it
func generateSourceData(t *testing.T, size int64) ([]byte, *bytes.Reader) {
// The tests rely on offsets within chunks, so make sure ChunkSize is large enough
assert.GreaterOrEqual(t, ChunkSize, 20, "ChunkSize used in tests must be at least 20 bytes")
// Create a byte slice of the requested size
source := make([]byte, size)
// Fill the source with random data
_, err := rand.Read(source)
require.NoError(t, err)
return source, bytes.NewReader(source)
}
// TestAsyncBufferReadAt tests reading from AsyncBuffer using the readAt method, which all other read methods are based on
func TestAsyncBufferReadAt(t *testing.T) {
// Let's use source buffer which is 4.5 chunks long
source, bytesReader := generateSourceData(t, int64(ChunkSize*4)+halfChunkSize)
asyncBuffer := FromReader(bytesReader)
defer asyncBuffer.Close()
asyncBuffer.Wait() // Wait for all chunks to be read since we're going to read all data
// Let's read all the data
target := make([]byte, len(source))
n, err := asyncBuffer.readAt(target, 0)
require.NoError(t, err)
assert.Equal(t, len(source), n)
assert.Equal(t, target, source)
// Let's read all the data + a bit more
target = make([]byte, len(source)+1)
n, err = asyncBuffer.readAt(target, 0)
require.ErrorIs(t, err, io.EOF) // We read all the data, and reached end
assert.Equal(t, len(source), n)
assert.Equal(t, target[:n], source)
// Let's read > 1 chunk, but with offset from the beginning and the end
target = make([]byte, len(source)-halfChunkSize)
n, err = asyncBuffer.readAt(target, quarterChunkSize)
require.NoError(t, err)
assert.Equal(t, len(target), n)
assert.Equal(t, target, source[quarterChunkSize:len(source)-quarterChunkSize])
// Let's read some data from the middle of the stream < chunk size
target = make([]byte, quarterChunkSize)
n, err = asyncBuffer.readAt(target, ChunkSize+quarterChunkSize)
require.NoError(t, err)
assert.Equal(t, quarterChunkSize, n)
assert.Equal(t, target, source[ChunkSize+quarterChunkSize:ChunkSize+quarterChunkSize*2])
// Let's read some data from the last half chunk
target = make([]byte, quarterChunkSize)
n, err = asyncBuffer.readAt(target, ChunkSize*4+quarterChunkSize)
require.NoError(t, err)
assert.Equal(t, quarterChunkSize, n)
assert.Equal(t, target, source[ChunkSize*4+quarterChunkSize:ChunkSize*4+halfChunkSize])
// Let's try to read more data than is available in the stream
target = make([]byte, ChunkSize*2)
n, err = asyncBuffer.readAt(target, ChunkSize*4)
require.Error(t, err)
assert.Equal(t, err, io.EOF)
assert.Equal(t, ChunkSize/2, n)
assert.Equal(t, target[:ChunkSize/2], source[ChunkSize*4:]) // We read only last half chunk
// Let's try to read data beyond the end of the stream
target = make([]byte, ChunkSize*2)
n, err = asyncBuffer.readAt(target, ChunkSize*5)
require.Error(t, err)
assert.Equal(t, err, io.EOF)
assert.Equal(t, 0, n)
}
// TestAsyncBufferReadAtSmallBuffer tests reading from an AsyncBuffer whose source is smaller than a single chunk
func TestAsyncBufferReadAtSmallBuffer(t *testing.T) {
source, bytesReader := generateSourceData(t, 20)
asyncBuffer := FromReader(bytesReader)
defer asyncBuffer.Close()
// First, let's read all the data
target := make([]byte, len(source))
n, err := asyncBuffer.readAt(target, 0)
require.NoError(t, err)
assert.Equal(t, len(source), n)
assert.Equal(t, target, source)
// Let's read some data
target = make([]byte, 2)
n, err = asyncBuffer.readAt(target, 1)
require.NoError(t, err)
assert.Equal(t, len(target), n)
assert.Equal(t, target, source[1:3])
// Let's read some data beyond the end of the stream
target = make([]byte, 2)
n, err = asyncBuffer.readAt(target, 50)
require.Error(t, err)
assert.Equal(t, err, io.EOF)
assert.Equal(t, 0, n)
}
func TestAsyncBufferReader(t *testing.T) {
source, bytesReader := generateSourceData(t, int64(ChunkSize*4)+halfChunkSize)
// Create an AsyncBuffer with the byte slice
asyncBuffer := FromReader(bytesReader)
defer asyncBuffer.Close()
// Let's wait for all chunks to be read
size, err := asyncBuffer.Wait()
require.NoError(t, err, "AsyncBuffer failed to wait for all chunks")
assert.Equal(t, int64(ChunkSize*4+halfChunkSize), size)
reader := asyncBuffer.Reader()
// Read the first two chunks
twoChunks := make([]byte, ChunkSize*2)
n, err := reader.Read(twoChunks)
require.NoError(t, err)
assert.Equal(t, ChunkSize*2, n)
assert.Equal(t, source[:ChunkSize*2], twoChunks)
// Seek to the start of the fourth chunk + 5 bytes
pos, err := reader.Seek(ChunkSize*3+5, io.SeekStart)
require.NoError(t, err)
assert.Equal(t, int64(ChunkSize*3+5), pos)
// Read the next 10 bytes
smallSlice := make([]byte, 10)
n, err = reader.Read(smallSlice)
require.NoError(t, err)
assert.Equal(t, 10, n)
assert.Equal(t, source[ChunkSize*3+5:ChunkSize*3+5+10], smallSlice)
// Seek -10 bytes from the current position
pos, err = reader.Seek(-10, io.SeekCurrent)
require.NoError(t, err)
assert.Equal(t, int64(ChunkSize*3+5), pos)
// Read data again
n, err = reader.Read(smallSlice)
require.NoError(t, err)
assert.Equal(t, 10, n)
assert.Equal(t, source[ChunkSize*3+5:ChunkSize*3+5+10], smallSlice)
// Seek -10 bytes from end of the stream
pos, err = reader.Seek(-10, io.SeekEnd)
require.NoError(t, err)
assert.Equal(t, size-10, pos)
// Read last 10 bytes
n, err = reader.Read(smallSlice)
require.NoError(t, err)
assert.Equal(t, 10, n)
assert.Equal(t, source[size-10:], smallSlice)
// Seek beyond the end of the stream and try to read
pos, err = reader.Seek(1024, io.SeekCurrent)
require.NoError(t, err)
assert.Equal(t, size+1024, pos)
_, err = reader.Read(smallSlice)
require.ErrorIs(t, err, io.EOF)
}
// TestAsyncBufferClose tests closing the AsyncBuffer
func TestAsyncBufferClose(t *testing.T) {
_, bytesReader := generateSourceData(t, int64(ChunkSize*4)+halfChunkSize)
// Create an AsyncBuffer with the byte slice
asyncBuffer := FromReader(bytesReader)
reader1 := asyncBuffer.Reader()
reader2 := asyncBuffer.Reader()
asyncBuffer.Close()
b := make([]byte, 10)
_, err := reader1.Read(b)
require.Error(t, err, "asyncbuffer.AsyncBuffer.ReadAt: attempt to read on closed reader")
_, err = reader2.Read(b)
require.Error(t, err, "asyncbuffer.AsyncBuffer.ReadAt: attempt to read on closed reader")
// Closing an already-closed buffer should not panic
asyncBuffer.Close()
_, err = reader2.Read(b)
require.Error(t, err, "asyncbuffer.AsyncBuffer.ReadAt: attempt to read on closed reader")
}
// TestAsyncBufferReadAtErrAtSomePoint tests reading from AsyncBuffer using the readAt method
// when the upstream reader fails partway through the stream
func TestAsyncBufferReadAtErrAtSomePoint(t *testing.T) {
// Let's use source buffer which is 4.5 chunks long
source, bytesReader := generateSourceData(t, int64(ChunkSize*4)+halfChunkSize)
failingReader := &erraticReader{reader: *bytesReader, failAt: ChunkSize*3 + 5} // fails in the fourth chunk
asyncBuffer := FromReader(failingReader)
defer asyncBuffer.Close()
// Let's wait for all chunks to be read
_, err := asyncBuffer.Wait()
require.Error(t, err, "simulated read failure")
// Let's read something before the point where the error occurs
target := make([]byte, halfChunkSize)
n, err := asyncBuffer.readAt(target, 0)
require.NoError(t, err)
assert.Equal(t, len(target), n)
assert.Equal(t, target, source[:halfChunkSize])
// And again
target = make([]byte, halfChunkSize)
n, err = asyncBuffer.readAt(target, halfChunkSize)
require.NoError(t, err)
assert.Equal(t, len(target), n)
assert.Equal(t, target, source[halfChunkSize:halfChunkSize*2])
// Now read across the point where the error occurs
target = make([]byte, halfChunkSize)
_, err = asyncBuffer.readAt(target, ChunkSize*3)
require.Error(t, err, "simulated read failure")
}
// TestAsyncBufferReadAsync tests that readAt returns the data that is already
// buffered instead of blocking until the full requested length is available
func TestAsyncBufferReadAsync(t *testing.T) {
// Let's use a source buffer which is 3 chunks long
source, bytesReader := generateSourceData(t, int64(ChunkSize)*3)
blockingReader := newBlockingReader(*bytesReader)
asyncBuffer := FromReader(blockingReader)
defer asyncBuffer.Close()
// flush the first chunk to allow reading
blockingReader.flushNextChunk()
// Let's try to read first two chunks, however,
// we know that only the first chunk is available
target := make([]byte, ChunkSize*2)
n, err := asyncBuffer.readAt(target, 0)
require.NoError(t, err)
assert.Equal(t, ChunkSize, n)
assert.Equal(t, target[:ChunkSize], source[:ChunkSize])
blockingReader.flushNextChunk() // unlock the reader to allow reading the second chunk
asyncBuffer.WaitFor(ChunkSize + 1) // wait for the second chunk to be available
target = make([]byte, ChunkSize*2)
n, err = asyncBuffer.readAt(target, 0)
require.NoError(t, err)
assert.Equal(t, ChunkSize*2, n)
assert.Equal(t, target, source[:ChunkSize*2])
blockingReader.flush() // Flush the rest of the data
asyncBuffer.Wait()
// Try to read near end of the stream, EOF
target = make([]byte, ChunkSize)
n, err = asyncBuffer.readAt(target, ChunkSize*3-1)
require.ErrorIs(t, err, io.EOF)
assert.Equal(t, 1, n)
assert.Equal(t, target[0], source[ChunkSize*3-1])
// Try to read beyond the end of the stream: EOF
target = make([]byte, ChunkSize)
n, err = asyncBuffer.readAt(target, ChunkSize*3)
require.ErrorIs(t, err, io.EOF)
assert.Equal(t, 0, n)
}