diff --git a/asyncbuffer/buffer.go b/asyncbuffer/buffer.go index e36611cd..387b7497 100644 --- a/asyncbuffer/buffer.go +++ b/asyncbuffer/buffer.go @@ -269,7 +269,7 @@ func (ab *AsyncBuffer) Error() error { // Chunk must be available when this method is called. // Returns the number of bytes copied to the slice or 0 if chunk has no data // (eg. offset is beyond the end of the stream). -func (ab *AsyncBuffer) readChunkAt(p []byte, off, rem int64) int { +func (ab *AsyncBuffer) readChunkAt(p []byte, off int64) int { // If the chunk is not available, we return 0 if off >= ab.len.Load() { return 0 @@ -286,17 +286,9 @@ func (ab *AsyncBuffer) readChunkAt(p []byte, off, rem int64) int { return 0 } - // How many bytes we could read from the chunk. No more than: - // - left to read totally - // - chunk size minus the start offset - // - chunk has - size := min(rem, ChunkSize-startOffset, int64(len(chunk.data))) - - if size == 0 { - return 0 - } - - return copy(p, chunk.data[startOffset:startOffset+size]) + // Copy data to the target slice. The number of bytes to copy is limited by the + // size of the target slice and the size of the data in the chunk. + return copy(p, chunk.data[startOffset:]) } // readAt reads data from the AsyncBuffer at the given offset. @@ -333,7 +325,7 @@ func (ab *AsyncBuffer) readAt(p []byte, off int64) (int, error) { } // Read data from the first chunk - n := ab.readChunkAt(p, off, size) + n := ab.readChunkAt(p, off) if n == 0 { return 0, io.EOF // Failed to read any data: means we tried to read beyond the end of the stream } @@ -350,7 +342,7 @@ func (ab *AsyncBuffer) readAt(p []byte, off int64) (int, error) { } // Read data from the next chunk - nX := ab.readChunkAt(p[n:], off, size) + nX := ab.readChunkAt(p[n:], off) n += nX size -= int64(nX) off += int64(nX) @@ -402,13 +394,11 @@ func (ab *AsyncBuffer) Reader() *Reader { // Read reads data from the AsyncBuffer. 
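Worth spelling out why dropping the explicit bound in readChunkAt is safe: Go's built-in copy already clamps to the shorter of the destination and the source, so once the caller slices p down to the bytes it still wants, the old min(rem, ChunkSize-startOffset, len(chunk.data)) computation is redundant. A standalone sketch of that behavior (not imgproxy code):

```go
package main

import "fmt"

func main() {
	chunkData := []byte("0123456789") // stands in for chunk.data[startOffset:]
	p := make([]byte, 4)              // the caller-provided destination slice

	// copy returns min(len(p), len(chunkData)); the destination caps the copy.
	n := copy(p, chunkData)
	fmt.Println(n, string(p[:n])) // 4 "0123"

	// When the source is shorter, the source caps the copy instead.
	n = copy(p, []byte("ab"))
	fmt.Println(n, string(p[:n])) // 2 "ab"
}
```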
func (r *Reader) Read(p []byte) (int, error) { n, err := r.ab.readAt(p, r.pos) - if err != nil { - return n, err + if err == nil { + r.pos += int64(n) } - r.pos += int64(n) - - return n, nil + return n, err } // Seek sets the position of the reader to the given offset and returns the new position diff --git a/asyncbuffer/buffer_test.go b/asyncbuffer/buffer_test.go index 059604ae..815e646b 100644 --- a/asyncbuffer/buffer_test.go +++ b/asyncbuffer/buffer_test.go @@ -5,6 +5,7 @@ import ( "crypto/rand" "errors" "io" + "os" "sync" "sync/atomic" "testing" @@ -346,3 +347,15 @@ func TestAsyncBufferReadAsync(t *testing.T) { require.ErrorIs(t, io.EOF, err) assert.Equal(t, 0, n) } + +// TestAsyncBufferReadAllCompability tests that ReadAll methods works as expected +func TestAsyncBufferReadAllCompability(t *testing.T) { + source, err := os.ReadFile("../testdata/test1.jpg") + require.NoError(t, err) + asyncBuffer := FromReader(bytes.NewReader(source)) + defer asyncBuffer.Close() + + b, err := io.ReadAll(asyncBuffer.Reader()) + require.NoError(t, err) + require.Len(t, b, len(source)) +} diff --git a/etag/etag.go b/etag/etag.go index a2a39886..2dc7b129 100644 --- a/etag/etag.go +++ b/etag/etag.go @@ -6,6 +6,7 @@ import ( "encoding/json" "fmt" "hash" + "io" "net/textproto" "strings" "sync" @@ -105,7 +106,7 @@ func (h *Handler) ImageEtagExpected() string { return h.imgEtagExpected } -func (h *Handler) SetActualImageData(imgdata *imagedata.ImageData) bool { +func (h *Handler) SetActualImageData(imgdata *imagedata.ImageData) (bool, error) { var haveActualImgETag bool h.imgEtagActual, haveActualImgETag = imgdata.Headers["ETag"] haveActualImgETag = haveActualImgETag && len(h.imgEtagActual) > 0 @@ -113,7 +114,7 @@ func (h *Handler) SetActualImageData(imgdata *imagedata.ImageData) bool { // Just in case server didn't check ETag properly and returned the same one // as we expected if haveActualImgETag && h.imgEtagExpected == h.imgEtagActual { - return true + return true, nil } haveExpectedImgHash := len(h.imgHashExpected) != 0 @@ -123,14 +124,18 @@ func (h *Handler) SetActualImageData(imgdata *imagedata.ImageData) bool { defer eTagCalcPool.Put(c) c.hash.Reset() - c.hash.Write(imgdata.Data) + + _, err := io.Copy(c.hash, imgdata.Reader()) + if err != nil { + return false, err + } h.imgHashActual = base64.RawURLEncoding.EncodeToString(c.hash.Sum(nil)) - return haveExpectedImgHash && h.imgHashActual == h.imgHashExpected + return haveExpectedImgHash && h.imgHashActual == h.imgHashExpected, nil } - return false + return false, nil } func (h *Handler) GenerateActualETag() string { diff --git a/etag/etag_test.go b/etag/etag_test.go index aa307289..1d20e6f9 100644 --- a/etag/etag_test.go +++ b/etag/etag_test.go @@ -2,6 +2,7 @@ package etag import ( "io" + "net/http" "os" "strings" "testing" @@ -14,29 +15,36 @@ import ( "github.com/imgproxy/imgproxy/v3/options" ) -var ( - po = options.NewProcessingOptions() - - imgWithETag = imagedata.ImageData{ - Data: []byte("Hello Test"), - Headers: map[string]string{"ETag": `"loremipsumdolor"`}, - } - imgWithoutETag = imagedata.ImageData{ - Data: []byte("Hello Test"), - } - +const ( etagReq = `"yj0WO6sFU4GCciYUBWjzvvfqrBh869doeOC2Pp5EI1Y/RImxvcmVtaXBzdW1kb2xvciI"` - etagData = `"yj0WO6sFU4GCciYUBWjzvvfqrBh869doeOC2Pp5EI1Y/DvyChhMNu_sFX7jrjoyrgQbnFwfoOVv7kzp_Fbs6hQBg"` + etagData = `"yj0WO6sFU4GCciYUBWjzvvfqrBh869doeOC2Pp5EI1Y/D3t8wWhX4piqDCV4ZMEZsKvOaIO6onhKjbf9f-ZfYUV0"` ) type EtagTestSuite struct { suite.Suite + po *options.ProcessingOptions + imgWithETag *imagedata.ImageData + 
imgWithoutETag *imagedata.ImageData + h Handler } func (s *EtagTestSuite) SetupSuite() { logrus.SetOutput(io.Discard) + s.po = options.NewProcessingOptions() + + d, err := os.ReadFile("../testdata/test1.jpg") + s.Require().NoError(err) + + imgWithETag, err := imagedata.NewFromBytes(d, http.Header{"ETag": []string{`"loremipsumdolor"`}}) + s.Require().NoError(err) + + imgWithoutETag, err := imagedata.NewFromBytes(d, make(http.Header)) + s.Require().NoError(err) + + s.imgWithETag = imgWithETag + s.imgWithoutETag = imgWithoutETag } func (s *EtagTestSuite) TeardownSuite() { @@ -49,15 +57,15 @@ func (s *EtagTestSuite) SetupTest() { } func (s *EtagTestSuite) TestGenerateActualReq() { - s.h.SetActualProcessingOptions(po) - s.h.SetActualImageData(&imgWithETag) + s.h.SetActualProcessingOptions(s.po) + s.h.SetActualImageData(s.imgWithETag) s.Require().Equal(etagReq, s.h.GenerateActualETag()) } func (s *EtagTestSuite) TestGenerateActualData() { - s.h.SetActualProcessingOptions(po) - s.h.SetActualImageData(&imgWithoutETag) + s.h.SetActualProcessingOptions(s.po) + s.h.SetActualImageData(s.imgWithoutETag) s.Require().Equal(etagData, s.h.GenerateActualETag()) } @@ -75,7 +83,7 @@ func (s *EtagTestSuite) TestGenerateExpectedData() { func (s *EtagTestSuite) TestProcessingOptionsCheckSuccess() { s.h.ParseExpectedETag(etagReq) - s.Require().True(s.h.SetActualProcessingOptions(po)) + s.Require().True(s.h.SetActualProcessingOptions(s.po)) s.Require().True(s.h.ProcessingOptionsMatch()) } @@ -85,7 +93,7 @@ func (s *EtagTestSuite) TestProcessingOptionsCheckFailure() { s.h.ParseExpectedETag(wrongEtag) - s.Require().False(s.h.SetActualProcessingOptions(po)) + s.Require().False(s.h.SetActualProcessingOptions(s.po)) s.Require().False(s.h.ProcessingOptionsMatch()) } @@ -93,7 +101,7 @@ func (s *EtagTestSuite) TestImageETagExpectedPresent() { s.h.ParseExpectedETag(etagReq) //nolint:testifylint // False-positive expected-actual - s.Require().Equal(imgWithETag.Headers["ETag"], s.h.ImageEtagExpected()) + s.Require().Equal(s.imgWithETag.Headers["ETag"], s.h.ImageEtagExpected()) } func (s *EtagTestSuite) TestImageETagExpectedBlank() { @@ -104,7 +112,7 @@ func (s *EtagTestSuite) TestImageETagExpectedBlank() { func (s *EtagTestSuite) TestImageDataCheckDataToDataSuccess() { s.h.ParseExpectedETag(etagData) - s.Require().True(s.h.SetActualImageData(&imgWithoutETag)) + s.Require().True(s.h.SetActualImageData(s.imgWithoutETag)) } func (s *EtagTestSuite) TestImageDataCheckDataToDataFailure() { @@ -112,12 +120,12 @@ func (s *EtagTestSuite) TestImageDataCheckDataToDataFailure() { wrongEtag := etagData[:i] + `/Dwrongimghash"` s.h.ParseExpectedETag(wrongEtag) - s.Require().False(s.h.SetActualImageData(&imgWithoutETag)) + s.Require().False(s.h.SetActualImageData(s.imgWithoutETag)) } func (s *EtagTestSuite) TestImageDataCheckDataToReqSuccess() { s.h.ParseExpectedETag(etagData) - s.Require().True(s.h.SetActualImageData(&imgWithETag)) + s.Require().True(s.h.SetActualImageData(s.imgWithETag)) } func (s *EtagTestSuite) TestImageDataCheckDataToReqFailure() { @@ -125,19 +133,19 @@ func (s *EtagTestSuite) TestImageDataCheckDataToReqFailure() { wrongEtag := etagData[:i] + `/Dwrongimghash"` s.h.ParseExpectedETag(wrongEtag) - s.Require().False(s.h.SetActualImageData(&imgWithETag)) + s.Require().False(s.h.SetActualImageData(s.imgWithETag)) } func (s *EtagTestSuite) TestImageDataCheckReqToDataFailure() { s.h.ParseExpectedETag(etagReq) - s.Require().False(s.h.SetActualImageData(&imgWithoutETag)) + 
s.Require().False(s.h.SetActualImageData(s.imgWithoutETag)) } func (s *EtagTestSuite) TestETagBusterFailure() { config.ETagBuster = "busted" s.h.ParseExpectedETag(etagReq) - s.Require().False(s.h.SetActualImageData(&imgWithoutETag)) + s.Require().False(s.h.SetActualImageData(s.imgWithoutETag)) } func TestEtag(t *testing.T) { diff --git a/imagedata/factory.go b/imagedata/factory.go new file mode 100644 index 00000000..8d1be5d3 --- /dev/null +++ b/imagedata/factory.go @@ -0,0 +1,31 @@ +package imagedata + +import ( + "bytes" + "net/http" + "strings" + + "github.com/imgproxy/imgproxy/v3/imagemeta" +) + +// NewFromBytes creates a new ImageData instance from the provided byte slice. +func NewFromBytes(b []byte, headers http.Header) (*ImageData, error) { + r := bytes.NewReader(b) + + meta, err := imagemeta.DecodeMeta(r) + if err != nil { + return nil, err + } + + // Temporary workaround for the old ImageData interface + h := make(map[string]string, len(headers)) + for k, v := range headers { + h[k] = strings.Join(v, ", ") + } + + return &ImageData{ + data: b, + meta: meta, + Headers: h, + }, nil +} diff --git a/imagedata/image_data.go b/imagedata/image_data.go index 0bdabc3f..e8106b72 100644 --- a/imagedata/image_data.go +++ b/imagedata/image_data.go @@ -1,15 +1,18 @@ package imagedata import ( + "bytes" "context" "encoding/base64" "fmt" + "io" "os" "strings" "sync" "github.com/imgproxy/imgproxy/v3/config" "github.com/imgproxy/imgproxy/v3/ierrors" + "github.com/imgproxy/imgproxy/v3/imagemeta" "github.com/imgproxy/imgproxy/v3/imagetype" "github.com/imgproxy/imgproxy/v3/security" ) @@ -20,20 +23,43 @@ var ( ) type ImageData struct { - Type imagetype.Type - Data []byte + meta imagemeta.Meta + data []byte Headers map[string]string cancel context.CancelFunc cancelOnce sync.Once } -func (d *ImageData) Close() { +func (d *ImageData) Close() error { d.cancelOnce.Do(func() { if d.cancel != nil { d.cancel() } }) + + return nil +} + +// Meta returns the image metadata +func (d *ImageData) Meta() imagemeta.Meta { + return d.meta +} + +// Format returns the image format based on the metadata +func (d *ImageData) Format() imagetype.Type { + return d.meta.Format() +} + +// Reader returns an io.ReadSeeker for the image data +func (d *ImageData) Reader() io.ReadSeeker { + return bytes.NewReader(d.data) +} + +// Size returns the size of the image data in bytes. 
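A quick usage sketch for the new factory and accessors (illustrative only, not code from this patch; the file path is a placeholder). The point of NewFromBytes is that metadata is decoded once up front, and every consumer reads the bytes through its own ReadSeeker instead of reaching into a Data field:

```go
package main

import (
	"io"
	"log"
	"net/http"
	"os"

	"github.com/imgproxy/imgproxy/v3/imagedata"
)

func main() {
	raw, err := os.ReadFile("testdata/test1.jpg") // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	img, err := imagedata.NewFromBytes(raw, make(http.Header))
	if err != nil {
		log.Fatal(err)
	}
	defer img.Close()

	// Format() is a shortcut over the metadata decoded in NewFromBytes.
	log.Printf("format: %v, %d bytes", img.Format(), len(raw))

	// Each call to Reader() returns an independent ReadSeeker over the same bytes.
	if _, err := io.Copy(io.Discard, img.Reader()); err != nil {
		log.Fatal(err)
	}
}
```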
+// NOTE: asyncbuffer implementation will .Wait() for the data to be fully read +func (d *ImageData) Size() (int, error) { + return len(d.data), nil } func (d *ImageData) SetCancel(cancel context.CancelFunc) { diff --git a/imagedata/image_data_test.go b/imagedata/image_data_test.go index 9fc7e1ec..12809bbb 100644 --- a/imagedata/image_data_test.go +++ b/imagedata/image_data_test.go @@ -94,8 +94,8 @@ func (s *ImageDataTestSuite) TestDownloadStatusOK() { s.Require().NoError(err) s.Require().NotNil(imgdata) - s.Require().Equal(s.defaultData, imgdata.Data) - s.Require().Equal(imagetype.JPEG, imgdata.Type) + s.Require().Equal(s.defaultData, imgdata.data) + s.Require().Equal(imagetype.JPEG, imgdata.Format()) } func (s *ImageDataTestSuite) TestDownloadStatusPartialContent() { @@ -165,8 +165,8 @@ func (s *ImageDataTestSuite) TestDownloadStatusPartialContent() { } else { s.Require().NoError(err) s.Require().NotNil(imgdata) - s.Require().Equal(s.defaultData, imgdata.Data) - s.Require().Equal(imagetype.JPEG, imgdata.Type) + s.Require().Equal(s.defaultData, imgdata.data) + s.Require().Equal(imagetype.JPEG, imgdata.Format()) } }) } @@ -278,8 +278,8 @@ func (s *ImageDataTestSuite) TestDownloadGzip() { s.Require().NoError(err) s.Require().NotNil(imgdata) - s.Require().Equal(s.defaultData, imgdata.Data) - s.Require().Equal(imagetype.JPEG, imgdata.Type) + s.Require().Equal(s.defaultData, imgdata.data) + s.Require().Equal(imagetype.JPEG, imgdata.Format()) } func (s *ImageDataTestSuite) TestFromFile() { @@ -287,8 +287,8 @@ func (s *ImageDataTestSuite) TestFromFile() { s.Require().NoError(err) s.Require().NotNil(imgdata) - s.Require().Equal(s.defaultData, imgdata.Data) - s.Require().Equal(imagetype.JPEG, imgdata.Type) + s.Require().Equal(s.defaultData, imgdata.data) + s.Require().Equal(imagetype.JPEG, imgdata.Format()) } func (s *ImageDataTestSuite) TestFromBase64() { @@ -298,8 +298,8 @@ func (s *ImageDataTestSuite) TestFromBase64() { s.Require().NoError(err) s.Require().NotNil(imgdata) - s.Require().Equal(s.defaultData, imgdata.Data) - s.Require().Equal(imagetype.JPEG, imgdata.Type) + s.Require().Equal(s.defaultData, imgdata.data) + s.Require().Equal(imagetype.JPEG, imgdata.Format()) } func TestImageData(t *testing.T) { diff --git a/imagedata/read.go b/imagedata/read.go index 988df8dd..20be72d0 100644 --- a/imagedata/read.go +++ b/imagedata/read.go @@ -50,8 +50,8 @@ func readAndCheckImage(r io.Reader, contentLength int, secopts security.Options) } return &ImageData{ - Data: buf.Bytes(), - Type: meta.Format(), + data: buf.Bytes(), + meta: meta, cancel: cancel, }, nil } diff --git a/imagedatanew/image_data.go b/imagedatanew/image_data.go new file mode 100644 index 00000000..6e65247d --- /dev/null +++ b/imagedatanew/image_data.go @@ -0,0 +1,21 @@ +package imagedatanew + +import ( + "io" + "net/http" + + "github.com/imgproxy/imgproxy/v3/imagemeta" + "github.com/imgproxy/imgproxy/v3/imagetype" +) + +// ImageData is an interface that defines methods for reading image data and metadata +type ImageData interface { + io.Closer // Close closes the image data and releases any resources held by it + Reader() io.ReadSeeker // Reader returns a new ReadSeeker for the image data + Meta() imagemeta.Meta // Meta returns the metadata of the image data + Format() imagetype.Type // Format returns the image format from the metadata (shortcut) + Size() (int, error) // Size returns the size of the image data in bytes + + // This will be removed in the future + Headers() http.Header // Headers returns the HTTP headers of the image 
data, will be removed in the future +} diff --git a/processing/prepare.go b/processing/prepare.go index c5813375..ce1085af 100644 --- a/processing/prepare.go +++ b/processing/prepare.go @@ -251,7 +251,7 @@ func (pctx *pipelineContext) limitScale(widthToScale, heightToScale int, po *opt func prepare(pctx *pipelineContext, img *vips.Image, po *options.ProcessingOptions, imgdata *imagedata.ImageData) error { pctx.imgtype = imagetype.Unknown if imgdata != nil { - pctx.imgtype = imgdata.Type + pctx.imgtype = imgdata.Format() } pctx.srcWidth, pctx.srcHeight, pctx.angle, pctx.flip = extractMeta(img, po.Rotate, po.AutoRotate) @@ -266,7 +266,7 @@ func prepare(pctx *pipelineContext, img *vips.Image, po *options.ProcessingOptio // The size of a vector image is not checked during download, yet it can be very large. // So we should scale it down to the maximum allowed resolution - if !pctx.trimmed && imgdata != nil && imgdata.Type.IsVector() && !po.Enlarge { + if !pctx.trimmed && imgdata != nil && imgdata.Format().IsVector() && !po.Enlarge { resolution := imath.Round((float64(img.Width()*img.Height()) * pctx.wscale * pctx.hscale)) if resolution > po.SecurityOptions.MaxSrcResolution { scale := math.Sqrt(float64(po.SecurityOptions.MaxSrcResolution) / float64(resolution)) diff --git a/processing/processing.go b/processing/processing.go index d15249a6..c71e93c8 100644 --- a/processing/processing.go +++ b/processing/processing.go @@ -217,7 +217,16 @@ func saveImageToFitBytes(ctx context.Context, po *options.ProcessingOptions, img for { imgdata, err := img.Save(po.Format, quality) - if err != nil || len(imgdata.Data) <= po.MaxBytes || quality <= 10 { + if err != nil { + return nil, err + } + + size, err := imgdata.Size() + if err != nil { + return nil, err + } + + if size <= po.MaxBytes || quality <= 10 { return imgdata, err } imgdata.Close() @@ -226,7 +235,7 @@ func saveImageToFitBytes(ctx context.Context, po *options.ProcessingOptions, img return nil, err } - delta := float64(len(imgdata.Data)) / float64(po.MaxBytes) + delta := float64(size) / float64(po.MaxBytes) switch { case delta > 3: diff = 0.25 @@ -247,7 +256,7 @@ func ProcessImage(ctx context.Context, imgdata *imagedata.ImageData, po *options animationSupport := po.SecurityOptions.MaxAnimationFrames > 1 && - imgdata.Type.SupportsAnimationLoad() && + imgdata.Format().SupportsAnimationLoad() && (po.Format == imagetype.Unknown || po.Format.SupportsAnimationSave()) pages := 1 @@ -258,7 +267,7 @@ func ProcessImage(ctx context.Context, imgdata *imagedata.ImageData, po *options img := new(vips.Image) defer img.Clear() - if po.EnforceThumbnail && imgdata.Type.SupportsThumbnail() { + if po.EnforceThumbnail && imgdata.Format().SupportsThumbnail() { if err := img.LoadThumbnail(imgdata); err != nil { log.Debugf("Can't load thumbnail: %s", err) // Failed to load thumbnail, rollback to the full image @@ -286,10 +295,10 @@ func ProcessImage(ctx context.Context, imgdata *imagedata.ImageData, po *options po.Format = imagetype.AVIF case po.PreferWebP: po.Format = imagetype.WEBP - case isImageTypePreferred(imgdata.Type): - po.Format = imgdata.Type + case isImageTypePreferred(imgdata.Format()): + po.Format = imgdata.Format() default: - po.Format = findBestFormat(imgdata.Type, animated, expectAlpha) + po.Format = findBestFormat(imgdata.Format(), animated, expectAlpha) } case po.EnforceJxl && !animated: po.Format = imagetype.JXL diff --git a/processing/scale_on_load.go b/processing/scale_on_load.go index 309e840d..4d2c3f88 100644 --- a/processing/scale_on_load.go +++ 
b/processing/scale_on_load.go @@ -18,7 +18,7 @@ func canScaleOnLoad(pctx *pipelineContext, imgdata *imagedata.ImageData, scale f return false } - if imgdata.Type.IsVector() { + if imgdata.Format().IsVector() { return true } @@ -26,10 +26,10 @@ func canScaleOnLoad(pctx *pipelineContext, imgdata *imagedata.ImageData, scale f return false } - return imgdata.Type == imagetype.JPEG || - imgdata.Type == imagetype.WEBP || - imgdata.Type == imagetype.HEIC || - imgdata.Type == imagetype.AVIF + return imgdata.Format() == imagetype.JPEG || + imgdata.Format() == imagetype.WEBP || + imgdata.Format() == imagetype.HEIC || + imgdata.Format() == imagetype.AVIF } func calcJpegShink(shrink float64) int { @@ -57,7 +57,7 @@ func scaleOnLoad(pctx *pipelineContext, img *vips.Image, po *options.ProcessingO var newWidth, newHeight int - if imgdata.Type.SupportsThumbnail() { + if imgdata.Format().SupportsThumbnail() { thumbnail := new(vips.Image) defer thumbnail.Clear() diff --git a/processing/trim.go b/processing/trim.go index 77cd72a6..d19a02b9 100644 --- a/processing/trim.go +++ b/processing/trim.go @@ -15,7 +15,7 @@ func trim(pctx *pipelineContext, img *vips.Image, po *options.ProcessingOptions, // The size of a vector image is not checked during download, yet it can be very large. // So we should scale it down to the maximum allowed resolution - if imgdata != nil && imgdata.Type.IsVector() { + if imgdata != nil && imgdata.Format().IsVector() { if resolution := img.Width() * img.Height(); resolution > po.SecurityOptions.MaxSrcResolution { scale := math.Sqrt(float64(po.SecurityOptions.MaxSrcResolution) / float64(resolution)) if err := img.Load(imgdata, 1, scale, 1); err != nil { diff --git a/processing/watermark.go b/processing/watermark.go index 7ad37e77..bff5aabe 100644 --- a/processing/watermark.go +++ b/processing/watermark.go @@ -29,7 +29,7 @@ func prepareWatermark(wm *vips.Image, wmData *imagedata.ImageData, opts *options po.ResizingType = options.ResizeFit po.Dpr = 1 po.Enlarge = true - po.Format = wmData.Type + po.Format = wmData.Format() if opts.Scale > 0 { po.Width = imath.Max(imath.ScaleToEven(imgWidth, opts.Scale), 1) diff --git a/processing_handler.go b/processing_handler.go index 32ab014b..edf43a95 100644 --- a/processing_handler.go +++ b/processing_handler.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "io" "net/http" "net/url" "slices" @@ -121,12 +122,12 @@ func setCanonical(rw http.ResponseWriter, originURL string) { func respondWithImage(reqID string, r *http.Request, rw http.ResponseWriter, statusCode int, resultData *imagedata.ImageData, po *options.ProcessingOptions, originURL string, originData *imagedata.ImageData) { var contentDisposition string if len(po.Filename) > 0 { - contentDisposition = resultData.Type.ContentDisposition(po.Filename, po.ReturnAttachment) + contentDisposition = resultData.Format().ContentDisposition(po.Filename, po.ReturnAttachment) } else { - contentDisposition = resultData.Type.ContentDispositionFromURL(originURL, po.ReturnAttachment) + contentDisposition = resultData.Format().ContentDispositionFromURL(originURL, po.ReturnAttachment) } - rw.Header().Set("Content-Type", resultData.Type.Mime()) + rw.Header().Set("Content-Type", resultData.Format().Mime()) rw.Header().Set("Content-Disposition", contentDisposition) setCacheControl(rw, po.Expires, originData.Headers) @@ -135,7 +136,12 @@ func respondWithImage(reqID string, r *http.Request, rw http.ResponseWriter, sta setCanonical(rw, originURL) if config.EnableDebugHeaders { - 
rw.Header().Set("X-Origin-Content-Length", strconv.Itoa(len(originData.Data))) + originSize, err := originData.Size() + if err != nil { + checkErr(r.Context(), "image_data_size", err) + } + + rw.Header().Set("X-Origin-Content-Length", strconv.Itoa(originSize)) rw.Header().Set("X-Origin-Width", resultData.Headers["X-Origin-Width"]) rw.Header().Set("X-Origin-Height", resultData.Headers["X-Origin-Height"]) rw.Header().Set("X-Result-Width", resultData.Headers["X-Result-Width"]) @@ -144,9 +150,15 @@ func respondWithImage(reqID string, r *http.Request, rw http.ResponseWriter, sta rw.Header().Set("Content-Security-Policy", "script-src 'none'") - rw.Header().Set("Content-Length", strconv.Itoa(len(resultData.Data))) + resultSize, err := resultData.Size() + if err != nil { + checkErr(r.Context(), "image_data_size", err) + } + + rw.Header().Set("Content-Length", strconv.Itoa(resultSize)) rw.WriteHeader(statusCode) - _, err := rw.Write(resultData.Data) + + _, err = io.Copy(rw, resultData.Reader()) var ierr *ierrors.Error if err != nil { @@ -404,13 +416,14 @@ func handleProcessing(reqID string, rw http.ResponseWriter, r *http.Request) { checkErr(ctx, "timeout", router.CheckTimeout(ctx)) if config.ETagEnabled && statusCode == http.StatusOK { - imgDataMatch := etagHandler.SetActualImageData(originData) + imgDataMatch, terr := etagHandler.SetActualImageData(originData) + if terr == nil { + rw.Header().Set("ETag", etagHandler.GenerateActualETag()) - rw.Header().Set("ETag", etagHandler.GenerateActualETag()) - - if imgDataMatch && etagHandler.ProcessingOptionsMatch() { - respondWithNotModified(reqID, r, rw, po, imageURL, originData.Headers) - return + if imgDataMatch && etagHandler.ProcessingOptionsMatch() { + respondWithNotModified(reqID, r, rw, po, imageURL, originData.Headers) + return + } } } @@ -419,12 +432,12 @@ func handleProcessing(reqID string, rw http.ResponseWriter, r *http.Request) { // Skip processing svg with unknown or the same destination imageType // if it's not forced by AlwaysRasterizeSvg option // Also skip processing if the format is in SkipProcessingFormats - shouldSkipProcessing := (originData.Type == po.Format || po.Format == imagetype.Unknown) && - (slices.Contains(po.SkipProcessingFormats, originData.Type) || - originData.Type == imagetype.SVG && !config.AlwaysRasterizeSvg) + shouldSkipProcessing := (originData.Format() == po.Format || po.Format == imagetype.Unknown) && + (slices.Contains(po.SkipProcessingFormats, originData.Format()) || + originData.Format() == imagetype.SVG && !config.AlwaysRasterizeSvg) if shouldSkipProcessing { - if originData.Type == imagetype.SVG && config.SanitizeSvg { + if originData.Format() == imagetype.SVG && config.SanitizeSvg { sanitized, svgErr := svg.Sanitize(originData) checkErr(ctx, "svg_processing", svgErr) @@ -438,10 +451,10 @@ func handleProcessing(reqID string, rw http.ResponseWriter, r *http.Request) { return } - if !vips.SupportsLoad(originData.Type) { + if !vips.SupportsLoad(originData.Format()) { sendErrAndPanic(ctx, "processing", newInvalidURLErrorf( http.StatusUnprocessableEntity, - "Source image format is not supported: %s", originData.Type, + "Source image format is not supported: %s", originData.Format(), )) } diff --git a/processing_handler_test.go b/processing_handler_test.go index cfb00b6c..d1134c41 100644 --- a/processing_handler_test.go +++ b/processing_handler_test.go @@ -1,7 +1,6 @@ package main import ( - "bytes" "fmt" "io" "net/http" @@ -25,6 +24,7 @@ import ( "github.com/imgproxy/imgproxy/v3/options" 
"github.com/imgproxy/imgproxy/v3/router" "github.com/imgproxy/imgproxy/v3/svg" + "github.com/imgproxy/imgproxy/v3/testutil" "github.com/imgproxy/imgproxy/v3/vips" ) @@ -86,8 +86,21 @@ func (s *ProcessingHandlerTestSuite) readTestFile(name string) []byte { return data } -func (s *ProcessingHandlerTestSuite) readBody(res *http.Response) []byte { - data, err := io.ReadAll(res.Body) +func (s *ProcessingHandlerTestSuite) readTestImageData(name string) *imagedata.ImageData { + wd, err := os.Getwd() + s.Require().NoError(err) + + data, err := os.ReadFile(filepath.Join(wd, "testdata", name)) + s.Require().NoError(err) + + imgdata, err := imagedata.NewFromBytes(data, make(http.Header)) + s.Require().NoError(err) + + return imgdata +} + +func (s *ProcessingHandlerTestSuite) readImageData(imgdata *imagedata.ImageData) []byte { + data, err := io.ReadAll(imgdata.Reader()) s.Require().NoError(err) return data } @@ -100,10 +113,7 @@ func (s *ProcessingHandlerTestSuite) sampleETagData(imgETag string) (string, *im po.Width = 4 po.Height = 4 - imgdata := imagedata.ImageData{ - Type: imagetype.PNG, - Data: s.readTestFile("test1.png"), - } + imgdata := s.readTestImageData("test1.png") if len(imgETag) != 0 { imgdata.Headers = map[string]string{"ETag": imgETag} @@ -112,8 +122,8 @@ func (s *ProcessingHandlerTestSuite) sampleETagData(imgETag string) (string, *im var h etag.Handler h.SetActualProcessingOptions(po) - h.SetActualImageData(&imgdata) - return poStr, &imgdata, h.GenerateActualETag() + h.SetActualImageData(imgdata) + return poStr, imgdata, h.GenerateActualETag() } func (s *ProcessingHandlerTestSuite) TestRequest() { @@ -262,10 +272,9 @@ func (s *ProcessingHandlerTestSuite) TestSkipProcessingConfig() { s.Require().Equal(200, res.StatusCode) - actual := s.readBody(res) - expected := s.readTestFile("test1.png") + expected := s.readTestImageData("test1.png") - s.Require().True(bytes.Equal(expected, actual)) + s.Require().True(testutil.ReadersEqual(s.T(), expected.Reader(), res.Body)) } func (s *ProcessingHandlerTestSuite) TestSkipProcessingPO() { @@ -274,10 +283,9 @@ func (s *ProcessingHandlerTestSuite) TestSkipProcessingPO() { s.Require().Equal(200, res.StatusCode) - actual := s.readBody(res) - expected := s.readTestFile("test1.png") + expected := s.readTestImageData("test1.png") - s.Require().True(bytes.Equal(expected, actual)) + s.Require().True(testutil.ReadersEqual(s.T(), expected.Reader(), res.Body)) } func (s *ProcessingHandlerTestSuite) TestSkipProcessingSameFormat() { @@ -288,10 +296,9 @@ func (s *ProcessingHandlerTestSuite) TestSkipProcessingSameFormat() { s.Require().Equal(200, res.StatusCode) - actual := s.readBody(res) - expected := s.readTestFile("test1.png") + expected := s.readTestImageData("test1.png") - s.Require().True(bytes.Equal(expected, actual)) + s.Require().True(testutil.ReadersEqual(s.T(), expected.Reader(), res.Body)) } func (s *ProcessingHandlerTestSuite) TestSkipProcessingDifferentFormat() { @@ -302,10 +309,9 @@ func (s *ProcessingHandlerTestSuite) TestSkipProcessingDifferentFormat() { s.Require().Equal(200, res.StatusCode) - actual := s.readBody(res) - expected := s.readTestFile("test1.png") + expected := s.readTestImageData("test1.png") - s.Require().False(bytes.Equal(expected, actual)) + s.Require().False(testutil.ReadersEqual(s.T(), expected.Reader(), res.Body)) } func (s *ProcessingHandlerTestSuite) TestSkipProcessingSVG() { @@ -314,12 +320,10 @@ func (s *ProcessingHandlerTestSuite) TestSkipProcessingSVG() { s.Require().Equal(200, res.StatusCode) - actual := s.readBody(res) 
- expected, err := svg.Sanitize(&imagedata.ImageData{Data: s.readTestFile("test1.svg")}) - + expected, err := svg.Sanitize(s.readTestImageData("test1.svg")) s.Require().NoError(err) - s.Require().True(bytes.Equal(expected.Data, actual)) + s.Require().True(testutil.ReadersEqual(s.T(), expected.Reader(), res.Body)) } func (s *ProcessingHandlerTestSuite) TestNotSkipProcessingSVGToJPG() { @@ -328,10 +332,9 @@ func (s *ProcessingHandlerTestSuite) TestNotSkipProcessingSVGToJPG() { s.Require().Equal(200, res.StatusCode) - actual := s.readBody(res) - expected := s.readTestFile("test1.svg") + expected := s.readTestImageData("test1.svg") - s.Require().False(bytes.Equal(expected, actual)) + s.Require().False(testutil.ReadersEqual(s.T(), expected.Reader(), res.Body)) } func (s *ProcessingHandlerTestSuite) TestErrorSavingToSVG() { @@ -435,7 +438,7 @@ func (s *ProcessingHandlerTestSuite) TestETagDataNoIfNotModified() { s.Empty(r.Header.Get("If-None-Match")) rw.WriteHeader(200) - rw.Write(imgdata.Data) + rw.Write(s.readImageData(imgdata)) })) defer ts.Close() @@ -477,7 +480,7 @@ func (s *ProcessingHandlerTestSuite) TestETagDataMatch() { s.Empty(r.Header.Get("If-None-Match")) rw.WriteHeader(200) - rw.Write(imgdata.Data) + rw.Write(s.readImageData(imgdata)) })) defer ts.Close() @@ -502,7 +505,7 @@ func (s *ProcessingHandlerTestSuite) TestETagReqNotMatch() { rw.Header().Set("ETag", imgdata.Headers["ETag"]) rw.WriteHeader(200) - rw.Write(imgdata.Data) + rw.Write(s.readImageData(imgdata)) })) defer ts.Close() @@ -527,7 +530,7 @@ func (s *ProcessingHandlerTestSuite) TestETagDataNotMatch() { s.Empty(r.Header.Get("If-None-Match")) rw.WriteHeader(200) - rw.Write(imgdata.Data) + rw.Write(s.readImageData(imgdata)) })) defer ts.Close() @@ -553,7 +556,7 @@ func (s *ProcessingHandlerTestSuite) TestETagProcessingOptionsNotMatch() { rw.Header().Set("ETag", imgdata.Headers["ETag"]) rw.WriteHeader(200) - rw.Write(imgdata.Data) + rw.Write(s.readImageData(imgdata)) })) defer ts.Close() diff --git a/svg/svg.go b/svg/svg.go index 103c5089..82415306 100644 --- a/svg/svg.go +++ b/svg/svg.go @@ -1,8 +1,8 @@ package svg import ( - "bytes" "io" + "net/http" "strings" "github.com/tdewolff/parse/v2" @@ -11,21 +11,17 @@ import ( "github.com/imgproxy/imgproxy/v3/imagedata" ) -func cloneHeaders(src map[string]string) map[string]string { - if src == nil { - return nil - } - - dst := make(map[string]string, len(src)) +func cloneHeaders(src map[string]string) http.Header { + h := make(http.Header, len(src)) for k, v := range src { - dst[k] = v + h.Set(k, v) } - return dst + return h } func Sanitize(data *imagedata.ImageData) (*imagedata.ImageData, error) { - r := bytes.NewReader(data.Data) + r := data.Reader() l := xml.NewLexer(parse.NewInput(r)) buf, cancel := imagedata.BorrowBuffer() @@ -58,14 +54,13 @@ func Sanitize(data *imagedata.ImageData) (*imagedata.ImageData, error) { return nil, l.Err() } - newData := imagedata.ImageData{ - Data: buf.Bytes(), - Type: data.Type, - Headers: cloneHeaders(data.Headers), + newData, err := imagedata.NewFromBytes(buf.Bytes(), cloneHeaders(data.Headers)) + if err != nil { + return nil, err } newData.SetCancel(cancel) - return &newData, nil + return newData, nil case xml.StartTagToken: curTagName = strings.ToLower(string(l.Text())) diff --git a/svg/svg_test.go b/svg/svg_test.go index c141ef3f..e818e623 100644 --- a/svg/svg_test.go +++ b/svg/svg_test.go @@ -1,15 +1,17 @@ package svg import ( + "net/http" "os" "path/filepath" "testing" "github.com/stretchr/testify/suite" + "go.withmatt.com/httpheaders" 
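One behavioral detail of the cloneHeaders change above that is easy to miss: converting to http.Header routes keys through textproto canonicalization, so lookups become case-insensitive, whereas the old map[string]string required an exact key match. A standalone sketch (not patch code):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	h := make(http.Header)
	h.Set("cache-control", "public, max-age=12345")

	fmt.Println(h.Get("Cache-Control")) // public, max-age=12345
	fmt.Println(h["Cache-Control"])     // [public, max-age=12345], stored under the canonical key
}
```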
"github.com/imgproxy/imgproxy/v3/config" "github.com/imgproxy/imgproxy/v3/imagedata" - "github.com/imgproxy/imgproxy/v3/imagetype" + "github.com/imgproxy/imgproxy/v3/testutil" ) type SvgTestSuite struct { @@ -30,24 +32,23 @@ func (s *SvgTestSuite) readTestFile(name string) *imagedata.ImageData { data, err := os.ReadFile(filepath.Join(wd, "..", "testdata", name)) s.Require().NoError(err) - return &imagedata.ImageData{ - Type: imagetype.SVG, - Data: data, - Headers: map[string]string{ - "Content-Type": "image/svg+xml", - "Cache-Control": "public, max-age=12345", - }, - } + h := make(http.Header) + h.Set(httpheaders.ContentType, "image/svg+xml") + h.Set(httpheaders.CacheControl, "public, max-age=12345") + + d, err := imagedata.NewFromBytes(data, h) + s.Require().NoError(err) + + return d } func (s *SvgTestSuite) TestSanitize() { origin := s.readTestFile("test1.svg") expected := s.readTestFile("test1.sanitized.svg") - actual, err := Sanitize(origin) s.Require().NoError(err) - s.Require().Equal(string(expected.Data), string(actual.Data)) + s.Require().True(testutil.ReadersEqual(s.T(), expected.Reader(), actual.Reader())) s.Require().Equal(origin.Headers, actual.Headers) } diff --git a/testutil/testutil.go b/testutil/testutil.go new file mode 100644 index 00000000..aeca6701 --- /dev/null +++ b/testutil/testutil.go @@ -0,0 +1,38 @@ +package testutil + +import ( + "io" + + "github.com/stretchr/testify/require" +) + +const bufSize = 4096 + +// RequireReadersEqual compares two io.Reader contents in a streaming manner. +// It fails the test if contents differ or if reading fails. +func ReadersEqual(t require.TestingT, expected, actual io.Reader) bool { + if h, ok := t.(interface{ Helper() }); ok { + h.Helper() + } + + buf1 := make([]byte, bufSize) + buf2 := make([]byte, bufSize) + + for { + n1, err1 := expected.Read(buf1) + n2, err2 := actual.Read(buf2) + + if n1 != n2 { + return false + } + + require.Equal(t, buf1[:n1], buf2[:n1]) + + if err1 == io.EOF && err2 == io.EOF { + return true + } + + require.NoError(t, err1) + require.NoError(t, err2) + } +} diff --git a/vips/vips.go b/vips/vips.go index 58b7f581..e7b18968 100644 --- a/vips/vips.go +++ b/vips/vips.go @@ -9,7 +9,6 @@ package vips */ import "C" import ( - "bytes" "context" "fmt" "math" @@ -359,11 +358,11 @@ func (img *Image) Load(imgdata *imagedata.ImageData, shrink int, scale float64, err := C.int(0) - reader := bytes.NewReader(imgdata.Data) + reader := imgdata.Reader() source := newVipsImgproxySource(reader) defer C.unref_imgproxy_source(source) - switch imgdata.Type { + switch imgdata.Format() { case imagetype.JPEG: err = C.vips_jpegload_source_go(source, C.int(shrink), &tmp) case imagetype.JXL: @@ -393,7 +392,7 @@ func (img *Image) Load(imgdata *imagedata.ImageData, shrink int, scale float64, C.swap_and_clear(&img.VipsImage, tmp) - if imgdata.Type == imagetype.TIFF { + if imgdata.Format() == imagetype.TIFF { if C.vips_fix_float_tiff(img.VipsImage, &tmp) == 0 { C.swap_and_clear(&img.VipsImage, tmp) } else { @@ -405,13 +404,13 @@ func (img *Image) Load(imgdata *imagedata.ImageData, shrink int, scale float64, } func (img *Image) LoadThumbnail(imgdata *imagedata.ImageData) error { - if imgdata.Type != imagetype.HEIC && imgdata.Type != imagetype.AVIF { + if imgdata.Format() != imagetype.HEIC && imgdata.Format() != imagetype.AVIF { return newVipsError("Usupported image type to load thumbnail") } var tmp *C.VipsImage - reader := bytes.NewReader(imgdata.Data) + reader := imgdata.Reader() source := newVipsImgproxySource(reader) defer 
C.unref_imgproxy_source(source) @@ -469,14 +468,17 @@ func (img *Image) Save(imgtype imagetype.Type, quality int) (*imagedata.ImageDat var blob_ptr = C.vips_blob_get(target.blob, &imgsize) var ptr unsafe.Pointer = unsafe.Pointer(blob_ptr) - imgdata := imagedata.ImageData{ - Type: imgtype, - Data: ptrToBytes(ptr, int(imgsize)), + b := ptrToBytes(ptr, int(imgsize)) + + imgdata, ierr := imagedata.NewFromBytes(b, make(http.Header)) + if ierr != nil { + cancel() + return nil, ierr } imgdata.SetCancel(cancel) - return &imgdata, nil + return imgdata, nil } func (img *Image) Clear() {
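A note on the new testutil.ReadersEqual above: io.Reader permits short reads, so two readers with identical contents may return different counts from a single Read call (a bytes.Reader versus an HTTP response body, for example), and the n1 != n2 check can then report equal streams as different. A defensive variant is sketched below; it assumes it sits next to ReadersEqual in package testutil (so bufSize is in scope) with "bytes" added to the imports, and it is a suggestion, not part of the patch:

```go
// ReadersEqualFull is a sketch of a short-read-tolerant comparison: each buffer
// is filled with io.ReadFull before comparing, so differing chunk sizes from
// the underlying readers cannot cause a false mismatch.
func ReadersEqualFull(t require.TestingT, expected, actual io.Reader) bool {
	if h, ok := t.(interface{ Helper() }); ok {
		h.Helper()
	}

	buf1 := make([]byte, bufSize)
	buf2 := make([]byte, bufSize)

	for {
		n1, err1 := io.ReadFull(expected, buf1)
		n2, err2 := io.ReadFull(actual, buf2)

		if n1 != n2 || !bytes.Equal(buf1[:n1], buf2[:n2]) {
			return false
		}

		// io.ReadFull reports io.EOF when nothing was read and
		// io.ErrUnexpectedEOF when the stream ended mid-buffer; either way
		// both streams ended at the same offset with equal contents.
		ended1 := err1 == io.EOF || err1 == io.ErrUnexpectedEOF
		ended2 := err2 == io.EOF || err2 == io.ErrUnexpectedEOF
		if ended1 && ended2 {
			return true
		}

		require.NoError(t, err1)
		require.NoError(t, err2)
	}
}
```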