imgproxy (https://github.com/imgproxy/imgproxy.git)

Merge branch 'crohr/s3-protocol' into version/2.0
**.gitignore**

```diff
@@ -1,2 +1,3 @@
+*.swp
 imgproxy
 tmp/
```
**README.md**

```diff
@@ -62,6 +62,7 @@ Massive processing of remote images is a potentially dangerous thing, security-w
 * [WebP support detection](./docs/configuration.md#webp-support-detection)
 * [Presets](./docs/configuration.md#presets)
 * [Serving local files](./docs/configuration.md#serving-local-files)
+* [Serving files from Amazon S3](./docs/configuration.md#serving-files-from-amazon-s3)
 * [Miscellaneous](./docs/configuration.md#miscellaneous)
 4. [Generating the URL](./docs/generating_the_url_basic.md)
 * [Basic](./docs/generating_the_url_basic.md)
@@ -69,6 +70,7 @@ Massive processing of remote images is a potentially dangerous thing, security-w
 * [Signing the URL](./docs/signing_the_url.md)
 5. [Presets](./docs/presets.md)
 6. [Serving local files](./docs/serving_local_files.md)
+7. [Serving files from Amazon S3](./docs/serving_files_from_s3.md)
 7. [Source image formats support](./docs/source_image_formats_support.md)
 8. [About processing pipeline](./docs/about_processing_pipeline.md)
 9. [Health check](./docs/healthcheck.md)
```
**config.go**

```diff
@@ -137,6 +137,7 @@ type config struct {
     IgnoreSslVerification bool
 
     LocalFileSystemRoot string
+    S3Enabled bool
 
     ETagEnabled bool
 
@@ -159,6 +160,7 @@ var conf = config{
     Quality: 80,
     GZipCompression: 5,
     ETagEnabled: false,
+    S3Enabled: false,
 }
 
 func init() {
@@ -211,6 +213,8 @@ func init() {
 
     strEnvConfig(&conf.LocalFileSystemRoot, "IMGPROXY_LOCAL_FILESYSTEM_ROOT")
 
+    boolEnvConfig(&conf.S3Enabled, "IMGPROXY_USE_S3")
+
     boolEnvConfig(&conf.ETagEnabled, "IMGPROXY_USE_ETAG")
 
     strEnvConfig(&conf.BaseURL, "IMGPROXY_BASE_URL")
```
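The `strEnvConfig`/`boolEnvConfig` helpers are not part of this diff. As a rough sketch only (the real imgproxy helpers may differ), a `boolEnvConfig`-style function presumably reads the variable and parses it as a boolean, leaving the compiled-in default untouched when it is unset:

```go
// Hypothetical stand-in for imgproxy's boolEnvConfig helper, shown here only
// to illustrate how IMGPROXY_USE_S3 ends up in conf.S3Enabled.
package main

import (
    "fmt"
    "os"
    "strconv"
)

func boolEnvConfig(b *bool, name string) {
    // Keep the existing default when the variable is unset or unparsable.
    if env, ok := os.LookupEnv(name); ok {
        if v, err := strconv.ParseBool(env); err == nil {
            *b = v
        }
    }
}

func main() {
    s3Enabled := false // mirrors the S3Enabled: false default above
    boolEnvConfig(&s3Enabled, "IMGPROXY_USE_S3")
    fmt.Println("S3 enabled:", s3Enabled)
}
```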
**docs/configuration.md**

```diff
@@ -30,7 +30,7 @@ $ echo $(xxd -g 2 -l 64 -p /dev/random | tr -d '\n')
 * `IMGPROXY_CONCURRENCY` — the maximum number of image requests to be processed simultaneously. Default: double number of CPU cores;
 * `IMGPROXY_MAX_CLIENTS` — the maximum number of simultaneous active connections. Default: `IMGPROXY_CONCURRENCY * 10`;
 * `IMGPROXY_TTL` — duration in seconds sent in `Expires` and `Cache-Control: max-age` headers. Default: `3600` (1 hour);
-* `IMGPROXY_USE_ETAG` — when true, enables using [ETag](https://en.wikipedia.org/wiki/HTTP_ETag) header for the cache control. Default: false;
+* `IMGPROXY_USE_ETAG` — when `true`, enables using [ETag](https://en.wikipedia.org/wiki/HTTP_ETag) header for the cache control. Default: false;
 
 ### Security
 
@@ -104,6 +104,14 @@ imgproxy can serve your local images, but this feature is disabled by default. T
 
 Check out [Serving local files](../docs/serving_local_files.md) guide to get more info.
 
+### Serving files from Amazon S3
+
+imgproxy can process files from Amazon S3 buckets, but this feature is disabled by default. To enable it, set `IMGPROXY_USE_S3` as `true`:
+
+* `IMGPROXY_USE_S3` — when `true`, enables fetching the images from Amazon S3 buckets. Default: false.
+
+Check out [Serving files from S3](../docs/serving_files_from_s3.md) guide to get more info.
+
 ### Miscellaneous
 
 * `IMGPROXY_BASE_URL` - base URL part which will be added to every requested image URL. For example, if base URL is `http://example.com/images` and `/path/to/image.png` is requested, imgproxy will download the image from `http://example.com/images/path/to/image.png`. Default: blank.
```
**docs/serving_files_from_s3.md** (new file)

# Serving files from S3

imgproxy can process files from S3 buckets. To use this feature, do the following:

1. Set the `IMGPROXY_USE_S3` environment variable to `true`;
2. [Set up credentials](#setup-credentials) to grant access to your bucket;
3. Use `s3://%bucket_name/%file_key` as the source image URL.

### Setup credentials

There are three ways to specify your AWS credentials. The credentials need to grant read access to every bucket referenced in the source URLs.

#### Environment variables

You can specify the AWS Access Key ID and Secret Access Key by setting the `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables.

``` bash
$ AWS_ACCESS_KEY_ID=my_access_key AWS_SECRET_ACCESS_KEY=my_secret_key imgproxy

# same for Docker
docker run -e AWS_ACCESS_KEY_ID=my_access_key -e AWS_SECRET_ACCESS_KEY=my_secret_key -it darthsim/imgproxy
```

This is the recommended way when you run dockerized imgproxy.

#### Shared credentials file

Create a `.aws/credentials` file in your home directory with the following content:

```ini
[default]
aws_access_key_id = %access_key_id
aws_secret_access_key = %secret_access_key
```

#### IAM Roles for Amazon EC2 Instances

If you are running imgproxy on an Amazon EC2 instance, you can use the instance's [IAM role](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html) to get security credentials to make calls to AWS.

You can get more info about credentials in the [Configuring the AWS SDK for Go](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html) guide.
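The three options above correspond to the AWS SDK for Go's default credential chain. As an aside that is not part of this commit, a minimal program using aws-sdk-go v1 can show which provider the chain resolves on a given machine:

```go
// Standalone check of the default AWS credential chain: environment
// variables, then the shared credentials file, then the EC2 IAM role.
package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    sess, err := session.NewSession()
    if err != nil {
        log.Fatal(err)
    }

    creds, err := sess.Config.Credentials.Get()
    if err != nil {
        log.Fatal("no AWS credentials could be resolved: ", err)
    }

    fmt.Println("credentials resolved by:", creds.ProviderName)
}
```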
**download.go**

```diff
@@ -69,6 +69,10 @@ func initDownloading() {
         transport.RegisterProtocol("local", http.NewFileTransport(http.Dir(conf.LocalFileSystemRoot)))
     }
 
+    if conf.S3Enabled {
+        transport.RegisterProtocol("s3", newS3Transport())
+    }
+
     downloadClient = &http.Client{
         Timeout: time.Duration(conf.DownloadTimeout) * time.Second,
         Transport: transport,
```
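`RegisterProtocol` is the standard-library hook that makes this work: once a `RoundTripper` is registered for a scheme, a plain `http.Client` using that transport routes every URL with that scheme through it. A self-contained sketch with a stand-in transport (the names here are illustrative, not imgproxy code):

```go
// Demonstrates http.Transport.RegisterProtocol with a toy transport that
// stands in for newS3Transport(); it never talks to AWS.
package main

import (
    "fmt"
    "io"
    "net/http"
    "strings"
)

type fakeS3Transport struct{}

func (fakeS3Transport) RoundTrip(req *http.Request) (*http.Response, error) {
    // Echo back how an s3:// URL splits into bucket (host) and key (path).
    body := fmt.Sprintf("bucket=%s key=%s", req.URL.Host, req.URL.Path)
    return &http.Response{
        StatusCode: http.StatusOK,
        Proto:      "HTTP/1.1",
        ProtoMajor: 1,
        ProtoMinor: 1,
        Header:     make(http.Header),
        Body:       io.NopCloser(strings.NewReader(body)),
        Request:    req,
    }, nil
}

func main() {
    transport := &http.Transport{}
    transport.RegisterProtocol("s3", fakeS3Transport{})

    client := &http.Client{Transport: transport}
    resp, err := client.Get("s3://my-bucket/path/to/image.jpg")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    data, _ := io.ReadAll(resp.Body)
    fmt.Println(string(data)) // bucket=my-bucket key=/path/to/image.jpg
}
```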
**s3transport.go** (new file)

```go
package main

import (
    "fmt"
    "net/http"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

// s3Transport implements http.RoundTripper for the 's3' protocol.
type s3Transport struct {
    svc *s3.S3
}

// newS3Transport creates an S3 client from the default AWS session;
// credentials are resolved via env vars, the shared file, or an IAM role.
func newS3Transport() http.RoundTripper {
    return s3Transport{s3.New(session.New())}
}

// RoundTrip maps an s3:// URL onto a GetObject call: the URL host is the
// bucket and the URL path is the object key.
func (t s3Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
    input := &s3.GetObjectInput{
        Bucket: aws.String(req.URL.Host),
        Key:    aws.String(req.URL.Path),
    }
    s3req, _ := t.svc.GetObjectRequest(input)

    s3err := s3req.Send()
    if s3err == nil { // resp is now filled
        return s3req.HTTPResponse, nil
    }
    fmt.Println("s3 error", s3err)
    return nil, s3err
}
```
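A hypothetical usage sketch, assuming this file is compiled into the same package and that AWS credentials are resolvable; `fetchFromS3` is an illustrative helper, not part of imgproxy:

```go
package main

import (
    "io"
    "net/http"
    "os"
)

// fetchFromS3 wires the S3 transport into an http.Client, mirroring what
// initDownloading does above, and streams one object to stdout.
func fetchFromS3(rawURL string) error {
    transport := &http.Transport{}
    transport.RegisterProtocol("s3", newS3Transport())

    client := &http.Client{Transport: transport}

    // AWS credentials must be resolvable (env vars, shared file, or IAM role).
    resp, err := client.Get(rawURL) // e.g. "s3://my-bucket/photos/cat.jpg"
    if err != nil {
        return err
    }
    defer resp.Body.Close()

    // imgproxy itself would decode and process the body instead of printing it.
    _, err = io.Copy(os.Stdout, resp.Body)
    return err
}
```

Note that `req.URL.Path` keeps its leading slash, so the transport passes it through to `GetObjectInput.Key` verbatim.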