Disable HTTP2 for S3 backend with ?disable_http2=true option

binwiederhier 2026-03-27 13:59:07 -04:00
parent 92fa88cf12
commit 67fc7fe96a
6 changed files with 63 additions and 32 deletions

View file

@@ -45,7 +45,7 @@ func NewFileStore(dir string, totalSizeLimit int64, orphanGracePeriod time.Durat
// NewS3Store creates a new S3-backed attachment cache. The s3URL must be in the format:
//
// s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT]
// s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT][&disable_http2=true]
func NewS3Store(s3URL string, totalSizeLimit int64, orphanGracePeriod time.Duration, attachmentsWithSizes func() (map[string]int64, error)) (*Store, error) {
config, err := s3.ParseURL(s3URL)
if err != nil {
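For context, here is a minimal usage sketch (not part of this diff) of how `NewS3Store` might be wired up with the new `disable_http2` flag. The endpoint, size limit, grace period, and callback below are made-up placeholders, and the snippet assumes the usual `time` and `log` imports.

``` go
// Hypothetical call site; the URL, limits, and callback are placeholders.
store, err := NewS3Store(
	"s3://ACCESS_KEY:SECRET_KEY@my-bucket/attachments?region=nyc3&endpoint=https://nyc3.digitaloceanspaces.com&disable_http2=true",
	5*1024*1024*1024, // total attachment cache size limit (5 GB)
	24*time.Hour,     // orphan grace period
	func() (map[string]int64, error) { return map[string]int64{}, nil }, // attachmentsWithSizes stub
)
if err != nil {
	log.Fatal(err)
}
_ = store
```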

View file

@@ -538,7 +538,7 @@ As an alternative to the local filesystem, you can store attachments in an S3-co
To use an S3-compatible storage for attachments, set `attachment-cache-dir` to an S3 URL with the following format:
```
s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT]
s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT][&disable_http2=true]
```
Here are a few examples:
@@ -546,7 +546,7 @@ Here are a few examples:
=== "/etc/ntfy/server.yml (DigitalOcean Spaces)"
``` yaml
base-url: "https://ntfy.example.com"
attachment-cache-dir: "s3://ACCESS_KEY:SECRET_KEY@my-bucket/attachments?region=nyc3&endpoint=https://nyc3.digitaloceanspaces.com"
attachment-cache-dir: "s3://ACCESS_KEY:SECRET_KEY@my-bucket/attachments?region=nyc3&endpoint=https://nyc3.digitaloceanspaces.com&disable_http2=true"
```
=== "/etc/ntfy/server.yml (AWS S3)"
@@ -564,6 +564,9 @@ Here are a few examples:
Note that the access key and secret key may have to be URL encoded. For instance, a secret key `YmxhY+mxhYmxhC` (note the `+`) should
be encoded as `YmxhY%2BmxhYmxhC` (note the `%2B`), so the URL would be `s3://ACCESS_KEY:YmxhY%2BmxhYmxhC@my-bucket/attachments...`.
If you experience upload failures with HTTP/2 stream errors (common with DigitalOcean Spaces and some other S3-compatible providers),
add `&disable_http2=true` to force HTTP/1.1 connections.
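As a side note, here is a small Go sketch (not part of the documentation) showing how the key escaping and the new flag fit together when the URL is built programmatically; the key values are the made-up ones from the example above.

``` go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	secretKey := "YmxhY+mxhYmxhC" // contains a "+", so it must be escaped
	s3URL := fmt.Sprintf("s3://ACCESS_KEY:%s@my-bucket/attachments?region=nyc3&endpoint=https://nyc3.digitaloceanspaces.com&disable_http2=true",
		url.QueryEscape(secretKey))
	fmt.Println(s3URL) // ...ACCESS_KEY:YmxhY%2BmxhYmxhC@my-bucket/attachments?...&disable_http2=true
}
```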
!!! info
ntfy.sh is hosted and sponsored by DigitalOcean. I can highly recommend their public cloud offering. It's been rock solid
for 4 years. They offer S3-compatible storage for $5/month, which includes 250 GB of storage and 1 TiB of bandwidth.
@@ -2189,7 +2192,7 @@ variable before running the `ntfy` command (e.g. `export NTFY_LISTEN_HTTP=:80`).
| `behind-proxy` | `NTFY_BEHIND_PROXY` | *bool* | false | If set, use forwarded header (e.g. X-Forwarded-For, X-Client-IP) to determine visitor IP address (for rate limiting) |
| `proxy-forwarded-header` | `NTFY_PROXY_FORWARDED_HEADER` | *string* | `X-Forwarded-For` | Use specified header to determine visitor IP address (for rate limiting) |
| `proxy-trusted-hosts` | `NTFY_PROXY_TRUSTED_HOSTS` | *comma-separated host/IP/CIDR list* | - | Comma-separated list of trusted IP addresses, hosts, or CIDRs to remove from forwarded header |
| `attachment-cache-dir` | `NTFY_ATTACHMENT_CACHE_DIR` | *directory or S3 URL* | - | Cache directory for attached files, or S3 URL for object storage (format: `s3://KEY:SECRET@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT]`). |
| `attachment-cache-dir` | `NTFY_ATTACHMENT_CACHE_DIR` | *directory or S3 URL* | - | Cache directory for attached files, or S3 URL for object storage (format: `s3://KEY:SECRET@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT][&disable_http2=true]`). |
| `attachment-total-size-limit` | `NTFY_ATTACHMENT_TOTAL_SIZE_LIMIT` | *size* | 5G | Limit of the on-disk attachment cache directory. If the limit is exceeded, new attachments will be rejected. |
| `attachment-file-size-limit` | `NTFY_ATTACHMENT_FILE_SIZE_LIMIT` | *size* | 15M | Per-file attachment size limit (e.g. 300k, 2M, 100M). Larger attachments will be rejected. |
| `attachment-expiry-duration` | `NTFY_ATTACHMENT_EXPIRY_DURATION` | *duration* | 3h | Duration after which uploaded attachments will be deleted (e.g. 3h, 20h). Strongly affects `visitor-attachment-total-size-limit`. |
@@ -2291,7 +2294,7 @@ OPTIONS:
--auth-file value, --auth_file value, -H value auth database file used for access control [$NTFY_AUTH_FILE]
--auth-startup-queries value, --auth_startup_queries value queries run when the auth database is initialized [$NTFY_AUTH_STARTUP_QUERIES]
--auth-default-access value, --auth_default_access value, -p value default permissions if no matching entries in the auth database are found (default: "read-write") [$NTFY_AUTH_DEFAULT_ACCESS]
--attachment-cache-dir value, --attachment_cache_dir value cache directory for attached files, or S3 URL (s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT]) [$NTFY_ATTACHMENT_CACHE_DIR]
--attachment-cache-dir value, --attachment_cache_dir value cache directory for attached files, or S3 URL (s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT][&disable_http2=true]) [$NTFY_ATTACHMENT_CACHE_DIR]
--attachment-total-size-limit value, --attachment_total_size_limit value, -A value limit of the on-disk attachment cache (default: "5G") [$NTFY_ATTACHMENT_TOTAL_SIZE_LIMIT]
--attachment-file-size-limit value, --attachment_file_size_limit value, -Y value per-file attachment size limit (e.g. 300k, 2M, 100M) (default: "15M") [$NTFY_ATTACHMENT_FILE_SIZE_LIMIT]
--attachment-expiry-duration value, --attachment_expiry_duration value, -X value duration after which uploaded attachments will be deleted (e.g. 3h, 20h) (default: "3h") [$NTFY_ATTACHMENT_EXPIRY_DURATION]

View file

@@ -62,17 +62,10 @@ type Client struct {
func New(config *Config) *Client {
httpClient := config.HTTPClient
if httpClient == nil {
// Force HTTP/1.1 to avoid HTTP/2 stream errors with S3-compatible providers
// (e.g. DigitalOcean Spaces). HTTP/2 can cause non-retryable failures on
// streaming uploads when the server resets the stream mid-transfer.
httpClient = &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
MinVersion: tls.VersionTLS12,
},
ForceAttemptHTTP2: false,
TLSNextProto: make(map[string]func(string, *tls.Conn) http.RoundTripper),
},
}
if config.DisableHTTP2 {
httpClient = newHTTP1Client()
} else {
httpClient = http.DefaultClient
}
}
return &Client{
@@ -312,3 +305,20 @@ func (c *Client) do(ctx context.Context, op, method, reqURL string, body []byte,
}
return respBody, nil
}
// newHTTP1Client creates an HTTP client that forces HTTP/1.1 by disabling HTTP/2
// ALPN negotiation. This works around HTTP/2 stream errors with some S3-compatible
// providers (e.g. DigitalOcean Spaces) that can cause non-retryable failures on
// streaming uploads when the server resets the stream mid-transfer.
// See https://github.com/rclone/rclone/issues/4673, https://github.com/golang/go/issues/42777
func newHTTP1Client() *http.Client {
return &http.Client{
Transport: &http.Transport{
TLSClientConfig: &tls.Config{
MinVersion: tls.VersionTLS12,
},
ForceAttemptHTTP2: false,
TLSNextProto: make(map[string]func(string, *tls.Conn) http.RoundTripper),
},
}
}
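A rough test sketch (not part of this commit) of how the HTTP/1.1 fallback could be verified: a client built the same way as `newHTTP1Client` never negotiates h2, even against a server that advertises HTTP/2 via ALPN. The package name, handler, and assertions are assumptions for illustration.

``` go
package client_test

import (
	"crypto/tls"
	"crypto/x509"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestHTTP1OnlyClient(t *testing.T) {
	srv := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	srv.EnableHTTP2 = true // the test server offers h2 via ALPN
	srv.StartTLS()
	defer srv.Close()

	// Same construction as newHTTP1Client, plus trusting the test server's certificate
	pool := x509.NewCertPool()
	pool.AddCert(srv.Certificate())
	httpClient := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig:   &tls.Config{MinVersion: tls.VersionTLS12, RootCAs: pool},
			ForceAttemptHTTP2: false,
			TLSNextProto:      make(map[string]func(string, *tls.Conn) http.RoundTripper),
		},
	}

	resp, err := httpClient.Get(srv.URL)
	require.Nil(t, err)
	defer resp.Body.Close()
	require.Equal(t, "HTTP/1.1", resp.Proto) // h2 was never negotiated
}
```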

View file

@@ -92,6 +92,18 @@ func TestParseURL_EmptyBucket(t *testing.T) {
require.Contains(t, err.Error(), "bucket")
}
func TestParseURL_DisableHTTP2(t *testing.T) {
cfg, err := ParseURL("s3://AKID:SECRET@my-bucket?region=us-east-1&disable_http2=true")
require.Nil(t, err)
require.True(t, cfg.DisableHTTP2)
}
func TestParseURL_DisableHTTP2_NotSet(t *testing.T) {
cfg, err := ParseURL("s3://AKID:SECRET@my-bucket?region=us-east-1")
require.Nil(t, err)
require.False(t, cfg.DisableHTTP2)
}
// --- Unit tests: URL construction ---
func TestConfig_BucketURL_PathStyle(t *testing.T) {

View file

@@ -18,7 +18,8 @@ type Config struct {
Region string
AccessKey string
SecretKey string
HTTPClient *http.Client // if nil, http.DefaultClient is used
DisableHTTP2 bool // Force HTTP/1.1 to work around HTTP/2 issues with some S3-compatible providers
HTTPClient *http.Client // if nil, a default client is created (respecting DisableHTTP2)
}
// BucketURL returns the base URL for bucket-level operations.

View file

@@ -10,6 +10,7 @@ import (
"net/http"
"net/url"
"sort"
"strconv"
"strings"
)
@@ -41,9 +42,11 @@ const (
// ParseURL parses an S3 URL of the form:
//
// s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT]
// s3://ACCESS_KEY:SECRET_KEY@BUCKET[/PREFIX]?region=REGION[&endpoint=ENDPOINT][&disable_http2=true]
//
// When endpoint is specified, path-style addressing is enabled automatically.
// When disable_http2=true is set, the client forces HTTP/1.1 to work around
// HTTP/2 stream errors with some S3-compatible providers (e.g. DigitalOcean Spaces).
func ParseURL(s3URL string) (*Config, error) {
u, err := url.Parse(s3URL)
if err != nil {
@@ -80,6 +83,7 @@ func ParseURL(s3URL string) (*Config, error) {
endpoint = fmt.Sprintf("s3.%s.amazonaws.com", region)
pathStyle = false
}
disableHTTP2, _ := strconv.ParseBool(u.Query().Get("disable_http2"))
return &Config{
Endpoint: endpoint,
PathStyle: pathStyle,
@@ -88,6 +92,7 @@ func ParseURL(s3URL string) (*Config, error) {
Region: region,
AccessKey: accessKey,
SecretKey: secretKey,
DisableHTTP2: disableHTTP2,
}, nil
}
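Putting the pieces together, a rough end-to-end sketch (not part of this commit): `ParseURL` picks up `disable_http2` from the query string, and `New` then chooses the HTTP/1.1-only client instead of `http.DefaultClient`. The URL values are placeholders.

``` go
config, err := ParseURL("s3://AKID:SECRET@my-bucket/attachments?region=nyc3&endpoint=https://nyc3.digitaloceanspaces.com&disable_http2=true")
if err != nil {
	log.Fatal(err)
}
// config.DisableHTTP2 is true, so New builds the HTTP/1.1-only client via newHTTP1Client()
client := New(config)
_ = client
```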