Simplify a little, manual review

This commit is contained in:
binwiederhier 2026-03-19 22:42:38 -04:00
parent d86e20173c
commit 1f270b68e0
6 changed files with 141 additions and 154 deletions

View file

@ -131,7 +131,6 @@ func (c *Store) Remove(ids ...string) error {
// deletes orphans (not in the valid ID set and older than 1 hour), and recomputes
// the total size from the remaining objects.
func (c *Store) sync() error {
log.Tag(tagStore).Debug("Sync: starting sync loop")
localIDs, err := c.localIDs()
if err != nil {
return fmt.Errorf("attachment sync: failed to get valid IDs: %w", err)
@ -161,21 +160,21 @@ func (c *Store) sync() error {
sizes[obj.ID] = obj.Size
}
}
log.Tag(tagStore).Debug("Sync: cache size updated to %s", util.FormatSizeHuman(size))
log.Tag(tagStore).Debug("Attachment cache size updated to %s", util.FormatSizeHuman(size))
c.mu.Lock()
c.size = size
c.sizes = sizes
c.mu.Unlock()
// Delete orphaned attachments
if len(orphanIDs) > 0 {
log.Tag(tagStore).Debug("Sync: deleting %d orphaned attachment(s)", len(orphanIDs))
log.Tag(tagStore).Debug("Deleting %d orphaned attachment(s)", len(orphanIDs))
if err := c.backend.Delete(orphanIDs...); err != nil {
return fmt.Errorf("attachment sync: failed to delete orphaned objects: %w", err)
}
}
// Clean up incomplete uploads (S3 only)
if err := c.backend.DeleteIncomplete(cutoff); err != nil {
log.Tag(tagStore).Err(err).Warn("Sync: failed to abort incomplete uploads")
log.Tag(tagStore).Err(err).Warn("Failed to abort incomplete uploads from attachment cache")
}
return nil
}

View file

@ -1,6 +1,3 @@
// Package s3 provides a minimal S3-compatible client that works with AWS S3, DigitalOcean Spaces,
// GCP Cloud Storage, MinIO, Backblaze B2, and other S3-compatible providers. It uses raw HTTP
// requests with AWS Signature V4 signing, no AWS SDK dependency required.
package s3
import (
@ -57,32 +54,26 @@ func (c *Client) PutObject(ctx context.Context, key string, body io.Reader) erro
first := make([]byte, partSize)
n, err := io.ReadFull(body, first)
if errors.Is(err, io.ErrUnexpectedEOF) || err == io.EOF {
log.Tag(tagS3Client).Debug("PutObject key=%s size=%d (simple)", key, n)
return c.putObject(ctx, key, bytes.NewReader(first[:n]), int64(n))
return c.putObjectSimple(ctx, key, bytes.NewReader(first[:n]), int64(n))
} else if err != nil {
return fmt.Errorf("error reading object %s from client: %w", key, err)
}
if err != nil {
return fmt.Errorf("s3: PutObject read: %w", err)
}
log.Tag(tagS3Client).Debug("PutObject key=%s (multipart)", key)
combined := io.MultiReader(bytes.NewReader(first), body)
return c.putObjectMultipart(ctx, key, combined)
return c.putObjectMultipart(ctx, key, io.MultiReader(bytes.NewReader(first), body))
}
// GetObject downloads an object. The key is automatically prefixed with the client's configured
// prefix. The caller must close the returned ReadCloser.
func (c *Client) GetObject(ctx context.Context, key string) (io.ReadCloser, int64, error) {
log.Tag(tagS3Client).Debug("GetObject key=%s", key)
fullKey := c.objectKey(key)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.objectURL(fullKey), nil)
log.Tag(tagS3Client).Debug("Fetching object %s from backend", key)
req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.objectURL(key), nil)
if err != nil {
return nil, 0, fmt.Errorf("s3: GetObject request: %w", err)
return nil, 0, fmt.Errorf("error creating HTTP GET request for %s: %w", key, err)
}
c.signV4(req, emptyPayloadHash)
resp, err := c.http.Do(req)
if err != nil {
return nil, 0, fmt.Errorf("s3: GetObject: %w", err)
}
if !isHTTPSuccess(resp) {
return nil, 0, fmt.Errorf("error fetching object %s: %w", key, err)
} else if !isHTTPSuccess(resp) {
err := parseError(resp)
resp.Body.Close()
return nil, 0, err
@ -97,25 +88,27 @@ func (c *Client) GetObject(ctx context.Context, key string) (io.ReadCloser, int6
// Even when S3 returns HTTP 200, individual keys may fail. If any per-key errors are present
// in the response, they are returned as a combined error.
func (c *Client) DeleteObjects(ctx context.Context, keys []string) error {
log.Tag(tagS3Client).Debug("DeleteObjects keys=%d", len(keys))
var body bytes.Buffer
body.WriteString("<Delete><Quiet>true</Quiet>")
for _, key := range keys {
body.WriteString("<Object><Key>")
xml.EscapeText(&body, []byte(c.objectKey(key)))
body.WriteString("</Key></Object>")
log.Tag(tagS3Client).Debug("Deleting %d object(s)", len(keys))
req := &deleteRequest{
Quiet: true,
}
for _, key := range keys {
req.Objects = append(req.Objects, &deleteObject{Key: c.objectKey(key)})
}
body, err := xml.Marshal(req)
if err != nil {
return fmt.Errorf("error marshalling XML for deleting objects: %w", err)
}
body.WriteString("</Delete>")
bodyBytes := body.Bytes()
// Content-MD5 is required by the S3 protocol for DeleteObjects requests.
md5Sum := md5.Sum(bodyBytes) //nolint:gosec
contentMD5 := base64.StdEncoding.EncodeToString(md5Sum[:])
respBody, err := c.doWithBodyAndHeaders(ctx, http.MethodPost, c.config.BucketURL()+"?delete=", bodyBytes,
map[string]string{"Content-MD5": contentMD5}, "DeleteObjects")
md5Sum := md5.Sum(body) //nolint:gosec
headers := map[string]string{
"Content-MD5": base64.StdEncoding.EncodeToString(md5Sum[:]),
}
reqURL := c.config.BucketURL() + "?delete="
respBody, err := c.do(ctx, http.MethodPost, reqURL, body, headers, "DeleteObjects")
if err != nil {
return err
return fmt.Errorf("error deleting objects: %w", err)
}
// S3 may return HTTP 200 with per-key errors in the response body
@ -128,7 +121,7 @@ func (c *Client) DeleteObjects(ctx context.Context, keys []string) error {
for _, e := range result.Errors {
msgs = append(msgs, fmt.Sprintf("%s: %s", e.Key, e.Message))
}
return fmt.Errorf("s3: DeleteObjects partial failure: %s", strings.Join(msgs, "; "))
return fmt.Errorf("error deleting objects, partial failure: %s", strings.Join(msgs, "; "))
}
return nil
}
@ -147,7 +140,7 @@ func (c *Client) listObjects(ctx context.Context, continuationToken string, maxK
if maxKeys > 0 {
query.Set("max-keys", strconv.Itoa(maxKeys))
}
respBody, err := c.do(ctx, http.MethodGet, c.config.BucketURL()+"?"+query.Encode(), nil, "ListObjects")
respBody, err := c.do(ctx, http.MethodGet, c.config.BucketURL()+"?"+query.Encode(), nil, nil, "ListObjects")
if err != nil {
return nil, err
}
@ -198,12 +191,12 @@ func (c *Client) ListAllObjects(ctx context.Context) ([]Object, error) {
return nil, fmt.Errorf("s3: ListAllObjects exceeded %d pages", maxPages)
}
// putObject uploads a body with known size using a simple PUT with UNSIGNED-PAYLOAD.
func (c *Client) putObject(ctx context.Context, key string, body io.Reader, size int64) error {
fullKey := c.objectKey(key)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, c.objectURL(fullKey), body)
// putObjectSimple uploads a body with known size using a simple PUT with UNSIGNED-PAYLOAD.
func (c *Client) putObjectSimple(ctx context.Context, key string, body io.Reader, size int64) error {
log.Tag(tagS3Client).Debug("Uploading object %s (%d bytes)", key, size)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, c.objectURL(key), body)
if err != nil {
return fmt.Errorf("s3: PutObject request: %w", err)
return fmt.Errorf("uploading object %s failed: %w", key, err)
}
req.ContentLength = size
c.signV4(req, unsignedPayload)
@ -218,50 +211,32 @@ func (c *Client) putObject(ctx context.Context, key string, body io.Reader, size
return nil
}
// do creates a request, signs it with an empty payload, executes it, reads the response body,
// and checks for errors. It is used for bodiless GET/POST requests.
func (c *Client) do(ctx context.Context, method, reqURL string, body io.Reader, op string) ([]byte, error) {
req, err := http.NewRequestWithContext(ctx, method, reqURL, body)
// do creates a signed request, executes it, reads the response body, and checks for errors.
// If body is nil, the request is sent with an empty payload. If body is non-nil, it is sent
// with a computed SHA-256 payload hash and Content-Type: application/xml.
func (c *Client) do(ctx context.Context, method, reqURL string, body []byte, headers map[string]string, op string) ([]byte, error) {
var reader io.Reader
var hash string
if body != nil {
reader = bytes.NewReader(body)
hash = sha256Hex(body)
} else {
hash = emptyPayloadHash
}
req, err := http.NewRequestWithContext(ctx, method, reqURL, reader)
if err != nil {
return nil, fmt.Errorf("s3: %s request: %w", op, err)
}
if body == nil {
if body != nil {
req.ContentLength = int64(len(body))
req.Header.Set("Content-Type", "application/xml")
} else {
req.ContentLength = 0
}
c.signV4(req, emptyPayloadHash)
resp, err := c.http.Do(req)
if err != nil {
return nil, fmt.Errorf("s3: %s: %w", op, err)
}
respBody, err := io.ReadAll(io.LimitReader(resp.Body, maxResponseBytes))
resp.Body.Close()
if err != nil {
return nil, fmt.Errorf("s3: %s read: %w", op, err)
}
if !isHTTPSuccess(resp) {
return nil, parseErrorFromBytes(resp.StatusCode, respBody)
}
return respBody, nil
}
// doWithBody is like do, but sends a body with a computed SHA-256 payload hash and Content-Type: application/xml.
func (c *Client) doWithBody(ctx context.Context, method, reqURL string, bodyBytes []byte, op string) ([]byte, error) {
return c.doWithBodyAndHeaders(ctx, method, reqURL, bodyBytes, nil, op)
}
// doWithBodyAndHeaders is like doWithBody, but allows setting additional headers (e.g. Content-MD5).
func (c *Client) doWithBodyAndHeaders(ctx context.Context, method, reqURL string, bodyBytes []byte, headers map[string]string, op string) ([]byte, error) {
payloadHash := sha256Hex(bodyBytes)
req, err := http.NewRequestWithContext(ctx, method, reqURL, bytes.NewReader(bodyBytes))
if err != nil {
return nil, fmt.Errorf("s3: %s request: %w", op, err)
}
req.ContentLength = int64(len(bodyBytes))
req.Header.Set("Content-Type", "application/xml")
for k, v := range headers {
req.Header.Set(k, v)
}
c.signV4(req, payloadHash)
c.signV4(req, hash)
resp, err := c.http.Do(req)
if err != nil {
return nil, fmt.Errorf("s3: %s: %w", op, err)
@ -277,14 +252,6 @@ func (c *Client) doWithBodyAndHeaders(ctx context.Context, method, reqURL string
return respBody, nil
}
// objectKey prepends the configured prefix to the given key.
func (c *Client) objectKey(key string) string {
if c.config.Prefix != "" {
return c.config.Prefix + "/" + key
}
return key
}
// prefixForList returns the prefix to use in ListObjectsV2 requests,
// with a trailing slash so that only objects under the prefix directory are returned.
func (c *Client) prefixForList() string {
@ -303,12 +270,16 @@ func (c *Client) stripPrefix(key string) string {
return key
}
// objectURL returns the full URL for an object (key should already include the prefix).
// Each path segment is URI-encoded to handle special characters in keys.
func (c *Client) objectURL(key string) string {
segments := strings.Split(key, "/")
for i, seg := range segments {
segments[i] = uriEncode(seg)
// objectKey prepends the configured prefix to the given key.
func (c *Client) objectKey(key string) string {
if c.config.Prefix != "" {
return c.config.Prefix + "/" + key
}
return c.config.BucketURL() + "/" + strings.Join(segments, "/")
return key
}
// objectURL returns the full URL for an object, automatically prepending the configured prefix.
// NOTE(review): url.JoinPath joins and cleans the path, but confirm that keys containing
// characters such as spaces or '+' are percent-encoded consistently with what SigV4 signing
// expects -- the previous implementation encoded each path segment explicitly.
func (c *Client) objectURL(key string) string {
u, _ := url.JoinPath(c.config.BucketURL(), c.objectKey(key)) // error deliberately ignored: assumes BucketURL is a valid base URL -- TODO confirm
return u
}

View file

@ -11,7 +11,7 @@ import (
// signV4 signs req in place using AWS Signature V4. payloadHash is the hex-encoded SHA-256
// of the request body, or the literal string "UNSIGNED-PAYLOAD" for streaming uploads.
func (c *Client) signV4(req *http.Request, payloadHash string) {
func (c *Client) signV4(req *http.Request, hash string) {
now := time.Now().UTC()
datestamp := now.Format("20060102")
amzDate := now.Format("20060102T150405Z")
@ -19,7 +19,7 @@ func (c *Client) signV4(req *http.Request, payloadHash string) {
// Required headers
req.Header.Set("Host", c.config.HostHeader())
req.Header.Set("X-Amz-Date", amzDate)
req.Header.Set("X-Amz-Content-Sha256", payloadHash)
req.Header.Set("X-Amz-Content-Sha256", hash)
// Canonical headers (all headers we set, sorted by lowercase key)
signedKeys := make([]string, 0, len(req.Header))
@ -46,7 +46,7 @@ func (c *Client) signV4(req *http.Request, payloadHash string) {
canonicalQueryString(req.URL.Query()),
chBuf.String(),
signedHeadersStr,
payloadHash,
hash,
}, "\n")
// String to sign
@ -61,8 +61,9 @@ func (c *Client) signV4(req *http.Request, payloadHash string) {
[]byte("aws4_request"))
signature := hex.EncodeToString(hmacSHA256(signingKey, []byte(stringToSign)))
req.Header.Set("Authorization", fmt.Sprintf(
header := fmt.Sprintf(
"AWS4-HMAC-SHA256 Credential=%s/%s, SignedHeaders=%s, Signature=%s",
c.config.AccessKey, credentialScope, signedHeadersStr, signature,
))
)
req.Header.Set("Authorization", header)
}

View file

@ -14,9 +14,25 @@ import (
"heckel.io/ntfy/v2/log"
)
// ListMultipartUploads returns in-progress multipart uploads for the client's prefix.
// AbortIncompleteUploads scans all in-progress multipart uploads and aborts every upload
// that was initiated before the given cutoff time. This reclaims storage held by orphaned
// upload parts left behind when an upload was interrupted.
func (c *Client) AbortIncompleteUploads(ctx context.Context, cutoff time.Time) error {
	uploads, err := c.listMultipartUploads(ctx)
	if err != nil {
		return err
	}
	for _, upload := range uploads {
		// Skip uploads whose start time is unknown (zero), and those newer than the cutoff.
		if upload.Initiated.IsZero() || !upload.Initiated.Before(cutoff) {
			continue
		}
		log.Tag(tagS3Client).Debug("DeleteIncomplete key=%s uploadId=%s initiated=%s", upload.Key, upload.UploadID, upload.Initiated)
		// Best effort: abortMultipartUpload has no error return, so one failed abort
		// does not stop the sweep over the remaining uploads.
		c.abortMultipartUpload(ctx, upload.Key, upload.UploadID)
	}
	return nil
}
// listMultipartUploads returns in-progress multipart uploads for the client's prefix.
// It paginates automatically, stopping after 10,000 pages as a safety valve.
func (c *Client) ListMultipartUploads(ctx context.Context) ([]MultipartUpload, error) {
func (c *Client) listMultipartUploads(ctx context.Context) ([]MultipartUpload, error) {
var all []MultipartUpload
var keyMarker, uploadIDMarker string
for page := 0; page < maxPages; page++ {
@ -28,13 +44,13 @@ func (c *Client) ListMultipartUploads(ctx context.Context) ([]MultipartUpload, e
query.Set("key-marker", keyMarker)
query.Set("upload-id-marker", uploadIDMarker)
}
respBody, err := c.do(ctx, http.MethodGet, c.config.BucketURL()+"?"+query.Encode(), nil, "ListMultipartUploads")
respBody, err := c.do(ctx, http.MethodGet, c.config.BucketURL()+"?"+query.Encode(), nil, nil, "listMultipartUploads")
if err != nil {
return nil, err
}
var result listMultipartUploadsResult
if err := xml.Unmarshal(respBody, &result); err != nil {
return nil, fmt.Errorf("s3: ListMultipartUploads XML: %w", err)
return nil, fmt.Errorf("s3: listMultipartUploads XML: %w", err)
}
for _, u := range result.Uploads {
var initiated time.Time
@ -53,33 +69,17 @@ func (c *Client) ListMultipartUploads(ctx context.Context) ([]MultipartUpload, e
keyMarker = result.NextKeyMarker
uploadIDMarker = result.NextUploadIDMarker
}
return nil, fmt.Errorf("s3: ListMultipartUploads exceeded %d pages", maxPages)
}
// AbortIncompleteUploads lists all in-progress multipart uploads and aborts those initiated
// before the given cutoff time. This cleans up orphaned upload parts from interrupted uploads.
func (c *Client) AbortIncompleteUploads(ctx context.Context, cutoff time.Time) error {
uploads, err := c.ListMultipartUploads(ctx)
if err != nil {
return err
}
for _, u := range uploads {
if !u.Initiated.IsZero() && u.Initiated.Before(cutoff) {
log.Tag(tagS3Client).Debug("DeleteIncomplete key=%s uploadId=%s initiated=%s", u.Key, u.UploadID, u.Initiated)
c.abortMultipartUpload(ctx, u.Key, u.UploadID)
}
}
return nil
return nil, fmt.Errorf("s3: listMultipartUploads exceeded %d pages", maxPages)
}
// putObjectMultipart uploads body using S3 multipart upload. It reads the body in partSize
// chunks, uploading each as a separate part. This allows uploading without knowing the total
// body size in advance.
func (c *Client) putObjectMultipart(ctx context.Context, key string, body io.Reader) error {
fullKey := c.objectKey(key)
log.Tag(tagS3Client).Debug("Uploading multipart object %s", key)
// Step 1: Initiate multipart upload
uploadID, err := c.initiateMultipartUpload(ctx, fullKey)
uploadID, err := c.initiateMultipartUpload(ctx, key)
if err != nil {
return err
}
@ -91,9 +91,9 @@ func (c *Client) putObjectMultipart(ctx context.Context, key string, body io.Rea
for {
n, err := io.ReadFull(body, buf)
if n > 0 {
etag, uploadErr := c.uploadPart(ctx, fullKey, uploadID, partNumber, buf[:n])
etag, uploadErr := c.uploadPart(ctx, key, uploadID, partNumber, buf[:n])
if uploadErr != nil {
c.abortMultipartUpload(ctx, fullKey, uploadID)
c.abortMultipartUpload(ctx, key, uploadID)
return uploadErr
}
parts = append(parts, completedPart{PartNumber: partNumber, ETag: etag})
@ -103,18 +103,18 @@ func (c *Client) putObjectMultipart(ctx context.Context, key string, body io.Rea
break
}
if err != nil {
c.abortMultipartUpload(ctx, fullKey, uploadID)
c.abortMultipartUpload(ctx, key, uploadID)
return fmt.Errorf("s3: PutObject read: %w", err)
}
}
// Step 3: Complete multipart upload
return c.completeMultipartUpload(ctx, fullKey, uploadID, parts)
return c.completeMultipartUpload(ctx, key, uploadID, parts)
}
// initiateMultipartUpload starts a new multipart upload and returns the upload ID.
func (c *Client) initiateMultipartUpload(ctx context.Context, fullKey string) (string, error) {
respBody, err := c.do(ctx, http.MethodPost, c.objectURL(fullKey)+"?uploads", nil, "InitiateMultipartUpload")
func (c *Client) initiateMultipartUpload(ctx context.Context, key string) (string, error) {
respBody, err := c.do(ctx, http.MethodPost, c.objectURL(key)+"?uploads", nil, nil, "InitiateMultipartUpload")
if err != nil {
return "", err
}
@ -122,14 +122,14 @@ func (c *Client) initiateMultipartUpload(ctx context.Context, fullKey string) (s
if err := xml.Unmarshal(respBody, &result); err != nil {
return "", fmt.Errorf("s3: InitiateMultipartUpload XML: %w", err)
}
log.Tag(tagS3Client).Debug("InitiateMultipartUpload key=%s uploadId=%s", fullKey, result.UploadID)
log.Tag(tagS3Client).Debug("InitiateMultipartUpload key=%s uploadId=%s", key, result.UploadID)
return result.UploadID, nil
}
// uploadPart uploads a single part of a multipart upload and returns the ETag.
func (c *Client) uploadPart(ctx context.Context, fullKey, uploadID string, partNumber int, data []byte) (string, error) {
log.Tag(tagS3Client).Debug("UploadPart key=%s part=%d size=%d", fullKey, partNumber, len(data))
reqURL := fmt.Sprintf("%s?partNumber=%d&uploadId=%s", c.objectURL(fullKey), partNumber, url.QueryEscape(uploadID))
func (c *Client) uploadPart(ctx context.Context, key, uploadID string, partNumber int, data []byte) (string, error) {
log.Tag(tagS3Client).Debug("UploadPart key=%s part=%d size=%d", key, partNumber, len(data))
reqURL := fmt.Sprintf("%s?partNumber=%d&uploadId=%s", c.objectURL(key), partNumber, url.QueryEscape(uploadID))
req, err := http.NewRequestWithContext(ctx, http.MethodPut, reqURL, bytes.NewReader(data))
if err != nil {
return "", fmt.Errorf("s3: UploadPart request: %w", err)
@ -149,17 +149,15 @@ func (c *Client) uploadPart(ctx context.Context, fullKey, uploadID string, partN
}
// completeMultipartUpload finalizes a multipart upload with the given parts.
func (c *Client) completeMultipartUpload(ctx context.Context, fullKey, uploadID string, parts []completedPart) error {
log.Tag(tagS3Client).Debug("CompleteMultipartUpload key=%s uploadId=%s parts=%d", fullKey, uploadID, len(parts))
var body bytes.Buffer
body.WriteString("<CompleteMultipartUpload>")
for _, p := range parts {
fmt.Fprintf(&body, "<Part><PartNumber>%d</PartNumber><ETag>%s</ETag></Part>", p.PartNumber, p.ETag)
func (c *Client) completeMultipartUpload(ctx context.Context, key, uploadID string, parts []completedPart) error {
log.Tag(tagS3Client).Debug("CompleteMultipartUpload key=%s uploadId=%s parts=%d", key, uploadID, len(parts))
bodyBytes, err := xml.Marshal(completeMultipartUploadRequest{Parts: parts})
if err != nil {
return fmt.Errorf("s3: CompleteMultipartUpload marshal: %w", err)
}
body.WriteString("</CompleteMultipartUpload>")
respBody, err := c.doWithBody(ctx, http.MethodPost,
fmt.Sprintf("%s?uploadId=%s", c.objectURL(fullKey), url.QueryEscape(uploadID)),
body.Bytes(), "CompleteMultipartUpload")
respBody, err := c.do(ctx, http.MethodPost,
fmt.Sprintf("%s?uploadId=%s", c.objectURL(key), url.QueryEscape(uploadID)),
bodyBytes, nil, "CompleteMultipartUpload")
if err != nil {
return err
}
@ -172,9 +170,9 @@ func (c *Client) completeMultipartUpload(ctx context.Context, fullKey, uploadID
}
// abortMultipartUpload cancels an in-progress multipart upload. Called on error to clean up.
func (c *Client) abortMultipartUpload(ctx context.Context, fullKey, uploadID string) {
log.Tag(tagS3Client).Debug("AbortMultipartUpload key=%s uploadId=%s", fullKey, uploadID)
reqURL := fmt.Sprintf("%s?uploadId=%s", c.objectURL(fullKey), url.QueryEscape(uploadID))
func (c *Client) abortMultipartUpload(ctx context.Context, key, uploadID string) {
log.Tag(tagS3Client).Debug("AbortMultipartUpload key=%s uploadId=%s", key, uploadID)
reqURL := fmt.Sprintf("%s?uploadId=%s", c.objectURL(key), url.QueryEscape(uploadID))
req, err := http.NewRequestWithContext(ctx, http.MethodDelete, reqURL, nil)
if err != nil {
return

View file

@ -374,13 +374,13 @@ func TestConfig_BucketURL_VirtualHosted(t *testing.T) {
}
func TestClient_ObjectURL_PathStyle(t *testing.T) {
c := &Client{config: &Config{Endpoint: "s3.example.com", Bucket: "my-bucket", PathStyle: true}}
require.Equal(t, "https://s3.example.com/my-bucket/prefix/obj", c.objectURL("prefix/obj"))
c := &Client{config: &Config{Endpoint: "s3.example.com", Bucket: "my-bucket", Prefix: "prefix", PathStyle: true}}
require.Equal(t, "https://s3.example.com/my-bucket/prefix/obj", c.objectURL("obj"))
}
func TestClient_ObjectURL_VirtualHosted(t *testing.T) {
c := &Client{config: &Config{Endpoint: "s3.us-east-1.amazonaws.com", Bucket: "my-bucket", PathStyle: false}}
require.Equal(t, "https://my-bucket.s3.us-east-1.amazonaws.com/prefix/obj", c.objectURL("prefix/obj"))
c := &Client{config: &Config{Endpoint: "s3.us-east-1.amazonaws.com", Bucket: "my-bucket", Prefix: "prefix", PathStyle: false}}
require.Equal(t, "https://my-bucket.s3.us-east-1.amazonaws.com/prefix/obj", c.objectURL("obj"))
}
func TestConfig_HostHeader_PathStyle(t *testing.T) {

View file

@ -1,6 +1,7 @@
package s3
import (
"encoding/xml"
"fmt"
"net/http"
"time"
@ -76,6 +77,17 @@ type listObject struct {
LastModified string `xml:"LastModified"`
}
// deleteRequest is the XML request body for S3 DeleteObjects
type deleteRequest struct {
XMLName xml.Name `xml:"Delete"`
Quiet bool `xml:"Quiet"` // when true, S3 omits successfully deleted keys from the response, returning only errors
Objects []*deleteObject `xml:"Object"` // keys to delete, one <Object> element per key
}
// deleteObject is a single key entry inside a deleteRequest.
type deleteObject struct {
Key string `xml:"Key"`
}
// deleteResult is the XML response from S3 DeleteObjects
type deleteResult struct {
Errors []deleteError `xml:"Error"`
@ -87,14 +99,14 @@ type deleteError struct {
Message string `xml:"Message"`
}
// MultipartUpload represents an in-progress multipart upload returned by ListMultipartUploads.
// MultipartUpload represents an in-progress multipart upload returned by listMultipartUploads.
type MultipartUpload struct {
Key string // object key as reported by S3 -- NOTE(review): confirm whether the client's prefix is stripped here
UploadID string // S3-assigned upload ID, used to address parts or abort the upload
Initiated time.Time // when the upload was started; zero when the initiation time could not be determined
}
// listMultipartUploadsResult is the XML response from S3 ListMultipartUploads
// listMultipartUploadsResult is the XML response from S3 listMultipartUploads
type listMultipartUploadsResult struct {
Uploads []listUpload `xml:"Upload"`
IsTruncated bool `xml:"IsTruncated"`
@ -113,8 +125,14 @@ type initiateMultipartUploadResult struct {
UploadID string `xml:"UploadId"`
}
// completeMultipartUploadRequest is the XML request body for S3 CompleteMultipartUpload
type completeMultipartUploadRequest struct {
XMLName xml.Name `xml:"CompleteMultipartUpload"`
Parts []completedPart `xml:"Part"` // uploaded parts; callers append them in ascending part-number order
}
// completedPart represents a successfully uploaded part for CompleteMultipartUpload
type completedPart struct {
PartNumber int
ETag string
PartNumber int `xml:"PartNumber"`
ETag string `xml:"ETag"`
}