diff --git a/.fend.yaml b/.fend.yaml
index 1526c31d..53318ff1 100644
--- a/.fend.yaml
+++ b/.fend.yaml
@@ -4,3 +4,6 @@ skip:
   - .vscode
 file:
   - jzfs
+extension:
+  - .input
+  - .output
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 5b1e3133..77abde4e 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -32,6 +32,11 @@ jobs:
         run: |
           make build
 
+      - uses: docker-practice/actions-setup-docker@master
+        timeout-minutes: 12
+      - run: |
+          docker version
+
       - name: Test
         run: |
           go test -coverpkg=./... -coverprofile=coverage.out -covermode=atomic -timeout=30m -parallel=4 -v ./...
diff --git a/api/api_impl/server.go b/api/api_impl/server.go
index 480e42d8..2868d01f 100644
--- a/api/api_impl/server.go
+++ b/api/api_impl/server.go
@@ -3,7 +3,6 @@ package apiimpl
 import (
 	"context"
 	"errors"
-	"net"
 
 	"net/http"
 
@@ -29,9 +28,6 @@ func SetupAPI(lc fx.Lifecycle, apiConfig *config.APIConfig, controller APIContro
 		return err
 	}
 
-	// Clear out the servers array in the swagger spec, that skips validating
-	// that server names match. We don't know how this thing will be run.
-	swagger.Servers = nil
 
 	// This is how you set up a basic chi router
 	r := chi.NewRouter()
@@ -58,11 +54,11 @@ func SetupAPI(lc fx.Lifecycle, apiConfig *config.APIConfig, controller APIContro
 				return nil
 			},
 		},
+		SilenceServersWarning: true,
 	}),
 	)
 
 	api.HandlerFromMuxWithBaseURL(controller, r, APIV1Prefix)
-
 	url, err := url.Parse(apiConfig.Listen)
 	if err != nil {
 		return err
diff --git a/block/adapter.go b/block/adapter.go
new file mode 100644
index 00000000..249c9dee
--- /dev/null
+++ b/block/adapter.go
@@ -0,0 +1,154 @@
+package block
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/url"
+	"time"
+)
+
+// MultipartPart holds the information of a single multipart part.
+type MultipartPart struct {
+	ETag       string
+	PartNumber int
+}
+
+// MultipartUploadCompletion describes the parts used to complete a multipart upload. Each part holds the part
+// number and the ETag received while calling part upload.
+// NOTE that the S3 implementation and our S3 gateway accept and return ETag values surrounded with double quotes ("), while
+// the adapter implementations supply the raw ETag value (without double quotes) and let the gateway manage the S3
+// protocol specifics.
+type MultipartUploadCompletion struct {
+	Part []MultipartPart
+}
+
+// IdentifierType is the type of the ObjectPointer Identifier
+type IdentifierType int32
+
+// PreSignMode is the mode to use when generating a pre-signed URL (read/write)
+type PreSignMode int32
+
+const (
+	BlockstoreTypeS3        = "s3"
+	BlockstoreTypeGS        = "gs"
+	BlockstoreTypeAzure     = "azure"
+	BlockstoreTypeLocal     = "local"
+	BlockstoreTypeMem       = "mem"
+	BlockstoreTypeTransient = "transient"
+)
+
+const (
+	// IdentifierTypeRelative indicates that the address is relative to the storage namespace.
+	// For example: "/foo/bar"
+	IdentifierTypeRelative IdentifierType = 1
+
+	// IdentifierTypeFull indicates that the address is the full address of the object in the object store.
+	// For example: "s3://bucket/foo/bar"
+	IdentifierTypeFull IdentifierType = 2
+)
+
+const (
+	PreSignModeRead PreSignMode = iota
+	PreSignModeWrite
+)
+
+// DefaultPreSignExpiryDuration is the amount of time pre-signed requests are valid for.
+const DefaultPreSignExpiryDuration = 15 * time.Minute
+
+// ObjectPointer is a unique identifier of an object in the object
+// store: the store is a 1:1 mapping between pointers and objects.
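+//
+// An illustrative sketch of the two addressing modes (the bucket and key
+// names here are hypothetical, not part of the API):
+//
+//	relative := ObjectPointer{
+//		StorageNamespace: "s3://some-bucket/repo",
+//		Identifier:       "foo/bar",
+//		IdentifierType:   IdentifierTypeRelative,
+//	}
+//	full := ObjectPointer{
+//		StorageNamespace: "s3://some-bucket/repo",
+//		Identifier:       "s3://some-bucket/repo/foo/bar",
+//		IdentifierType:   IdentifierTypeFull,
+//	}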
+type ObjectPointer struct {
+	StorageNamespace string
+	Identifier       string
+
+	// Indicates whether the Identifier is relative to the StorageNamespace,
+	// full address to an object, or unknown.
+	IdentifierType IdentifierType
+}
+
+// PutOpts contains optional arguments for Put. These should be
+// analogous to options on some underlying storage layer. Missing
+// arguments are mapped to the default if a storage layer implements
+// the option.
+//
+// If the same Put command is implemented multiple times with the same
+// contents but different option values, the first supplied option
+// value is retained.
+type PutOpts struct {
+	StorageClass *string // S3 storage class
+}
+
+// WalkOpts is a unique identifier of a prefix in the object store.
+type WalkOpts struct {
+	StorageNamespace string
+	Prefix           string
+}
+
+// CreateMultiPartUploadResponse holds the multipart upload ID and additional headers (implementation specific);
+// currently it targets S3 capabilities to enable encryption properties.
+type CreateMultiPartUploadResponse struct {
+	UploadID         string
+	ServerSideHeader http.Header
+}
+
+// CompleteMultiPartUploadResponse holds the complete multipart ETag, content length and additional headers
+// (implementation specific); currently it targets S3.
+// The ETag is a hex string value of the content checksum.
+type CompleteMultiPartUploadResponse struct {
+	ETag             string
+	ContentLength    int64
+	ServerSideHeader http.Header
+}
+
+// UploadPartResponse holds the upload part ETag and additional headers (implementation specific);
+// currently it targets S3 capabilities to enable encryption properties.
+// The ETag is a hex string value of the content checksum.
+type UploadPartResponse struct {
+	ETag             string
+	ServerSideHeader http.Header
+}
+
+// CreateMultiPartUploadOpts contains optional arguments for
+// CreateMultiPartUpload. These should be analogous to options on
+// some underlying storage layer. Missing arguments are mapped to the
+// default if a storage layer implements the option.
+//
+// If the same CreateMultiPartUpload command is implemented multiple times with the same
+// contents but different option values, the first supplied option
+// value is retained.
+type CreateMultiPartUploadOpts struct {
+	StorageClass *string // S3 storage class
+}
+
+// Properties of an object stored on the underlying block store.
+// Refer to the actual underlying Adapter for which properties are
+// actually reported.
+type Properties struct {
+	StorageClass *string
+}
+
+type Adapter interface {
+	Put(ctx context.Context, obj ObjectPointer, sizeBytes int64, reader io.Reader, opts PutOpts) error
+	Get(ctx context.Context, obj ObjectPointer, expectedSize int64) (io.ReadCloser, error)
+	GetWalker(uri *url.URL) (Walker, error)
+
+	// GetPreSignedURL returns a pre-signed URL for accessing obj with mode, and the
+	// expiry time for this URL. The expiry time satisfies IsZero() if reporting
+	// expiry is not supported. The expiry time will be sooner than
+	// Config.*.PreSignedExpiry if an auth token is about to expire.
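+	//
+	// A hedged usage sketch (the adapter and ptr values are illustrative,
+	// not defined by this interface):
+	//
+	//	urlStr, exp, err := adapter.GetPreSignedURL(ctx, ptr, PreSignModeRead)
+	//	if err == nil && !exp.IsZero() {
+	//		// urlStr can be handed to clients until exp
+	//	}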
+	GetPreSignedURL(ctx context.Context, obj ObjectPointer, mode PreSignMode) (string, time.Time, error)
+	Exists(ctx context.Context, obj ObjectPointer) (bool, error)
+	GetRange(ctx context.Context, obj ObjectPointer, startPosition int64, endPosition int64) (io.ReadCloser, error)
+	GetProperties(ctx context.Context, obj ObjectPointer) (Properties, error)
+	Remove(ctx context.Context, obj ObjectPointer) error
+	Copy(ctx context.Context, sourceObj, destinationObj ObjectPointer) error
+	CreateMultiPartUpload(ctx context.Context, obj ObjectPointer, r *http.Request, opts CreateMultiPartUploadOpts) (*CreateMultiPartUploadResponse, error)
+	UploadPart(ctx context.Context, obj ObjectPointer, sizeBytes int64, reader io.Reader, uploadID string, partNumber int) (*UploadPartResponse, error)
+	UploadCopyPart(ctx context.Context, sourceObj, destinationObj ObjectPointer, uploadID string, partNumber int) (*UploadPartResponse, error)
+	UploadCopyPartRange(ctx context.Context, sourceObj, destinationObj ObjectPointer, uploadID string, partNumber int, startPosition, endPosition int64) (*UploadPartResponse, error)
+	AbortMultiPartUpload(ctx context.Context, obj ObjectPointer, uploadID string) error
+	CompleteMultiPartUpload(ctx context.Context, obj ObjectPointer, uploadID string, multipartList *MultipartUploadCompletion) (*CompleteMultiPartUploadResponse, error)
+	BlockstoreType() string
+	GetStorageNamespaceInfo() StorageNamespaceInfo
+	ResolveNamespace(storageNamespace, key string, identifierType IdentifierType) (QualifiedKey, error)
+	RuntimeStats() map[string]string
+}
diff --git a/block/azure/adapter.go b/block/azure/adapter.go
new file mode 100644
index 00000000..e57870fa
--- /dev/null
+++ b/block/azure/adapter.go
@@ -0,0 +1,593 @@
+package azure
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
+	"github.com/jiaozifs/jiaozifs/block"
+	"github.com/jiaozifs/jiaozifs/block/params"
+)
+
+const (
+	sizeSuffix = "_size"
+	idSuffix   = "_id"
+	_1MiB      = 1024 * 1024
+	MaxBuffers = 1
+	// udcCacheSize - Arbitrary number: exceeding this number means that in the expiry timeframe we requested pre-signed URLs from
+	// more than 5000 different accounts, which is highly unlikely
+	udcCacheSize = 5000
+
+	BlobEndpointFormat = "https://%s.blob.core.windows.net/"
+)
+
+type Adapter struct {
+	clientCache        *ClientCache
+	preSignedExpiry    time.Duration
+	disablePreSigned   bool
+	disablePreSignedUI bool
+}
+
+func NewAdapter(_ context.Context, params params.Azure) (*Adapter, error) {
+	log.With("type", "azure").Info("initialized blockstore adapter")
+	preSignedExpiry := params.PreSignedExpiry
+	if preSignedExpiry == 0 {
+		preSignedExpiry = block.DefaultPreSignExpiryDuration
+	}
+	cache, err := NewCache(params)
+	if err != nil {
+		return nil, err
+	}
+	return &Adapter{
+		clientCache:        cache,
+		preSignedExpiry:    preSignedExpiry,
+		disablePreSigned:   params.DisablePreSigned,
+		disablePreSignedUI: params.DisablePreSignedUI,
+	}, nil
+}
+
+type BlobURLInfo struct {
+	StorageAccountName string
+	ContainerURL       string
+	ContainerName      string
+	BlobURL            string
+}
+
+type PrefixURLInfo struct {
+	StorageAccountName string
+	ContainerURL       string
+	
ContainerName string + Prefix string +} + +func ExtractStorageAccount(storageAccount *url.URL) (string, error) { + // In azure the subdomain is the storage account + const expectedHostParts = 2 + hostParts := strings.SplitN(storageAccount.Host, ".", expectedHostParts) + if len(hostParts) != expectedHostParts { + return "", fmt.Errorf("wrong host parts(%d): %w", len(hostParts), block.ErrInvalidAddress) + } + + return hostParts[0], nil +} + +func ResolveBlobURLInfoFromURL(pathURL *url.URL) (BlobURLInfo, error) { + var qk BlobURLInfo + err := block.ValidateStorageType(pathURL, block.StorageTypeAzure) + if err != nil { + return qk, err + } + + // In azure, the first part of the path is part of the storage namespace + trimmedPath := strings.Trim(pathURL.Path, "/") + pathParts := strings.Split(trimmedPath, "/") + if len(pathParts) == 0 { + return qk, fmt.Errorf("wrong path parts(%d): %w", len(pathParts), block.ErrInvalidAddress) + } + + storageAccount, err := ExtractStorageAccount(pathURL) + if err != nil { + return qk, err + } + + return BlobURLInfo{ + StorageAccountName: storageAccount, + ContainerURL: fmt.Sprintf("%s://%s/%s", pathURL.Scheme, pathURL.Host, pathParts[0]), + ContainerName: pathParts[0], + BlobURL: strings.Join(pathParts[1:], "/"), + }, nil +} + +func resolveBlobURLInfo(obj block.ObjectPointer) (BlobURLInfo, error) { + key := obj.Identifier + defaultNamespace := obj.StorageNamespace + var qk BlobURLInfo + // check if the key is fully qualified + parsedKey, err := url.ParseRequestURI(key) + if err != nil { + // is not fully qualified, treat as key only + // if we don't have a trailing slash for the namespace, add it. + parsedNamespace, err := url.ParseRequestURI(defaultNamespace) + if err != nil { + return qk, err + } + qp, err := ResolveBlobURLInfoFromURL(parsedNamespace) + if err != nil { + return qk, err + } + info := BlobURLInfo{ + StorageAccountName: qp.StorageAccountName, + ContainerURL: qp.ContainerURL, + ContainerName: qp.ContainerName, + BlobURL: qp.BlobURL + "/" + key, + } + if qp.BlobURL == "" { + info.BlobURL = key + } + return info, nil + } + return ResolveBlobURLInfoFromURL(parsedKey) +} + +func (a *Adapter) translatePutOpts(_ context.Context, opts block.PutOpts) azblob.UploadStreamOptions { + res := azblob.UploadStreamOptions{} + if opts.StorageClass == nil { + return res + } + + for _, t := range blob.PossibleAccessTierValues() { + if strings.EqualFold(*opts.StorageClass, string(t)) { + accessTier := t + res.AccessTier = &accessTier + break + } + } + + if res.AccessTier == nil { + log.With("tier_type", *opts.StorageClass).Warn("Unknown Azure tier type") + } + + return res +} + +func (a *Adapter) Put(ctx context.Context, obj block.ObjectPointer, sizeBytes int64, reader io.Reader, opts block.PutOpts) error { + var err error + defer reportMetrics("Put", time.Now(), &sizeBytes, &err) + qualifiedKey, err := resolveBlobURLInfo(obj) + if err != nil { + return err + } + o := a.translatePutOpts(ctx, opts) + containerClient, err := a.clientCache.NewContainerClient(qualifiedKey.StorageAccountName, qualifiedKey.ContainerName) + if err != nil { + return err + } + _, err = containerClient.NewBlockBlobClient(qualifiedKey.BlobURL).UploadStream(ctx, reader, &o) + return err +} + +func (a *Adapter) Get(ctx context.Context, obj block.ObjectPointer, _ int64) (io.ReadCloser, error) { + var err error + defer reportMetrics("Get", time.Now(), nil, &err) + + return a.Download(ctx, obj, 0, blockblob.CountToEnd) +} + +func (a *Adapter) GetWalker(uri *url.URL) (block.Walker, error) { + if 
err := block.ValidateStorageType(uri, block.StorageTypeAzure); err != nil { + return nil, err + } + + storageAccount, err := ExtractStorageAccount(uri) + if err != nil { + return nil, err + } + + client, err := a.clientCache.NewServiceClient(storageAccount) + if err != nil { + return nil, err + } + + return NewAzureBlobWalker(client) +} + +func (a *Adapter) GetPreSignedURL(ctx context.Context, obj block.ObjectPointer, mode block.PreSignMode) (string, time.Time, error) { + if a.disablePreSigned { + return "", time.Time{}, block.ErrOperationNotSupported + } + + permissions := sas.BlobPermissions{Read: true} + if mode == block.PreSignModeWrite { + permissions = sas.BlobPermissions{ + Read: true, + Add: true, + Write: true, + } + } + preSignedURL, err := a.getPreSignedURL(ctx, obj, permissions) + // TODO(#6347): Report expiry. + return preSignedURL, time.Time{}, err +} + +func (a *Adapter) getPreSignedURL(ctx context.Context, obj block.ObjectPointer, permissions sas.BlobPermissions) (string, error) { + if a.disablePreSigned { + return "", block.ErrOperationNotSupported + } + + qualifiedKey, err := resolveBlobURLInfo(obj) + if err != nil { + return "", err + } + + // Use shared credential for clients initialized with storage access key + if qualifiedKey.StorageAccountName == a.clientCache.params.StorageAccount && a.clientCache.params.StorageAccessKey != "" { + container, err := a.clientCache.NewContainerClient(qualifiedKey.StorageAccountName, qualifiedKey.ContainerName) + if err != nil { + return "", err + } + client := container.NewBlobClient(qualifiedKey.BlobURL) + urlExpiry := a.newPreSignedTime() + return client.GetSASURL(permissions, urlExpiry, &blob.GetSASURLOptions{}) + } + + // Otherwise assume using role based credentials and build signed URL using user delegation credentials + urlExpiry := a.newPreSignedTime() + udc, err := a.clientCache.NewUDC(ctx, qualifiedKey.StorageAccountName, &urlExpiry) + if err != nil { + return "", err + } + + // Create Blob Signature Values with desired permissions and sign with user delegation credential + blobSignatureValues := sas.BlobSignatureValues{ + Protocol: sas.ProtocolHTTPS, + ExpiryTime: urlExpiry, + Permissions: to.Ptr(permissions).String(), + ContainerName: qualifiedKey.ContainerName, + BlobName: qualifiedKey.BlobURL, + } + sasQueryParams, err := blobSignatureValues.SignWithUserDelegation(udc) + if err != nil { + return "", err + } + + // format blob URL with signed SAS query params + accountEndpoint := fmt.Sprintf(BlobEndpointFormat, qualifiedKey.StorageAccountName) + u, err := url.JoinPath(accountEndpoint, qualifiedKey.ContainerName, qualifiedKey.BlobURL) + if err != nil { + return "", err + } + u += "?" 
+ sasQueryParams.Encode() + return u, nil +} + +func (a *Adapter) GetRange(ctx context.Context, obj block.ObjectPointer, startPosition int64, endPosition int64) (io.ReadCloser, error) { + var err error + defer reportMetrics("GetRange", time.Now(), nil, &err) + + return a.Download(ctx, obj, startPosition, endPosition-startPosition+1) +} + +func (a *Adapter) Download(ctx context.Context, obj block.ObjectPointer, offset, count int64) (io.ReadCloser, error) { + qualifiedKey, err := resolveBlobURLInfo(obj) + if err != nil { + return nil, err + } + container, err := a.clientCache.NewContainerClient(qualifiedKey.StorageAccountName, qualifiedKey.ContainerName) + if err != nil { + return nil, err + } + blobURL := container.NewBlockBlobClient(qualifiedKey.BlobURL) + + downloadResponse, err := blobURL.DownloadStream(ctx, &azblob.DownloadStreamOptions{ + RangeGetContentMD5: nil, + Range: blob.HTTPRange{ + Offset: offset, + Count: count, + }, + }) + if bloberror.HasCode(err, bloberror.BlobNotFound) { + return nil, block.ErrDataNotFound + } + if err != nil { + log.Errorf("failed to get azure blob from container %s key %s %v", container, blobURL, err) + return nil, err + } + return downloadResponse.Body, nil +} + +func (a *Adapter) Exists(ctx context.Context, obj block.ObjectPointer) (bool, error) { + var err error + defer reportMetrics("Exists", time.Now(), nil, &err) + + qualifiedKey, err := resolveBlobURLInfo(obj) + if err != nil { + return false, err + } + + containerClient, err := a.clientCache.NewContainerClient(qualifiedKey.StorageAccountName, qualifiedKey.ContainerName) + if err != nil { + return false, err + } + blobURL := containerClient.NewBlobClient(qualifiedKey.BlobURL) + + _, err = blobURL.GetProperties(ctx, nil) + + if bloberror.HasCode(err, bloberror.BlobNotFound) { + return false, nil + } + if err != nil { + return false, err + } + return true, nil +} + +func (a *Adapter) GetProperties(ctx context.Context, obj block.ObjectPointer) (block.Properties, error) { + var err error + defer reportMetrics("GetProperties", time.Now(), nil, &err) + + qualifiedKey, err := resolveBlobURLInfo(obj) + if err != nil { + return block.Properties{}, err + } + + containerClient, err := a.clientCache.NewContainerClient(qualifiedKey.StorageAccountName, qualifiedKey.ContainerName) + if err != nil { + return block.Properties{}, err + } + blobURL := containerClient.NewBlobClient(qualifiedKey.BlobURL) + + props, err := blobURL.GetProperties(ctx, nil) + if err != nil { + return block.Properties{}, err + } + return block.Properties{StorageClass: props.AccessTier}, nil +} + +func (a *Adapter) Remove(ctx context.Context, obj block.ObjectPointer) error { + var err error + defer reportMetrics("Remove", time.Now(), nil, &err) + qualifiedKey, err := resolveBlobURLInfo(obj) + if err != nil { + return err + } + containerClient, err := a.clientCache.NewContainerClient(qualifiedKey.StorageAccountName, qualifiedKey.ContainerName) + if err != nil { + return err + } + blobURL := containerClient.NewBlobClient(qualifiedKey.BlobURL) + + _, err = blobURL.Delete(ctx, nil) + return err +} + +func (a *Adapter) Copy(ctx context.Context, sourceObj, destinationObj block.ObjectPointer) error { + var err error + defer reportMetrics("Copy", time.Now(), nil, &err) + + qualifiedDestinationKey, err := resolveBlobURLInfo(destinationObj) + if err != nil { + return err + } + + destContainerClient, err := a.clientCache.NewContainerClient(qualifiedDestinationKey.StorageAccountName, qualifiedDestinationKey.ContainerName) + if err != nil { + return err 
+	}
+	destClient := destContainerClient.NewBlobClient(qualifiedDestinationKey.BlobURL)
+
+	sasKey, _, err := a.GetPreSignedURL(ctx, sourceObj, block.PreSignModeRead)
+	if err != nil {
+		return err
+	}
+
+	// Optimistic flow - try to copy synchronously
+	_, err = destClient.CopyFromURL(ctx, sasKey, nil)
+	if err == nil {
+		return nil
+	}
+	// The Azure API (backend) returns an ambiguous error code, which requires us to parse the error message to understand the nature of the error.
+	// See: https://github.com/Azure/azure-sdk-for-go/issues/19880
+	if !bloberror.HasCode(err, bloberror.CannotVerifyCopySource) ||
+		!strings.Contains(err.Error(), "The source request body for synchronous copy is too large and exceeds the maximum permissible limit") {
+		return err
+	}
+
+	// Blob too big for synchronous copy. Perform async copy
+	logger := log.With(
+		"sourceObj", sourceObj.Identifier,
+		"destObj", destinationObj.Identifier,
+	)
+	logger.Debug("Perform async copy")
+	res, err := destClient.StartCopyFromURL(ctx, sasKey, nil)
+	if err != nil {
+		return err
+	}
+	copyStatus := res.CopyStatus
+	if copyStatus == nil {
+		return fmt.Errorf("%w: failed to get copy status", block.ErrAsyncCopyFailed)
+	}
+
+	progress := ""
+	const asyncPollInterval = 5 * time.Second
+	for {
+		select {
+		case <-ctx.Done():
+			log.With("copy_progress", progress).Warn("context canceled, aborting copy")
+			// Context canceled - abort the copy, using a different context for the abort itself
+			_, err := destClient.AbortCopyFromURL(context.Background(), *res.CopyID, nil)
+			if err != nil {
+				log.Errorf("failed to abort copy %v", err)
+			}
+			return ctx.Err()
+
+		case <-time.After(asyncPollInterval):
+			p, err := destClient.GetProperties(ctx, nil)
+			if err != nil {
+				return err
+			}
+			copyStatus = p.CopyStatus
+			if copyStatus == nil {
+				return fmt.Errorf("%w: failed to get copy status", block.ErrAsyncCopyFailed)
+			}
+			progress = *p.CopyProgress
+			switch *copyStatus {
+			case blob.CopyStatusTypeSuccess:
+				log.With("object_properties", p).Debug("Async copy successful")
+				return nil
+
+			case blob.CopyStatusTypeAborted:
+				return fmt.Errorf("%w: unexpected abort", block.ErrAsyncCopyFailed)
+
+			case blob.CopyStatusTypeFailed:
+				return fmt.Errorf("%w: copy status failed", block.ErrAsyncCopyFailed)
+
+			case blob.CopyStatusTypePending:
+				log.With("copy_progress", progress).Debug("Copy pending")
+
+			default:
+				return fmt.Errorf("%w: invalid copy status: %s", block.ErrAsyncCopyFailed, *copyStatus)
+			}
+		}
+	}
+}
+
+func (a *Adapter) CreateMultiPartUpload(_ context.Context, obj block.ObjectPointer, _ *http.Request, _ block.CreateMultiPartUploadOpts) (*block.CreateMultiPartUploadResponse, error) {
+	// Azure has no create multipart upload
+	var err error
+	defer reportMetrics("CreateMultiPartUpload", time.Now(), nil, &err)
+
+	qualifiedKey, err := resolveBlobURLInfo(obj)
+	if err != nil {
+		return nil, err
+	}
+
+	return &block.CreateMultiPartUploadResponse{
+		UploadID: qualifiedKey.BlobURL,
+	}, nil
+}
+
+func (a *Adapter) UploadPart(ctx context.Context, obj block.ObjectPointer, _ int64, reader io.Reader, _ string, _ int) (*block.UploadPartResponse, error) {
+	var err error
+	defer reportMetrics("UploadPart", time.Now(), nil, &err)
+
+	qualifiedKey, err := resolveBlobURLInfo(obj)
+	if err != nil {
+		return nil, err
+	}
+
+	container, err := a.clientCache.NewContainerClient(qualifiedKey.StorageAccountName, qualifiedKey.ContainerName)
+	if err != nil {
+		return nil, err
+	}
+	hashReader := block.NewHashingReader(reader, block.HashFunctionMD5)
+
+	
multipartBlockWriter := NewMultipartBlockWriter(hashReader, *container, qualifiedKey.BlobURL) + _, err = copyFromReader(ctx, hashReader, multipartBlockWriter, blockblob.UploadStreamOptions{ + BlockSize: _1MiB, + Concurrency: MaxBuffers, + }) + if err != nil { + return nil, err + } + return &block.UploadPartResponse{ + ETag: strings.Trim(multipartBlockWriter.etag, `"`), + }, nil +} + +func (a *Adapter) UploadCopyPart(ctx context.Context, sourceObj, destinationObj block.ObjectPointer, _ string, _ int) (*block.UploadPartResponse, error) { + var err error + defer reportMetrics("UploadPart", time.Now(), nil, &err) + + return a.copyPartRange(ctx, sourceObj, destinationObj, 0, blockblob.CountToEnd) +} + +func (a *Adapter) UploadCopyPartRange(ctx context.Context, sourceObj, destinationObj block.ObjectPointer, _ string, _ int, startPosition, endPosition int64) (*block.UploadPartResponse, error) { + var err error + defer reportMetrics("UploadPart", time.Now(), nil, &err) + return a.copyPartRange(ctx, sourceObj, destinationObj, startPosition, endPosition-startPosition+1) +} + +func (a *Adapter) copyPartRange(ctx context.Context, sourceObj, destinationObj block.ObjectPointer, startPosition, count int64) (*block.UploadPartResponse, error) { + qualifiedSourceKey, err := resolveBlobURLInfo(sourceObj) + if err != nil { + return nil, err + } + + qualifiedDestinationKey, err := resolveBlobURLInfo(destinationObj) + if err != nil { + return nil, err + } + + destinationContainer, err := a.clientCache.NewContainerClient(qualifiedDestinationKey.StorageAccountName, qualifiedDestinationKey.ContainerName) + if err != nil { + return nil, err + } + sourceContainer, err := a.clientCache.NewContainerClient(qualifiedSourceKey.StorageAccountName, qualifiedSourceKey.ContainerName) + if err != nil { + return nil, err + } + + sourceBlobURL := sourceContainer.NewBlockBlobClient(qualifiedSourceKey.BlobURL) + + return copyPartRange(ctx, *destinationContainer, qualifiedDestinationKey.BlobURL, *sourceBlobURL, startPosition, count) +} + +func (a *Adapter) AbortMultiPartUpload(_ context.Context, _ block.ObjectPointer, _ string) error { + // Azure has no abort. In case of commit, uncommitted parts are erased. 
Otherwise, staged data is erased after 7 days + return nil +} + +func (a *Adapter) BlockstoreType() string { + return block.BlockstoreTypeAzure +} + +func (a *Adapter) CompleteMultiPartUpload(ctx context.Context, obj block.ObjectPointer, _ string, multipartList *block.MultipartUploadCompletion) (*block.CompleteMultiPartUploadResponse, error) { + var err error + defer reportMetrics("CompleteMultiPartUpload", time.Now(), nil, &err) + qualifiedKey, err := resolveBlobURLInfo(obj) + if err != nil { + return nil, err + } + containerURL, err := a.clientCache.NewContainerClient(qualifiedKey.StorageAccountName, qualifiedKey.ContainerName) + if err != nil { + return nil, err + } + + return completeMultipart(ctx, multipartList.Part, *containerURL, qualifiedKey.BlobURL) +} + +func (a *Adapter) GetStorageNamespaceInfo() block.StorageNamespaceInfo { + info := block.DefaultStorageNamespaceInfo(block.BlockstoreTypeAzure) + info.ImportValidityRegex = `^https?://[a-z0-9_-]+\.(blob|adls)\.core\.windows\.net` // added adls for import hint validation in UI + info.ValidityRegex = `^https?://[a-z0-9_-]+\.blob\.core\.windows\.net` + info.Example = "https://mystorageaccount.blob.core.windows.net/mycontainer/" + if a.disablePreSigned { + info.PreSignSupport = false + } + if !(a.disablePreSignedUI || a.disablePreSigned) { + info.PreSignSupportUI = true + } + return info +} + +func (a *Adapter) ResolveNamespace(storageNamespace, key string, identifierType block.IdentifierType) (block.QualifiedKey, error) { + return block.DefaultResolveNamespace(storageNamespace, key, identifierType) +} + +func (a *Adapter) RuntimeStats() map[string]string { + return nil +} + +func (a *Adapter) newPreSignedTime() time.Time { + return time.Now().UTC().Add(a.preSignedExpiry) +} diff --git a/block/azure/adapter_test.go b/block/azure/adapter_test.go new file mode 100644 index 00000000..f9c7eeb2 --- /dev/null +++ b/block/azure/adapter_test.go @@ -0,0 +1,84 @@ +package azure_test + +import ( + "context" + "net/url" + "regexp" + "testing" + + "github.com/jiaozifs/jiaozifs/block/azure" + "github.com/jiaozifs/jiaozifs/block/blocktest" + "github.com/jiaozifs/jiaozifs/block/params" + "github.com/stretchr/testify/require" +) + +func TestAzureAdapter(t *testing.T) { + basePath, err := url.JoinPath(blockURL, containerName) + require.NoError(t, err) + localPath, err := url.JoinPath(basePath, "lakefs") + require.NoError(t, err) + externalPath, err := url.JoinPath(basePath, "external") + require.NoError(t, err) + + adapter, err := azure.NewAdapter(context.Background(), params.Azure{ + StorageAccount: accountName, + StorageAccessKey: accountKey, + TestEndpointURL: blockURL, + }) + require.NoError(t, err, "create new adapter") + blocktest.AdapterTest(t, adapter, localPath, externalPath) +} + +func TestAdapterNamespace(t *testing.T) { + adapter, err := azure.NewAdapter(context.Background(), params.Azure{ + StorageAccount: accountName, + StorageAccessKey: accountKey, + TestEndpointURL: blockURL, + }) + require.NoError(t, err, "create new adapter") + + expr, err := regexp.Compile(adapter.GetStorageNamespaceInfo().ValidityRegex) + require.NoError(t, err) + + tests := []struct { + Name string + Namespace string + Success bool + }{ + { + Name: "valid_https", + Namespace: "https://test.blob.core.windows.net/container1/repo1", + Success: true, + }, + { + Name: "valid_http", + Namespace: "http://test.blob.core.windows.net/container1/repo1", + Success: true, + }, + { + Name: "invalid_subdomain", + Namespace: "https://test.adls.core.windows.net/container1/repo1", 
+			Success: false,
+		},
+		{
+			Name:      "partial",
+			Namespace: "https://test.adls.core.windows.n",
+			Success:   false,
+		},
+		{
+			Name:      "s3",
+			Namespace: "s3://test/adls/core/windows/net",
+			Success:   false,
+		},
+		{
+			Name:      "invalid_string",
+			Namespace: "this is a bad string",
+			Success:   false,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.Name, func(t *testing.T) {
+			require.Equal(t, tt.Success, expr.MatchString(tt.Namespace))
+		})
+	}
+}
diff --git a/block/azure/chunkwriting.go b/block/azure/chunkwriting.go
new file mode 100644
index 00000000..b81c910b
--- /dev/null
+++ b/block/azure/chunkwriting.go
@@ -0,0 +1,335 @@
+package azure
+
+import (
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+	"sync/atomic"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
+	guuid "github.com/google/uuid"
+)
+
+var ErrEmptyBuffer = errors.New("BufferManager returned a 0 size buffer, this is a bug in the manager")
+
+// This code is adapted from azblob's chunkwriting.go.
+// The reason is that the original code commits the data at the end of the copy.
+// In order to support multipart upload we need to save the blockIDs instead of committing them,
+// and once CompleteMultiPartUpload is called we commit all the blockIDs.
+
+// blockWriter provides methods to upload blocks that represent a file to a server and commit them.
+// This allows us to provide a local implementation that fakes the server for hermetic testing.
+type blockWriter interface {
+	StageBlock(context.Context, string, io.ReadSeekCloser, *blockblob.StageBlockOptions) (blockblob.StageBlockResponse, error)
+	Upload(context.Context, io.ReadSeekCloser, *blockblob.UploadOptions) (blockblob.UploadResponse, error)
+	CommitBlockList(context.Context, []string, *blockblob.CommitBlockListOptions) (blockblob.CommitBlockListResponse, error)
+}
+
+// copyFromReader copies a source io.Reader to blob storage using concurrent uploads.
+func copyFromReader(ctx context.Context, from io.Reader, to blockWriter, o blockblob.UploadStreamOptions) (*blockblob.CommitBlockListResponse, error) {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	buffers := newMMBPool(o.Concurrency, o.BlockSize)
+	defer buffers.Free()
+
+	cp := &copier{
+		ctx:     ctx,
+		cancel:  cancel,
+		reader:  from,
+		to:      to,
+		id:      newID(),
+		o:       o,
+		errCh:   make(chan error, 1),
+		buffers: buffers,
+	}
+
+	// Send all our chunks until we get an error.
+	var (
+		err    error
+		buffer []byte
+	)
+	for {
+		select {
+		case buffer = <-buffers.Acquire():
+			// got a buffer
+		default:
+			// no buffer available; allocate a new buffer if possible
+			buffers.Grow()
+			// either grab the newly allocated buffer or wait for one to become available
+			buffer = <-buffers.Acquire()
+		}
+		err = cp.sendChunk(buffer)
+		if err != nil {
+			break
+		}
+	}
+	cp.wg.Wait()
+	// If the error is not EOF, then we have a problem.
+	if err != nil && !errors.Is(err, io.EOF) {
+		return nil, err
+	}
+
+	// Close out our upload.
+	if err := cp.close(); err != nil {
+		return nil, err
+	}
+
+	return &cp.result, nil
+}
+
+// copier streams a file in parallel chunks from a reader to a blockWriter.
+// Do not use directly, instead use copyFromReader().
+type copier struct {
+	// ctx holds the context of a copier. Storing a Context in a struct is normally a faux pas; in this case,
+	// the copier has the lifetime of a function call, so it's fine.
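+	// (Keeping it on the struct also lets write() and close() observe
+	// cancellation without threading a Context through every call.)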
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	// o contains our options for uploading.
+	o blockblob.UploadStreamOptions
+
+	// id provides the ids for each chunk.
+	id *id
+
+	// reader is the source to be written to storage.
+	reader io.Reader
+	// to is the location we are writing our chunks to.
+	to blockWriter
+
+	// errCh is used to hold the first error from our concurrent writers.
+	errCh chan error
+	// wg provides a count of how many writers we are waiting to finish.
+	wg sync.WaitGroup
+
+	// result holds the final result from blob storage after we have submitted all chunks.
+	result blockblob.CommitBlockListResponse
+
+	buffers bufferManager[mmb]
+}
+
+type copierChunk struct {
+	buffer []byte
+	id     string
+}
+
+// getErr returns an error by priority. First, if a function set an error, it returns that error. Next, if the Context has an error
+// it returns that error. Otherwise it is nil. getErr supports only returning an error once per copier.
+func (c *copier) getErr() error {
+	select {
+	case err := <-c.errCh:
+		return err
+	default:
+	}
+	return c.ctx.Err()
+}
+
+// sendChunk reads data from our internal reader, creates a chunk, and sends it to be written via a channel.
+// sendChunk returns io.EOF when the reader returns an io.EOF or io.ErrUnexpectedEOF.
+func (c *copier) sendChunk(buffer []byte) error {
+	// TODO(niro): Need to find a solution to all the buffers.Release
+	if err := c.getErr(); err != nil {
+		c.buffers.Release(buffer)
+		return err
+	}
+
+	if len(buffer) == 0 {
+		c.buffers.Release(buffer)
+		return ErrEmptyBuffer
+	}
+
+	n, err := io.ReadFull(c.reader, buffer)
+	switch {
+	case err == nil && n == 0:
+		c.buffers.Release(buffer)
+		return nil
+
+	case err == nil:
+		nextID := c.id.next()
+		c.wg.Add(1)
+		// NOTE: we must pass id as an arg to our goroutine else
+		// it's captured by reference and can change underneath us!
+		go func(nextID string) {
+			// signal that the block has been staged.
+			// we MUST do this after attempting to write to errCh
+			// to avoid it racing with the reading goroutine.
+			defer c.wg.Done()
+			defer c.buffers.Release(buffer)
+			// Upload the outgoing block, matching the number of bytes read
+			c.write(copierChunk{buffer: buffer[0:n], id: nextID})
+		}(nextID)
+		return nil
+
+	case err != nil && (errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF)) && n == 0:
+		c.buffers.Release(buffer)
+		return io.EOF
+	}
+
+	if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
+		nextID := c.id.next()
+		c.wg.Add(1)
+		go func(nextID string) {
+			defer c.wg.Done()
+			defer c.buffers.Release(buffer)
+			// Upload the outgoing block, matching the number of bytes read
+			c.write(copierChunk{buffer: buffer[0:n], id: nextID})
+		}(nextID)
+		return io.EOF
+	}
+	if err := c.getErr(); err != nil {
+		c.buffers.Release(buffer)
+		return err
+	}
+	c.buffers.Release(buffer)
+	return err
+}
+
+// write uploads a chunk to blob storage.
+func (c *copier) write(chunk copierChunk) {
+	if err := c.ctx.Err(); err != nil {
+		return
+	}
+	_, err := c.to.StageBlock(c.ctx, chunk.id, streaming.NopCloser(bytes.NewReader(chunk.buffer)), &blockblob.StageBlockOptions{
+		CPKInfo:                 c.o.CPKInfo,
+		CPKScopeInfo:            c.o.CPKScopeInfo,
+		TransactionalValidation: c.o.TransactionalValidation,
+	})
+	if err != nil {
+		c.errCh <- fmt.Errorf("write error: %w", err)
+		return
+	}
+}
+
+// close commits our blocks to blob storage and closes our writer.
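+//
+// A sketch of how the whole pipeline is typically driven (this mirrors the
+// call in the Azure adapter's UploadPart; the option values are illustrative):
+//
+//	resp, err := copyFromReader(ctx, reader, writer, blockblob.UploadStreamOptions{
+//		BlockSize:   _1MiB,
+//		Concurrency: MaxBuffers,
+//	})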
+func (c *copier) close() error { + if err := c.getErr(); err != nil { + return err + } + + var err error + c.result, err = c.to.CommitBlockList(c.ctx, c.id.issued(), &blockblob.CommitBlockListOptions{ + Tags: c.o.Tags, + Metadata: c.o.Metadata, + Tier: c.o.AccessTier, + HTTPHeaders: c.o.HTTPHeaders, + CPKInfo: c.o.CPKInfo, + CPKScopeInfo: c.o.CPKScopeInfo, + AccessConditions: c.o.AccessConditions, + }) + return err +} + +// id allows the creation of unique IDs based on UUID4 + an int32. This auto-increments. +type id struct { + u [64]byte + num uint32 + all []string +} + +// newID constructs a new id. +func newID() *id { + uu := guuid.New() + u := [64]byte{} + copy(u[:], uu[:]) + return &id{u: u} +} + +// next returns the next ID. +func (id *id) next() string { + defer atomic.AddUint32(&id.num, 1) + + binary.BigEndian.PutUint32(id.u[len(guuid.UUID{}):], atomic.LoadUint32(&id.num)) + str := base64.StdEncoding.EncodeToString(id.u[:]) + id.all = append(id.all, str) + + return str +} + +// issued returns all ids that have been issued. This returned value shares the internal slice, so it is not safe to modify the return. +// The value is only valid until the next time next() is called. +func (id *id) issued() []string { + return id.all +} + +// Code taken from Azure SDK for go blockblob/chunkwriting.go + +// bufferManager provides an abstraction for the management of buffers. +// this is mostly for testing purposes, but does allow for different implementations without changing the algorithm. +type bufferManager[T ~[]byte] interface { + // Acquire returns the channel that contains the pool of buffers. + Acquire() <-chan T + + // Release releases the buffer back to the pool for reuse/cleanup. + Release(T) + + // Grow grows the number of buffers, up to the predefined max. + // It returns the total number of buffers or an error. + // No error is returned if the number of buffers has reached max. + // This is called only from the reading goroutine. + Grow() int + + // Free cleans up all buffers. + Free() +} + +// mmb is a memory mapped buffer +type mmb []byte + +// TODO (niro): consider implementation refactoring +// newMMB creates a new memory mapped buffer with the specified size +func newMMB(size int64) mmb { + return make(mmb, size) +} + +// delete cleans up the memory mapped buffer +func (m *mmb) delete() { +} + +// mmbPool implements the bufferManager interface. +// it uses anonymous memory mapped files for buffers. +// don't use this type directly, use newMMBPool() instead. 
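+// The pool is a simple bounded buffer manager: Grow allocates new buffers
+// up to max, Acquire hands them out over a channel, and Release returns
+// them for reuse; Free drains and deletes whatever was allocated.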
+type mmbPool struct { + buffers chan mmb + count int + max int + size int64 +} + +func newMMBPool(maxBuffers int, bufferSize int64) bufferManager[mmb] { + return &mmbPool{ + buffers: make(chan mmb, maxBuffers), + max: maxBuffers, + size: bufferSize, + } +} + +func (pool *mmbPool) Acquire() <-chan mmb { + return pool.buffers +} + +func (pool *mmbPool) Grow() int { + if pool.count < pool.max { + buffer := newMMB(pool.size) + pool.buffers <- buffer + pool.count++ + } + return pool.count +} + +func (pool *mmbPool) Release(buffer mmb) { + pool.buffers <- buffer +} + +func (pool *mmbPool) Free() { + for i := 0; i < pool.count; i++ { + buffer := <-pool.buffers + buffer.delete() + } + pool.count = 0 +} diff --git a/block/azure/client_cache.go b/block/azure/client_cache.go new file mode 100644 index 00000000..c4f73f36 --- /dev/null +++ b/block/azure/client_cache.go @@ -0,0 +1,144 @@ +package azure + +import ( + "context" + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service" + lru "github.com/hnlq715/golang-lru" + "github.com/jiaozifs/jiaozifs/block/params" + "github.com/puzpuzpuz/xsync" +) + +const UDCCacheExpiry = time.Hour +const UDCCacheWorkaroundDivider = 2 + +type ClientCache struct { + serviceToClient *xsync.MapOf[string, *service.Client] + containerToClient *xsync.MapOf[string, *container.Client] + // udcCache - User Delegation Credential cache used to reduce POST requests while creating pre-signed URLs + udcCache *lru.ARCCache + params params.Azure +} + +func NewCache(p params.Azure) (*ClientCache, error) { + l, err := lru.NewARCWithExpire(udcCacheSize, UDCCacheExpiry/UDCCacheWorkaroundDivider) + // TODO(Guys): dividing the udc cache expiry by 2 is a workaround for the fact that this package does not handle expiry correctly, we can remove this once we use https://github.com/hashicorp/golang-lru expirables + if err != nil { + return nil, err + } + + return &ClientCache{ + serviceToClient: xsync.NewMapOf[*service.Client](), + containerToClient: xsync.NewMapOf[*container.Client](), + udcCache: l, + params: p, + }, nil +} + +func mapKey(storageAccount, containerName string) string { + return fmt.Sprintf("%s#%s", storageAccount, containerName) +} + +func (c *ClientCache) NewContainerClient(storageAccount, containerName string) (*container.Client, error) { + key := mapKey(storageAccount, containerName) + + var err error + cl, _ := c.containerToClient.LoadOrCompute(key, func() *container.Client { + var svc *service.Client + svc, err = c.NewServiceClient(storageAccount) + if err != nil { + return nil + } + return svc.NewContainerClient(containerName) + }) + if err != nil { + return nil, err + } + + return cl, nil +} + +func (c *ClientCache) NewServiceClient(storageAccount string) (*service.Client, error) { + p := c.params + // Use StorageAccessKey to initialize storage account client only if it was provided for this given storage account + // Otherwise fall back to the default credentials + if p.StorageAccount != storageAccount { + p.StorageAccount = storageAccount + p.StorageAccessKey = "" + } + + var err error + cl, _ := c.serviceToClient.LoadOrCompute(storageAccount, func() *service.Client { + var svc *service.Client + svc, err = 
BuildAzureServiceClient(p) + if err != nil { + return nil + } + return svc + }) + if err != nil { + return nil, err + } + + return cl, nil +} + +func (c *ClientCache) NewUDC(ctx context.Context, storageAccount string, expiry *time.Time) (*service.UserDelegationCredential, error) { + var udc *service.UserDelegationCredential + // Check udcCache + res, ok := c.udcCache.Get(storageAccount) + if !ok { + baseTime := time.Now().UTC().Add(-10 * time.Second) + // UDC expiry time of PreSignedExpiry + hour + udcExpiry := expiry.Add(UDCCacheExpiry) + info := service.KeyInfo{ + Start: to.Ptr(baseTime.UTC().Format(sas.TimeFormat)), + Expiry: to.Ptr(udcExpiry.Format(sas.TimeFormat)), + } + svc, err := c.NewServiceClient(storageAccount) + if err != nil { + return nil, err + } + udc, err = svc.GetUserDelegationCredential(ctx, info, nil) + if err != nil { + return nil, err + } + // UDC expires after PreSignedExpiry + hour but cache entry expires after an hour + c.udcCache.Add(storageAccount, udc) + } else { + udc = res.(*service.UserDelegationCredential) + } + return udc, nil +} + +func BuildAzureServiceClient(params params.Azure) (*service.Client, error) { + var url string + if params.TestEndpointURL != "" { // For testing purposes - override default url template + url = params.TestEndpointURL + } else { + url = fmt.Sprintf(BlobEndpointFormat, params.StorageAccount) + } + + options := service.ClientOptions{ClientOptions: azcore.ClientOptions{Retry: policy.RetryOptions{TryTimeout: params.TryTimeout}}} + if params.StorageAccessKey != "" { + cred, err := service.NewSharedKeyCredential(params.StorageAccount, params.StorageAccessKey) + if err != nil { + return nil, fmt.Errorf("invalid credentials: %w", err) + } + return service.NewClientWithSharedKeyCredential(url, cred, &options) + } + + defaultCreds, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + return nil, fmt.Errorf("missing credentials: %w", err) + } + return service.NewClient(url, defaultCreds, &options) +} diff --git a/block/azure/client_test.go b/block/azure/client_test.go new file mode 100644 index 00000000..27bfa6b3 --- /dev/null +++ b/block/azure/client_test.go @@ -0,0 +1,57 @@ +package azure_test + +import ( + "net/url" + "testing" + + "github.com/jiaozifs/jiaozifs/block" + "github.com/jiaozifs/jiaozifs/block/azure" + "github.com/stretchr/testify/require" +) + +func TestExtraction(t *testing.T) { + parse := func(p string) *url.URL { + u, err := url.Parse(p) + require.NoError(t, err) + return u + } + tests := []struct { + name string + url *url.URL + expectedErr error + expectedStorageAccount string + }{ + { + name: "simple", + url: parse("https://somestorageaccount.blob.core.windows.net/newcontainer/2023/"), + expectedStorageAccount: "somestorageaccount", + }, + { + name: "no container", + url: parse("https://somestorageaccount.blob.core.windows.net/"), + expectedStorageAccount: "somestorageaccount", + }, + { + name: "long prefix", + url: parse("https://somestorageaccount.blob.core.windows.net/container/somestorageaccount.blob.core.windows.net"), + expectedStorageAccount: "somestorageaccount", + }, + { + name: "No subdomains", + url: parse("https://Rgeaccountblobcorewindowsnet/newcontainer/2023/"), + expectedErr: block.ErrInvalidAddress, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + actualStorageAccount, err := azure.ExtractStorageAccount(tt.url) + if tt.expectedErr == nil { + require.NoError(t, err) + require.Equal(t, tt.expectedStorageAccount, actualStorageAccount) + } else { + 
require.ErrorIs(t, err, tt.expectedErr) + } + }) + } +} diff --git a/block/azure/main_test.go b/block/azure/main_test.go new file mode 100644 index 00000000..31543e37 --- /dev/null +++ b/block/azure/main_test.go @@ -0,0 +1,115 @@ +package azure_test + +import ( + "context" + "fmt" + "log" + "net" + "os" + "testing" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/benburkert/dns" + "github.com/ory/dockertest/v3" +) + +const ( + azuriteContainerTimeoutSeconds = 10 * 60 // 10 min + containerName = "container1" + accountName = "account1" + accountKey = "key1" + + domain = "azurite.test" // TLD for test + accountHost = accountName + "." + domain +) + +var ( + pool *dockertest.Pool + blockURL string +) + +func createDNSResolver(domain string) { + zone := dns.Zone{ + Origin: domain + ".", + TTL: 5 * time.Minute, + RRs: dns.RRSet{ + accountName: map[dns.Type][]dns.Record{ + dns.TypeA: { + &dns.A{A: net.IPv4(127, 0, 0, 1).To4()}, + }, + }, + }, + } + + mux := new(dns.ResolveMux) + mux.Handle(dns.TypeANY, zone.Origin, &zone) + client := &dns.Client{ + Resolver: mux, + } + net.DefaultResolver = &net.Resolver{ + PreferGo: true, + Dial: client.Dial, + } +} + +func runAzurite(dockerPool *dockertest.Pool) (string, func()) { + ctx := context.Background() + resource, err := dockerPool.Run("mcr.microsoft.com/azure-storage/azurite", "3.26.0", []string{ + fmt.Sprintf("AZURITE_ACCOUNTS=%s:%s", accountName, accountKey), + }) + if err != nil { + panic(err) + } + createDNSResolver(domain) + + // set cleanup + closer := func() { + err := dockerPool.Purge(resource) + if err != nil { + panic("could not purge Azurite container: " + err.Error()) + } + } + + // expire, just to make sure + err = resource.Expire(azuriteContainerTimeoutSeconds) + if err != nil { + panic("could not expire Azurite container: " + err.Error()) + } + + // create connection and test container + port := resource.GetPort("10000/tcp") + url := fmt.Sprintf("http://%s:%s", accountHost, port) + cred, err := azblob.NewSharedKeyCredential(accountName, accountKey) + if err != nil { + panic(err) + } + + blob, err := azblob.NewClientWithSharedKeyCredential(url, cred, nil) + if err != nil { + panic(err) + } + _, err = blob.CreateContainer(ctx, containerName, nil) + if err != nil { + fmt.Println(err) + panic(err) + } + + // return container URL + return url, closer +} + +func TestMain(m *testing.M) { + _ = os.Setenv("NO_PROXY", "*."+domain) //ignore proxy for domain do not remove it + + var err error + pool, err = dockertest.NewPool("") + if err != nil { + log.Fatalf("Could not connect to Docker: %s", err) + } + var cleanup func() + blockURL, cleanup = runAzurite(pool) + code := m.Run() + cleanup() + os.Exit(code) +} diff --git a/block/azure/multipart_block_writer.go b/block/azure/multipart_block_writer.go new file mode 100644 index 00000000..88cd4d9c --- /dev/null +++ b/block/azure/multipart_block_writer.go @@ -0,0 +1,225 @@ +package azure + +import ( + "bufio" + "context" + "encoding/base64" + "encoding/hex" + "fmt" + "io" + "sort" + "strconv" + "strings" + + "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container" + "github.com/google/uuid" + logging "github.com/ipfs/go-log/v2" + "github.com/jiaozifs/jiaozifs/block" +) + +var log = logging.Logger("azure") + +type MultipartBlockWriter struct { + reader *block.HashingReader // the 
reader that would be passed to copyFromReader; this is needed in order to get the size and MD5
+	// to is the location we are writing our chunks to.
+	to      *blockblob.Client
+	toIDs   *blockblob.Client
+	toSizes *blockblob.Client
+	etag    string
+}
+
+func NewMultipartBlockWriter(reader *block.HashingReader, containerURL container.Client, objName string) *MultipartBlockWriter {
+	return &MultipartBlockWriter{
+		reader:  reader,
+		to:      containerURL.NewBlockBlobClient(objName),
+		toIDs:   containerURL.NewBlockBlobClient(objName + idSuffix),
+		toSizes: containerURL.NewBlockBlobClient(objName + sizeSuffix),
+	}
+}
+
+func (m *MultipartBlockWriter) StageBlock(ctx context.Context, base64BlockID string, body io.ReadSeekCloser, options *blockblob.StageBlockOptions) (blockblob.StageBlockResponse, error) {
+	return m.to.StageBlock(ctx, base64BlockID, body, options)
+}
+
+func (m *MultipartBlockWriter) CommitBlockList(ctx context.Context, ids []string, options *blockblob.CommitBlockListOptions) (blockblob.CommitBlockListResponse, error) {
+	m.etag = "\"" + hex.EncodeToString(m.reader.Md5.Sum(nil)) + "\""
+	base64Etag := base64.StdEncoding.EncodeToString([]byte(m.etag))
+
+	// write to blockIDs
+	pd := strings.Join(ids, "\n") + "\n"
+	var leaseAccessConditions *blob.LeaseAccessConditions
+	if options.AccessConditions != nil {
+		leaseAccessConditions = options.AccessConditions.LeaseAccessConditions
+	}
+	_, err := m.toIDs.StageBlock(ctx, base64Etag, streaming.NopCloser(strings.NewReader(pd)), &blockblob.StageBlockOptions{
+		LeaseAccessConditions: leaseAccessConditions,
+	})
+	if err != nil {
+		return blockblob.CommitBlockListResponse{}, fmt.Errorf("failed staging part data: %w", err)
+	}
+	// write block sizes
+	sd := strconv.Itoa(int(m.reader.CopiedSize)) + "\n"
+	_, err = m.toSizes.StageBlock(ctx, base64Etag, streaming.NopCloser(strings.NewReader(sd)), &blockblob.StageBlockOptions{
+		LeaseAccessConditions: leaseAccessConditions,
+	})
+	if err != nil {
+		return blockblob.CommitBlockListResponse{}, fmt.Errorf("failed staging part sizes: %w", err)
+	}
+
+	return blockblob.CommitBlockListResponse{}, err
+}
+
+func (m *MultipartBlockWriter) Upload(_ context.Context, _ io.ReadSeekCloser, _ *blockblob.UploadOptions) (blockblob.UploadResponse, error) {
+	panic("Should not be called")
+}
+
+func completeMultipart(ctx context.Context, parts []block.MultipartPart, container container.Client, objName string) (*block.CompleteMultiPartUploadResponse, error) {
+	sort.Slice(parts, func(i, j int) bool {
+		return parts[i].PartNumber < parts[j].PartNumber
+	})
+	// extract staging blockIDs
+	metaBlockIDs := make([]string, len(parts))
+	for i, part := range parts {
+		// add quotation marks (") if missing: ETags sent by Spark include quotation marks, ETags sent by the AWS CLI don't
+		etag := strings.Trim(part.ETag, "\"")
+		etag = "\"" + etag + "\""
+		base64Etag := base64.StdEncoding.EncodeToString([]byte(etag))
+		metaBlockIDs[i] = base64Etag
+	}
+
+	stageBlockIDs, err := getMultipartIDs(ctx, container, objName, metaBlockIDs)
+	if err != nil {
+		return nil, err
+	}
+
+	size, err := getMultipartSize(ctx, container, objName, metaBlockIDs)
+	if err != nil {
+		return nil, err
+	}
+	blobURL := container.NewBlockBlobClient(objName)
+
+	res, err := blobURL.CommitBlockList(ctx, stageBlockIDs, nil)
+	if err != nil {
+		return nil, err
+	}
+	etag := string(*res.ETag)
+	return &block.CompleteMultiPartUploadResponse{
+		ETag:          etag,
+		ContentLength: size,
+	}, nil
+}
+
+func getMultipartIDs(ctx context.Context, container container.Client, objName string, base64BlockIDs []string) ([]string, error) {
+	blobURL := container.NewBlockBlobClient(objName + idSuffix)
+	_, err := blobURL.CommitBlockList(ctx, base64BlockIDs, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	downloadResponse, err := blobURL.DownloadStream(ctx, nil)
+	if err != nil {
+		return nil, err
+	}
+	bodyStream := downloadResponse.Body
+	defer func() {
+		_ = bodyStream.Close()
+	}()
+	scanner := bufio.NewScanner(bodyStream)
+	ids := make([]string, 0)
+	for scanner.Scan() {
+		id := scanner.Text()
+		ids = append(ids, id)
+	}
+
+	// remove the temporary blob that held the block IDs
+	_, err = blobURL.Delete(ctx, nil)
+	if err != nil {
+		log.With("blob_url", blobURL.URL()).Warnf("Failed to delete multipart ids data file %v", err)
+	}
+	return ids, nil
+}
+
+func getMultipartSize(ctx context.Context, container container.Client, objName string, base64BlockIDs []string) (int64, error) {
+	blobURL := container.NewBlockBlobClient(objName + sizeSuffix)
+	_, err := blobURL.CommitBlockList(ctx, base64BlockIDs, nil)
+	if err != nil {
+		return 0, err
+	}
+
+	downloadResponse, err := blobURL.DownloadStream(ctx, nil)
+	if err != nil {
+		return 0, err
+	}
+	bodyStream := downloadResponse.Body
+	defer func() {
+		_ = bodyStream.Close()
+	}()
+	scanner := bufio.NewScanner(bodyStream)
+	size := 0
+	for scanner.Scan() {
+		s := scanner.Text()
+		stageSize, err := strconv.Atoi(s)
+		if err != nil {
+			return 0, err
+		}
+		size += stageSize
+	}
+
+	// remove the temporary blob that held the part sizes
+	_, err = blobURL.Delete(ctx, nil)
+	if err != nil {
+		log.With("blob_url", blobURL.URL()).Warnf("Failed to delete multipart size data file %v", err)
+	}
+	return int64(size), nil
+}
+
+func copyPartRange(ctx context.Context, destinationContainer container.Client, destinationObjName string, sourceBlobURL blockblob.Client, startPosition, count int64) (*block.UploadPartResponse, error) {
+	base64BlockID := generateRandomBlockID()
+	// stage the range into the destination block blob, reading directly from the source URL
+	destinationBlobURL := destinationContainer.NewBlockBlobClient(destinationObjName)
+	_, err := destinationBlobURL.StageBlockFromURL(ctx, base64BlockID, sourceBlobURL.URL(),
+		&blockblob.StageBlockFromURLOptions{
+			Range: blob.HTTPRange{
+				Offset: startPosition,
+				Count:  count,
+			},
+		})
+	if err != nil {
+		return nil, err
+	}
+
+	// add size and id to etag
+	response, err := sourceBlobURL.GetProperties(ctx, nil)
+	if err != nil {
+		return nil, err
+	}
+	etag := "\"" + hex.EncodeToString(response.ContentMD5) + "\""
+	size := response.ContentLength
+	base64Etag := base64.StdEncoding.EncodeToString([]byte(etag))
+	// stage id data
+	blobIDsURL := destinationContainer.NewBlockBlobClient(destinationObjName + idSuffix)
+	_, err = blobIDsURL.StageBlock(ctx, base64Etag, streaming.NopCloser(strings.NewReader(base64BlockID+"\n")), nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed staging part data: %w", err)
+	}
+
+	// stage size data
+	sizeData := fmt.Sprintf("%d\n", size)
+	blobSizesURL := destinationContainer.NewBlockBlobClient(destinationObjName + sizeSuffix)
+	_, err = blobSizesURL.StageBlock(ctx, base64Etag, streaming.NopCloser(strings.NewReader(sizeData)), nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed staging part size: %w", err)
+	}
+
+	return &block.UploadPartResponse{
+		ETag: strings.Trim(etag, `"`),
+	}, nil
+}
+
+func generateRandomBlockID() string {
+	uu := uuid.New()
+	u := [64]byte{}
+	copy(u[:], uu[:])
+	return base64.StdEncoding.EncodeToString(u[:])
+}
diff --git a/block/azure/stats.go b/block/azure/stats.go
new file mode 100644
index 00000000..def8f12c
--- /dev/null
+++ b/block/azure/stats.go
@@ -0,0 +1,31 @@
+package azure
+
+import (
+	"strconv"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+)
+
+var durationHistograms = promauto.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Name: "azure_operation_duration_seconds",
+		Help: "durations of outgoing azure operations",
+	},
+	[]string{"operation", "error"})
+
+var requestSizeHistograms = promauto.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Name:    "azure_operation_size_bytes",
+		Help:    "handled sizes of outgoing azure operations",
+		Buckets: prometheus.ExponentialBuckets(1, 10, 10), //nolint: gomnd
+	}, []string{"operation", "error"})
+
+func reportMetrics(operation string, start time.Time, sizeBytes *int64, err *error) {
+	isErrStr := strconv.FormatBool(*err != nil)
+	durationHistograms.WithLabelValues(operation, isErrStr).Observe(time.Since(start).Seconds())
+	if sizeBytes != nil {
+		requestSizeHistograms.WithLabelValues(operation, isErrStr).Observe(float64(*sizeBytes))
+	}
+}
diff --git a/block/azure/walker.go b/block/azure/walker.go
new file mode 100644
index 00000000..1cf577f1
--- /dev/null
+++ b/block/azure/walker.go
@@ -0,0 +1,257 @@
+package azure
+
+import (
+	"context"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
+	"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service"
+	"github.com/jiaozifs/jiaozifs/block"
+)
+
+const DirectoryBlobMetadataKey = "hdi_isfolder"
+
+var ErrAzureInvalidURL = errors.New("invalid Azure storage URL")
+
+func NewAzureBlobWalker(svc *service.Client) (*BlobWalker, error) {
+	return &BlobWalker{
+		client: svc,
+		mark:   block.Mark{HasMore: true},
+	}, nil
+}
+
+type BlobWalker struct {
+	client *service.Client
+	mark   block.Mark
+}
+
+// extractAzurePrefix takes a URL that looks like this: https://storageaccount.blob.core.windows.net/container/prefix
+// and returns the URL for the container and a prefix, if one exists
+func extractAzurePrefix(storageURI *url.URL) (*url.URL, string, error) {
+	path := strings.TrimLeft(storageURI.Path, "/")
+	if len(path) == 0 {
+		return nil, "", fmt.Errorf("%w: could not parse container URL: %s", ErrAzureInvalidURL, storageURI)
+	}
+	parts := strings.SplitN(path, "/", 2) // nolint: gomnd
+	if len(parts) == 1 {
+		// we only have a container
+		return storageURI, "", nil
+	}
+	// we have both prefix and storage container, rebuild URL
+	relativePath := url.URL{Path: "/" + parts[0]}
+	return storageURI.ResolveReference(&relativePath), parts[1], nil
+}
+
+func getAzureBlobURL(containerURL *url.URL, blobName string) *url.URL {
+	relativePath := url.URL{Path: containerURL.Path + "/" + blobName}
+	return containerURL.ResolveReference(&relativePath)
+}
+
+func (a *BlobWalker) Walk(ctx context.Context, storageURI *url.URL, op block.WalkOptions, walkFn func(e block.ObjectStoreEntry) error) error {
+	// we use bucket as container and prefix as path
+	containerURL, prefix, err := extractAzurePrefix(storageURI)
+	if err != nil {
+		return err
+	}
+	var basePath string
+	if idx := strings.LastIndex(prefix, "/"); idx != -1 {
+		basePath = prefix[:idx+1]
+	}
+
+	qk, err := ResolveBlobURLInfoFromURL(containerURL)
+	if err != nil {
+		return err
+	}
+
+	containerClient := a.client.NewContainerClient(qk.ContainerName)
+	listBlob := containerClient.NewListBlobsFlatPager(&azblob.ListBlobsFlatOptions{
+		Prefix: &prefix,
+		Marker: &op.ContinuationToken,
+		Include: container.ListBlobsInclude{
+			Metadata: true,
+		},
+	})
+
+	for 
listBlob.More() { + resp, err := listBlob.NextPage(ctx) + if err != nil { + return err + } + if resp.Marker != nil { + a.mark.ContinuationToken = *resp.Marker + } + for _, blobInfo := range resp.Segment.BlobItems { + // skipping everything in the page which is before 'After' (without forgetting the possible empty string key!) + if op.After != "" && *blobInfo.Name <= op.After { + continue + } + + // Skip folders + if isBlobItemFolder(blobInfo) { + continue + } + + a.mark.LastKey = *blobInfo.Name + if err := walkFn(block.ObjectStoreEntry{ + FullKey: *blobInfo.Name, + RelativeKey: strings.TrimPrefix(*blobInfo.Name, basePath), + Address: getAzureBlobURL(containerURL, *blobInfo.Name).String(), + ETag: extractBlobItemEtag(blobInfo), + Mtime: *blobInfo.Properties.LastModified, + Size: *blobInfo.Properties.ContentLength, + }); err != nil { + return err + } + } + } + + a.mark = block.Mark{ + HasMore: false, + } + + return nil +} + +// isBlobItemFolder returns true if the blob item is a folder. +// Make sure that metadata is populated before calling this function. +// Example: for listing using blob API passing options with `Include: container.ListBlobsInclude{ Metadata: true }` +// will populate the metadata. +func isBlobItemFolder(blobItem *container.BlobItem) bool { + if blobItem.Metadata == nil { + return false + } + if blobItem.Properties.ContentLength != nil && *blobItem.Properties.ContentLength != 0 { + return false + } + isFolder, ok := blobItem.Metadata[DirectoryBlobMetadataKey] + if !ok || isFolder == nil { + return false + } + return *isFolder == "true" +} + +// extractBlobItemEtag etag set by content md5 with fallback to use Etag value +func extractBlobItemEtag(blobItem *container.BlobItem) string { + if blobItem.Properties.ContentMD5 != nil { + return hex.EncodeToString(blobItem.Properties.ContentMD5) + } + if blobItem.Properties.ETag != nil { + etag := string(*blobItem.Properties.ETag) + return strings.TrimFunc(etag, func(r rune) bool { return r == '"' || r == ' ' }) + } + return "" +} + +func (a *BlobWalker) Marker() block.Mark { + return a.mark +} + +func (a *BlobWalker) GetSkippedEntries() []block.ObjectStoreEntry { + return nil +} + +// +// DataLakeWalker +// + +func NewAzureDataLakeWalker(svc *service.Client, skipOutOfOrder bool) (*DataLakeWalker, error) { + return &DataLakeWalker{ + client: svc, + mark: block.Mark{HasMore: true}, + skipOutOfOrder: skipOutOfOrder, + }, nil +} + +type DataLakeWalker struct { + client *service.Client + mark block.Mark + skipped []block.ObjectStoreEntry + skipOutOfOrder bool +} + +func (a *DataLakeWalker) Walk(ctx context.Context, storageURI *url.URL, op block.WalkOptions, walkFn func(e block.ObjectStoreEntry) error) error { + // we use bucket as container and prefix as a path + containerURL, prefix, err := extractAzurePrefix(storageURI) + if err != nil { + return err + } + var basePath string + if idx := strings.LastIndex(prefix, "/"); idx != -1 { + basePath = prefix[:idx+1] + } + + qk, err := ResolveBlobURLInfoFromURL(containerURL) + if err != nil { + return err + } + + containerClient := a.client.NewContainerClient(qk.ContainerName) + listBlob := containerClient.NewListBlobsFlatPager(&azblob.ListBlobsFlatOptions{ + Prefix: &prefix, + Marker: &op.ContinuationToken, + Include: container.ListBlobsInclude{ + Metadata: true, + }, + }) + + skipCount := 0 + prev := "" + for listBlob.More() { + resp, err := listBlob.NextPage(ctx) + if err != nil { + return err + } + if resp.Marker != nil { + a.mark.ContinuationToken = *resp.Marker + } + for _, blobInfo := 
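+		// Both walkers drain the SDK pager page by page (More/NextPage). The
+		// DataLake variant additionally tolerates keys arriving out of
+		// lexical order: with skipOutOfOrder set, a key sorting before its
+		// predecessor is collected into `skipped` instead of visited,
+		// presumably because hierarchical-namespace listings do not
+		// guarantee strict ordering. Sketch of the check applied below:
+		//
+		//	if skipOutOfOrder && name < prev { // out of order
+		//		skipped = append(skipped, entry)
+		//		continue
+		//	}
+		//	prev = name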
range resp.Segment.BlobItems { + // skipping everything in the page which is before 'After' (without forgetting the possible empty string key!) + if op.After != "" && *blobInfo.Name <= op.After { + continue + } + + // Skip folders + if isBlobItemFolder(blobInfo) { + continue + } + + entry := block.ObjectStoreEntry{ + FullKey: *blobInfo.Name, + RelativeKey: strings.TrimPrefix(*blobInfo.Name, basePath), + Address: getAzureBlobURL(containerURL, *blobInfo.Name).String(), + ETag: extractBlobItemEtag(blobInfo), + Mtime: *blobInfo.Properties.LastModified, + Size: *blobInfo.Properties.ContentLength, + } + if a.skipOutOfOrder && strings.Compare(prev, *blobInfo.Name) > 0 { // skip out of order + a.skipped = append(a.skipped, entry) + skipCount++ + continue + } + prev = *blobInfo.Name + + a.mark.LastKey = *blobInfo.Name + if err := walkFn(entry); err != nil { + return err + } + } + } + a.mark = block.Mark{ + HasMore: false, + } + + return nil +} + +func (a *DataLakeWalker) Marker() block.Mark { + return a.mark +} + +func (a *DataLakeWalker) GetSkippedEntries() []block.ObjectStoreEntry { + return a.skipped +} diff --git a/block/blocktest/adapter.go b/block/blocktest/adapter.go new file mode 100644 index 00000000..8140762f --- /dev/null +++ b/block/blocktest/adapter.go @@ -0,0 +1,415 @@ +package blocktest + +import ( + "bytes" + "context" + "fmt" + "io" + "net/url" + "path" + "path/filepath" + "sort" + "strings" + "testing" + + "github.com/go-test/deep" + "github.com/jiaozifs/jiaozifs/block" + "github.com/stretchr/testify/require" + "github.com/thanhpk/randstr" +) + +func AdapterTest(t *testing.T, adapter block.Adapter, storageNamespace, externalPath string) { + t.Run("Adapter_PutGet", func(t *testing.T) { testAdapterPutGet(t, adapter, storageNamespace, externalPath) }) + t.Run("Adapter_Copy", func(t *testing.T) { testAdapterCopy(t, adapter, storageNamespace) }) + t.Run("Adapter_Remove", func(t *testing.T) { testAdapterRemove(t, adapter, storageNamespace) }) + t.Run("Adapter_MultipartUpload", func(t *testing.T) { testAdapterMultipartUpload(t, adapter, storageNamespace) }) + t.Run("Adapter_Exists", func(t *testing.T) { testAdapterExists(t, adapter, storageNamespace) }) + t.Run("Adapter_GetRange", func(t *testing.T) { testAdapterGetRange(t, adapter, storageNamespace) }) + t.Run("Adapter_Walker", func(t *testing.T) { testAdapterWalker(t, adapter, storageNamespace) }) +} + +func testAdapterPutGet(t *testing.T, adapter block.Adapter, storageNamespace, externalPath string) { + ctx := context.Background() + const contents = "test_file" + size := int64(len(contents)) + + cases := []struct { + name string + identifierType block.IdentifierType + path string + }{ + {"identifier_relative", block.IdentifierTypeRelative, "test_file"}, + {"identifier_full", block.IdentifierTypeFull, externalPath + "/" + "test_file"}, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + obj := block.ObjectPointer{ + StorageNamespace: storageNamespace, + Identifier: c.path, + IdentifierType: c.identifierType, + } + + err := adapter.Put(ctx, obj, size, strings.NewReader(contents), block.PutOpts{}) + require.NoError(t, err) + + reader, err := adapter.Get(ctx, obj, size) + require.NoError(t, err) + defer func() { + require.NoError(t, reader.Close()) + }() + got, err := io.ReadAll(reader) + require.NoError(t, err) + require.Equal(t, contents, string(got)) + }) + } +} + +func testAdapterCopy(t *testing.T, adapter block.Adapter, storageNamespace string) { + ctx := context.Background() + contents := "foo bar baz quux" + 
src := block.ObjectPointer{ + StorageNamespace: storageNamespace, + Identifier: "src", + IdentifierType: block.IdentifierTypeRelative, + } + dst := block.ObjectPointer{ + StorageNamespace: storageNamespace, + Identifier: "export/to/dst", + IdentifierType: block.IdentifierTypeRelative, + } + + require.NoError(t, adapter.Put(ctx, src, int64(len(contents)), strings.NewReader(contents), block.PutOpts{})) + + require.NoError(t, adapter.Copy(ctx, src, dst)) + reader, err := adapter.Get(ctx, dst, 0) + require.NoError(t, err) + got, err := io.ReadAll(reader) + require.NoError(t, err) + require.Equal(t, contents, string(got)) +} + +func testAdapterRemove(t *testing.T, adapter block.Adapter, storageNamespace string) { + ctx := context.Background() + const content = "Content used for testing" + tests := []struct { + name string + additionalObjects []string + path string + wantErr bool + wantTree []string + }{ + { + name: "test_single", + path: "README", + wantErr: false, + wantTree: []string{}, + }, + + { + name: "test_under_folder", + path: "src/tools.go", + wantErr: false, + wantTree: []string{}, + }, + { + name: "test_under_multiple_folders", + path: "a/b/c/d.txt", + wantErr: false, + wantTree: []string{}, + }, + { + name: "file_in_the_way", + path: "a/b/c/d.txt", + additionalObjects: []string{"a/b/blocker.txt"}, + wantErr: false, + wantTree: []string{"/a/b/blocker.txt"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // setup env + envObjects := tt.additionalObjects + envObjects = append(envObjects, tt.path) + for _, p := range envObjects { + obj := block.ObjectPointer{ + StorageNamespace: storageNamespace, + Identifier: tt.name + "/" + p, + IdentifierType: block.IdentifierTypeRelative, + } + require.NoError(t, adapter.Put(ctx, obj, int64(len(content)), strings.NewReader(content), block.PutOpts{})) + } + + // test Remove + obj := block.ObjectPointer{ + StorageNamespace: storageNamespace, + Identifier: tt.name + "/" + tt.path, + IdentifierType: block.IdentifierTypeRelative, + } + if err := adapter.Remove(ctx, obj); (err != nil) != tt.wantErr { + t.Errorf("Remove() error = %v, wantErr %v", err, tt.wantErr) + } + + qk, err := adapter.ResolveNamespace(storageNamespace, tt.name, block.IdentifierTypeRelative) + require.NoError(t, err) + + tree := dumpPathTree(t, ctx, adapter, qk) + if diff := deep.Equal(tt.wantTree, tree); diff != nil { + t.Errorf("Remove() tree diff = %s", diff) + } + }) + } +} + +func dumpPathTree(t testing.TB, ctx context.Context, adapter block.Adapter, qk block.QualifiedKey) []string { //nolint + t.Helper() + tree := make([]string, 0) + + uri, err := url.Parse(qk.Format()) + require.NoError(t, err) + + w, err := adapter.GetWalker(uri) + require.NoError(t, err) + + walker := block.NewWrapper(w, uri) + require.NoError(t, err) + + err = walker.Walk(ctx, block.WalkOptions{}, func(e block.ObjectStoreEntry) error { + _, p, _ := strings.Cut(e.Address, uri.String()) + tree = append(tree, p) + return nil + }) + if err != nil { + t.Fatalf("walking on '%s': %s", uri.String(), err) + } + sort.Strings(tree) + return tree +} + +func createMultipartFile() ([][]byte, []byte) { + const ( + multipartNumberOfParts = 3 + multipartPartSize = 5 * 1024 * 1024 + ) + parts := make([][]byte, multipartNumberOfParts) + var partsConcat []byte + for i := 0; i < multipartNumberOfParts; i++ { + parts[i] = randstr.Bytes(multipartPartSize + i) + partsConcat = append(partsConcat, parts[i]...) 
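+		// Parts are 5MiB plus i extra bytes: S3-compatible stores reject
+		// non-final multipart parts smaller than 5MiB, and the varying part
+		// sizes make a misordered reassembly detectable in the final bytes.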
+ } + return parts, partsConcat +} + +func testAdapterMultipartUpload(t *testing.T, adapter block.Adapter, storageNamespace string) { + ctx := context.Background() + parts, full := createMultipartFile() + + cases := []struct { + name string + path string + }{ + {"simple", "abc"}, + {"nested", "foo/bar"}, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + obj := block.ObjectPointer{ + StorageNamespace: storageNamespace, + Identifier: c.path, + IdentifierType: block.IdentifierTypeRelative, + } + resp, err := adapter.CreateMultiPartUpload(ctx, obj, nil, block.CreateMultiPartUploadOpts{}) + require.NoError(t, err) + + multiParts := make([]block.MultipartPart, len(parts)) + for i, content := range parts { + partNumber := i + 1 + partResp, err := adapter.UploadPart(ctx, obj, int64(len(content)), bytes.NewReader(content), resp.UploadID, partNumber) + require.NoError(t, err) + multiParts[i].PartNumber = partNumber + multiParts[i].ETag = partResp.ETag + } + _, err = adapter.CompleteMultiPartUpload(ctx, obj, resp.UploadID, &block.MultipartUploadCompletion{ + Part: multiParts, + }) + require.NoError(t, err) + + reader, err := adapter.Get(ctx, obj, 0) + require.NoError(t, err) + + got, err := io.ReadAll(reader) + require.NoError(t, err) + + require.Equal(t, full, got) + }) + } +} + +func testAdapterExists(t *testing.T, adapter block.Adapter, storageNamespace string) { + // TODO (niro): Test abs paths + const contents = "exists" + ctx := context.Background() + err := adapter.Put(ctx, block.ObjectPointer{ + StorageNamespace: storageNamespace, + Identifier: contents, + IdentifierType: block.IdentifierTypeRelative, + }, int64(len(contents)), strings.NewReader(contents), block.PutOpts{}) + require.NoError(t, err) + + err = adapter.Put(ctx, block.ObjectPointer{ + StorageNamespace: storageNamespace, + Identifier: "nested/and/" + contents, + IdentifierType: block.IdentifierTypeRelative, + }, int64(len(contents)), strings.NewReader(contents), block.PutOpts{}) + require.NoError(t, err) + + cases := []struct { + name string + path string + exists bool + }{ + {"exists", "exists", true}, + {"nested_exists", "nested/and/exists", true}, + {"simple_missing", "missing", false}, + {"nested_missing", "nested/down", false}, + {"nested_deep_missing", "nested/quite/deeply/and/missing", false}, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + ok, err := adapter.Exists(ctx, block.ObjectPointer{ + StorageNamespace: storageNamespace, + Identifier: tt.path, + IdentifierType: block.IdentifierTypeRelative, + }) + require.NoError(t, err) + require.Equal(t, tt.exists, ok) + }) + } +} + +func testAdapterGetRange(t *testing.T, adapter block.Adapter, storageNamespace string) { + ctx := context.Background() + part1 := "this is the first part " + part2 := "this is the last part" + err := adapter.Put(ctx, block.ObjectPointer{ + StorageNamespace: storageNamespace, + Identifier: "test_file", + IdentifierType: block.IdentifierTypeRelative, + }, int64(len(part1+part2)), strings.NewReader(part1+part2), block.PutOpts{}) + require.NoError(t, err) + + cases := []struct { + name string + startPos int + endPos int + expected string + expectFailure bool + }{ + {"read_suffix", len(part1), len(part1 + part2), part2, false}, + {"read_prefix", 0, len(part1) - 1, part1, false}, + {"read_middle", 8, len(part1) + 6, "the first part this is", false}, + // {"end_smaller_than_start", 10, 1, "", false}, // TODO (niro): To be determined + // {"negative_position", -1, len(part1), "", true}, // S3 and Azure not aligned 
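+		// startPos/endPos are inclusive byte offsets, mirroring HTTP Range
+		// semantics: (1,1) reads exactly one byte, and a range beyond the
+		// object end is expected to be clamped to the object size, not fail.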
+ {"one_byte", 1, 1, string(part1[1]), false}, + {"out_of_bounds", 0, len(part1+part2) + 10, part1 + part2, false}, + } + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + reader, err := adapter.GetRange(ctx, block.ObjectPointer{ + StorageNamespace: storageNamespace, + Identifier: "test_file", + IdentifierType: block.IdentifierTypeRelative, + }, int64(tt.startPos), int64(tt.endPos)) + require.Equal(t, tt.expectFailure, err != nil) + if err == nil { + got, err := io.ReadAll(reader) + require.NoError(t, err) + require.Equal(t, tt.expected, string(got)) + } + }) + } +} + +func testAdapterWalker(t *testing.T, adapter block.Adapter, storageNamespace string) { + ctx := context.Background() + const ( + testPrefix = "test_walker" + filesAndFolders = 5 + contents = "test_file" + ) + + for i := 0; i < filesAndFolders; i++ { + for j := 0; j < filesAndFolders; j++ { + err := adapter.Put(ctx, block.ObjectPointer{ + StorageNamespace: storageNamespace, + Identifier: fmt.Sprintf("%s/folder_%d/test_file_%d", testPrefix, filesAndFolders-i-1, filesAndFolders-j-1), + IdentifierType: block.IdentifierTypeRelative, + }, int64(len(contents)), strings.NewReader(contents), block.PutOpts{}) + require.NoError(t, err) + } + } + + err := adapter.Put(ctx, block.ObjectPointer{ + StorageNamespace: storageNamespace, + Identifier: fmt.Sprintf("%s/folder_0.txt", testPrefix), + IdentifierType: block.IdentifierTypeRelative, + }, int64(len(contents)), strings.NewReader(contents), block.PutOpts{}) + require.NoError(t, err) + + cases := []struct { + name string + prefix string + }{ + { + name: "root", + prefix: "", + }, + { + name: "prefix", + prefix: "folder_1", + }, + { + name: "prefix/", + prefix: "folder_2", + }, + } + for _, tt := range cases { + qk, err := adapter.ResolveNamespace(storageNamespace, filepath.Join(testPrefix, tt.prefix), block.IdentifierTypeRelative) + require.NoError(t, err) + uri, err := url.Parse(qk.Format()) + require.NoError(t, err) + t.Run(tt.name, func(t *testing.T) { + reader, err := adapter.GetWalker(uri) + require.NoError(t, err) + + var results []string + err = reader.Walk(ctx, uri, block.WalkOptions{}, func(e block.ObjectStoreEntry) error { + results = append(results, e.RelativeKey) + return nil + }) + require.NoError(t, err) + var prefix string + if tt.prefix == "" { + if adapter.BlockstoreType() != block.BlockstoreTypeLocal { + prefix = testPrefix + } + + require.Equal(t, path.Join(prefix, "folder_0.txt"), results[0]) + results = results[1:] + for i := 0; i < filesAndFolders; i++ { + for j := 0; j < filesAndFolders; j++ { + require.Equal(t, path.Join(prefix, fmt.Sprintf("folder_%d/test_file_%d", i, j)), results[i*filesAndFolders+j]) + } + } + } else { + if adapter.BlockstoreType() != block.BlockstoreTypeLocal { + prefix = tt.prefix + } + for j := 0; j < filesAndFolders; j++ { + require.Equal(t, path.Join(prefix, fmt.Sprintf("test_file_%d", j)), results[j]) + } + } + }) + } +} diff --git a/block/errors.go b/block/errors.go new file mode 100644 index 00000000..0ae3d157 --- /dev/null +++ b/block/errors.go @@ -0,0 +1,13 @@ +package block + +import "github.com/pkg/errors" + +var ( + ErrDataNotFound = errors.New("not found") + ErrOperationNotSupported = errors.New("operation not supported") + ErrAsyncCopyFailed = errors.New("async copy failed") + ErrBadIndex = errors.New("bad index") + ErrForbidden = errors.New("forbidden") + ErrInvalidAddress = errors.New("invalid address") + ErrInvalidNamespace = errors.New("invalid namespace") +) diff --git a/block/factory/build.go 
b/block/factory/build.go new file mode 100644 index 00000000..89bc1298 --- /dev/null +++ b/block/factory/build.go @@ -0,0 +1,140 @@ +package factory + +import ( + "context" + "fmt" + + "cloud.google.com/go/storage" + "github.com/aws/aws-sdk-go-v2/service/s3" + logging "github.com/ipfs/go-log/v2" + "github.com/jiaozifs/jiaozifs/block" + "github.com/jiaozifs/jiaozifs/block/azure" + "github.com/jiaozifs/jiaozifs/block/gs" + "github.com/jiaozifs/jiaozifs/block/local" + "github.com/jiaozifs/jiaozifs/block/mem" + "github.com/jiaozifs/jiaozifs/block/params" + s3a "github.com/jiaozifs/jiaozifs/block/s3" + "github.com/jiaozifs/jiaozifs/block/transient" + "golang.org/x/oauth2/google" + "google.golang.org/api/option" +) + +var log = logging.Logger("block_factory") + +const ( + // googleAuthCloudPlatform - Cloud Storage authentication https://cloud.google.com/storage/docs/authentication + googleAuthCloudPlatform = "https://www.googleapis.com/auth/cloud-platform" +) + +func BuildBlockAdapter(ctx context.Context, c params.AdapterConfig) (block.Adapter, error) { + blockstore := c.BlockstoreType() + log.With("type", blockstore). + Info("initialize blockstore adapter") + switch blockstore { + case block.BlockstoreTypeLocal: + p, err := c.BlockstoreLocalParams() + if err != nil { + return nil, err + } + return buildLocalAdapter(ctx, p) + case block.BlockstoreTypeS3: + p, err := c.BlockstoreS3Params() + if err != nil { + return nil, err + } + return buildS3Adapter(ctx, p) + case block.BlockstoreTypeMem, "memory": + return mem.New(ctx), nil + case block.BlockstoreTypeTransient: + return transient.New(ctx), nil + case block.BlockstoreTypeGS: + p, err := c.BlockstoreGSParams() + if err != nil { + return nil, err + } + return buildGSAdapter(ctx, p) + case block.BlockstoreTypeAzure: + p, err := c.BlockstoreAzureParams() + if err != nil { + return nil, err + } + return azure.NewAdapter(ctx, p) + default: + return nil, fmt.Errorf("%w '%s' please choose one of %s", + block.ErrInvalidAddress, blockstore, []string{block.BlockstoreTypeLocal, block.BlockstoreTypeS3, block.BlockstoreTypeAzure, block.BlockstoreTypeMem, block.BlockstoreTypeTransient, block.BlockstoreTypeGS}) + } +} + +func buildLocalAdapter(_ context.Context, params params.Local) (*local.Adapter, error) { + adapter, err := local.NewAdapter(params.Path, + local.WithAllowedExternalPrefixes(params.AllowedExternalPrefixes), + local.WithImportEnabled(params.ImportEnabled), + ) + if err != nil { + return nil, fmt.Errorf("got error opening a local block adapter with path %s: %w", params.Path, err) + } + log.With( + "type", "local", + "path", params.Path, + ).Info("initialized blockstore adapter") + return adapter, nil +} + +func BuildS3Client(ctx context.Context, params params.S3) (*s3.Client, error) { + cfg, err := s3a.LoadConfig(ctx, params) + if err != nil { + return nil, err + } + + client := s3.NewFromConfig(cfg, s3a.WithClientParams(params)) + return client, nil +} + +func buildS3Adapter(ctx context.Context, params params.S3) (*s3a.Adapter, error) { + opts := []s3a.AdapterOption{ + s3a.WithDiscoverBucketRegion(params.DiscoverBucketRegion), + s3a.WithPreSignedExpiry(params.PreSignedExpiry), + s3a.WithDisablePreSigned(params.DisablePreSigned), + s3a.WithDisablePreSignedUI(params.DisablePreSignedUI), + } + if params.ServerSideEncryption != "" { + opts = append(opts, s3a.WithServerSideEncryption(params.ServerSideEncryption)) + } + if params.ServerSideEncryptionKmsKeyID != "" { + opts = append(opts, 
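+		// Caller-side usage of this factory, as a hedged sketch (assumes
+		// some params.AdapterConfig implementation `cfg`):
+		//
+		//	adapter, err := factory.BuildBlockAdapter(ctx, cfg)
+		//	if err != nil {
+		//		return err
+		//	}
+		//	if c, ok := adapter.(io.Closer); ok {
+		//		defer c.Close() // e.g. the GS adapter holds a client worth closing
+		//	}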
s3a.WithServerSideEncryptionKmsKeyID(params.ServerSideEncryptionKmsKeyID)) + } + adapter, err := s3a.NewAdapter(ctx, params, opts...) + if err != nil { + return nil, err + } + log.With("type", "s3").Info("initialized blockstore adapter") + return adapter, nil +} + +func BuildGSClient(ctx context.Context, params params.GS) (*storage.Client, error) { + var opts []option.ClientOption + if params.CredentialsFile != "" { + opts = append(opts, option.WithCredentialsFile(params.CredentialsFile)) + } else if params.CredentialsJSON != "" { + cred, err := google.CredentialsFromJSON(ctx, []byte(params.CredentialsJSON), googleAuthCloudPlatform) + if err != nil { + return nil, err + } + opts = append(opts, option.WithCredentials(cred)) + } + return storage.NewClient(ctx, opts...) +} + +func buildGSAdapter(ctx context.Context, params params.GS) (*gs.Adapter, error) { + client, err := BuildGSClient(ctx, params) + if err != nil { + return nil, err + } + adapter := gs.NewAdapter(client, + gs.WithPreSignedExpiry(params.PreSignedExpiry), + gs.WithDisablePreSigned(params.DisablePreSigned), + gs.WithDisablePreSignedUI(params.DisablePreSignedUI), + ) + log.With("type", "gs").Info("initialized blockstore adapter") + return adapter, nil +} diff --git a/block/gs/adapter.go b/block/gs/adapter.go new file mode 100644 index 00000000..d19ebecc --- /dev/null +++ b/block/gs/adapter.go @@ -0,0 +1,584 @@ +package gs + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strings" + "time" + + "cloud.google.com/go/storage" + logging "github.com/ipfs/go-log/v2" + "github.com/jiaozifs/jiaozifs/block" + "google.golang.org/api/iterator" +) + +var log = logging.Logger("gs") + +const ( + MaxMultipartObjects = 10000 + + delimiter = "/" + partSuffix = ".part_" + markerSuffix = ".multipart" +) + +var ( + ErrMismatchPartETag = errors.New("mismatch part ETag") + ErrMismatchPartName = errors.New("mismatch part name") + ErrMaxMultipartObjects = errors.New("maximum multipart object reached") + ErrPartListMismatch = errors.New("multipart part list mismatch") + ErrMissingTargetAttrs = errors.New("missing target attributes") +) + +type Adapter struct { + client *storage.Client + preSignedExpiry time.Duration + disablePreSigned bool + disablePreSignedUI bool +} + +func WithPreSignedExpiry(v time.Duration) func(a *Adapter) { + return func(a *Adapter) { + if v == 0 { + a.preSignedExpiry = block.DefaultPreSignExpiryDuration + } else { + a.preSignedExpiry = v + } + } +} + +func WithDisablePreSigned(b bool) func(a *Adapter) { + return func(a *Adapter) { + if b { + a.disablePreSigned = true + } + } +} + +func WithDisablePreSignedUI(b bool) func(a *Adapter) { + return func(a *Adapter) { + if b { + a.disablePreSignedUI = true + } + } +} + +func NewAdapter(client *storage.Client, opts ...func(adapter *Adapter)) *Adapter { + a := &Adapter{ + client: client, + preSignedExpiry: block.DefaultPreSignExpiryDuration, + } + for _, opt := range opts { + opt(a) + } + return a +} + +func (a *Adapter) newPreSignedTime() time.Time { + return time.Now().UTC().Add(a.preSignedExpiry) +} + +func (a *Adapter) Put(ctx context.Context, obj block.ObjectPointer, sizeBytes int64, reader io.Reader, _ block.PutOpts) error { + var err error + defer reportMetrics("Put", time.Now(), &sizeBytes, &err) + bucket, key, err := a.extractParamsFromObj(obj) + if err != nil { + return err + } + w := a.client.Bucket(bucket).Object(key).NewWriter(ctx) + _, err = io.Copy(w, reader) + if err != nil { + return fmt.Errorf("io.Copy: %w", err) + } + err = 
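+	// With the GCS client a write is not durable until Close returns:
+	// storage.Writer streams the data, but most failures (permissions,
+	// quota, preconditions) only surface from Close, so its error must be
+	// checked and returned rather than discarded.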
w.Close() + if err != nil { + return fmt.Errorf("writer.Close: %w", err) + } + return nil +} + +func (a *Adapter) Get(ctx context.Context, obj block.ObjectPointer, _ int64) (io.ReadCloser, error) { + var err error + defer reportMetrics("Get", time.Now(), nil, &err) + bucket, key, err := a.extractParamsFromObj(obj) + if err != nil { + return nil, err + } + r, err := a.client.Bucket(bucket).Object(key).NewReader(ctx) + if isErrNotFound(err) { + return nil, block.ErrDataNotFound + } + if err != nil { + log.Errorf("failed to get object bucket %s key %s %v", bucket, key, err) + return nil, err + } + return r, nil +} + +func (a *Adapter) GetWalker(uri *url.URL) (block.Walker, error) { + if err := block.ValidateStorageType(uri, block.StorageTypeGS); err != nil { + return nil, err + } + return NewGCSWalker(a.client), nil +} + +func (a *Adapter) GetPreSignedURL(_ context.Context, obj block.ObjectPointer, mode block.PreSignMode) (string, time.Time, error) { + if a.disablePreSigned { + return "", time.Time{}, block.ErrOperationNotSupported + } + + var err error + defer reportMetrics("GetPreSignedURL", time.Now(), nil, &err) + + bucket, key, err := a.extractParamsFromObj(obj) + if err != nil { + return "", time.Time{}, err + } + method := http.MethodGet + if mode == block.PreSignModeWrite { + method = http.MethodPut + } + opts := &storage.SignedURLOptions{ + Scheme: storage.SigningSchemeV4, + Method: method, + Expires: a.newPreSignedTime(), + } + k, err := a.client.Bucket(bucket).SignedURL(key, opts) + if err != nil { + log.Errorf("error generating pre-signed URL %v", err) + return "", time.Time{}, err + } + // TODO(#6347): Report expiry. + return k, time.Time{}, nil +} + +func isErrNotFound(err error) bool { + return errors.Is(err, storage.ErrObjectNotExist) +} + +func (a *Adapter) Exists(ctx context.Context, obj block.ObjectPointer) (bool, error) { + var err error + defer reportMetrics("Exists", time.Now(), nil, &err) + bucket, key, err := a.extractParamsFromObj(obj) + if err != nil { + return false, err + } + _, err = a.client.Bucket(bucket).Object(key).Attrs(ctx) + if isErrNotFound(err) { + return false, nil + } + if err != nil { + return false, err + } + return true, nil +} + +func (a *Adapter) GetRange(ctx context.Context, obj block.ObjectPointer, startPosition int64, endPosition int64) (io.ReadCloser, error) { + var err error + defer reportMetrics("GetRange", time.Now(), nil, &err) + bucket, key, err := a.extractParamsFromObj(obj) + if err != nil { + return nil, err + } + r, err := a.client.Bucket(bucket).Object(key).NewRangeReader(ctx, startPosition, endPosition-startPosition+1) + if isErrNotFound(err) { + return nil, block.ErrDataNotFound + } + if err != nil { + log.Errorf("failed to get object bucket %s key %s %v", bucket, key, err) + return nil, err + } + return r, nil +} + +func (a *Adapter) GetProperties(ctx context.Context, obj block.ObjectPointer) (block.Properties, error) { + var err error + defer reportMetrics("GetProperties", time.Now(), nil, &err) + var props block.Properties + bucket, key, err := a.extractParamsFromObj(obj) + if err != nil { + return props, err + } + _, err = a.client.Bucket(bucket).Object(key).Attrs(ctx) + if err != nil { + return props, err + } + return props, nil +} + +func (a *Adapter) Remove(ctx context.Context, obj block.ObjectPointer) error { + var err error + defer reportMetrics("Remove", time.Now(), nil, &err) + bucket, key, err := a.extractParamsFromObj(obj) + if err != nil { + return err + } + err = a.client.Bucket(bucket).Object(key).Delete(ctx) + if 
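+	// Every exported method here follows the same telemetry idiom: declare
+	// `var err error` up front and defer reportMetrics with &err, so the
+	// deferred call observes the final error value and the "error" label
+	// reflects the real outcome:
+	//
+	//	var err error
+	//	defer reportMetrics("Remove", time.Now(), nil, &err)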
err != nil { + return fmt.Errorf("Object(%q).Delete: %w", key, err) + } + return nil +} + +func (a *Adapter) Copy(ctx context.Context, sourceObj, destinationObj block.ObjectPointer) error { + var err error + defer reportMetrics("Copy", time.Now(), nil, &err) + dstBucket, dstKey, err := a.extractParamsFromObj(destinationObj) + if err != nil { + return fmt.Errorf("resolve destination: %w", err) + } + srcBucket, srcKey, err := a.extractParamsFromObj(sourceObj) + if err != nil { + return fmt.Errorf("resolve source: %w", err) + } + destinationObjectHandle := a.client.Bucket(dstBucket).Object(dstKey) + sourceObjectHandle := a.client.Bucket(srcBucket).Object(srcKey) + _, err = destinationObjectHandle.CopierFrom(sourceObjectHandle).Run(ctx) + if err != nil { + return fmt.Errorf("copy: %w", err) + } + return nil +} + +func (a *Adapter) CreateMultiPartUpload(ctx context.Context, obj block.ObjectPointer, _ *http.Request, _ block.CreateMultiPartUploadOpts) (*block.CreateMultiPartUploadResponse, error) { + var err error + defer reportMetrics("CreateMultiPartUpload", time.Now(), nil, &err) + bucket, uploadID, err := a.extractParamsFromObj(obj) + if err != nil { + return nil, err + } + // we keep a marker file to identify multipart in progress + objName := formatMultipartMarkerFilename(uploadID) + o := a.client.Bucket(bucket).Object(objName) + w := o.NewWriter(ctx) + _, err = io.WriteString(w, uploadID) + if err != nil { + return nil, fmt.Errorf("io.WriteString: %w", err) + } + err = w.Close() + if err != nil { + return nil, fmt.Errorf("writer.Close: %w", err) + } + // log information + log.With( + "upload_id", uploadID, + "qualified_ns", bucket, + "qualified_key", uploadID, + "key", obj.Identifier, + ).Debug("created multipart upload") + return &block.CreateMultiPartUploadResponse{ + UploadID: uploadID, + }, nil +} + +func (a *Adapter) UploadPart(ctx context.Context, obj block.ObjectPointer, sizeBytes int64, reader io.Reader, uploadID string, partNumber int) (*block.UploadPartResponse, error) { + var err error + defer reportMetrics("UploadPart", time.Now(), &sizeBytes, &err) + bucket, _, err := a.extractParamsFromObj(obj) + if err != nil { + return nil, err + } + objName := formatMultipartFilename(uploadID, partNumber) + o := a.client.Bucket(bucket).Object(objName) + w := o.NewWriter(ctx) + _, err = io.Copy(w, reader) + if err != nil { + return nil, fmt.Errorf("io.Copy: %w", err) + } + err = w.Close() + if err != nil { + return nil, fmt.Errorf("writer.Close: %w", err) + } + attrs, err := o.Attrs(ctx) + if err != nil { + return nil, fmt.Errorf("object.Attrs: %w", err) + } + return &block.UploadPartResponse{ + ETag: attrs.Etag, + }, nil +} + +func (a *Adapter) UploadCopyPart(ctx context.Context, sourceObj, destinationObj block.ObjectPointer, uploadID string, partNumber int) (*block.UploadPartResponse, error) { + var err error + defer reportMetrics("UploadCopyPart", time.Now(), nil, &err) + bucket, _, err := a.extractParamsFromObj(destinationObj) + if err != nil { + return nil, err + } + objName := formatMultipartFilename(uploadID, partNumber) + o := a.client.Bucket(bucket).Object(objName) + + srcBucket, srcKey, err := a.extractParamsFromObj(sourceObj) + if err != nil { + return nil, fmt.Errorf("resolve source: %w", err) + } + sourceObjectHandle := a.client.Bucket(srcBucket).Object(srcKey) + + attrs, err := o.CopierFrom(sourceObjectHandle).Run(ctx) + if err != nil { + return nil, fmt.Errorf("CopierFrom: %w", err) + } + return &block.UploadPartResponse{ + ETag: attrs.Etag, + }, nil +} + +func (a *Adapter) 
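+// GCS has no native multipart-upload API, so this adapter stores every part
+// as its own object and stitches them together with object compose at
+// completion. The naming scheme (see formatMultipartFilename below) keeps
+// parts in natural sort order; e.g. for a hypothetical uploadID "u1":
+//
+//	u1.part_00001, u1.part_00002, ... // one object per part
+//	u1.multipart                      // in-progress marker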
UploadCopyPartRange(ctx context.Context, sourceObj, destinationObj block.ObjectPointer, uploadID string, partNumber int, startPosition, endPosition int64) (*block.UploadPartResponse, error) { + var err error + defer reportMetrics("UploadCopyPartRange", time.Now(), nil, &err) + bucket, _, err := a.extractParamsFromObj(destinationObj) + if err != nil { + return nil, err + } + objName := formatMultipartFilename(uploadID, partNumber) + o := a.client.Bucket(bucket).Object(objName) + + reader, err := a.GetRange(ctx, sourceObj, startPosition, endPosition) + if err != nil { + return nil, fmt.Errorf("GetRange: %w", err) + } + w := o.NewWriter(ctx) + _, err = io.Copy(w, reader) + if err != nil { + return nil, fmt.Errorf("copy: %w", err) + } + err = w.Close() + if err != nil { + _ = reader.Close() + return nil, fmt.Errorf("WriterClose: %w", err) + } + err = reader.Close() + if err != nil { + return nil, fmt.Errorf("ReaderClose: %w", err) + } + + attrs, err := o.Attrs(ctx) + if err != nil { + return nil, fmt.Errorf("object.Attrs: %w", err) + } + return &block.UploadPartResponse{ + ETag: attrs.Etag, + }, nil +} + +func (a *Adapter) AbortMultiPartUpload(ctx context.Context, obj block.ObjectPointer, uploadID string) error { + var err error + defer reportMetrics("AbortMultiPartUpload", time.Now(), nil, &err) + bucketName, _, err := a.extractParamsFromObj(obj) + if err != nil { + return err + } + bucket := a.client.Bucket(bucketName) + + // delete all related files by listing the prefix + it := bucket.Objects(ctx, &storage.Query{ + Prefix: uploadID, + Delimiter: delimiter, + }) + for { + attrs, err := it.Next() + if errors.Is(err, iterator.Done) { + break + } + if err != nil { + return fmt.Errorf("bucket(%s).Objects(): %w", bucketName, err) + } + if err := bucket.Object(attrs.Name).Delete(ctx); err != nil { + return fmt.Errorf("bucket(%s).object(%s).Delete(): %w", bucketName, attrs.Name, err) + } + } + return nil +} + +func (a *Adapter) CompleteMultiPartUpload(ctx context.Context, obj block.ObjectPointer, uploadID string, multipartList *block.MultipartUploadCompletion) (*block.CompleteMultiPartUploadResponse, error) { + var err error + defer reportMetrics("CompleteMultiPartUpload", time.Now(), nil, &err) + bucketName, key, err := a.extractParamsFromObj(obj) + if err != nil { + return nil, err + } + + lg := log.With( + "upload_id", uploadID, + "qualified_ns", bucketName, + "qualified_key", key, + "key", obj.Identifier, + ) + + // list bucket parts and validate request match + bucketParts, err := a.listMultipartUploadParts(ctx, bucketName, uploadID) + if err != nil { + return nil, err + } + // validate bucketParts match the request multipartList + err = a.validateMultipartUploadParts(uploadID, multipartList, bucketParts) + if err != nil { + return nil, err + } + + // prepare names + parts := make([]string, len(bucketParts)) + for i, part := range bucketParts { + parts[i] = part.Name + } + + // compose target object + targetAttrs, err := a.composeMultipartUploadParts(ctx, bucketName, uploadID, parts) + if err != nil { + lg.Errorf("CompleteMultipartUpload failed %v", err) + return nil, err + } + + // delete marker + bucket := a.client.Bucket(bucketName) + objMarker := bucket.Object(formatMultipartMarkerFilename(uploadID)) + if err := objMarker.Delete(ctx); err != nil { + lg.Warnf("Failed to delete multipart upload marker %v", err) + } + lg.Debug("completed multipart upload") + return &block.CompleteMultiPartUploadResponse{ + ETag: targetAttrs.Etag, + ContentLength: targetAttrs.Size, + }, nil +} + +func (a 
*Adapter) validateMultipartUploadParts(uploadID string, multipartList *block.MultipartUploadCompletion, bucketParts []*storage.ObjectAttrs) error { + if len(multipartList.Part) != len(bucketParts) { + return ErrPartListMismatch + } + for i, p := range multipartList.Part { + objName := formatMultipartFilename(uploadID, p.PartNumber) + if objName != bucketParts[i].Name { + return fmt.Errorf("invalid part at position %d: %w", i, ErrMismatchPartName) + } + if p.ETag != bucketParts[i].Etag { + return fmt.Errorf("invalid part at position %d: %w", i, ErrMismatchPartETag) + } + } + return nil +} + +func (a *Adapter) listMultipartUploadParts(ctx context.Context, bucketName string, uploadID string) ([]*storage.ObjectAttrs, error) { + bucket := a.client.Bucket(bucketName) + var bucketParts []*storage.ObjectAttrs + it := bucket.Objects(ctx, &storage.Query{ + Delimiter: delimiter, + Prefix: uploadID + partSuffix, + }) + for { + attrs, err := it.Next() + if errors.Is(err, iterator.Done) { + break + } + if err != nil { + return nil, fmt.Errorf("listing bucket '%s' upload '%s': %w", bucketName, uploadID, err) + } + bucketParts = append(bucketParts, attrs) + if len(bucketParts) > MaxMultipartObjects { + return nil, fmt.Errorf("listing bucket '%s' upload '%s': %w", bucketName, uploadID, ErrMaxMultipartObjects) + } + } + // sort by name - assume natual sort order + sort.Slice(bucketParts, func(i, j int) bool { + return bucketParts[i].Name < bucketParts[j].Name + }) + return bucketParts, nil +} + +func (a *Adapter) composeMultipartUploadParts(ctx context.Context, bucketName string, uploadID string, parts []string) (*storage.ObjectAttrs, error) { + // compose target from all parts + bucket := a.client.Bucket(bucketName) + var targetAttrs *storage.ObjectAttrs + err := ComposeAll(uploadID, parts, func(target string, parts []string) error { + objs := make([]*storage.ObjectHandle, len(parts)) + for i := range parts { + objs[i] = bucket.Object(parts[i]) + } + // compose target from parts + attrs, err := bucket.Object(target).ComposerFrom(objs...).Run(ctx) + if err != nil { + return err + } + if target == uploadID { + targetAttrs = attrs + } + // delete parts + for _, o := range objs { + if err := o.Delete(ctx); err != nil { + log.With( + "bucket", bucketName, + "parts", parts, + ).Warnf("Failed to delete multipart upload part while compose %v", err) + } + } + return nil + }) + if err == nil && targetAttrs == nil { + return nil, ErrMissingTargetAttrs + } + if err != nil { + return nil, err + } + return targetAttrs, nil +} + +func (a *Adapter) Close() error { + return a.client.Close() +} + +func (a *Adapter) BlockstoreType() string { + return block.BlockstoreTypeGS +} + +func (a *Adapter) GetStorageNamespaceInfo() block.StorageNamespaceInfo { + info := block.DefaultStorageNamespaceInfo(block.BlockstoreTypeGS) + if a.disablePreSigned { + info.PreSignSupport = false + } + if !(a.disablePreSignedUI || a.disablePreSigned) { + info.PreSignSupportUI = true + } + return info +} + +func (a *Adapter) extractParamsFromObj(obj block.ObjectPointer) (string, string, error) { + qk, err := a.ResolveNamespace(obj.StorageNamespace, obj.Identifier, obj.IdentifierType) + if err != nil { + return "", "", err + } + bucket, prefix, _ := strings.Cut(qk.GetStorageNamespace(), "/") + key := qk.GetKey() + if len(prefix) > 0 { // Avoid situations where prefix is empty or "/" + key = prefix + "/" + key + } + return bucket, key, nil +} + +func (a *Adapter) ResolveNamespace(storageNamespace, key string, identifierType block.IdentifierType) 
(block.QualifiedKey, error) { + qualifiedKey, err := block.DefaultResolveNamespace(storageNamespace, key, identifierType) + if err != nil { + return qualifiedKey, err + } + if qualifiedKey.GetStorageType() != block.StorageTypeGS { + return qualifiedKey, fmt.Errorf("expected storage type gs: %w", block.ErrInvalidAddress) + } + return qualifiedKey, nil +} + +func (a *Adapter) RuntimeStats() map[string]string { + return nil +} + +func formatMultipartFilename(uploadID string, partNumber int) string { + // keep natural sort order with zero padding + return fmt.Sprintf("%s"+partSuffix+"%05d", uploadID, partNumber) +} + +func formatMultipartMarkerFilename(uploadID string) string { + return uploadID + markerSuffix +} diff --git a/block/gs/adapter_test.go b/block/gs/adapter_test.go new file mode 100644 index 00000000..a73ac35c --- /dev/null +++ b/block/gs/adapter_test.go @@ -0,0 +1,78 @@ +package gs_test + +import ( + "net/url" + "regexp" + "testing" + + "github.com/jiaozifs/jiaozifs/block/blocktest" + "github.com/jiaozifs/jiaozifs/block/gs" + "github.com/stretchr/testify/require" +) + +func newAdapter() *gs.Adapter { + return gs.NewAdapter(client) +} + +func TestAdapter(t *testing.T) { + basePath, err := url.JoinPath("gs://", bucketName) + require.NoError(t, err) + localPath, err := url.JoinPath(basePath, "lakefs") + require.NoError(t, err) + externalPath, err := url.JoinPath(basePath, "external") + require.NoError(t, err) + + adapter := newAdapter() + defer func() { + require.NoError(t, adapter.Close()) + }() + + blocktest.AdapterTest(t, adapter, localPath, externalPath) +} + +func TestAdapterNamespace(t *testing.T) { + adapter := newAdapter() + defer func() { + require.NoError(t, adapter.Close()) + }() + + expr, err := regexp.Compile(adapter.GetStorageNamespaceInfo().ValidityRegex) + require.NoError(t, err) + + tests := []struct { + Name string + Namespace string + Success bool + }{ + { + Name: "valid_path", + Namespace: "gs://bucket/path/to/repo1", + Success: true, + }, + { + Name: "double_slash", + Namespace: "gs://bucket/path//to/repo1", + Success: true, + }, + { + Name: "invalid_schema", + Namespace: "s3:/test/adls/core/windows/net", + Success: false, + }, + { + Name: "invalid_path", + Namespace: "https://test/adls/core/windows/net", + Success: false, + }, + { + Name: "invalid_string", + Namespace: "this is a bad string", + Success: false, + }, + } + for _, tt := range tests { + t.Run(tt.Name, func(t *testing.T) { + require.Equal(t, tt.Success, expr.MatchString(tt.Namespace)) + }) + } +} diff --git a/block/gs/compose.go b/block/gs/compose.go new file mode 100644 index 00000000..33d6d744 --- /dev/null +++ b/block/gs/compose.go @@ -0,0 +1,33 @@ +package gs + +import ( + "fmt" +) + +const MaxPartsInCompose = 32 + +type ComposeFunc func(target string, parts []string) error + +func ComposeAll(target string, parts []string, composeFunc ComposeFunc) error { + for layer := 1; len(parts) > MaxPartsInCompose; layer++ { + var nextParts []string + for i := 0; i < len(parts); i += MaxPartsInCompose { + chunkSize := len(parts) - i + if chunkSize > MaxPartsInCompose { + chunkSize = MaxPartsInCompose + } + chunk := parts[i : i+chunkSize] + if chunkSize == 1 || (chunkSize < MaxPartsInCompose && len(nextParts)+chunkSize <= MaxPartsInCompose) { + nextParts = append(nextParts, chunk...) 
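+			// GCS compose accepts at most 32 source objects per request, so
+			// ComposeAll works in layers: chunks of up to 32 parts become
+			// intermediates named "<first-part>_<layer>" until at most 32
+			// objects remain, which are then composed into the target.
+			// Roughly: 10,000 parts -> ~313 intermediates -> ~10 -> target.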
+ } else { + targetName := fmt.Sprintf("%s_%d", chunk[0], layer) + if err := composeFunc(targetName, chunk); err != nil { + return err + } + nextParts = append(nextParts, targetName) + } + } + parts = nextParts + } + return composeFunc(target, parts) +} diff --git a/block/gs/compose_test.go b/block/gs/compose_test.go new file mode 100644 index 00000000..ebb816f7 --- /dev/null +++ b/block/gs/compose_test.go @@ -0,0 +1,51 @@ +package gs + +import ( + "fmt" + "strconv" + "testing" +) + +func TestComposeAll(t *testing.T) { + const targetFile = "data.file" + numberOfPartsTests := []int{1, 10, 10000} + for _, numberOfParts := range numberOfPartsTests { + t.Run("compose_"+strconv.Itoa(numberOfParts), func(t *testing.T) { + // prepare data + parts := make([]string, numberOfParts) + for i := 0; i < numberOfParts; i++ { + parts[i] = fmt.Sprintf("part%d", i) + } + + // map to track + usedParts := make(map[string]struct{}) + usedTargets := make(map[string]struct{}) + + // compose + err := ComposeAll(targetFile, parts, func(target string, parts []string) error { + for _, part := range parts { + if _, found := usedParts[part]; found { + t.Errorf("Part '%s' already composed", part) + } + usedParts[part] = struct{}{} + } + if _, found := usedTargets[target]; found { + t.Errorf("Target '%s' already composed with %s", target, parts) + } + usedTargets[target] = struct{}{} + return nil + }) + if err != nil { + t.Fatal(err) + } + for _, part := range parts { + if _, ok := usedParts[part]; !ok { + t.Error("Missing part:", part) + } + } + if _, ok := usedTargets[targetFile]; !ok { + t.Error("Missing target:", targetFile) + } + }) + } +} diff --git a/block/gs/main_test.go b/block/gs/main_test.go new file mode 100644 index 00000000..7242b1d3 --- /dev/null +++ b/block/gs/main_test.go @@ -0,0 +1,85 @@ +package gs_test + +import ( + "context" + "fmt" + "log" + "os" + "testing" + + "cloud.google.com/go/storage" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" + "google.golang.org/api/option" +) + +const bucketName = "bucket1" + +var client *storage.Client + +func TestMain(m *testing.M) { + const ( + emulatorContainerTimeoutSeconds = 10 * 60 // 10 min + emulatorTestEndpoint = "127.0.0.1" + emulatorTestPort = "4443" + gcsProjectID = "testProject" + ) + + ctx := context.Background() + // External port required for '-public-host' configuration in docker cmd + endpoint := fmt.Sprintf("%s:%s", emulatorTestEndpoint, emulatorTestPort) + pool, err := dockertest.NewPool("") + if err != nil { + log.Fatalf("Could not connect to Docker: %s", err) + } + resource, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "fsouza/fake-gcs-server", + Tag: "1.47.4", + Cmd: []string{ + "-scheme", + "http", + "-backend", + "memory", + "-public-host", + endpoint, + }, + ExposedPorts: []string{emulatorTestPort}, + PortBindings: map[docker.Port][]docker.PortBinding{ + docker.Port(fmt.Sprintf("%s/tcp", emulatorTestPort)): { + {HostIP: emulatorTestPort, HostPort: fmt.Sprintf("%s/tcp", emulatorTestPort)}, + }, + }, + }) + if err != nil { + log.Fatalf("Could not start fake-gcs-server: %s", err) + } + + // set cleanup + closer := func() { + err = pool.Purge(resource) + if err != nil { + log.Fatalf("Could not purge fake-gcs-server: %s", err) + } + } + + // expire, just to make sure + err = resource.Expire(emulatorContainerTimeoutSeconds) + if err != nil { + log.Fatalf("Could not expire fake-gcs-server: %s", err) + } + + // Create the test client and bucket + blockURL := fmt.Sprintf("http://%s/storage/v1/", endpoint) 
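+	// "-public-host" must match the host:port the client dials, otherwise
+	// fake-gcs-server rewrites object URLs to an unreachable host. As an
+	// alternative to option.WithEndpoint, the real client also honors the
+	// STORAGE_EMULATOR_HOST environment variable.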
+ client, err = storage.NewClient(ctx, option.WithEndpoint(blockURL), option.WithoutAuthentication()) + if err != nil { + log.Fatalf("Could not create gs client: %s", err) + } + + if err := client.Bucket(bucketName).Create(ctx, gcsProjectID, nil); err != nil { + log.Fatalf("Could not create bucket '%s': %s", bucketName, err) + } + + code := m.Run() + closer() + os.Exit(code) +} diff --git a/block/gs/stats.go b/block/gs/stats.go new file mode 100644 index 00000000..e6bb4080 --- /dev/null +++ b/block/gs/stats.go @@ -0,0 +1,31 @@ +package gs + +import ( + "strconv" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var durationHistograms = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "gs_operation_duration_seconds", + Help: "durations of outgoing gs operations", + }, + []string{"operation", "error"}) + +var requestSizeHistograms = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "gs_operation_size_bytes", + Help: "handled sizes of outgoing gs operations", + Buckets: prometheus.ExponentialBuckets(1, 10, 10), //nolint: gomnd + }, []string{"operation", "error"}) + +func reportMetrics(operation string, start time.Time, sizeBytes *int64, err *error) { + isErrStr := strconv.FormatBool(*err != nil) + durationHistograms.WithLabelValues(operation, isErrStr).Observe(time.Since(start).Seconds()) + if sizeBytes != nil { + requestSizeHistograms.WithLabelValues(operation, isErrStr).Observe(float64(*sizeBytes)) + } +} diff --git a/block/gs/walker.go b/block/gs/walker.go new file mode 100644 index 00000000..9be907e4 --- /dev/null +++ b/block/gs/walker.go @@ -0,0 +1,81 @@ +package gs + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "net/url" + "strings" + + "cloud.google.com/go/storage" + "github.com/jiaozifs/jiaozifs/block" + "google.golang.org/api/iterator" +) + +type GCSWalker struct { + client *storage.Client + mark block.Mark +} + +func NewGCSWalker(client *storage.Client) *GCSWalker { + return &GCSWalker{client: client} +} + +func (w *GCSWalker) Walk(ctx context.Context, storageURI *url.URL, op block.WalkOptions, walkFn func(e block.ObjectStoreEntry) error) error { + prefix := strings.TrimLeft(storageURI.Path, "/") + var basePath string + if idx := strings.LastIndex(prefix, "/"); idx != -1 { + basePath = prefix[:idx+1] + } + iter := w.client. + Bucket(storageURI.Host). + Objects(ctx, &storage.Query{ + Prefix: prefix, + StartOffset: op.After, + }) + + for { + attrs, err := iter.Next() + if errors.Is(err, iterator.Done) { + break + } + if err != nil { + return fmt.Errorf("error listing objects at storage uri %s: %w", storageURI, err) + } + + // skipping first key (without forgetting the possible empty string key!) 
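+		// storage.Query.StartOffset is inclusive, so resuming a walk from a
+		// known key returns that key again; the guard below makes resumption
+		// effectively exclusive by dropping every key <= op.After.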
+		if op.After != "" && attrs.Name <= op.After {
+			continue
+		}
+
+		w.mark = block.Mark{
+			LastKey: attrs.Name,
+			HasMore: true,
+		}
+		if err := walkFn(block.ObjectStoreEntry{
+			FullKey:     attrs.Name,
+			RelativeKey: strings.TrimPrefix(attrs.Name, basePath),
+			Address:     fmt.Sprintf("gs://%s/%s", attrs.Bucket, attrs.Name),
+			ETag:        hex.EncodeToString(attrs.MD5),
+			Mtime:       attrs.Updated,
+			Size:        attrs.Size,
+		}); err != nil {
+			return err
+		}
+	}
+	w.mark = block.Mark{
+		LastKey: "",
+		HasMore: false,
+	}
+
+	return nil
+}
+
+func (w *GCSWalker) Marker() block.Mark {
+	return w.mark
+}
+
+func (w *GCSWalker) GetSkippedEntries() []block.ObjectStoreEntry {
+	return nil
+} diff --git a/block/hashing_reader.go b/block/hashing_reader.go new file mode 100644 index 00000000..8907d8d2 --- /dev/null +++ b/block/hashing_reader.go @@ -0,0 +1,57 @@ +package block
+
+import (
+	"crypto/md5" //nolint:gosec
+	"crypto/sha256"
+	"hash"
+	"io"
+	"strconv"
+)
+
+const (
+	HashFunctionMD5 = iota
+	HashFunctionSHA256
+)
+
+type HashingReader struct {
+	Md5            hash.Hash
+	Sha256         hash.Hash
+	originalReader io.Reader
+	CopiedSize     int64
+}
+
+func (s *HashingReader) Read(p []byte) (int, error) {
+	nb, err := s.originalReader.Read(p)
+	s.CopiedSize += int64(nb)
+	if s.Md5 != nil {
+		if _, err2 := s.Md5.Write(p[0:nb]); err2 != nil {
+			return nb, err2
+		}
+	}
+	if s.Sha256 != nil {
+		if _, err2 := s.Sha256.Write(p[0:nb]); err2 != nil {
+			return nb, err2
+		}
+	}
+	return nb, err
+}
+
+func NewHashingReader(body io.Reader, hashTypes ...int) *HashingReader {
+	s := new(HashingReader)
+	s.originalReader = body
+	// instantiate one hasher per requested hash type value
+	for _, hashType := range hashTypes {
+		switch hashType {
+		case HashFunctionMD5:
+			if s.Md5 == nil {
+				s.Md5 = md5.New() //nolint:gosec
+			}
+		case HashFunctionSHA256:
+			if s.Sha256 == nil {
+				s.Sha256 = sha256.New()
+			}
+		default:
+			panic("wrong hash type number " + strconv.Itoa(hashType))
+		}
+	}
+	return s
+} diff --git a/block/local/adapter.go b/block/local/adapter.go new file mode 100644 index 00000000..9d02c90c --- /dev/null +++ b/block/local/adapter.go @@ -0,0 +1,583 @@ +package local
+
+import (
+	"context"
+	"crypto/md5" //nolint:gosec
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/google/uuid"
+	"github.com/jiaozifs/jiaozifs/block"
+	"github.com/jiaozifs/jiaozifs/block/params"
+	"golang.org/x/exp/slices"
+)
+
+const DefaultNamespacePrefix = block.BlockstoreTypeLocal + "://"
+
+type Adapter struct {
+	path                    string
+	removeEmptyDir          bool
+	allowedExternalPrefixes []string
+	importEnabled           bool
+}
+
+var (
+	ErrPathNotWritable       = errors.New("path provided is not writable")
+	ErrInvalidUploadIDFormat = errors.New("invalid upload id format")
+	ErrBadPath               = errors.New("bad path traversal blocked")
+)
+
+type QualifiedKey struct {
+	block.CommonQualifiedKey
+	path string
+}
+
+func (qk QualifiedKey) Format() string {
+	p := path.Join(qk.path, qk.GetStorageNamespace(), qk.GetKey())
+	return qk.GetStorageType().Scheme() + "://" + p
+}
+
+func (qk QualifiedKey) GetStorageType() block.StorageType {
+	return qk.CommonQualifiedKey.GetStorageType()
+}
+
+func (qk QualifiedKey) GetStorageNamespace() string {
+	return qk.CommonQualifiedKey.GetStorageNamespace()
+}
+
+func (qk QualifiedKey) GetKey() string {
+	return qk.CommonQualifiedKey.GetKey()
+}
+
+func WithAllowedExternalPrefixes(prefixes []string) func(a *Adapter) {
+	return func(a *Adapter) {
+		a.allowedExternalPrefixes = prefixes
+	}
+}
+
+func WithImportEnabled(b bool) 
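+// Usage sketch for block.NewHashingReader (hashing_reader.go above),
+// assuming `f` is any io.Reader; wrap, drain, then read the digests:
+//
+//	hr := block.NewHashingReader(f, block.HashFunctionMD5, block.HashFunctionSHA256)
+//	if _, err := io.Copy(io.Discard, hr); err != nil {
+//		return err
+//	}
+//	md5Hex := hex.EncodeToString(hr.Md5.Sum(nil)) // hr.CopiedSize holds the byte count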
func(a *Adapter) { + return func(a *Adapter) { + a.importEnabled = b + } +} + +func WithRemoveEmptyDir(b bool) func(a *Adapter) { + return func(a *Adapter) { + a.removeEmptyDir = b + } +} + +func NewAdapter(path string, opts ...func(a *Adapter)) (*Adapter, error) { + // Clean() the path so that misconfiguration does not allow path traversal. + path = filepath.Clean(path) + err := os.MkdirAll(path, 0o700) //nolint: gomnd + if err != nil { + return nil, err + } + if !isDirectoryWritable(path) { + return nil, ErrPathNotWritable + } + localAdapter := &Adapter{ + path: path, + removeEmptyDir: true, + } + for _, opt := range opts { + opt(localAdapter) + } + return localAdapter, nil +} + +func (l *Adapter) GetPreSignedURL(_ context.Context, _ block.ObjectPointer, _ block.PreSignMode) (string, time.Time, error) { + return "", time.Time{}, fmt.Errorf("local adapter presigned URL: %w", block.ErrOperationNotSupported) +} + +// verifyRelPath ensures that p is under the directory controlled by this adapter. It does not +// examine the filesystem and can mistakenly error out when symbolic links are involved. +func (l *Adapter) verifyRelPath(p string) error { + if !strings.HasPrefix(filepath.Clean(p), l.path) { + return fmt.Errorf("%s: %w", p, ErrBadPath) + } + return nil +} + +func (l *Adapter) extractParamsFromObj(ptr block.ObjectPointer) (string, error) { + if strings.HasPrefix(ptr.Identifier, DefaultNamespacePrefix) { + // check abs path + p := ptr.Identifier[len(DefaultNamespacePrefix):] + if err := VerifyAbsPath(p, l.path, l.allowedExternalPrefixes); err != nil { + return "", err + } + return p, nil + } + // relative path + if !strings.HasPrefix(ptr.StorageNamespace, DefaultNamespacePrefix) { + return "", fmt.Errorf("%w: storage namespace", ErrBadPath) + } + p := path.Join(l.path, ptr.StorageNamespace[len(DefaultNamespacePrefix):], ptr.Identifier) + if err := l.verifyRelPath(p); err != nil { + return "", err + } + return p, nil +} + +// maybeMkdir verifies path is allowed and runs f(path), but if f fails due to file-not-found +// MkdirAll's its dir and then runs it again. 
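+// For example (hypothetical): if f is os.Create and directory "/data/ns" does
+// not exist yet, the first call fails with os.ErrNotExist, the directory is
+// created with mode 0o750, and f runs once more; a second failure is returned.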
+func (l *Adapter) maybeMkdir(path string, f func(p string) (*os.File, error)) (*os.File, error) { + if err := l.verifyRelPath(path); err != nil { + return nil, err + } + ret, err := f(path) + if !errors.Is(err, os.ErrNotExist) { + return ret, err + } + d := filepath.Dir(filepath.Clean(path)) + if err = os.MkdirAll(d, 0o750); err != nil { //nolint: gomnd + return nil, err + } + return f(path) +} + +func (l *Adapter) Path() string { + return l.path +} + +func (l *Adapter) Put(_ context.Context, obj block.ObjectPointer, _ int64, reader io.Reader, _ block.PutOpts) error { + p, err := l.extractParamsFromObj(obj) + if err != nil { + return err + } + p = filepath.Clean(p) + f, err := l.maybeMkdir(p, os.Create) + if err != nil { + return err + } + defer func() { + _ = f.Close() + }() + _, err = io.Copy(f, reader) + return err +} + +func (l *Adapter) Remove(_ context.Context, obj block.ObjectPointer) error { + p, err := l.extractParamsFromObj(obj) + if err != nil { + return err + } + p = filepath.Clean(p) + err = os.Remove(p) + if err != nil { + return err + } + if l.removeEmptyDir { + dir := filepath.Dir(p) + repoRoot := obj.StorageNamespace[len(DefaultNamespacePrefix):] + removeEmptyDirUntil(dir, path.Join(l.path, repoRoot)) + } + return nil +} + +func removeEmptyDirUntil(dir string, stopAt string) { + if stopAt == "" { + return + } + if !strings.HasSuffix(stopAt, "/") { + stopAt += "/" + } + for strings.HasPrefix(dir, stopAt) && dir != stopAt { + err := os.Remove(dir) + if err != nil { + break + } + dir = filepath.Dir(dir) + if dir == "/" { + break + } + } +} + +func (l *Adapter) Copy(_ context.Context, sourceObj, destinationObj block.ObjectPointer) error { + source, err := l.extractParamsFromObj(sourceObj) + if err != nil { + return err + } + sourceFile, err := os.Open(filepath.Clean(source)) + defer func() { + _ = sourceFile.Close() + }() + if err != nil { + return err + } + dest, err := l.extractParamsFromObj(destinationObj) + if err != nil { + return err + } + destinationFile, err := l.maybeMkdir(dest, os.Create) + if err != nil { + return err + } + defer func() { + _ = destinationFile.Close() + }() + _, err = io.Copy(destinationFile, sourceFile) + return err +} + +func (l *Adapter) UploadCopyPart(ctx context.Context, sourceObj, destinationObj block.ObjectPointer, uploadID string, partNumber int) (*block.UploadPartResponse, error) { + if err := isValidUploadID(uploadID); err != nil { + return nil, err + } + r, err := l.Get(ctx, sourceObj, 0) + if err != nil { + return nil, err + } + md5Read := block.NewHashingReader(r, block.HashFunctionMD5) + fName := uploadID + fmt.Sprintf("-%05d", partNumber) + err = l.Put(ctx, block.ObjectPointer{StorageNamespace: destinationObj.StorageNamespace, Identifier: fName}, -1, md5Read, block.PutOpts{}) + if err != nil { + return nil, err + } + etag := hex.EncodeToString(md5Read.Md5.Sum(nil)) + return &block.UploadPartResponse{ + ETag: etag, + }, nil +} + +func (l *Adapter) UploadCopyPartRange(ctx context.Context, sourceObj, destinationObj block.ObjectPointer, uploadID string, partNumber int, startPosition, endPosition int64) (*block.UploadPartResponse, error) { + if err := isValidUploadID(uploadID); err != nil { + return nil, err + } + r, err := l.GetRange(ctx, sourceObj, startPosition, endPosition) + if err != nil { + return nil, err + } + md5Read := block.NewHashingReader(r, block.HashFunctionMD5) + fName := uploadID + fmt.Sprintf("-%05d", partNumber) + err = l.Put(ctx, block.ObjectPointer{StorageNamespace: destinationObj.StorageNamespace, Identifier: 
+	if err != nil {
+		return nil, err
+	}
+	etag := hex.EncodeToString(md5Read.Md5.Sum(nil))
+	return &block.UploadPartResponse{
+		ETag: etag,
+	}, err
+}
+
+func (l *Adapter) Get(_ context.Context, obj block.ObjectPointer, _ int64) (reader io.ReadCloser, err error) {
+	p, err := l.extractParamsFromObj(obj)
+	if err != nil {
+		return nil, err
+	}
+	f, err := os.OpenFile(filepath.Clean(p), os.O_RDONLY, 0o600) //nolint: gomnd
+	if os.IsNotExist(err) {
+		return nil, block.ErrDataNotFound
+	}
+	if err != nil {
+		return nil, err
+	}
+	return f, nil
+}
+
+func (l *Adapter) GetWalker(uri *url.URL) (block.Walker, error) {
+	if err := block.ValidateStorageType(uri, block.StorageTypeLocal); err != nil {
+		return nil, err
+	}
+
+	err := VerifyAbsPath(uri.Path, l.path, l.allowedExternalPrefixes)
+	if err != nil {
+		return nil, err
+	}
+	return NewLocalWalker(params.Local{
+		Path:                    l.path,
+		ImportEnabled:           l.importEnabled,
+		AllowedExternalPrefixes: l.allowedExternalPrefixes,
+	}), nil
+}
+
+func (l *Adapter) Exists(_ context.Context, obj block.ObjectPointer) (bool, error) {
+	p, err := l.extractParamsFromObj(obj)
+	if err != nil {
+		return false, err
+	}
+	_, err = os.Stat(p)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return false, nil
+		}
+		return false, err
+	}
+	return true, nil
+}
+
+func (l *Adapter) GetRange(_ context.Context, obj block.ObjectPointer, start int64, end int64) (io.ReadCloser, error) {
+	if start < 0 || end < start {
+		return nil, block.ErrBadIndex
+	}
+	p, err := l.extractParamsFromObj(obj)
+	if err != nil {
+		return nil, err
+	}
+	f, err := os.Open(filepath.Clean(p))
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, block.ErrDataNotFound
+		}
+		return nil, err
+	}
+	return &struct {
+		io.Reader
+		io.Closer
+	}{
+		Reader: io.NewSectionReader(f, start, end-start+1),
+		Closer: f,
+	}, nil
+}
+
+func (l *Adapter) GetProperties(_ context.Context, obj block.ObjectPointer) (block.Properties, error) {
+	p, err := l.extractParamsFromObj(obj)
+	if err != nil {
+		return block.Properties{}, err
+	}
+	_, err = os.Stat(p)
+	if err != nil {
+		return block.Properties{}, err
+	}
+	// No properties, just return that it exists
+	return block.Properties{}, nil
+}
+
+// isDirectoryWritable tests that pth, which must not be controllable by user input, is a
+// writable directory. As there is no simple way to test this on Windows, we use the "brute
+// force" method of creating a dummy file, which works on any OS. Speed is not an issue, as
+// this runs only a few times during startup.
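+//
+// Illustrative behaviour (paths are hypothetical):
+//
+//	isDirectoryWritable(os.TempDir()) // true: a temp file can be created there
+//	isDirectoryWritable("/proc")      // false on Linux: os.CreateTemp fails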
+func isDirectoryWritable(pth string) bool { + f, err := os.CreateTemp(pth, "dummy") + if err != nil { + return false + } + _ = f.Close() + _ = os.Remove(f.Name()) + return true +} + +func (l *Adapter) CreateMultiPartUpload(_ context.Context, obj block.ObjectPointer, _ *http.Request, _ block.CreateMultiPartUploadOpts) (*block.CreateMultiPartUploadResponse, error) { + if strings.Contains(obj.Identifier, "/") { + fullPath, err := l.extractParamsFromObj(obj) + if err != nil { + return nil, err + } + fullDir := path.Dir(fullPath) + err = os.MkdirAll(fullDir, 0o750) //nolint: gomnd + if err != nil { + return nil, err + } + } + uidBytes := uuid.New() + uploadID := hex.EncodeToString(uidBytes[:]) + return &block.CreateMultiPartUploadResponse{ + UploadID: uploadID, + }, nil +} + +func (l *Adapter) UploadPart(ctx context.Context, obj block.ObjectPointer, _ int64, reader io.Reader, uploadID string, partNumber int) (*block.UploadPartResponse, error) { + if err := isValidUploadID(uploadID); err != nil { + return nil, err + } + md5Read := block.NewHashingReader(reader, block.HashFunctionMD5) + fName := uploadID + fmt.Sprintf("-%05d", partNumber) + err := l.Put(ctx, block.ObjectPointer{StorageNamespace: obj.StorageNamespace, Identifier: fName}, -1, md5Read, block.PutOpts{}) + etag := hex.EncodeToString(md5Read.Md5.Sum(nil)) + return &block.UploadPartResponse{ + ETag: etag, + }, err +} + +func (l *Adapter) AbortMultiPartUpload(_ context.Context, obj block.ObjectPointer, uploadID string) error { + if err := isValidUploadID(uploadID); err != nil { + return err + } + files, err := l.getPartFiles(uploadID, obj) + if err != nil { + return err + } + if err = l.removePartFiles(files); err != nil { + return err + } + return nil +} + +func (l *Adapter) CompleteMultiPartUpload(_ context.Context, obj block.ObjectPointer, uploadID string, multipartList *block.MultipartUploadCompletion) (*block.CompleteMultiPartUploadResponse, error) { + if err := isValidUploadID(uploadID); err != nil { + return nil, err + } + etag := computeETag(multipartList.Part) + "-" + strconv.Itoa(len(multipartList.Part)) + partFiles, err := l.getPartFiles(uploadID, obj) + if err != nil { + return nil, fmt.Errorf("part files not found for %s: %w", uploadID, err) + } + size, err := l.unitePartFiles(obj, partFiles) + if err != nil { + return nil, fmt.Errorf("multipart upload unite for %s: %w", uploadID, err) + } + if err = l.removePartFiles(partFiles); err != nil { + return nil, err + } + return &block.CompleteMultiPartUploadResponse{ + ETag: etag, + ContentLength: size, + }, nil +} + +func computeETag(parts []block.MultipartPart) string { + var etagHex []string + for _, p := range parts { + e := strings.Trim(p.ETag, `"`) + etagHex = append(etagHex, e) + } + s := strings.Join(etagHex, "") + b, _ := hex.DecodeString(s) + md5res := md5.Sum(b) //nolint:gosec + csm := hex.EncodeToString(md5res[:]) + return csm +} + +func (l *Adapter) unitePartFiles(identifier block.ObjectPointer, filenames []string) (int64, error) { + p, err := l.extractParamsFromObj(identifier) + if err != nil { + return 0, err + } + unitedFile, err := os.Create(p) + if err != nil { + return 0, fmt.Errorf("create path %s: %w", p, err) + } + files := make([]*os.File, 0, len(filenames)) + defer func() { + _ = unitedFile.Close() + for _, f := range files { + _ = f.Close() + } + }() + for _, name := range filenames { + if err := l.verifyRelPath(name); err != nil { + return 0, err + } + f, err := os.Open(filepath.Clean(name)) + if err != nil { + return 0, fmt.Errorf("open file %s: %w", 
name, err)
+		}
+		files = append(files, f)
+	}
+	// convert the slice of files into readers
+	readers := make([]io.Reader, len(files))
+	for i := range files {
+		readers[i] = files[i]
+	}
+	unitedReader := io.MultiReader(readers...)
+	return io.Copy(unitedFile, unitedReader)
+}
+
+func (l *Adapter) removePartFiles(files []string) error {
+	var firstErr error
+	for _, name := range files {
+		if err := l.verifyRelPath(name); err != nil {
+			if firstErr == nil {
+				firstErr = err
+			}
+		}
+		// If removal fails, prefer to skip the error: "only" wasted space.
+		_ = os.Remove(name)
+	}
+	return firstErr
+}
+
+func (l *Adapter) getPartFiles(uploadID string, obj block.ObjectPointer) ([]string, error) {
+	newObj := block.ObjectPointer{
+		StorageNamespace: obj.StorageNamespace,
+		Identifier:       uploadID,
+	}
+	globPathPattern, err := l.extractParamsFromObj(newObj)
+	if err != nil {
+		return nil, err
+	}
+	globPathPattern += "*"
+	names, err := filepath.Glob(globPathPattern)
+	if err != nil {
+		return nil, err
+	}
+	sort.Strings(names)
+	return names, nil
+}
+
+func (l *Adapter) BlockstoreType() string {
+	return block.BlockstoreTypeLocal
+}
+
+func (l *Adapter) GetStorageNamespaceInfo() block.StorageNamespaceInfo {
+	info := block.DefaultStorageNamespaceInfo(block.BlockstoreTypeLocal)
+	info.PreSignSupport = false
+	info.DefaultNamespacePrefix = DefaultNamespacePrefix
+	info.ImportSupport = l.importEnabled
+	return info
+}
+
+func (l *Adapter) ResolveNamespace(storageNamespace, key string, identifierType block.IdentifierType) (block.QualifiedKey, error) {
+	qk, err := block.DefaultResolveNamespace(storageNamespace, key, identifierType)
+	if err != nil {
+		return nil, err
+	}
+
+	// Check that the path is allowed; fail resolution if it is not
+	_, err = l.extractParamsFromObj(block.ObjectPointer{
+		StorageNamespace: storageNamespace,
+		Identifier:       key,
+		IdentifierType:   identifierType,
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return QualifiedKey{
+		CommonQualifiedKey: qk,
+		path:               l.path,
+	}, nil
+}
+
+func (l *Adapter) RuntimeStats() map[string]string {
+	return nil
+}
+
+func VerifyAbsPath(absPath, adapterPath string, allowedPrefixes []string) error {
+	// check that we have a valid absolute path
+	if !path.IsAbs(absPath) || path.Clean(absPath) != absPath {
+		return ErrBadPath
+	}
+	// the path points into the storage namespace itself
+	if strings.HasPrefix(absPath, adapterPath) {
+		return nil
+	}
+	// otherwise the path must be under one of the allowed external prefixes
+	if !slices.ContainsFunc(allowedPrefixes, func(prefix string) bool {
+		return strings.HasPrefix(absPath, prefix)
+	}) {
+		return block.ErrForbidden
+	}
+	return nil
+}
+
+func isValidUploadID(uploadID string) error {
+	_, err := hex.DecodeString(uploadID)
+	if err != nil {
+		return fmt.Errorf("%w: %s", ErrInvalidUploadIDFormat, err)
+	}
+	return nil
+}
diff --git a/block/local/adapter_test.go b/block/local/adapter_test.go
new file mode 100644
index 00000000..a193ddcc
--- /dev/null
+++ b/block/local/adapter_test.go
@@ -0,0 +1,66 @@
+package local_test
+
+import (
+	"path"
+	"regexp"
+	"testing"
+
+	"github.com/jiaozifs/jiaozifs/block"
+	"github.com/jiaozifs/jiaozifs/block/blocktest"
+	"github.com/jiaozifs/jiaozifs/block/local"
+	"github.com/stretchr/testify/require"
+)
+
+const testStorageNamespace = "local://test"
+
+func TestLocalAdapter(t *testing.T) {
+	tmpDir := t.TempDir()
+	localPath := path.Join(tmpDir, "lakefs")
+	externalPath := block.BlockstoreTypeLocal + "://" + path.Join(tmpDir, "lakefs", "external")
+	adapter, err := local.NewAdapter(localPath, local.WithRemoveEmptyDir(false))
+	if err != nil {
+
t.Fatal("Failed to create new adapter", err) + } + blocktest.AdapterTest(t, adapter, testStorageNamespace, externalPath) +} + +func TestAdapterNamespace(t *testing.T) { + tmpDir := t.TempDir() + localPath := path.Join(tmpDir, "lakefs") + adapter, err := local.NewAdapter(localPath, local.WithRemoveEmptyDir(false)) + require.NoError(t, err, "create new adapter") + expr, err := regexp.Compile(adapter.GetStorageNamespaceInfo().ValidityRegex) + require.NoError(t, err) + + tests := []struct { + Name string + Namespace string + Success bool + }{ + { + Name: "valid_path", + Namespace: "local://test/path/to/repo1", + Success: true, + }, + { + Name: "invalid_path", + Namespace: "~/test/path/to/repo1", + Success: false, + }, + { + Name: "s3", + Namespace: "s3://test/adls/core/windows/net", + Success: false, + }, + { + Name: "invalid_string", + Namespace: "this is a bad string", + Success: false, + }, + } + for _, tt := range tests { + t.Run(tt.Name, func(t *testing.T) { + require.Equal(t, tt.Success, expr.MatchString(tt.Namespace)) + }) + } +} diff --git a/block/local/etag_test.go b/block/local/etag_test.go new file mode 100644 index 00000000..84612d2e --- /dev/null +++ b/block/local/etag_test.go @@ -0,0 +1,27 @@ +package local + +import ( + "encoding/hex" + "testing" + + "github.com/jiaozifs/jiaozifs/block" +) + +const PartsNo = 30 + +func TestEtag(t *testing.T) { + var base [16]byte + b := base[:] + parts := make([]block.MultipartPart, PartsNo) + for i := 0; i < PartsNo; i++ { + for j := 0; j < len(b); j++ { + b[j] = byte(32 + i + j) + } + parts[i].PartNumber = i + 1 + parts[i].ETag = hex.EncodeToString(b) + } + etag := computeETag(parts) + if etag != "9cae1a3b7e97542c261cf2e1b50ba482" { + t.Fatalf("ETag value '%s' not as expected", etag) + } +} diff --git a/block/local/walker.go b/block/local/walker.go new file mode 100644 index 00000000..85b9ab89 --- /dev/null +++ b/block/local/walker.go @@ -0,0 +1,191 @@ +package local + +import ( + "context" + "crypto/md5" //nolint:gosec + "encoding/hex" + "encoding/json" + "io" + "io/fs" + "net/url" + "os" + "path" + "path/filepath" + "sort" + "strings" + + "github.com/jiaozifs/jiaozifs/block" + "github.com/jiaozifs/jiaozifs/block/params" + gonanoid "github.com/matoous/go-nanoid/v2" +) + +const cacheDirName = "_lakefs_cache" + +type Walker struct { + mark block.Mark + importHidden bool + allowedPrefixes []string + cacheLocation string + path string +} + +func NewLocalWalker(params params.Local) *Walker { + // without Path, we do not keep cache - will make walker very slow + var cacheLocation string + if params.Path != "" { + cacheLocation = filepath.Join(params.Path, cacheDirName) + } + return &Walker{ + mark: block.Mark{HasMore: true}, + importHidden: params.ImportHidden, + allowedPrefixes: params.AllowedExternalPrefixes, + cacheLocation: cacheLocation, + path: params.Path, + } +} + +func (l *Walker) Walk(_ context.Context, storageURI *url.URL, options block.WalkOptions, walkFn func(e block.ObjectStoreEntry) error) error { + if storageURI.Scheme != "local" { + return path.ErrBadPattern + } + root := path.Join(storageURI.Host, storageURI.Path) + if err := VerifyAbsPath(root, l.path, l.allowedPrefixes); err != nil { + return err + } + + var entries []*block.ObjectStoreEntry + // verify and use cache - location is stored in continuation token + if options.ContinuationToken != "" && strings.HasPrefix(options.ContinuationToken, l.cacheLocation) { + cacheData, err := os.ReadFile(options.ContinuationToken) + if err == nil { + err = json.Unmarshal(cacheData, 
&entries)
+			if err != nil {
+				entries = nil
+			} else {
+				l.mark.ContinuationToken = options.ContinuationToken
+			}
+		}
+	}
+
+	// if needed, scan all entries to import and compute their ETags
+	if entries == nil {
+		var err error
+		entries, err = l.scanEntries(root, options)
+		if err != nil {
+			return err
+		}
+
+		// store entries to cache file
+		if l.cacheLocation != "" {
+			jsonData, err := json.Marshal(entries)
+			if err != nil {
+				return err
+			}
+			const dirPerm = 0o755
+			_ = os.MkdirAll(l.cacheLocation, dirPerm)
+			cacheName := filepath.Join(l.cacheLocation, gonanoid.Must()+"-import.json")
+			const cachePerm = 0o644
+			if err := os.WriteFile(cacheName, jsonData, cachePerm); err != nil {
+				_ = os.Remove(cacheName)
+				return err
+			}
+			l.mark.ContinuationToken = cacheName
+		}
+	}
+
+	// search for the start position based on the last key
+	startIndex := sort.Search(len(entries), func(i int) bool {
+		return entries[i].FullKey > options.After
+	})
+	for i := startIndex; i < len(entries); i++ {
+		ent := *entries[i]
+		etag, err := calcFileETag(ent)
+		if err != nil {
+			return err
+		}
+
+		ent.ETag = etag
+		l.mark.LastKey = ent.FullKey
+		if err := walkFn(ent); err != nil {
+			return err
+		}
+	}
+	// delete the cache once the iteration has completed
+	if l.mark.ContinuationToken != "" {
+		if err := os.Remove(l.mark.ContinuationToken); err != nil {
+			return err
+		}
+	}
+	l.mark = block.Mark{}
+	return nil
+}
+
+func (l *Walker) scanEntries(root string, options block.WalkOptions) ([]*block.ObjectStoreEntry, error) {
+	var entries []*block.ObjectStoreEntry
+	if err := filepath.Walk(root, func(p string, info fs.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// skip hidden files and directories
+		if !l.importHidden && strings.HasPrefix(info.Name(), ".") {
+			if info.IsDir() {
+				return fs.SkipDir
+			}
+			return nil
+		}
+
+		key := filepath.ToSlash(p)
+		if key < options.After {
+			return nil
+		}
+		if !info.Mode().IsRegular() {
+			return nil
+		}
+
+		addr := "local://" + key
+		relativePath, err := filepath.Rel(root, p)
+		if err != nil {
+			return err
+		}
+		// the ETag is calculated later, during iteration
+		ent := &block.ObjectStoreEntry{
+			FullKey:     key,
+			RelativeKey: filepath.ToSlash(relativePath),
+			Address:     addr,
+			Mtime:       info.ModTime(),
+			Size:        info.Size(),
+		}
+		entries = append(entries, ent)
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+	sort.Slice(entries, func(i, j int) bool {
+		return entries[i].FullKey < entries[j].FullKey
+	})
+	return entries, nil
+}
+
+func calcFileETag(ent block.ObjectStoreEntry) (string, error) {
+	f, err := os.Open(ent.FullKey)
+	if err != nil {
+		return "", err
+	}
+	defer func() { _ = f.Close() }()
+	hash := md5.New() //nolint:gosec
+	_, err = io.Copy(hash, f)
+	if err != nil {
+		return "", err
+	}
+	etag := hex.EncodeToString(hash.Sum(nil))
+	return etag, nil
+}
+
+func (l *Walker) Marker() block.Mark {
+	return l.mark
+}
+
+func (l *Walker) GetSkippedEntries() []block.ObjectStoreEntry {
+	return nil
+}
diff --git a/block/mem/adapter.go b/block/mem/adapter.go
new file mode 100644
index 00000000..240c2b48
--- /dev/null
+++ b/block/mem/adapter.go
@@ -0,0 +1,355 @@
+package mem
+
+import (
+	"bytes"
+	"context"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/google/uuid"
+	"github.com/jiaozifs/jiaozifs/block"
+)
+
+var (
+	ErrNoDataForKey       = fmt.Errorf("no data for key: %w", block.ErrDataNotFound)
+	ErrMultiPartNotFound  = fmt.Errorf("multipart ID not found")
+	ErrNoPropertiesForKey = fmt.Errorf("no properties for key")
+)
key") +) + +type mpu struct { + id string + parts map[int][]byte +} + +func newMPU() *mpu { + uid := uuid.New() + uploadID := hex.EncodeToString(uid[:]) + return &mpu{ + id: uploadID, + parts: make(map[int][]byte), + } +} + +func (m *mpu) get() []byte { + buf := bytes.NewBuffer(nil) + keys := make([]int, len(m.parts)) + sort.Slice(keys, func(i, j int) bool { + return keys[i] < keys[j] + }) + for _, part := range keys { + buf.Write(m.parts[part]) + } + return buf.Bytes() +} + +type Adapter struct { + data map[string][]byte + mpu map[string]*mpu + properties map[string]block.Properties + mutex *sync.RWMutex +} + +func New(_ context.Context, opts ...func(a *Adapter)) *Adapter { + a := &Adapter{ + data: make(map[string][]byte), + mpu: make(map[string]*mpu), + properties: make(map[string]block.Properties), + mutex: &sync.RWMutex{}, + } + for _, opt := range opts { + opt(a) + } + return a +} + +func getKey(obj block.ObjectPointer) string { + // TODO (niro): Fix mem storage path resolution + if obj.IdentifierType == block.IdentifierTypeFull { + return obj.Identifier + } + return fmt.Sprintf("%s:%s", obj.StorageNamespace, obj.Identifier) +} + +func (a *Adapter) Put(_ context.Context, obj block.ObjectPointer, _ int64, reader io.Reader, opts block.PutOpts) error { + if err := verifyObjectPointer(obj); err != nil { + return err + } + a.mutex.Lock() + defer a.mutex.Unlock() + data, err := io.ReadAll(reader) + if err != nil { + return err + } + key := getKey(obj) + a.data[key] = data + a.properties[key] = block.Properties(opts) + return nil +} + +func (a *Adapter) Get(_ context.Context, obj block.ObjectPointer, _ int64) (io.ReadCloser, error) { + if err := verifyObjectPointer(obj); err != nil { + return nil, err + } + a.mutex.RLock() + defer a.mutex.RUnlock() + key := getKey(obj) + data, ok := a.data[key] + if !ok { + return nil, ErrNoDataForKey + } + return io.NopCloser(bytes.NewReader(data)), nil +} + +func verifyObjectPointer(obj block.ObjectPointer) error { + const prefix = "mem://" + if obj.StorageNamespace == "" { + if !strings.HasPrefix(obj.Identifier, prefix) { + return fmt.Errorf("mem block adapter: %w identifier: %s", block.ErrInvalidAddress, obj.Identifier) + } + } else if !strings.HasPrefix(obj.StorageNamespace, prefix) { + return fmt.Errorf("mem block adapter: %w storage namespace: %s", block.ErrInvalidAddress, obj.StorageNamespace) + } + return nil +} + +func (a *Adapter) GetWalker(_ *url.URL) (block.Walker, error) { + return nil, fmt.Errorf("mem block adapter: %w", block.ErrOperationNotSupported) +} + +func (a *Adapter) GetPreSignedURL(_ context.Context, obj block.ObjectPointer, _ block.PreSignMode) (string, time.Time, error) { + if err := verifyObjectPointer(obj); err != nil { + return "", time.Time{}, err + } + return "", time.Time{}, fmt.Errorf("mem block adapter: %w", block.ErrOperationNotSupported) +} + +func (a *Adapter) Exists(_ context.Context, obj block.ObjectPointer) (bool, error) { + if err := verifyObjectPointer(obj); err != nil { + return false, err + } + a.mutex.RLock() + defer a.mutex.RUnlock() + _, ok := a.data[getKey(obj)] + return ok, nil +} + +func (a *Adapter) GetRange(_ context.Context, obj block.ObjectPointer, startPosition int64, endPosition int64) (io.ReadCloser, error) { + if err := verifyObjectPointer(obj); err != nil { + return nil, err + } + a.mutex.RLock() + defer a.mutex.RUnlock() + data, ok := a.data[getKey(obj)] + if !ok { + return nil, ErrNoDataForKey + } + return io.NopCloser(io.NewSectionReader(bytes.NewReader(data), startPosition, 
endPosition-startPosition+1)), nil +} + +func (a *Adapter) GetProperties(_ context.Context, obj block.ObjectPointer) (block.Properties, error) { + if err := verifyObjectPointer(obj); err != nil { + return block.Properties{}, err + } + a.mutex.RLock() + defer a.mutex.RUnlock() + props, ok := a.properties[getKey(obj)] + if !ok { + return block.Properties{}, ErrNoPropertiesForKey + } + return props, nil +} + +func (a *Adapter) Remove(_ context.Context, obj block.ObjectPointer) error { + if err := verifyObjectPointer(obj); err != nil { + return err + } + a.mutex.Lock() + defer a.mutex.Unlock() + delete(a.data, getKey(obj)) + return nil +} + +func (a *Adapter) Copy(_ context.Context, sourceObj, destinationObj block.ObjectPointer) error { + if err := verifyObjectPointer(sourceObj); err != nil { + return err + } + if err := verifyObjectPointer(destinationObj); err != nil { + return err + } + a.mutex.Lock() + defer a.mutex.Unlock() + destinationKey := getKey(destinationObj) + sourceKey := getKey(sourceObj) + a.data[destinationKey] = a.data[sourceKey] + a.properties[destinationKey] = a.properties[sourceKey] + return nil +} + +func (a *Adapter) UploadCopyPart(ctx context.Context, sourceObj, _ block.ObjectPointer, uploadID string, partNumber int) (*block.UploadPartResponse, error) { + if err := verifyObjectPointer(sourceObj); err != nil { + return nil, err + } + a.mutex.Lock() + defer a.mutex.Unlock() + mpu, ok := a.mpu[uploadID] + if !ok { + return nil, ErrMultiPartNotFound + } + entry, err := a.Get(ctx, sourceObj, 0) + if err != nil { + return nil, err + } + data, err := io.ReadAll(entry) + if err != nil { + return nil, err + } + h := sha256.New() + _, err = h.Write(data) + if err != nil { + return nil, err + } + code := h.Sum(nil) + mpu.parts[partNumber] = data + etag := fmt.Sprintf("%x", code) + return &block.UploadPartResponse{ + ETag: etag, + }, nil +} + +func (a *Adapter) UploadCopyPartRange(_ context.Context, sourceObj, _ block.ObjectPointer, uploadID string, partNumber int, startPosition, endPosition int64) (*block.UploadPartResponse, error) { + if err := verifyObjectPointer(sourceObj); err != nil { + return nil, err + } + a.mutex.Lock() + defer a.mutex.Unlock() + mpu, ok := a.mpu[uploadID] + if !ok { + return nil, ErrMultiPartNotFound + } + data, ok := a.data[getKey(sourceObj)] + if !ok { + return nil, ErrNoDataForKey + } + reader := io.NewSectionReader(bytes.NewReader(data), startPosition, endPosition-startPosition+1) + data, err := io.ReadAll(reader) + if err != nil { + return nil, err + } + h := sha256.New() + _, err = h.Write(data) + if err != nil { + return nil, err + } + code := h.Sum(nil) + mpu.parts[partNumber] = data + etag := fmt.Sprintf("%x", code) + return &block.UploadPartResponse{ + ETag: etag, + }, nil +} + +func (a *Adapter) CreateMultiPartUpload(_ context.Context, obj block.ObjectPointer, _ *http.Request, _ block.CreateMultiPartUploadOpts) (*block.CreateMultiPartUploadResponse, error) { + if err := verifyObjectPointer(obj); err != nil { + return nil, err + } + a.mutex.Lock() + defer a.mutex.Unlock() + mpu := newMPU() + a.mpu[mpu.id] = mpu + return &block.CreateMultiPartUploadResponse{ + UploadID: mpu.id, + }, nil +} + +func (a *Adapter) UploadPart(_ context.Context, obj block.ObjectPointer, _ int64, reader io.Reader, uploadID string, partNumber int) (*block.UploadPartResponse, error) { + if err := verifyObjectPointer(obj); err != nil { + return nil, err + } + a.mutex.Lock() + defer a.mutex.Unlock() + mpu, ok := a.mpu[uploadID] + if !ok { + return nil, ErrMultiPartNotFound + 
} + data, err := io.ReadAll(reader) + if err != nil { + return nil, err + } + h := sha256.New() + _, err = h.Write(data) + if err != nil { + return nil, err + } + code := h.Sum(nil) + mpu.parts[partNumber] = data + etag := fmt.Sprintf("%x", code) + return &block.UploadPartResponse{ + ETag: etag, + }, nil +} + +func (a *Adapter) AbortMultiPartUpload(_ context.Context, obj block.ObjectPointer, uploadID string) error { + if err := verifyObjectPointer(obj); err != nil { + return err + } + a.mutex.Lock() + defer a.mutex.Unlock() + _, ok := a.mpu[uploadID] + if !ok { + return ErrMultiPartNotFound + } + delete(a.mpu, uploadID) + return nil +} + +func (a *Adapter) CompleteMultiPartUpload(_ context.Context, obj block.ObjectPointer, uploadID string, _ *block.MultipartUploadCompletion) (*block.CompleteMultiPartUploadResponse, error) { + if err := verifyObjectPointer(obj); err != nil { + return nil, err + } + a.mutex.Lock() + defer a.mutex.Unlock() + mpu, ok := a.mpu[uploadID] + if !ok { + return nil, ErrMultiPartNotFound + } + data := mpu.get() + h := sha256.New() + _, err := h.Write(data) + if err != nil { + return nil, err + } + code := h.Sum(nil) + hexCode := fmt.Sprintf("%x", code) + a.data[getKey(obj)] = data + return &block.CompleteMultiPartUploadResponse{ + ETag: hexCode, + ContentLength: int64(len(data)), + }, nil +} + +func (a *Adapter) BlockstoreType() string { + return block.BlockstoreTypeMem +} + +func (a *Adapter) GetStorageNamespaceInfo() block.StorageNamespaceInfo { + info := block.DefaultStorageNamespaceInfo(block.BlockstoreTypeMem) + info.PreSignSupport = false + info.ImportSupport = false + return info +} + +func (a *Adapter) ResolveNamespace(storageNamespace, key string, identifierType block.IdentifierType) (block.QualifiedKey, error) { + return block.DefaultResolveNamespace(storageNamespace, key, identifierType) +} + +func (a *Adapter) RuntimeStats() map[string]string { + return nil +} diff --git a/block/mem/adapter_test.go b/block/mem/adapter_test.go new file mode 100644 index 00000000..0e316216 --- /dev/null +++ b/block/mem/adapter_test.go @@ -0,0 +1,7 @@ +package mem_test + +// TODO (niro): Need to enable +//func TestMemAdapter(t *testing.T) { +// adapter := mem.New() +// blocktest.TestAdapter(t, adapter, "") +//} diff --git a/block/namespace.go b/block/namespace.go new file mode 100644 index 00000000..9b2ef40f --- /dev/null +++ b/block/namespace.go @@ -0,0 +1,197 @@ +package block + +import ( + "fmt" + "net/url" + "strings" +) + +type StorageType int + +const ( + StorageTypeMem = iota + StorageTypeLocal + StorageTypeS3 + StorageTypeGS + StorageTypeAzure +) + +func (s StorageType) BlockstoreType() string { + switch s { + case StorageTypeAzure: + return "azure" + default: + return s.Scheme() + } +} + +func (s StorageType) Scheme() string { + scheme := "" + switch s { + case StorageTypeMem: + scheme = "mem" + case StorageTypeLocal: + scheme = "local" + case StorageTypeGS: + scheme = "gs" + case StorageTypeS3: + scheme = "s3" + case StorageTypeAzure: + scheme = "https" + default: + panic("unknown storage type") + } + return scheme +} + +type StorageNamespaceInfo struct { + ValidityRegex string // regex pattern that could be used to validate the namespace + Example string // example of a valid namespace + DefaultNamespacePrefix string // when a repo is created from the UI, suggest a default storage namespace under this prefix + PreSignSupport bool + PreSignSupportUI bool + ImportSupport bool + ImportValidityRegex string +} + +type QualifiedKey interface { + Format() string + 
	GetStorageType() StorageType
+	GetStorageNamespace() string
+	GetKey() string
+}
+
+type CommonQualifiedKey struct {
+	StorageType      StorageType
+	StorageNamespace string
+	Key              string
+}
+
+func (qk CommonQualifiedKey) Format() string {
+	return qk.StorageType.Scheme() + "://" + formatPathWithNamespace(qk.StorageNamespace, qk.Key)
+}
+
+func (qk CommonQualifiedKey) GetStorageType() StorageType {
+	return qk.StorageType
+}
+
+func (qk CommonQualifiedKey) GetKey() string {
+	return qk.Key
+}
+
+func (qk CommonQualifiedKey) GetStorageNamespace() string {
+	return qk.StorageNamespace
+}
+
+func GetStorageType(namespaceURL *url.URL) (StorageType, error) {
+	var st StorageType
+	switch namespaceURL.Scheme {
+	case "s3":
+		return StorageTypeS3, nil
+	case "mem", "memory":
+		return StorageTypeMem, nil
+	case "local":
+		return StorageTypeLocal, nil
+	case "gs":
+		return StorageTypeGS, nil
+	case "http", "https":
+		return StorageTypeAzure, nil
+	default:
+		return st, fmt.Errorf("invalid storage scheme %s: %w", namespaceURL.Scheme, ErrInvalidAddress)
+	}
+}
+
+func ValidateStorageType(uri *url.URL, expectedStorage StorageType) error {
+	storage, err := GetStorageType(uri)
+	if err != nil {
+		return err
+	}
+
+	if storage != expectedStorage {
+		return fmt.Errorf("expected storage type %s: %w", expectedStorage.Scheme(), ErrInvalidAddress)
+	}
+	return nil
+}
+
+func formatPathWithNamespace(namespacePath, keyPath string) string {
+	namespacePath = strings.Trim(namespacePath, "/")
+	if len(namespacePath) == 0 {
+		return strings.TrimPrefix(keyPath, "/")
+	}
+	return namespacePath + "/" + keyPath
+}
+
+func DefaultResolveNamespace(defaultNamespace, key string, identifierType IdentifierType) (CommonQualifiedKey, error) {
+	switch identifierType {
+	case IdentifierTypeRelative:
+		return resolveRelative(defaultNamespace, key)
+	case IdentifierTypeFull:
+		return resolveFull(key)
+	default:
+		panic(fmt.Sprintf("unknown identifier type: %d", identifierType))
+	}
+}
+
+func resolveFull(key string) (CommonQualifiedKey, error) {
+	parsedKey, err := url.ParseRequestURI(key)
+	if err != nil {
+		return CommonQualifiedKey{}, fmt.Errorf("could not parse URI: %w", err)
+	}
+	// extract its scheme
+	storageType, err := GetStorageType(parsedKey)
+	if err != nil {
+		return CommonQualifiedKey{}, err
+	}
+	return CommonQualifiedKey{
+		StorageType:      storageType,
+		StorageNamespace: parsedKey.Host,
+		Key:              formatPathWithNamespace("", parsedKey.Path),
+	}, nil
+}
+
+func resolveRelative(defaultNamespace, key string) (CommonQualifiedKey, error) {
+	// the key is not fully qualified, so treat it as a key relative to the
+	// default namespace; any trailing slash on the namespace is trimmed so
+	// Format() can re-join namespace and key with exactly one separator.
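+	//
+	// For example (values taken from the tests below): defaultNamespace
+	// "gs://foo/bla/" with key "bar/baz" resolves to StorageType gs,
+	// StorageNamespace "foo/bla" and Key "bar/baz".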
+ parsedNS, err := url.ParseRequestURI(defaultNamespace) + if err != nil { + return CommonQualifiedKey{}, fmt.Errorf("default namespace %s: %w", defaultNamespace, ErrInvalidAddress) + } + storageType, err := GetStorageType(parsedNS) + if err != nil { + return CommonQualifiedKey{}, fmt.Errorf("no storage type for %s: %w", parsedNS, err) + } + + return CommonQualifiedKey{ + StorageType: storageType, + StorageNamespace: strings.TrimSuffix(parsedNS.Host+parsedNS.Path, "/"), + Key: key, + }, nil +} + +func resolveNamespaceUnknown(defaultNamespace, key string) (CommonQualifiedKey, error) { //nolint + // first try to treat key as a full path + if qk, err := resolveFull(key); err == nil { + return qk, nil + } + + // else, treat it as a relative path + return resolveRelative(defaultNamespace, key) +} + +func DefaultExample(scheme string) string { + return scheme + "://example-bucket/" +} + +func DefaultValidationRegex(scheme string) string { + return fmt.Sprintf("^%s://", scheme) +} + +func DefaultStorageNamespaceInfo(scheme string) StorageNamespaceInfo { + return StorageNamespaceInfo{ + ValidityRegex: DefaultValidationRegex(scheme), + Example: DefaultExample(scheme), + PreSignSupport: true, + ImportSupport: true, + ImportValidityRegex: DefaultValidationRegex(scheme), + } +} diff --git a/block/namespace_test.go b/block/namespace_test.go new file mode 100644 index 00000000..48325440 --- /dev/null +++ b/block/namespace_test.go @@ -0,0 +1,234 @@ +package block_test + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "github.com/jiaozifs/jiaozifs/block" +) + +func TestResolveNamespace(t *testing.T) { + cases := []struct { + Name string + DefaultNamespace string + Key string + Type block.IdentifierType + ExpectedErr error + Expected block.CommonQualifiedKey + }{ + { + Name: "valid_namespace_no_trailing_slash", + DefaultNamespace: "s3://foo", + Key: "bar/baz", + Type: block.IdentifierTypeRelative, + ExpectedErr: nil, + Expected: block.CommonQualifiedKey{ + StorageType: block.StorageTypeS3, + StorageNamespace: "foo", + Key: "bar/baz", + }, + }, + { + Name: "valid_namespace_with_trailing_slash", + DefaultNamespace: "s3://foo/", + Key: "bar/baz", + Type: block.IdentifierTypeRelative, + ExpectedErr: nil, + Expected: block.CommonQualifiedKey{ + StorageType: block.StorageTypeS3, + StorageNamespace: "foo", + Key: "bar/baz", + }, + }, + { + Name: "valid_namespace_mem_with_trailing_slash", + DefaultNamespace: "mem://foo/", + Key: "bar/baz", + Type: block.IdentifierTypeRelative, + ExpectedErr: nil, + Expected: block.CommonQualifiedKey{ + StorageType: block.StorageTypeMem, + StorageNamespace: "foo", + Key: "bar/baz", + }, + }, + { + Name: "valid_namespace_with_prefix_and_trailing_slash", + DefaultNamespace: "gs://foo/bla/", + Key: "bar/baz", + Type: block.IdentifierTypeRelative, + ExpectedErr: nil, + Expected: block.CommonQualifiedKey{ + StorageType: block.StorageTypeGS, + StorageNamespace: "foo/bla", + Key: "bar/baz", + }, + }, + { + Name: "valid_namespace_with_prefix_and_no_trailing_slash", + DefaultNamespace: "gs://foo/bla", + Key: "bar/baz", + Type: block.IdentifierTypeRelative, + ExpectedErr: nil, + Expected: block.CommonQualifiedKey{ + StorageType: block.StorageTypeGS, + StorageNamespace: "foo/bla", + Key: "bar/baz", + }, + }, + { + Name: "valid_namespace_with_prefix_and_leading_key_slash", + DefaultNamespace: "gs://foo/bla", + Key: "/bar/baz", + Type: block.IdentifierTypeRelative, + ExpectedErr: nil, + Expected: block.CommonQualifiedKey{ + StorageType: block.StorageTypeGS, + StorageNamespace: 
"foo/bla", + Key: "/bar/baz", + }, + }, + { + Name: "valid_fq_key", + DefaultNamespace: "mem://foo/", + Key: "s3://example/bar/baz", + Type: block.IdentifierTypeFull, + ExpectedErr: nil, + Expected: block.CommonQualifiedKey{ + StorageType: block.StorageTypeS3, + StorageNamespace: "example", + Key: "bar/baz", + }, + }, + { + Name: "invalid_namespace_wrong_scheme", + DefaultNamespace: "memzzzz://foo/", + Key: "bar/baz", + Type: block.IdentifierTypeRelative, + ExpectedErr: block.ErrInvalidAddress, + Expected: block.CommonQualifiedKey{}, + }, + { + Name: "invalid_namespace_invalid_uri", + DefaultNamespace: "foo", + Key: "bar/baz", + Type: block.IdentifierTypeRelative, + ExpectedErr: block.ErrInvalidAddress, + Expected: block.CommonQualifiedKey{}, + }, + { + Name: "invalid_key_wrong_scheme", + DefaultNamespace: "s3://foo/", + Key: "s4://bar/baz", + Type: block.IdentifierTypeFull, + ExpectedErr: block.ErrInvalidAddress, + Expected: block.CommonQualifiedKey{}, + }, + { + Name: "key_weird_format", + DefaultNamespace: "s3://foo/", + Key: "://invalid/baz", + Type: block.IdentifierTypeRelative, + Expected: block.CommonQualifiedKey{ + StorageType: block.StorageTypeS3, + StorageNamespace: "foo", + Key: "://invalid/baz", + }, + }, + } + + for _, cas := range cases { + for _, r := range []block.IdentifierType{cas.Type} { + relativeName := "" + switch r { + case block.IdentifierTypeRelative: + relativeName = "relative" + case block.IdentifierTypeFull: + relativeName = "full" + } + t.Run(fmt.Sprintf("%s/%s", cas.Name, relativeName), func(t *testing.T) { + resolved, err := block.DefaultResolveNamespace(cas.DefaultNamespace, cas.Key, r) + if err != nil && !errors.Is(err, cas.ExpectedErr) { + t.Fatalf("got unexpected error :%v - expected %v", err, cas.ExpectedErr) + } + if cas.ExpectedErr == nil && !reflect.DeepEqual(resolved, cas.Expected) { + t.Fatalf("expected %v got %v", cas.Expected, resolved) + } + }) + } + } +} + +func TestFormatQualifiedKey(t *testing.T) { + cases := []struct { + Name string + QualifiedKey block.CommonQualifiedKey + Expected string + }{ + { + Name: "simple_path", + QualifiedKey: block.CommonQualifiedKey{ + StorageType: block.StorageTypeGS, + StorageNamespace: "some-bucket", + Key: "path", + }, + Expected: "gs://some-bucket/path", + }, + { + Name: "path_with_prefix", + QualifiedKey: block.CommonQualifiedKey{ + StorageType: block.StorageTypeS3, + StorageNamespace: "some-bucket/", + Key: "path/to/file", + }, + Expected: "s3://some-bucket/path/to/file", + }, + { + Name: "bucket_with_prefix", + QualifiedKey: block.CommonQualifiedKey{ + StorageType: block.StorageTypeS3, + StorageNamespace: "some-bucket/prefix/", + Key: "path/to/file", + }, + Expected: "s3://some-bucket/prefix/path/to/file", + }, + { + Name: "path_with_prefix_leading_slash", + QualifiedKey: block.CommonQualifiedKey{ + StorageType: block.StorageTypeS3, + StorageNamespace: "some-bucket", + Key: "/path/to/file", + }, + Expected: "s3://some-bucket//path/to/file", + }, + { + Name: "bucket_with_prefix_leading_slash", + QualifiedKey: block.CommonQualifiedKey{ + StorageType: block.StorageTypeS3, + StorageNamespace: "some-bucket/prefix", + Key: "/path/to/file", + }, + Expected: "s3://some-bucket/prefix//path/to/file", + }, + { + Name: "dont_eliminate_dots", + QualifiedKey: block.CommonQualifiedKey{ + StorageType: block.StorageTypeS3, + StorageNamespace: "some-bucket/prefix/", + Key: "path/to/../file", + }, + Expected: "s3://some-bucket/prefix/path/to/../file", + }, + } + + for _, cas := range cases { + t.Run(cas.Name, func(t 
*testing.T) { + formatted := cas.QualifiedKey.Format() + if formatted != cas.Expected { + t.Fatalf("Format() got '%s', expected '%s'", formatted, cas.Expected) + } + }) + } +} diff --git a/block/params/block.go b/block/params/block.go new file mode 100644 index 00000000..7a84e5e6 --- /dev/null +++ b/block/params/block.go @@ -0,0 +1,81 @@ +package params + +import ( + "time" +) + +// AdapterConfig configures a block adapter. +type AdapterConfig interface { + BlockstoreType() string + BlockstoreLocalParams() (Local, error) + BlockstoreS3Params() (S3, error) + BlockstoreGSParams() (GS, error) + BlockstoreAzureParams() (Azure, error) +} + +type Mem struct{} + +type Local struct { + Path string + ImportEnabled bool + ImportHidden bool + AllowedExternalPrefixes []string +} + +// S3WebIdentity contains parameters for customizing S3 web identity. This +// is also used when configuring S3 with IRSA in EKS (Kubernetes). +type S3WebIdentity struct { + // SessionDuration is the duration WebIdentityRoleProvider will + // request for a token for its assumed role. It can be 1 hour or + // more, but its maximum is configurable on AWS. + SessionDuration time.Duration + + // SessionExpiryWindow is the time before credentials expiry that + // the WebIdentityRoleProvider may request a fresh token. + SessionExpiryWindow time.Duration +} + +type S3Credentials struct { + AccessKeyID string + SecretAccessKey string + SessionToken string +} + +type S3 struct { + Region string + Profile string + CredentialsFile string + Credentials S3Credentials + MaxRetries int + Endpoint string + ForcePathStyle bool + DiscoverBucketRegion bool + SkipVerifyCertificateTestOnly bool + ServerSideEncryption string + ServerSideEncryptionKmsKeyID string + PreSignedExpiry time.Duration + DisablePreSigned bool + DisablePreSignedUI bool + ClientLogRetries bool + ClientLogRequest bool + WebIdentity *S3WebIdentity +} + +type GS struct { + CredentialsFile string + CredentialsJSON string + PreSignedExpiry time.Duration + DisablePreSigned bool + DisablePreSignedUI bool +} + +type Azure struct { + StorageAccount string + StorageAccessKey string + TryTimeout time.Duration + PreSignedExpiry time.Duration + DisablePreSigned bool + DisablePreSignedUI bool + // TestEndpointURL - For testing purposes, provide a custom URL to override the default URL template + TestEndpointURL string +} diff --git a/block/path.go b/block/path.go new file mode 100644 index 00000000..4a790409 --- /dev/null +++ b/block/path.go @@ -0,0 +1,119 @@ +package block + +import ( + "fmt" + "strings" +) + +const ( + Separator = "/" + + EntryTypeTree = "tree" + EntryTypeObject = "object" +) + +type Path struct { + str string + entryType string +} + +var RootPath = NewPath("", EntryTypeTree) + +func NewPath(str, entryType string) *Path { + return &Path{str, entryType} +} + +func (p *Path) String() string { + if p == nil { + return "" + } + joined := JoinPathParts(p.Split()) + return strings.TrimPrefix(joined, Separator) +} + +func (p *Path) Equals(other *Path) bool { + if p == nil && other == nil { + return true + } + if other == nil { + return false + } + if p.entryType != other.entryType { + return false + } + mine := p.Split() + theirs := other.Split() + if len(mine) != len(theirs) { + return false + } + for i, part := range mine { + if !strings.EqualFold(part, theirs[i]) { + return false + } + } + return true +} + +func (p *Path) Split() []string { + // trim first / if it exists + parts := strings.Split(p.str, Separator) + if len(parts) >= 2 && len(parts[0]) == 0 { + parts = 
parts[1:] + } + suffixedParts := make([]string, len(parts)) + for i, part := range parts { + suffixedPart := part + if i < len(parts)-1 { + suffixedPart = fmt.Sprintf("%s%s", part, Separator) + } + suffixedParts[i] = suffixedPart + } + if len(suffixedParts) >= 2 && p.entryType == EntryTypeTree && len(suffixedParts[len(suffixedParts)-1]) == 0 { + // remove empty suffix for tree type + suffixedParts = suffixedParts[:len(suffixedParts)-1] + } + return suffixedParts +} + +func (p *Path) BaseName() string { + var baseName string + parts := p.Split() + if len(parts) > 0 { + if len(parts) > 1 && len(parts[len(parts)-1]) == 0 && p.entryType == EntryTypeTree { + baseName = parts[len(parts)-2] + } else { + baseName = parts[len(parts)-1] + } + } + return baseName +} + +func (p *Path) ParentPath() string { + if p.IsRoot() { + return "" + } + parts := p.Split() + if len(parts) <= 1 { + return "" + } + if len(parts[len(parts)-1]) == 0 && p.entryType == EntryTypeTree { + return JoinPathParts(parts[:len(parts)-2]) + } + return JoinPathParts(parts[:len(parts)-1]) +} + +func (p *Path) IsRoot() bool { + return p.Equals(RootPath) +} + +func JoinPathParts(parts []string) string { + var buf strings.Builder + for pos, part := range parts { + buf.WriteString(part) + if pos != len(parts)-1 && !strings.HasSuffix(part, Separator) { + // if it's not the last part, and there's no separator at the end, add it + buf.WriteString(Separator) + } + } + return buf.String() +} diff --git a/block/path_test.go b/block/path_test.go new file mode 100644 index 00000000..edcedb60 --- /dev/null +++ b/block/path_test.go @@ -0,0 +1,156 @@ +package block_test + +import ( + "strings" + "testing" + + "github.com/davecgh/go-spew/spew" + "github.com/jiaozifs/jiaozifs/block" +) + +func equalStrings(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i, v := range a { + if v != b[i] { + return false + } + } + return true +} + +func TestPath_SplitParts_Objects(t *testing.T) { + testData := []struct { + Path string + Parts []string + }{ + {"/foo/bar", []string{"foo/", "bar"}}, + {"foo/bar/", []string{"foo/", "bar/", ""}}, + {"/foo///bar", []string{"foo/", "/", "/", "bar"}}, + {"/foo///bar/", []string{"foo/", "/", "/", "bar/", ""}}, + {"/foo///bar////", []string{"foo/", "/", "/", "bar/", "/", "/", "/", ""}}, + {"////foo", []string{"/", "/", "/", "foo"}}, + {"//", []string{"/", ""}}, + {"/", []string{""}}, + {"", []string{""}}, + {"/hello/world/another/level", []string{"hello/", "world/", "another/", "level"}}, + {"/hello/world/another/level/", []string{"hello/", "world/", "another/", "level/", ""}}, + } + for i, test := range testData { + p := block.NewPath(test.Path, block.EntryTypeObject) + if !equalStrings(p.Split(), test.Parts) { + t.Fatalf("expected (%d): %s, got %s for path: %s", i, spew.Sdump(test.Parts), spew.Sdump(p.Split()), test.Path) + } + } +} + +func TestPath_SplitParts_Trees(t *testing.T) { + testData := []struct { + Path string + Parts []string + }{ + {"//", []string{"/"}}, + {"/", []string{""}}, + {"", []string{""}}, + {"/foo/bar", []string{"foo/", "bar"}}, + {"foo/bar/", []string{"foo/", "bar/"}}, + {"/hello/world/another/level", []string{"hello/", "world/", "another/", "level"}}, + {"/hello/world/another/level/", []string{"hello/", "world/", "another/", "level/"}}, + } + for i, test := range testData { + p := block.NewPath(test.Path, block.EntryTypeTree) + if !equalStrings(p.Split(), test.Parts) { + t.Fatalf("expected (%d): %s, got %s for path: %s", i, spew.Sdump(test.Parts), spew.Sdump(p.Split()), 
test.Path) + } + } +} + +func TestPath_String(t *testing.T) { + var nilPath *block.Path + testData := []struct { + Path *block.Path + String string + }{ + {block.NewPath("hello/world/another/level", block.EntryTypeObject), "hello/world/another/level"}, + {block.NewPath("/hello/world/another/level", block.EntryTypeObject), "hello/world/another/level"}, + {block.NewPath("/hello/world/another/level/", block.EntryTypeTree), "hello/world/another/level/"}, + {nilPath, ""}, + } + for i, test := range testData { + if !strings.EqualFold(test.Path.String(), test.String) { + t.Fatalf("expected (%d): \"%s\", got \"%s\" for path: \"%s\"", i, test.String, test.Path.String(), test.Path) + } + } +} + +func TestJoin(t *testing.T) { + testData := []struct { + parts []string + expected string + }{ + {[]string{"foo/bar", "baz"}, "foo/bar/baz"}, + {[]string{"foo/bar/", "baz"}, "foo/bar/baz"}, + {[]string{"foo/bar", "", "baz"}, "foo/bar//baz"}, + {[]string{"foo//bar", "baz"}, "foo//bar/baz"}, + {[]string{"foo/bar", ""}, "foo/bar/"}, + {[]string{"foo/bar/", ""}, "foo/bar/"}, + } + for i, test := range testData { + got := block.JoinPathParts(test.parts) + if !strings.EqualFold(got, test.expected) { + t.Fatalf("expected (%d): '%s', got '%s' for %v", i, test.expected, got, test.parts) + } + } +} + +func TestPath_BaseName(t *testing.T) { + testData := []struct { + Path string + BaseName string + EntryType string + }{ + {"/foo", "foo", block.EntryTypeObject}, + {"/foo/bar", "bar", block.EntryTypeObject}, + {"", "", block.EntryTypeTree}, + {"/", "", block.EntryTypeTree}, + {"foo/bar", "bar", block.EntryTypeObject}, + {"foo/bar/", "", block.EntryTypeObject}, + {"foo/bar", "bar", block.EntryTypeTree}, + {"foo/bar/", "bar/", block.EntryTypeTree}, + } + for _, test := range testData { + p := block.NewPath(test.Path, test.EntryType) + if p.BaseName() != test.BaseName { + t.Fatalf("expected BaseName to return %s, got %s for input: %s", test.BaseName, p.BaseName(), test.Path) + } + } +} + +func TestPath_ParentPath(t *testing.T) { + testData := []struct { + Path string + ParentPath string + EntryType string + }{ + {"/", "", block.EntryTypeTree}, + {"foo", "", block.EntryTypeObject}, + {"/foo", "", block.EntryTypeObject}, + {"foo/", "", block.EntryTypeTree}, + {"foo/", "foo/", block.EntryTypeObject}, + {"/foo/bar", "foo/", block.EntryTypeObject}, + {"foo/bar", "foo/", block.EntryTypeObject}, + {"/foo/bar/", "foo/", block.EntryTypeTree}, + {"foo/bar/", "foo/bar/", block.EntryTypeObject}, + {"/foo/bar/baz", "foo/bar/", block.EntryTypeObject}, + {"foo/bar/baz", "foo/bar/", block.EntryTypeObject}, + {"/foo/bar/baz/", "foo/bar/", block.EntryTypeTree}, + {"/foo/bar/baz", "foo/bar/", block.EntryTypeTree}, + } + for _, test := range testData { + p := block.NewPath(test.Path, test.EntryType) + if p.ParentPath() != test.ParentPath { + t.Fatalf("expected ParentPath to return %s, got %s for input: %s", test.ParentPath, p.ParentPath(), test.Path) + } + } +} diff --git a/block/s3/adapter.go b/block/s3/adapter.go new file mode 100644 index 00000000..9df24ded --- /dev/null +++ b/block/s3/adapter.go @@ -0,0 +1,814 @@ +package s3 + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "strings" + "sync/atomic" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/credentials/stscreds" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + 
"github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/aws/smithy-go/middleware" + smithyhttp "github.com/aws/smithy-go/transport/http" + "github.com/jiaozifs/jiaozifs/block" + "github.com/jiaozifs/jiaozifs/block/params" +) + +var ( + ErrS3 = errors.New("s3 error") + ErrMissingETag = fmt.Errorf("%w: missing ETag", ErrS3) +) + +type Adapter struct { + clients *ClientCache + respServer atomic.Pointer[string] + ServerSideEncryption string + ServerSideEncryptionKmsKeyID string + preSignedExpiry time.Duration + sessionExpiryWindow time.Duration + disablePreSigned bool + disablePreSignedUI bool +} + +func WithDiscoverBucketRegion(b bool) func(a *Adapter) { + return func(a *Adapter) { + a.clients.DiscoverBucketRegion(b) + } +} + +func WithPreSignedExpiry(v time.Duration) func(a *Adapter) { + return func(a *Adapter) { + a.preSignedExpiry = v + } +} + +func WithDisablePreSigned(b bool) func(a *Adapter) { + return func(a *Adapter) { + if b { + a.disablePreSigned = true + } + } +} + +func WithDisablePreSignedUI(b bool) func(a *Adapter) { + return func(a *Adapter) { + if b { + a.disablePreSignedUI = true + } + } +} + +func WithServerSideEncryption(s string) func(a *Adapter) { + return func(a *Adapter) { + a.ServerSideEncryption = s + } +} + +func WithServerSideEncryptionKmsKeyID(s string) func(a *Adapter) { + return func(a *Adapter) { + a.ServerSideEncryptionKmsKeyID = s + } +} + +type AdapterOption func(a *Adapter) + +func NewAdapter(ctx context.Context, params params.S3, opts ...AdapterOption) (*Adapter, error) { + cfg, err := LoadConfig(ctx, params) + if err != nil { + return nil, err + } + var sessionExpiryWindow time.Duration + if params.WebIdentity != nil { + sessionExpiryWindow = params.WebIdentity.SessionExpiryWindow + } + a := &Adapter{ + clients: NewClientCache(cfg, params), + preSignedExpiry: block.DefaultPreSignExpiryDuration, + sessionExpiryWindow: sessionExpiryWindow, + } + for _, opt := range opts { + opt(a) + } + return a, nil +} + +func LoadConfig(ctx context.Context, params params.S3) (aws.Config, error) { + var opts []func(*config.LoadOptions) error + + //opts = append(opts, config.WithLogger(log.With("sdk", "aws"))) todo adopt logger + var logMode aws.ClientLogMode + if params.ClientLogRetries { + logMode |= aws.LogRetries + } + if params.ClientLogRequest { + logMode |= aws.LogRequest + } + if logMode != 0 { + opts = append(opts, config.WithClientLogMode(logMode)) + } + if params.Region != "" { + opts = append(opts, config.WithRegion(params.Region)) + } + if params.Profile != "" { + opts = append(opts, config.WithSharedConfigProfile(params.Profile)) + } + if params.CredentialsFile != "" { + opts = append(opts, config.WithSharedCredentialsFiles([]string{params.CredentialsFile})) + } + if params.Credentials.AccessKeyID != "" { + opts = append(opts, config.WithCredentialsProvider( + credentials.NewStaticCredentialsProvider( + params.Credentials.AccessKeyID, + params.Credentials.SecretAccessKey, + params.Credentials.SessionToken, + ), + )) + } + if params.MaxRetries > 0 { + opts = append(opts, config.WithRetryMaxAttempts(params.MaxRetries)) + } + if params.SkipVerifyCertificateTestOnly { + tr := &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec + } + opts = append(opts, config.WithHTTPClient(&http.Client{Transport: tr})) + } + if params.WebIdentity != nil { + wi := *params.WebIdentity // Copy WebIdentity: it will be used asynchronously. 
+ if wi.SessionDuration > 0 { + opts = append(opts, config.WithWebIdentityRoleCredentialOptions( + func(options *stscreds.WebIdentityRoleOptions) { + options.Duration = wi.SessionDuration + }), + ) + } + if wi.SessionExpiryWindow > 0 { + opts = append(opts, config.WithCredentialsCacheOptions( + func(options *aws.CredentialsCacheOptions) { + options.ExpiryWindow = wi.SessionExpiryWindow + }), + ) + } + } + return config.LoadDefaultConfig(ctx, opts...) +} + +func WithClientParams(params params.S3) func(options *s3.Options) { + return func(options *s3.Options) { + if params.Endpoint != "" { + options.BaseEndpoint = aws.String(params.Endpoint) + } + if params.ForcePathStyle { + options.UsePathStyle = true + } + } +} + +func (a *Adapter) Put(ctx context.Context, obj block.ObjectPointer, sizeBytes int64, reader io.Reader, opts block.PutOpts) error { + var err error + defer reportMetrics("Put", time.Now(), &sizeBytes, &err) + + // for unknown size, we assume we like to stream content, will use s3manager to perform the request. + // we assume the caller may not have 1:1 request to s3 put object in this case as it may perform multipart upload + if sizeBytes == -1 { + return a.managerUpload(ctx, obj, reader, opts) + } + + bucket, key, _, err := a.extractParamsFromObj(obj) + if err != nil { + return err + } + + putObject := s3.PutObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + Body: reader, + ContentLength: aws.Int64(sizeBytes), + } + if sizeBytes == 0 { + putObject.Body = http.NoBody + } + if opts.StorageClass != nil { + putObject.StorageClass = types.StorageClass(*opts.StorageClass) + } + if a.ServerSideEncryption != "" { + putObject.ServerSideEncryption = types.ServerSideEncryption(a.ServerSideEncryption) + } + if a.ServerSideEncryptionKmsKeyID != "" { + putObject.SSEKMSKeyId = aws.String(a.ServerSideEncryptionKmsKeyID) + } + + client := a.clients.Get(ctx, bucket) + resp, err := client.PutObject(ctx, &putObject, + retryMaxAttemptsByReader(reader), + s3.WithAPIOptions(v4.SwapComputePayloadSHA256ForUnsignedPayloadMiddleware), + a.registerCaptureServerMiddleware(), + ) + if err != nil { + return err + } + etag := aws.ToString(resp.ETag) + if etag == "" { + return ErrMissingETag + } + return nil +} + +// retryMaxAttemptsByReader return s3 options function +// setup RetryMaxAttempts - if the reader is not seekable, we can't retry the request +func retryMaxAttemptsByReader(reader io.Reader) func(*s3.Options) { + return func(o *s3.Options) { + if _, ok := reader.(io.Seeker); !ok { + o.RetryMaxAttempts = 1 + } + } +} + +// captureServerDeserializeMiddleware extracts the server name from the response and sets it on the block adapter +func (a *Adapter) captureServerDeserializeMiddleware(ctx context.Context, input middleware.DeserializeInput, handler middleware.DeserializeHandler) (middleware.DeserializeOutput, middleware.Metadata, error) { + output, m, err := handler.HandleDeserialize(ctx, input) + if err == nil { + if rawResponse, ok := output.RawResponse.(*smithyhttp.Response); ok { + s := rawResponse.Header.Get("Server") + if s != "" { + a.respServer.Store(&s) + } + } + } + return output, m, err +} + +func (a *Adapter) UploadPart(ctx context.Context, obj block.ObjectPointer, sizeBytes int64, reader io.Reader, uploadID string, partNumber int) (*block.UploadPartResponse, error) { + var err error + defer reportMetrics("UploadPart", time.Now(), &sizeBytes, &err) + bucket, key, _, err := a.extractParamsFromObj(obj) + if err != nil { + return nil, err + } + + uploadPartInput := 
&s3.UploadPartInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + PartNumber: aws.Int32(int32(partNumber)), + UploadId: aws.String(uploadID), + Body: reader, + ContentLength: aws.Int64(sizeBytes), + } + if a.ServerSideEncryption != "" { + uploadPartInput.SSECustomerAlgorithm = &a.ServerSideEncryption + } + if a.ServerSideEncryptionKmsKeyID != "" { + uploadPartInput.SSECustomerKey = &a.ServerSideEncryptionKmsKeyID + } + + client := a.clients.Get(ctx, bucket) + resp, err := client.UploadPart(ctx, uploadPartInput, + retryMaxAttemptsByReader(reader), + s3.WithAPIOptions(v4.SwapComputePayloadSHA256ForUnsignedPayloadMiddleware), + a.registerCaptureServerMiddleware(), + ) + if err != nil { + return nil, err + } + etag := aws.ToString(resp.ETag) + if etag == "" { + return nil, ErrMissingETag + } + return &block.UploadPartResponse{ + ETag: strings.Trim(etag, `"`), + ServerSideHeader: extractSSHeaderUploadPart(resp), + }, nil +} + +func isErrNotFound(err error) bool { + var ( + errNoSuchKey *types.NoSuchKey + errNotFound *types.NotFound + ) + return errors.As(err, &errNoSuchKey) || errors.As(err, &errNotFound) +} + +func (a *Adapter) Get(ctx context.Context, obj block.ObjectPointer, _ int64) (io.ReadCloser, error) { + var err error + var sizeBytes int64 + defer reportMetrics("Get", time.Now(), &sizeBytes, &err) + log := log.With("operation", "GetObject") + bucket, key, qualifiedKey, err := a.extractParamsFromObj(obj) + if err != nil { + return nil, err + } + + getObjectInput := s3.GetObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + } + client := a.clients.Get(ctx, bucket) + objectOutput, err := client.GetObject(ctx, &getObjectInput) + if isErrNotFound(err) { + return nil, block.ErrDataNotFound + } + if err != nil { + log.Errorf("failed to get S3 object bucket %s key %s %v", qualifiedKey.GetStorageNamespace(), qualifiedKey.GetKey(), err) + return nil, err + } + sizeBytes = *objectOutput.ContentLength + return objectOutput.Body, nil +} + +func (a *Adapter) GetWalker(uri *url.URL) (block.Walker, error) { + if err := block.ValidateStorageType(uri, block.StorageTypeS3); err != nil { + return nil, err + } + return NewS3Walker(a.clients.GetDefault()), nil +} + +type CaptureExpiresPresigner struct { + Presigner s3.HTTPPresignerV4 + CredentialsCanExpire bool + CredentialsExpireAt time.Time +} + +func (c *CaptureExpiresPresigner) PresignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) (url string, signedHeader http.Header, err error) { + // capture credentials expiry + c.CredentialsCanExpire = credentials.CanExpire + c.CredentialsExpireAt = credentials.Expires + return c.Presigner.PresignHTTP(ctx, credentials, r, payloadHash, service, region, signingTime, optFns...) 
+} + +func (a *Adapter) GetPreSignedURL(ctx context.Context, obj block.ObjectPointer, mode block.PreSignMode) (string, time.Time, error) { + if a.disablePreSigned { + return "", time.Time{}, block.ErrOperationNotSupported + } + + expiry := time.Now().Add(a.preSignedExpiry) + + log := log.With( + "operation", "GetPreSignedURL", + "namespace", obj.StorageNamespace, + "identifier", obj.Identifier, + "ttl", time.Until(expiry), + ) + bucket, key, _, err := a.extractParamsFromObj(obj) + if err != nil { + log.Errorf("could not resolve namespace %v", err) + return "", time.Time{}, err + } + + client := a.clients.Get(ctx, bucket) + presigner := s3.NewPresignClient(client, + func(options *s3.PresignOptions) { + options.Expires = a.preSignedExpiry + }) + + captureExpiresPresigner := &CaptureExpiresPresigner{} + var req *v4.PresignedHTTPRequest + if mode == block.PreSignModeWrite { + putObjectInput := &s3.PutObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + } + req, err = presigner.PresignPutObject(ctx, putObjectInput, func(o *s3.PresignOptions) { + captureExpiresPresigner.Presigner = o.Presigner + o.Presigner = captureExpiresPresigner + }) + } else { + getObjectInput := &s3.GetObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + } + req, err = presigner.PresignGetObject(ctx, getObjectInput, func(o *s3.PresignOptions) { + captureExpiresPresigner.Presigner = o.Presigner + o.Presigner = captureExpiresPresigner + }) + } + if err != nil { + log.Errorf("could not pre-sign request %v", err) + return "", time.Time{}, err + } + + // In case the credentials can expire, we need to use the earliest expiry time + // we assume that session expiry window is used and adjust the expiry time accordingly. + // AWS Go SDK v2 stores the time to renew credentials in `CredentialsExpireAt`. This is + // a.sessionExpiryWindow before actual credentials expiry. 
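+ // For example (illustrative values only): with preSignedExpiry=15m, credentials
+ // expiring 10m from now and sessionExpiryWindow=5m, the SDK reports
+ // CredentialsExpireAt = now+5m, so the expiry returned below becomes
+ // now+5m+5m = now+10m rather than the requested now+15m.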
+ if captureExpiresPresigner.CredentialsCanExpire && captureExpiresPresigner.CredentialsExpireAt.Before(expiry) { + expiry = captureExpiresPresigner.CredentialsExpireAt.Add(a.sessionExpiryWindow) + } + return req.URL, expiry, nil +} + +func (a *Adapter) Exists(ctx context.Context, obj block.ObjectPointer) (bool, error) { + var err error + defer reportMetrics("Exists", time.Now(), nil, &err) + log := log.With("operation", "HeadObject") + bucket, key, _, err := a.extractParamsFromObj(obj) + if err != nil { + return false, err + } + + input := s3.HeadObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + } + client := a.clients.Get(ctx, bucket) + _, err = client.HeadObject(ctx, &input) + if isErrNotFound(err) { + return false, nil + } + if err != nil { + log.Errorf("failed to stat S3 object %v", err) + return false, err + } + return true, nil +} + +func (a *Adapter) GetRange(ctx context.Context, obj block.ObjectPointer, startPosition int64, endPosition int64) (io.ReadCloser, error) { + var err error + var sizeBytes int64 + defer reportMetrics("GetRange", time.Now(), &sizeBytes, &err) + bucket, key, _, err := a.extractParamsFromObj(obj) + if err != nil { + return nil, err + } + log := log.With("operation", "GetObjectRange") + getObjectInput := s3.GetObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + Range: aws.String(fmt.Sprintf("bytes=%d-%d", startPosition, endPosition)), + } + client := a.clients.Get(ctx, bucket) + objectOutput, err := client.GetObject(ctx, &getObjectInput) + if isErrNotFound(err) { + return nil, block.ErrDataNotFound + } + if err != nil { + log.With( + "start_position", startPosition, + "end_position", endPosition, + ).Errorf("failed to get S3 object range %v", err) + return nil, err + } + sizeBytes = *objectOutput.ContentLength + return objectOutput.Body, nil +} + +func (a *Adapter) GetProperties(ctx context.Context, obj block.ObjectPointer) (block.Properties, error) { + var err error + defer reportMetrics("GetProperties", time.Now(), nil, &err) + bucket, key, _, err := a.extractParamsFromObj(obj) + if err != nil { + return block.Properties{}, err + } + + headObjectParams := &s3.HeadObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + } + client := a.clients.Get(ctx, bucket) + s3Props, err := client.HeadObject(ctx, headObjectParams) + if err != nil { + return block.Properties{}, err + } + return block.Properties{ + StorageClass: aws.String(string(s3Props.StorageClass)), + }, nil +} + +func (a *Adapter) Remove(ctx context.Context, obj block.ObjectPointer) error { + var err error + defer reportMetrics("Remove", time.Now(), nil, &err) + bucket, key, _, err := a.extractParamsFromObj(obj) + if err != nil { + return err + } + + deleteInput := &s3.DeleteObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + } + client := a.clients.Get(ctx, bucket) + _, err = client.DeleteObject(ctx, deleteInput) + if err != nil { + log.Errorf("failed to delete S3 object %v", err) + return err + } + + headInput := &s3.HeadObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + } + const maxWaitDur = 100 * time.Second + waiter := s3.NewObjectNotExistsWaiter(client) + return waiter.Wait(ctx, headInput, maxWaitDur) +} + +func (a *Adapter) copyPart(ctx context.Context, sourceObj, destinationObj block.ObjectPointer, uploadID string, partNumber int, byteRange *string) (*block.UploadPartResponse, error) { + srcKey, err := resolveNamespace(sourceObj) + if err != nil { + return nil, err + } + + bucket, key, _, err := 
a.extractParamsFromObj(destinationObj) + if err != nil { + return nil, err + } + + uploadPartCopyObject := s3.UploadPartCopyInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + PartNumber: aws.Int32(int32(partNumber)), + UploadId: aws.String(uploadID), + CopySource: aws.String(fmt.Sprintf("%s/%s", srcKey.GetStorageNamespace(), srcKey.GetKey())), + } + if byteRange != nil { + uploadPartCopyObject.CopySourceRange = byteRange + } + client := a.clients.Get(ctx, bucket) + resp, err := client.UploadPartCopy(ctx, &uploadPartCopyObject) + if err != nil { + return nil, err + } + if resp == nil || resp.CopyPartResult == nil || resp.CopyPartResult.ETag == nil { + return nil, ErrMissingETag + } + + etag := strings.Trim(*resp.CopyPartResult.ETag, `"`) + return &block.UploadPartResponse{ + ETag: etag, + ServerSideHeader: extractSSHeaderUploadPartCopy(resp), + }, nil +} + +func (a *Adapter) UploadCopyPart(ctx context.Context, sourceObj, destinationObj block.ObjectPointer, uploadID string, partNumber int) (*block.UploadPartResponse, error) { + var err error + defer reportMetrics("UploadCopyPart", time.Now(), nil, &err) + return a.copyPart(ctx, sourceObj, destinationObj, uploadID, partNumber, nil) +} + +func (a *Adapter) UploadCopyPartRange(ctx context.Context, sourceObj, destinationObj block.ObjectPointer, uploadID string, partNumber int, startPosition, endPosition int64) (*block.UploadPartResponse, error) { + var err error + defer reportMetrics("UploadCopyPartRange", time.Now(), nil, &err) + return a.copyPart(ctx, + sourceObj, destinationObj, uploadID, partNumber, + aws.String(fmt.Sprintf("bytes=%d-%d", startPosition, endPosition))) +} + +func (a *Adapter) Copy(ctx context.Context, sourceObj, destinationObj block.ObjectPointer) error { + var err error + defer reportMetrics("Copy", time.Now(), nil, &err) + qualifiedSourceKey, err := resolveNamespace(sourceObj) + if err != nil { + return err + } + + destBucket, destKey, _, err := a.extractParamsFromObj(destinationObj) + if err != nil { + return err + } + + copyObjectInput := &s3.CopyObjectInput{ + Bucket: aws.String(destBucket), + Key: aws.String(destKey), + CopySource: aws.String(qualifiedSourceKey.GetStorageNamespace() + "/" + qualifiedSourceKey.GetKey()), + } + if a.ServerSideEncryption != "" { + copyObjectInput.ServerSideEncryption = types.ServerSideEncryption(a.ServerSideEncryption) + } + if a.ServerSideEncryptionKmsKeyID != "" { + copyObjectInput.SSEKMSKeyId = aws.String(a.ServerSideEncryptionKmsKeyID) + } + _, err = a.clients.Get(ctx, destBucket).CopyObject(ctx, copyObjectInput) + if err != nil { + log.Errorf("failed to copy S3 object %v", err) + } + return err +} + +func (a *Adapter) CreateMultiPartUpload(ctx context.Context, obj block.ObjectPointer, _ *http.Request, opts block.CreateMultiPartUploadOpts) (*block.CreateMultiPartUploadResponse, error) { + var err error + defer reportMetrics("CreateMultiPartUpload", time.Now(), nil, &err) + bucket, key, qualifiedKey, err := a.extractParamsFromObj(obj) + if err != nil { + return nil, err + } + + input := &s3.CreateMultipartUploadInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + ContentType: aws.String(""), + } + if opts.StorageClass != nil { + input.StorageClass = types.StorageClass(*opts.StorageClass) + } + if a.ServerSideEncryption != "" { + input.ServerSideEncryption = types.ServerSideEncryption(a.ServerSideEncryption) + } + if a.ServerSideEncryptionKmsKeyID != "" { + input.SSEKMSKeyId = &a.ServerSideEncryptionKmsKeyID + } + client := a.clients.Get(ctx, bucket) + resp, err := 
client.CreateMultipartUpload(ctx, input) + if err != nil { + return nil, err + } + uploadID := aws.ToString(resp.UploadId) + log.With( + "upload_id", uploadID, + "qualified_ns", qualifiedKey.GetStorageNamespace(), + "qualified_key", qualifiedKey.GetKey(), + "key", obj.Identifier, + ).Debug("created multipart upload") + return &block.CreateMultiPartUploadResponse{ + UploadID: uploadID, + ServerSideHeader: extractSSHeaderCreateMultipartUpload(resp), + }, err +} + +func (a *Adapter) AbortMultiPartUpload(ctx context.Context, obj block.ObjectPointer, uploadID string) error { + var err error + defer reportMetrics("AbortMultiPartUpload", time.Now(), nil, &err) + bucket, key, qualifiedKey, err := a.extractParamsFromObj(obj) + if err != nil { + return err + } + input := &s3.AbortMultipartUploadInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + UploadId: aws.String(uploadID), + } + + client := a.clients.Get(ctx, bucket) + _, err = client.AbortMultipartUpload(ctx, input) + lg := log.With( + "upload_id", uploadID, + "qualified_ns", qualifiedKey.GetStorageNamespace(), + "qualified_key", qualifiedKey.GetKey(), + "key", obj.Identifier, + ) + if err != nil { + lg.Error("Failed to abort multipart upload") + return err + } + lg.Debug("aborted multipart upload") + return nil +} + +func convertFromBlockMultipartUploadCompletion(multipartList *block.MultipartUploadCompletion) *types.CompletedMultipartUpload { + parts := make([]types.CompletedPart, 0, len(multipartList.Part)) + for _, p := range multipartList.Part { + parts = append(parts, types.CompletedPart{ + ETag: aws.String(p.ETag), + PartNumber: aws.Int32(int32(p.PartNumber)), + }) + } + return &types.CompletedMultipartUpload{Parts: parts} +} + +func (a *Adapter) CompleteMultiPartUpload(ctx context.Context, obj block.ObjectPointer, uploadID string, multipartList *block.MultipartUploadCompletion) (*block.CompleteMultiPartUploadResponse, error) { + var err error + defer reportMetrics("CompleteMultiPartUpload", time.Now(), nil, &err) + bucket, key, qualifiedKey, err := a.extractParamsFromObj(obj) + if err != nil { + return nil, err + } + input := &s3.CompleteMultipartUploadInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + UploadId: aws.String(uploadID), + MultipartUpload: convertFromBlockMultipartUploadCompletion(multipartList), + } + lg := log.With( + "upload_id", uploadID, + "qualified_ns", qualifiedKey.GetStorageNamespace(), + "qualified_key", qualifiedKey.GetKey(), + "key", obj.Identifier, + ) + client := a.clients.Get(ctx, bucket) + resp, err := client.CompleteMultipartUpload(ctx, input) + if err != nil { + lg.Errorf("CompleteMultipartUpload failed %v", err) + return nil, err + } + lg.Debug("completed multipart upload") + headInput := &s3.HeadObjectInput{Bucket: &bucket, Key: &key} + headResp, err := client.HeadObject(ctx, headInput) + if err != nil { + return nil, err + } + + etag := strings.Trim(aws.ToString(resp.ETag), `"`) + return &block.CompleteMultiPartUploadResponse{ + ETag: etag, + ContentLength: *headResp.ContentLength, + ServerSideHeader: extractSSHeaderCompleteMultipartUpload(resp), + }, nil +} + +func (a *Adapter) BlockstoreType() string { + return block.BlockstoreTypeS3 +} + +func (a *Adapter) GetStorageNamespaceInfo() block.StorageNamespaceInfo { + info := block.DefaultStorageNamespaceInfo(block.BlockstoreTypeS3) + if a.disablePreSigned { + info.PreSignSupport = false + } + if !(a.disablePreSignedUI || a.disablePreSigned) { + info.PreSignSupportUI = true + } + return info +} + +func resolveNamespace(obj 
block.ObjectPointer) (block.CommonQualifiedKey, error) { + qualifiedKey, err := block.DefaultResolveNamespace(obj.StorageNamespace, obj.Identifier, obj.IdentifierType) + if err != nil { + return qualifiedKey, err + } + if qualifiedKey.GetStorageType() != block.StorageTypeS3 { + return qualifiedKey, fmt.Errorf("expected storage type s3: %w", block.ErrInvalidAddress) + } + return qualifiedKey, nil +} + +func (a *Adapter) ResolveNamespace(storageNamespace, key string, identifierType block.IdentifierType) (block.QualifiedKey, error) { + return block.DefaultResolveNamespace(storageNamespace, key, identifierType) +} + +func (a *Adapter) RuntimeStats() map[string]string { + respServer := aws.ToString(a.respServer.Load()) + if respServer == "" { + return nil + } + return map[string]string{ + "resp_server": respServer, + } +} + +func (a *Adapter) managerUpload(ctx context.Context, obj block.ObjectPointer, reader io.Reader, opts block.PutOpts) error { + bucket, key, _, err := a.extractParamsFromObj(obj) + if err != nil { + return err + } + + client := a.clients.Get(ctx, bucket) + uploader := manager.NewUploader(client) + input := &s3.PutObjectInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + Body: reader, + } + if opts.StorageClass != nil { + input.StorageClass = types.StorageClass(*opts.StorageClass) + } + if a.ServerSideEncryption != "" { + input.ServerSideEncryption = types.ServerSideEncryption(a.ServerSideEncryption) + } + if a.ServerSideEncryptionKmsKeyID != "" { + input.SSEKMSKeyId = aws.String(a.ServerSideEncryptionKmsKeyID) + } + + output, err := uploader.Upload(ctx, input) + if err != nil { + return err + } + if aws.ToString(output.ETag) == "" { + return ErrMissingETag + } + return nil +} + +func (a *Adapter) extractParamsFromObj(obj block.ObjectPointer) (string, string, block.QualifiedKey, error) { + qk, err := a.ResolveNamespace(obj.StorageNamespace, obj.Identifier, obj.IdentifierType) + if err != nil { + return "", "", nil, err + } + bucket, key := ExtractParamsFromQK(qk) + return bucket, key, qk, nil +} + +func (a *Adapter) registerCaptureServerMiddleware() func(*s3.Options) { + fn := middleware.DeserializeMiddlewareFunc("ResponseServerValue", a.captureServerDeserializeMiddleware) + return s3.WithAPIOptions(func(stack *middleware.Stack) error { + return stack.Deserialize.Add(fn, middleware.After) + }) +} + +func ExtractParamsFromQK(qk block.QualifiedKey) (string, string) { + bucket, prefix, _ := strings.Cut(qk.GetStorageNamespace(), "/") + key := qk.GetKey() + if len(prefix) > 0 { // Avoid situations where prefix is empty or "/" + key = prefix + "/" + key + } + return bucket, key +} diff --git a/block/s3/adapter_test.go b/block/s3/adapter_test.go new file mode 100644 index 00000000..6a368dae --- /dev/null +++ b/block/s3/adapter_test.go @@ -0,0 +1,86 @@ +package s3_test + +import ( + "context" + "net/url" + "regexp" + "testing" + + "github.com/jiaozifs/jiaozifs/block/blocktest" + "github.com/jiaozifs/jiaozifs/block/params" + "github.com/jiaozifs/jiaozifs/block/s3" + "github.com/stretchr/testify/require" +) + +func getS3BlockAdapter(t *testing.T) *s3.Adapter { + s3params := params.S3{ + Region: "us-east-1", + Endpoint: blockURL, + ForcePathStyle: true, + DiscoverBucketRegion: false, + Credentials: params.S3Credentials{ + AccessKeyID: minioTestAccessKeyID, + SecretAccessKey: minioTestSecretAccessKey, + }, + } + adapter, err := s3.NewAdapter(context.Background(), s3params) + if err != nil { + t.Fatal("cannot create s3 adapter: ", err) + } + return adapter +} + +func 
TestS3Adapter(t *testing.T) { + basePath, err := url.JoinPath("s3://", bucketName) + require.NoError(t, err) + localPath, err := url.JoinPath(basePath, "lakefs") + require.NoError(t, err) + externalPath, err := url.JoinPath(basePath, "external") + require.NoError(t, err) + + adapter := getS3BlockAdapter(t) + blocktest.AdapterTest(t, adapter, localPath, externalPath) +} + +func TestAdapterNamespace(t *testing.T) { + adapter := getS3BlockAdapter(t) + expr, err := regexp.Compile(adapter.GetStorageNamespaceInfo().ValidityRegex) + require.NoError(t, err) + + tests := []struct { + Name string + Namespace string + Success bool + }{ + { + Name: "valid_path", + Namespace: "s3://bucket/path/to/repo1", + Success: true, + }, + { + Name: "double_slash", + Namespace: "s3://bucket/path//to/repo1", + Success: true, + }, + { + Name: "invalid_schema", + Namespace: "s3:/test/adls/core/windows/net", + Success: false, + }, + { + Name: "invalid_path", + Namespace: "https://test/adls/core/windows/net", + Success: false, + }, + { + Name: "invalid_string", + Namespace: "this is a bad string", + Success: false, + }, + } + for _, tt := range tests { + t.Run(tt.Name, func(t *testing.T) { + require.Equal(t, tt.Success, expr.MatchString(tt.Namespace)) + }) + } +} diff --git a/block/s3/client_cache.go b/block/s3/client_cache.go new file mode 100644 index 00000000..d2ae08f6 --- /dev/null +++ b/block/s3/client_cache.go @@ -0,0 +1,147 @@ +package s3 + +import ( + "context" + "sync" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + logging "github.com/ipfs/go-log/v2" + "github.com/jiaozifs/jiaozifs/block/params" +) + +var log = logging.Logger("s3") + +type ( + clientFactory func(region string) *s3.Client + s3RegionGetter func(ctx context.Context, bucket string) (string, error) +) + +type ClientCache struct { + mu sync.Mutex + regionClient map[string]*s3.Client + bucketRegion map[string]string + awsConfig aws.Config + defaultClient *s3.Client + clientFactory clientFactory + s3RegionGetter s3RegionGetter +} + +func NewClientCache(awsConfig aws.Config, params params.S3) *ClientCache { + clientFactory := newClientFactory(awsConfig, WithClientParams(params)) + defaultClient := clientFactory(awsConfig.Region) + clientCache := &ClientCache{ + regionClient: make(map[string]*s3.Client), + bucketRegion: make(map[string]string), + awsConfig: awsConfig, + defaultClient: defaultClient, + clientFactory: clientFactory, + } + clientCache.DiscoverBucketRegion(true) + return clientCache +} + +// newClientFactory returns a function that creates a new S3 client with the given region. +// accepts aws configuration and list of s3 options functions to apply with the s3 client. +// the factory function is used to create a new client for a region when it is not cached. 
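+// A minimal usage sketch (values are illustrative):
+//
+//	factory := newClientFactory(awsConfig, WithClientParams(params.S3{ForcePathStyle: true}))
+//	usEast := factory("us-east-1")
+//	euWest := factory("eu-west-1") // independent client per region, sharing one aws.Config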
+func newClientFactory(awsConfig aws.Config, s3OptFns ...func(options *s3.Options)) clientFactory { + return func(region string) *s3.Client { + return s3.NewFromConfig(awsConfig, func(options *s3.Options) { + for _, opts := range s3OptFns { + opts(options) + } + options.Region = region + }) + } +} + +func (c *ClientCache) SetClientFactory(clientFactory clientFactory) { + c.clientFactory = clientFactory +} + +func (c *ClientCache) SetS3RegionGetter(s3RegionGetter s3RegionGetter) { + c.s3RegionGetter = s3RegionGetter +} + +func (c *ClientCache) DiscoverBucketRegion(b bool) { + if b { + c.s3RegionGetter = c.getBucketRegionFromAWS + } else { + c.s3RegionGetter = c.getBucketRegionDefault + } +} + +func (c *ClientCache) getBucketRegionFromAWS(ctx context.Context, bucket string) (string, error) { + return manager.GetBucketRegion(ctx, c.defaultClient, bucket) +} + +func (c *ClientCache) getBucketRegionDefault(_ context.Context, _ string) (string, error) { + return c.awsConfig.Region, nil +} + +func (c *ClientCache) Get(ctx context.Context, bucket string) *s3.Client { + client, region := c.cachedClientByBucket(bucket) + if client != nil { + return client + } + + // lookup region if needed + if region == "" { + region = c.refreshBucketRegion(ctx, bucket) + if client, ok := c.cachedClientByRegion(region); ok { + return client + } + } + + // create client and update cache + log.With("region", region).Debug("creating client for region") + client = c.clientFactory(region) + + // re-check if a client was created by another goroutine + // keep using the existing client and discard the new one + c.mu.Lock() + existingClient, existingFound := c.regionClient[region] + if existingFound { + client = existingClient + } else { + c.regionClient[region] = client + } + c.mu.Unlock() + return client +} + +func (c *ClientCache) cachedClientByBucket(bucket string) (*s3.Client, string) { + c.mu.Lock() + defer c.mu.Unlock() + if region, ok := c.bucketRegion[bucket]; ok { + return c.regionClient[region], region + } + return nil, "" +} + +func (c *ClientCache) cachedClientByRegion(region string) (*s3.Client, bool) { + c.mu.Lock() + defer c.mu.Unlock() + client, ok := c.regionClient[region] + return client, ok +} + +func (c *ClientCache) refreshBucketRegion(ctx context.Context, bucket string) string { + region, err := c.s3RegionGetter(ctx, bucket) + if err != nil { + // fallback to default region + region = c.awsConfig.Region + log.With("default_region", region). 
+ Error("Failed to get region for bucket, falling back to default region") + } + // update bucket to region cache + c.mu.Lock() + c.bucketRegion[bucket] = region + c.mu.Unlock() + return region +} + +func (c *ClientCache) GetDefault() *s3.Client { + return c.defaultClient +} diff --git a/block/s3/client_cache_test.go b/block/s3/client_cache_test.go new file mode 100644 index 00000000..3c9255e2 --- /dev/null +++ b/block/s3/client_cache_test.go @@ -0,0 +1,114 @@ +package s3_test + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/aws/aws-sdk-go-v2/config" + awss3 "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/go-test/deep" + "github.com/jiaozifs/jiaozifs/block/params" + "github.com/jiaozifs/jiaozifs/block/s3" +) + +var errRegion = errors.New("failed to get region") + +func TestClientCache(t *testing.T) { + const defaultRegion = "us-west-2" + ctx := context.Background() + cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(defaultRegion)) + require.NoError(t, err) + + tests := []struct { + name string + bucketToRegion map[string]string + bucketCalls []string + regionErrorsIndexes map[int]bool + }{ + { + name: "two_buckets_two_regions", + bucketToRegion: map[string]string{"us-bucket": "us-east-1", "eu-bucket": "eu-west-1"}, + bucketCalls: []string{"us-bucket", "us-bucket", "us-bucket", "eu-bucket", "eu-bucket", "eu-bucket"}, + }, + { + name: "multiple_buckets_two_regions", + bucketToRegion: map[string]string{"us-bucket-1": "us-east-1", "us-bucket-2": "us-east-1", "us-bucket-3": "us-east-1", "eu-bucket-1": "eu-west-1", "eu-bucket-2": "eu-west-1"}, + bucketCalls: []string{"us-bucket-1", "us-bucket-2", "us-bucket-3", "eu-bucket-1", "eu-bucket-2"}, + }, + { + name: "error_on_get_region", + bucketToRegion: map[string]string{"us-bucket": "us-east-1", "eu-bucket": "eu-west-1"}, + bucketCalls: []string{"us-bucket", "us-bucket", "us-bucket", "eu-bucket", "eu-bucket", "eu-bucket"}, + regionErrorsIndexes: map[int]bool{3: true}, + }, + { + name: "all_errors", + bucketToRegion: map[string]string{"us-bucket-1": "us-east-1", "us-bucket-2": "us-east-1", "us-bucket-3": "us-east-1", "eu-bucket-1": "eu-west-1", "eu-bucket-2": "eu-west-1"}, + bucketCalls: []string{"us-bucket-1", "us-bucket-2", "us-bucket-3", "eu-bucket-1", "eu-bucket-2"}, + regionErrorsIndexes: map[int]bool{0: true, 1: true, 2: true, 3: true, 4: true}, + }, + { + name: "alternating_regions", + bucketToRegion: map[string]string{"us-bucket-1": "us-east-1", "us-bucket-2": "us-east-1", "us-bucket-3": "us-east-1", "eu-bucket-1": "eu-west-1", "eu-bucket-2": "eu-west-1"}, + bucketCalls: []string{"us-bucket-1", "eu-bucket-1", "us-bucket-2", "eu-bucket-2", "us-bucket-3", "us-bucket-1", "eu-bucket-1", "us-bucket-2", "eu-bucket-2", "us-bucket-3"}, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var callIdx int + var bucket string + actualClientsCreated := make(map[string]bool) + expectedClientsCreated := make(map[string]bool) + actualRegionFetch := make(map[string]bool) + expectedRegionFetch := make(map[string]bool) + + c := s3.NewClientCache(cfg, params.S3{}) // params are ignored as we use custom client factory + + c.SetClientFactory(func(region string) *awss3.Client { + if actualClientsCreated[region] { + t.Fatalf("client created more than once for a region") + } + actualClientsCreated[region] = true + return awss3.NewFromConfig(cfg, func(o *awss3.Options) { + o.Region = region + }) + }) + + c.SetS3RegionGetter(func(ctx context.Context, bucket string) 
(string, error) { + if actualRegionFetch[bucket] { + t.Fatalf("region fetched more than once for bucket") + } + actualRegionFetch[bucket] = true + if test.regionErrorsIndexes[callIdx] { + return "", errRegion + } + return test.bucketToRegion[bucket], nil + }) + + alreadyCalled := make(map[string]bool) + for callIdx, bucket = range test.bucketCalls { + expectedRegionFetch[bucket] = true // for every bucket, there should be exactly one region fetch + if _, ok := alreadyCalled[bucket]; !ok { + if test.regionErrorsIndexes[callIdx] { + // if there's an error, a client should be created for the default region + expectedClientsCreated[defaultRegion] = true + } else { + // for every region, a client is created exactly once + expectedClientsCreated[test.bucketToRegion[bucket]] = true + } + } + alreadyCalled[bucket] = true + c.Get(ctx, bucket) + } + if diff := deep.Equal(expectedClientsCreated, actualClientsCreated); diff != nil { + t.Fatal("unexpected client creation count: ", diff) + } + if diff := deep.Equal(expectedRegionFetch, actualRegionFetch); diff != nil { + t.Fatal("unexpected region fetch count. diff: ", diff) + } + }) + } +} diff --git a/block/s3/extract_sse.go b/block/s3/extract_sse.go new file mode 100644 index 00000000..dc0169dc --- /dev/null +++ b/block/s3/extract_sse.go @@ -0,0 +1,84 @@ +package s3 + +import ( + "net/http" + + "github.com/aws/aws-sdk-go-v2/service/s3" +) + +// extractSSHeaderUploadPart extracts the x-amz-server-side-* headers from the given +// UploadPartOutput response. +func extractSSHeaderUploadPart(resp *s3.UploadPartOutput) http.Header { + // x-amz-server-side-* headers + headers := make(http.Header) + if resp.SSECustomerAlgorithm != nil { + headers.Set("X-Amz-Server-Side-Encryption-Customer-Algorithm", *resp.SSECustomerAlgorithm) + } + if resp.SSECustomerKeyMD5 != nil { + headers.Set("X-Amz-Server-Side-Encryption-Customer-Key-Md5", *resp.SSECustomerKeyMD5) + } + if resp.SSEKMSKeyId != nil { + headers.Set("X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id", *resp.SSEKMSKeyId) + } + if resp.ServerSideEncryption != "" { + headers.Set("X-Amz-Server-Side-Encryption", string(resp.ServerSideEncryption)) + } + return headers +} + +// extractSSHeaderUploadPartCopy extracts the x-amz-server-side-* headers from the given +// UploadPartCopyOutput response. +func extractSSHeaderUploadPartCopy(resp *s3.UploadPartCopyOutput) http.Header { + // x-amz-server-side-* headers + headers := make(http.Header) + if resp.SSECustomerAlgorithm != nil { + headers.Set("X-Amz-Server-Side-Encryption-Customer-Algorithm", *resp.SSECustomerAlgorithm) + } + if resp.SSECustomerKeyMD5 != nil { + headers.Set("X-Amz-Server-Side-Encryption-Customer-Key-Md5", *resp.SSECustomerKeyMD5) + } + if resp.SSEKMSKeyId != nil { + headers.Set("X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id", *resp.SSEKMSKeyId) + } + if resp.ServerSideEncryption != "" { + headers.Set("X-Amz-Server-Side-Encryption", string(resp.ServerSideEncryption)) + } + return headers +} + +// extractSSHeaderCreateMultipartUpload extracts the x-amz-server-side-* headers from the given +// CreateMultipartUploadOutput response. 
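+// For an SSE-KMS upload, for example, the returned header would typically carry
+// "X-Amz-Server-Side-Encryption: aws:kms" plus the KMS key id; the exact set
+// depends on the upload's encryption settings.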
+func extractSSHeaderCreateMultipartUpload(resp *s3.CreateMultipartUploadOutput) http.Header { + // x-amz-server-side-* headers + headers := make(http.Header) + if resp.SSECustomerAlgorithm != nil { + headers.Set("X-Amz-Server-Side-Encryption-Customer-Algorithm", *resp.SSECustomerAlgorithm) + } + if resp.SSECustomerKeyMD5 != nil { + headers.Set("X-Amz-Server-Side-Encryption-Customer-Key-Md5", *resp.SSECustomerKeyMD5) + } + if resp.SSEKMSKeyId != nil { + headers.Set("X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id", *resp.SSEKMSKeyId) + } + if resp.ServerSideEncryption != "" { + headers.Set("X-Amz-Server-Side-Encryption", string(resp.ServerSideEncryption)) + } + if resp.SSEKMSEncryptionContext != nil { + headers.Set("X-Amz-Server-Side-Encryption-Context", *resp.SSEKMSEncryptionContext) + } + return headers +} + +// extractSSHeaderCompleteMultipartUpload extracts the x-amz-server-side-* headers from the given +// CompleteMultipartUploadOutput response. +func extractSSHeaderCompleteMultipartUpload(resp *s3.CompleteMultipartUploadOutput) http.Header { + // x-amz-server-side-* headers + headers := make(http.Header) + if resp.SSEKMSKeyId != nil { + headers.Set("X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id", *resp.SSEKMSKeyId) + } + if resp.ServerSideEncryption != "" { + headers.Set("X-Amz-Server-Side-Encryption", string(resp.ServerSideEncryption)) + } + return headers +} diff --git a/block/s3/main_test.go b/block/s3/main_test.go new file mode 100644 index 00000000..8fd91a5f --- /dev/null +++ b/block/s3/main_test.go @@ -0,0 +1,86 @@ +package s3_test + +import ( + "context" + "fmt" + "log" + "os" + "testing" + + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/ory/dockertest/v3" +) + +const ( + minioContainerTimeoutSeconds = 10 * 60 // 10 min + bucketName = "bucket1" + minioTestEndpoint = "127.0.0.1" + minioTestAccessKeyID = "Q3AM3UQ867SPQQA43P2F" + minioTestSecretAccessKey = "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" +) + +var ( + blockURL string + pool *dockertest.Pool +) + +func newClient(port string) (*minio.Client, error) { + creds := credentials.NewStaticV4(minioTestAccessKeyID, minioTestSecretAccessKey, "") + return minio.New(fmt.Sprintf("%s:%s", minioTestEndpoint, port), &minio.Options{Creds: creds}) +} + +func TestMain(m *testing.M) { + var err error + pool, err = dockertest.NewPool("") + if err != nil { + log.Fatalf("Could not connect to Docker: %s", err) + } + resource, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "minio/minio", + Tag: "RELEASE.2023-06-09T07-32-12Z", + Env: []string{ + fmt.Sprintf("MINIO_ROOT_USER=%s", minioTestAccessKeyID), + fmt.Sprintf("MINIO_ROOT_PASSWORD=%s", minioTestSecretAccessKey), + }, + Cmd: []string{ + "server", + "start", + }, + }) + if err != nil { + panic(err) + } + + // set cleanup + closer := func() { + err := pool.Purge(resource) + if err != nil { + panic("could not purge minio container: " + err.Error()) + } + } + + // expire, just to make sure + err = resource.Expire(minioContainerTimeoutSeconds) + if err != nil { + panic("could not expire minio container: " + err.Error()) + } + + // Create a test client and bucket + client, err := newClient(resource.GetPort("9000/tcp")) + if err != nil { + log.Fatalf("create client: %s", err) + } + blockURL = client.EndpointURL().String() + + err = client.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{ + Region: "us-east-1", + }) + if err != nil { + log.Fatalf("create bucket: %s", err) + } + + code := m.Run() + closer() + os.Exit(code) 
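+ // closer() is called explicitly above instead of being deferred, because
+ // os.Exit terminates the process without running deferred functions.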
+} diff --git a/block/s3/stats.go b/block/s3/stats.go new file mode 100644 index 00000000..571a1ba3 --- /dev/null +++ b/block/s3/stats.go @@ -0,0 +1,31 @@ +package s3 + +import ( + "strconv" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var durationHistograms = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "s3_operation_duration_seconds", + Help: "durations of outgoing s3 operations", + }, + []string{"operation", "error"}) + +var requestSizeHistograms = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "s3_operation_size_bytes", + Help: "handled sizes of outgoing s3 operations", + Buckets: prometheus.ExponentialBuckets(1, 10, 10), //nolint: gomnd + }, []string{"operation", "error"}) + +func reportMetrics(operation string, start time.Time, sizeBytes *int64, err *error) { + isErrStr := strconv.FormatBool(*err != nil) + durationHistograms.WithLabelValues(operation, isErrStr).Observe(time.Since(start).Seconds()) + if sizeBytes != nil { + requestSizeHistograms.WithLabelValues(operation, isErrStr).Observe(float64(*sizeBytes)) + } +} diff --git a/block/s3/testdata/chunk250_data500.input b/block/s3/testdata/chunk250_data500.input new file mode 100755 index 00000000..3e858b7d --- /dev/null +++ b/block/s3/testdata/chunk250_data500.input @@ -0,0 +1 @@ +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA \ No newline at end of file diff --git a/block/s3/testdata/chunk250_data500.output b/block/s3/testdata/chunk250_data500.output new file mode 100755 index 00000000..f3e8104e --- /dev/null +++ b/block/s3/testdata/chunk250_data500.output @@ -0,0 +1,6 @@ +fa;chunk-signature=4ffd0dae90275680909132c7a3dc53619e81f5b80b0c6a3a37d1f4c1645ff9ba +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +fa;chunk-signature=88a17fee80f2b96bed975fae93cc365aec4d4a5ee99c7ec3423f688a4c752b7e +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +0;chunk-signature=e35c492bbd038bca16cf479b3cf20ae206969e66ee75cae87a590cf697723504 + diff --git a/block/s3/testdata/chunk250_data510.input b/block/s3/testdata/chunk250_data510.input new file mode 100755 index 00000000..44e098b2 --- /dev/null +++ b/block/s3/testdata/chunk250_data510.input @@ -0,0 +1 @@ 
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA \ No newline at end of file diff --git a/block/s3/testdata/chunk250_data510.output b/block/s3/testdata/chunk250_data510.output new file mode 100755 index 00000000..c0faf0e2 --- /dev/null +++ b/block/s3/testdata/chunk250_data510.output @@ -0,0 +1,8 @@ +fa;chunk-signature=38734a4c4d2c4be07584b860262067890ea7ff8ddd90a75956484bc3988b2c08 +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +fa;chunk-signature=0b005f753b4247ca45b1005da539ad6264792219cfefa011018295df70a4f82a +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +a;chunk-signature=4b888cb2aa55e61e204229e384cce455af2439fe7a9e8f82880a6e4f6104a715 +AAAAAAAAAA +0;chunk-signature=62ef81f37a6656d2d4d45eded4c77efaeacd0e92a1ca7a1acc4ecfe6e0670685 + diff --git a/block/s3/testdata/chunk3000_data10.input b/block/s3/testdata/chunk3000_data10.input new file mode 100755 index 00000000..83cfb41e --- /dev/null +++ b/block/s3/testdata/chunk3000_data10.input @@ -0,0 +1 @@ +AAAAAAAAAA \ No newline at end of file diff --git a/block/s3/testdata/chunk3000_data10.output b/block/s3/testdata/chunk3000_data10.output new file mode 100755 index 00000000..aa0a7141 --- /dev/null +++ b/block/s3/testdata/chunk3000_data10.output @@ -0,0 +1,4 @@ +a;chunk-signature=82b3811727818699ea3bfcd68a8fd8c011c9dd72d4e04560b920440b166512c2 +AAAAAAAAAA +0;chunk-signature=966f15e070055c94efba15867341775173656745331cf57b7e5a54dd3a49a94b + diff --git a/block/s3/testdata/chunk5_data0.input b/block/s3/testdata/chunk5_data0.input new file mode 100755 index 00000000..e69de29b diff --git a/block/s3/testdata/chunk5_data0.output b/block/s3/testdata/chunk5_data0.output new file mode 100755 index 00000000..7a4394ba --- /dev/null +++ b/block/s3/testdata/chunk5_data0.output @@ -0,0 +1,2 @@ +0;chunk-signature=6533eceb4501215c93de7db8623da836c1d3308a00dada9f171ba3066aba782e + diff --git a/block/s3/testdata/chunk5_data10.input b/block/s3/testdata/chunk5_data10.input new file mode 100755 index 00000000..83cfb41e --- /dev/null +++ b/block/s3/testdata/chunk5_data10.input @@ -0,0 +1 @@ +AAAAAAAAAA \ No newline at end of file diff --git a/block/s3/testdata/chunk5_data10.output b/block/s3/testdata/chunk5_data10.output new file mode 100755 index 00000000..37f79158 --- /dev/null +++ b/block/s3/testdata/chunk5_data10.output @@ -0,0 +1,6 @@ +5;chunk-signature=688f3581d7837cc4bd0bf9b6c08685fbc4c8ff8c3f97d66ae9d947f50560a116 +AAAAA +5;chunk-signature=b931a89dff2f10bf1e825e6619dd675cc97648b1bb79100ff2477f91291aa34f +AAAAA +0;chunk-signature=c48cf0da01fe0ba5ce242ac82354b44223f7235455f2191bb4b29f2e28bfbe46 + diff --git a/block/s3/testdata/chunk600_data240.input b/block/s3/testdata/chunk600_data240.input new file 
mode 100755 index 00000000..ec3e9248 --- /dev/null +++ b/block/s3/testdata/chunk600_data240.input @@ -0,0 +1 @@ +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA \ No newline at end of file diff --git a/block/s3/testdata/chunk600_data240.output b/block/s3/testdata/chunk600_data240.output new file mode 100755 index 00000000..5cea9aaa --- /dev/null +++ b/block/s3/testdata/chunk600_data240.output @@ -0,0 +1,4 @@ +f0;chunk-signature=8f8478f8eb60b9c93164b7ed36b408f587d09b90bcde4b1081572488344f1f28 +AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA +0;chunk-signature=ee8146fb5aca93e210892961e513d7e003402ea7e89722bdfbd7a9a88ec03ce1 + diff --git a/block/s3/walker.go b/block/s3/walker.go new file mode 100644 index 00000000..5a5de76d --- /dev/null +++ b/block/s3/walker.go @@ -0,0 +1,97 @@ +package s3 + +import ( + "context" + "fmt" + "net/url" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/jiaozifs/jiaozifs/block" +) + +type Walker struct { + client *s3.Client + mark block.Mark +} + +func NewS3Walker(client *s3.Client) *Walker { + return &Walker{ + client: client, + mark: block.Mark{HasMore: true}, + } +} + +func (s *Walker) Walk(ctx context.Context, storageURI *url.URL, op block.WalkOptions, walkFn func(e block.ObjectStoreEntry) error) error { + var continuation *string + const maxKeys = 1000 + prefix := strings.TrimLeft(storageURI.Path, "/") + + // basePath is the path relative to which the walk is done. The key of the resulting entries will be relative to this path. + // As the original prefix might not end with a separator, it cannot be used for the + // trim purpose, as this will create partial "folder" names. When the basePath is + // trimmed from the key, the remains will be the object name. 
+ // Example: + // Say we have the following keys: + // pref/object + // pref/obj/another + // If we specify prefix="pref/obj" (both keys will be listed) then basePath="pref/" and the trim result + // for the keys will be: + // object + // obj/another + var basePath string + if idx := strings.LastIndex(prefix, "/"); idx != -1 { + basePath = prefix[:idx+1] + } + bucket := storageURI.Host + for { + result, err := s.client.ListObjectsV2(ctx, &s3.ListObjectsV2Input{ + Bucket: aws.String(bucket), + ContinuationToken: continuation, + MaxKeys: aws.Int32(maxKeys), + Prefix: aws.String(prefix), + StartAfter: aws.String(op.After), + }) + if continuation != nil { + s.mark.ContinuationToken = *continuation + } + if err != nil { + return err + } + for _, record := range result.Contents { + key := aws.ToString(record.Key) + addr := fmt.Sprintf("s3://%s/%s", bucket, key) + ent := block.ObjectStoreEntry{ + FullKey: key, + RelativeKey: strings.TrimPrefix(key, basePath), + Address: addr, + ETag: strings.Trim(aws.ToString(record.ETag), "\""), + Mtime: aws.ToTime(record.LastModified), + Size: *record.Size, + } + s.mark.LastKey = key + err := walkFn(ent) + if err != nil { + return err + } + } + if !*result.IsTruncated { // TODO: IsTruncated is dereferenced without a nil check; confirm it is always set (including by S3-compatible stores) or handle nil explicitly + break + } + continuation = result.NextContinuationToken + } + s.mark = block.Mark{ + LastKey: "", + HasMore: false, + } + return nil +} + +func (s *Walker) Marker() block.Mark { + return s.mark +} + +func (s *Walker) GetSkippedEntries() []block.ObjectStoreEntry { + return nil +} diff --git a/block/transient/adapter.go b/block/transient/adapter.go new file mode 100644 index 00000000..090bf0a2 --- /dev/null +++ b/block/transient/adapter.go @@ -0,0 +1,160 @@ +package transient + +import ( + "context" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "io" + "net/http" + "net/url" + "time" + + "github.com/google/uuid" + "github.com/jiaozifs/jiaozifs/block" +) + +type Adapter struct{} + +func New(_ context.Context) *Adapter { + return &Adapter{} +} + +func (a *Adapter) Put(_ context.Context, _ block.ObjectPointer, _ int64, reader io.Reader, _ block.PutOpts) error { + _, err := io.Copy(io.Discard, reader) + return err +} + +func (a *Adapter) Get(_ context.Context, _ block.ObjectPointer, expectedSize int64) (io.ReadCloser, error) { + if expectedSize < 0 { + return nil, io.ErrUnexpectedEOF + } + return io.NopCloser(&io.LimitedReader{R: rand.Reader, N: expectedSize}), nil +} + +func (a *Adapter) GetWalker(_ *url.URL) (block.Walker, error) { + return nil, block.ErrOperationNotSupported +} + +func (a *Adapter) GetPreSignedURL(_ context.Context, _ block.ObjectPointer, _ block.PreSignMode) (string, time.Time, error) { + return "", time.Time{}, block.ErrOperationNotSupported +} + +func (a *Adapter) Exists(_ context.Context, _ block.ObjectPointer) (bool, error) { + return true, nil +} + +func (a *Adapter) GetRange(_ context.Context, _ block.ObjectPointer, startPosition int64, endPosition int64) (io.ReadCloser, error) { + n := endPosition - startPosition + if n < 0 { + return nil, io.ErrUnexpectedEOF + } + reader := &io.LimitedReader{ + R: rand.Reader, + N: n, + } + return io.NopCloser(reader), nil +} + +func (a *Adapter) GetProperties(_ context.Context, _ block.ObjectPointer) (block.Properties, error) { + return block.Properties{}, nil +} + +func (a *Adapter) Remove(_ context.Context, _ block.ObjectPointer) error { + return nil +} + +func (a *Adapter) Copy(_ context.Context, _, _ block.ObjectPointer) error { + return nil +} + +func (a *Adapter) 
UploadCopyPart(_ context.Context, _, _ block.ObjectPointer, _ string, _ int) (*block.UploadPartResponse, error) { + h := sha256.New() + code := h.Sum(nil) + etag := hex.EncodeToString(code) + return &block.UploadPartResponse{ + ETag: etag, + }, nil +} + +func (a *Adapter) UploadCopyPartRange(_ context.Context, _, _ block.ObjectPointer, _ string, _ int, startPosition, endPosition int64) (*block.UploadPartResponse, error) { + n := endPosition - startPosition + if n < 0 { + return nil, io.ErrUnexpectedEOF + } + h := sha256.New() + code := h.Sum(nil) + etag := hex.EncodeToString(code) + return &block.UploadPartResponse{ + ETag: etag, + }, nil +} + +func (a *Adapter) CreateMultiPartUpload(_ context.Context, _ block.ObjectPointer, _ *http.Request, _ block.CreateMultiPartUploadOpts) (*block.CreateMultiPartUploadResponse, error) { + uid := uuid.New() + uploadID := hex.EncodeToString(uid[:]) + return &block.CreateMultiPartUploadResponse{ + UploadID: uploadID, + }, nil +} + +func (a *Adapter) UploadPart(_ context.Context, _ block.ObjectPointer, _ int64, reader io.Reader, _ string, _ int) (*block.UploadPartResponse, error) { + data, err := io.ReadAll(reader) + if err != nil { + return nil, err + } + h := sha256.New() + _, err = h.Write(data) + if err != nil { + return nil, err + } + code := h.Sum(nil) + etag := hex.EncodeToString(code) + return &block.UploadPartResponse{ + ETag: etag, + }, nil +} + +func (a *Adapter) AbortMultiPartUpload(context.Context, block.ObjectPointer, string) error { + return nil +} + +func (a *Adapter) CompleteMultiPartUpload(context.Context, block.ObjectPointer, string, *block.MultipartUploadCompletion) (*block.CompleteMultiPartUploadResponse, error) { + const dataSize = 1024 + data := make([]byte, dataSize) + if _, err := rand.Read(data); err != nil { + return nil, err + } + + h := sha256.New() + _, err := h.Write(data) + if err != nil { + return nil, err + } + code := h.Sum(nil) + codeHex := hex.EncodeToString(code) + return &block.CompleteMultiPartUploadResponse{ + ETag: codeHex, + ContentLength: dataSize, + }, nil +} + +func (a *Adapter) BlockstoreType() string { + return block.BlockstoreTypeTransient +} + +func (a *Adapter) GetStorageNamespaceInfo() block.StorageNamespaceInfo { + info := block.DefaultStorageNamespaceInfo(block.BlockstoreTypeTransient) + info.PreSignSupport = false + info.PreSignSupportUI = false + info.ImportSupport = false + return info +} + +func (a *Adapter) ResolveNamespace(storageNamespace, key string, identifierType block.IdentifierType) (block.QualifiedKey, error) { + return block.DefaultResolveNamespace(storageNamespace, key, identifierType) +} + +func (a *Adapter) RuntimeStats() map[string]string { + return nil +} diff --git a/block/walker.go b/block/walker.go new file mode 100644 index 00000000..59588657 --- /dev/null +++ b/block/walker.go @@ -0,0 +1,79 @@ +package block + +import ( + "context" + "fmt" + "net/url" + "time" +) + +type Walker interface { + Walk(ctx context.Context, storageURI *url.URL, op WalkOptions, walkFn func(e ObjectStoreEntry) error) error + Marker() Mark + GetSkippedEntries() []ObjectStoreEntry +} + +type ObjectStoreEntry struct { + // FullKey represents the fully qualified path in the object store namespace for the given entry + FullKey string `json:"full_key,omitempty"` + // RelativeKey represents a path relative to prefix (or directory). If none specified, will be identical to FullKey + RelativeKey string `json:"relative_key,omitempty"` + // Address is a full URI for the entry, including the storage namespace (i.e. 
s3://bucket/path/to/key) + Address string `json:"address,omitempty"` + // ETag represents a hash of the entry's content. Generally as hex encoded MD5, + // but depends on the underlying object store + ETag string `json:"etag,omitempty"` + // Mtime is the last-modified datetime of the entry + Mtime time.Time `json:"mtime,omitempty"` + // Size in bytes + Size int64 `json:"size"` +} + +type WalkOptions struct { + // After - all walked items must be greater than After + After string + + // ContinuationToken is passed to the client for efficient listing. Value is Opaque to the caller. + ContinuationToken string +} + +type WalkerOptions struct { + S3EndpointURL string + StorageURI string + SkipOutOfOrder bool +} + +type WalkerWrapper struct { + walker Walker + uri *url.URL +} + +func NewWrapper(walker Walker, uri *url.URL) *WalkerWrapper { + return &WalkerWrapper{ + walker: walker, + uri: uri, + } +} + +func (ww *WalkerWrapper) Walk(ctx context.Context, opts WalkOptions, walkFn func(e ObjectStoreEntry) error) error { + return ww.walker.Walk(ctx, ww.uri, opts, walkFn) +} + +func (ww *WalkerWrapper) Marker() Mark { + return ww.walker.Marker() +} + +func (ww *WalkerWrapper) GetSkippedEntries() []ObjectStoreEntry { + return ww.walker.GetSkippedEntries() +} + +type Mark struct { + ContinuationToken string + LastKey string + HasMore bool +} + +func (e ObjectStoreEntry) String() string { + return fmt.Sprintf("ObjectStoreEntry: {Address:%s, RelativeKey:%s, ETag:%s, Size:%d, Mtime:%s}", + e.Address, e.RelativeKey, e.ETag, e.Size, e.Mtime) +} diff --git a/cmd/daemon.go b/cmd/daemon.go index 556e1945..fbb6d5cb 100644 --- a/cmd/daemon.go +++ b/cmd/daemon.go @@ -3,6 +3,11 @@ package cmd import ( "context" + "github.com/jiaozifs/jiaozifs/block/params" + + "github.com/jiaozifs/jiaozifs/block" + "github.com/jiaozifs/jiaozifs/block/factory" + logging "github.com/ipfs/go-log/v2" apiImpl "github.com/jiaozifs/jiaozifs/api/api_impl" "github.com/jiaozifs/jiaozifs/config" @@ -41,6 +46,9 @@ var daemonCmd = &cobra.Command{ fx_opt.Override(new(*config.Config), cfg), fx_opt.Override(new(*config.APIConfig), &cfg.API), fx_opt.Override(new(*config.DatabaseConfig), &cfg.Database), + fx_opt.Override(new(params.AdapterConfig), &cfg.Blockstore), + //blockstore + fx_opt.Override(new(block.Adapter), factory.BuildBlockAdapter), //database fx_opt.Override(new(*bun.DB), models.SetupDatabase), fx_opt.Override(fx_opt.NextInvoke(), migrations.MigrateDatabase), diff --git a/cmd/helper.go b/cmd/helper.go new file mode 100644 index 00000000..080a3580 --- /dev/null +++ b/cmd/helper.go @@ -0,0 +1,20 @@ +package cmd + +import ( + "github.com/jiaozifs/jiaozifs/api" + "github.com/jiaozifs/jiaozifs/config" +) + +func GetDefaultClient() (*api.Client, error) { + swagger, err := api.GetSwagger() + if err != nil { + return nil, err + } + + //get runtime version + cfg, err := config.LoadConfig(cfgFile) + if err != nil { + return nil, err + } + return api.NewClient(cfg.API.Listen + swagger.Servers[0].URL) +} diff --git a/cmd/init.go b/cmd/init.go index 3168af92..5b9786b4 100644 --- a/cmd/init.go +++ b/cmd/init.go @@ -1,7 +1,10 @@ package cmd import ( + "os" + "github.com/jiaozifs/jiaozifs/config" + "github.com/mitchellh/go-homedir" "github.com/spf13/cobra" "github.com/spf13/viper" ) @@ -16,7 +19,16 @@ var initCmd = &cobra.Command{ return viper.BindPFlag("database.connection", cmd.Flags().Lookup("db")) }, RunE: func(cmd *cobra.Command, args []string) error { - return config.InitConfig() + err := config.InitConfig() + if err != nil { + return err + } + 
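+ // the default config sets blockstore.type="local" with this same path, so a
+ // fresh install can run the daemon without any further blockstore setup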
//create a blockstore in home path for default usage + defaultBsPath, err := homedir.Expand(config.DefaultLocalBSPath) + if err != nil { + return err + } + return os.MkdirAll(defaultBsPath, 0755) }, } diff --git a/cmd/version.go b/cmd/version.go index 523361c0..5a36093d 100644 --- a/cmd/version.go +++ b/cmd/version.go @@ -3,10 +3,9 @@ package cmd import ( "fmt" - "github.com/jiaozifs/jiaozifs/api" - apiimpl "github.com/jiaozifs/jiaozifs/api/api_impl" - "github.com/jiaozifs/jiaozifs/config" "github.com/jiaozifs/jiaozifs/version" + + "github.com/jiaozifs/jiaozifs/api" "github.com/spf13/cobra" ) @@ -23,12 +22,7 @@ var versionCmd = &cobra.Command{ fmt.Println("Version ", version.UserVersion()) fmt.Println("API Version ", swagger.Info.Version) - //get runtime version - cfg, err := config.LoadConfig(cfgFile) - if err != nil { - return err - } - client, err := api.NewClient(cfg.API.Listen + apiimpl.APIV1Prefix) + client, err := GetDefaultClient() if err != nil { return err } @@ -37,10 +31,12 @@ var versionCmd = &cobra.Command{ if err != nil { return err } + okResp, err := api.ParseGetVersionResponse(versionResp) if err != nil { return err } + if okResp.JSON200 == nil { return fmt.Errorf("request version fail %d %s", okResp.HTTPResponse.StatusCode, okResp.HTTPResponse.Body) } diff --git a/config/blockstore.go b/config/blockstore.go new file mode 100644 index 00000000..704093cf --- /dev/null +++ b/config/blockstore.go @@ -0,0 +1,162 @@ +package config + +import ( + "fmt" + "time" + + "github.com/jiaozifs/jiaozifs/block/params" + + "github.com/mitchellh/go-homedir" +) + +type BlockStoreConfig struct { + Type string `mapstructure:"type" validate:"required"` + DefaultNamespacePrefix *string `mapstructure:"default_namespace_prefix"` + + Local *struct { + Path string `mapstructure:"path"` + ImportEnabled bool `mapstructure:"import_enabled"` + ImportHidden bool `mapstructure:"import_hidden"` + AllowedExternalPrefixes []string `mapstructure:"allowed_external_prefixes"` + } `mapstructure:"local"` + S3 *struct { + S3AuthInfo `mapstructure:",squash"` + Region string `mapstructure:"region"` + Endpoint string `mapstructure:"endpoint"` + MaxRetries int `mapstructure:"max_retries"` + ForcePathStyle bool `mapstructure:"force_path_style"` + DiscoverBucketRegion bool `mapstructure:"discover_bucket_region"` + SkipVerifyCertificateTestOnly bool `mapstructure:"skip_verify_certificate_test_only"` + ServerSideEncryption string `mapstructure:"server_side_encryption"` + ServerSideEncryptionKmsKeyID string `mapstructure:"server_side_encryption_kms_key_id"` + PreSignedExpiry time.Duration `mapstructure:"pre_signed_expiry"` + DisablePreSigned bool `mapstructure:"disable_pre_signed"` + DisablePreSignedUI bool `mapstructure:"disable_pre_signed_ui"` + ClientLogRetries bool `mapstructure:"client_log_retries"` + ClientLogRequest bool `mapstructure:"client_log_request"` + WebIdentity *struct { + SessionDuration time.Duration `mapstructure:"session_duration"` + SessionExpiryWindow time.Duration `mapstructure:"session_expiry_window"` + } `mapstructure:"web_identity"` + } `mapstructure:"s3"` + Azure *struct { + TryTimeout time.Duration `mapstructure:"try_timeout"` + StorageAccount string `mapstructure:"storage_account"` + StorageAccessKey string `mapstructure:"storage_access_key"` + PreSignedExpiry time.Duration `mapstructure:"pre_signed_expiry"` + DisablePreSigned bool `mapstructure:"disable_pre_signed"` + DisablePreSignedUI bool `mapstructure:"disable_pre_signed_ui"` + // TestEndpointURL for testing purposes + TestEndpointURL 
string `mapstructure:"test_endpoint_url"` + } `mapstructure:"azure"` + GS *struct { + S3Endpoint string `mapstructure:"s3_endpoint"` + CredentialsFile string `mapstructure:"credentials_file"` + CredentialsJSON string `mapstructure:"credentials_json"` + PreSignedExpiry time.Duration `mapstructure:"pre_signed_expiry"` + DisablePreSigned bool `mapstructure:"disable_pre_signed"` + DisablePreSignedUI bool `mapstructure:"disable_pre_signed_ui"` + } `mapstructure:"gs"` +} + +func (c *BlockStoreConfig) BlockstoreType() string { + return c.Type +} + +func (c *BlockStoreConfig) BlockstoreS3Params() (params.S3, error) { + var webIdentity *params.S3WebIdentity + if c.S3.WebIdentity != nil { + webIdentity = ¶ms.S3WebIdentity{ + SessionDuration: c.S3.WebIdentity.SessionDuration, + SessionExpiryWindow: c.S3.WebIdentity.SessionExpiryWindow, + } + } + + var creds params.S3Credentials + if c.S3.Credentials != nil { + creds.AccessKeyID = c.S3.Credentials.AccessKeyID.SecureValue() + creds.SecretAccessKey = c.S3.Credentials.SecretAccessKey.SecureValue() + creds.SessionToken = c.S3.Credentials.SessionToken.SecureValue() + } + + return params.S3{ + Region: c.S3.Region, + Profile: c.S3.Profile, + CredentialsFile: c.S3.CredentialsFile, + Credentials: creds, + MaxRetries: c.S3.MaxRetries, + Endpoint: c.S3.Endpoint, + ForcePathStyle: c.S3.ForcePathStyle, + DiscoverBucketRegion: c.S3.DiscoverBucketRegion, + SkipVerifyCertificateTestOnly: c.S3.SkipVerifyCertificateTestOnly, + ServerSideEncryption: c.S3.ServerSideEncryption, + ServerSideEncryptionKmsKeyID: c.S3.ServerSideEncryptionKmsKeyID, + PreSignedExpiry: c.S3.PreSignedExpiry, + DisablePreSigned: c.S3.DisablePreSigned, + DisablePreSignedUI: c.S3.DisablePreSignedUI, + ClientLogRetries: c.S3.ClientLogRetries, + ClientLogRequest: c.S3.ClientLogRequest, + WebIdentity: webIdentity, + }, nil +} + +func (c *BlockStoreConfig) BlockstoreLocalParams() (params.Local, error) { + localPath := c.Local.Path + path, err := homedir.Expand(localPath) + if err != nil { + return params.Local{}, fmt.Errorf("parse blockstore location URI %s: %w", localPath, err) + } + + params := params.Local(*c.Local) + params.Path = path + return params, nil +} + +func (c *BlockStoreConfig) BlockstoreGSParams() (params.GS, error) { + credPath, err := homedir.Expand(c.GS.CredentialsFile) + if err != nil { + return params.GS{}, fmt.Errorf("parse GS credentials path '%s': %w", c.GS.CredentialsFile, err) + } + return params.GS{ + CredentialsFile: credPath, + CredentialsJSON: c.GS.CredentialsJSON, + PreSignedExpiry: c.GS.PreSignedExpiry, + DisablePreSigned: c.GS.DisablePreSigned, + DisablePreSignedUI: c.GS.DisablePreSignedUI, + }, nil +} + +func (c *BlockStoreConfig) BlockstoreAzureParams() (params.Azure, error) { + return params.Azure{ + StorageAccount: c.Azure.StorageAccount, + StorageAccessKey: c.Azure.StorageAccessKey, + TryTimeout: c.Azure.TryTimeout, + PreSignedExpiry: c.Azure.PreSignedExpiry, + TestEndpointURL: c.Azure.TestEndpointURL, + DisablePreSigned: c.Azure.DisablePreSigned, + DisablePreSignedUI: c.Azure.DisablePreSignedUI, + }, nil +} + +type SecureString string + +// String returns an elided version. It is safe to call for logging. +func (SecureString) String() string { + return "[SECRET]" +} + +// SecureValue returns the actual value of s as a string. +func (s SecureString) SecureValue() string { + return string(s) +} + +// S3AuthInfo holds S3-style authentication. 
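+// A hypothetical config.toml fragment mapping onto these fields (key names
+// follow the mapstructure tags; all values are placeholders):
+//
+//	[blockstore.s3]
+//	credentials_file = "~/.aws/credentials"
+//	profile = "default"
+//	[blockstore.s3.credentials]
+//	access_key_id = "AKIA..."
+//	secret_access_key = "..."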
+type S3AuthInfo struct { + CredentialsFile string `mapstructure:"credentials_file"` + Profile string + Credentials *struct { + AccessKeyID SecureString `mapstructure:"access_key_id"` + SecretAccessKey SecureString `mapstructure:"secret_access_key"` + SessionToken SecureString `mapstructure:"session_token"` + } +} diff --git a/config/config.go b/config/config.go index 423619e9..0f034e2c 100644 --- a/config/config.go +++ b/config/config.go @@ -2,7 +2,6 @@ package config import ( "fmt" - "os" "path" @@ -16,6 +15,8 @@ type Config struct { Log LogConfig `mapstructure:"log"` API APIConfig `mapstructure:"api"` Database DatabaseConfig `mapstructure:"database"` + + Blockstore BlockStoreConfig `mapstructure:"blockstore"` } type LogConfig struct { @@ -50,7 +51,7 @@ func InitConfig() error { for k, v := range data { viper.SetDefault(k, v) } - err = os.MkdirAll(jiaoziHome, 0777) + err = os.MkdirAll(jiaoziHome, 0755) if err != nil { return err } diff --git a/config/default.go b/config/default.go index bfbb3263..47d6d8bf 100644 --- a/config/default.go +++ b/config/default.go @@ -1,5 +1,7 @@ package config +var DefaultLocalBSPath = "~/.jiaozifs/blockstore" + var defaultCfg = Config{ Path: "~/.jiaozifs/config.toml", Log: LogConfig{ @@ -8,4 +10,19 @@ var defaultCfg = Config{ API: APIConfig{ Listen: "http://127.0.0.1:34913", }, + Blockstore: BlockStoreConfig{ + Type: "local", + DefaultNamespacePrefix: nil, + Local: (*struct { + Path string `mapstructure:"path"` + ImportEnabled bool `mapstructure:"import_enabled"` + ImportHidden bool `mapstructure:"import_hidden"` + AllowedExternalPrefixes []string `mapstructure:"allowed_external_prefixes"` + })(&struct { + Path string + ImportEnabled bool + ImportHidden bool + AllowedExternalPrefixes []string + }{Path: DefaultLocalBSPath, ImportEnabled: false, ImportHidden: false, AllowedExternalPrefixes: nil}), + }, } diff --git a/go.mod b/go.mod index 6fba1d74..519d3f4b 100644 --- a/go.mod +++ b/go.mod @@ -3,71 +3,167 @@ module github.com/jiaozifs/jiaozifs go 1.20 require ( + cloud.google.com/go/storage v1.33.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 + github.com/aws/aws-sdk-go-v2 v1.23.4 + github.com/aws/aws-sdk-go-v2/config v1.25.10 + github.com/aws/aws-sdk-go-v2/credentials v1.16.8 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.3 + github.com/aws/aws-sdk-go-v2/service/s3 v1.47.1 + github.com/aws/smithy-go v1.18.1 + github.com/benburkert/dns v0.0.0-20190225204957-d356cf78cdfc + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/deepmap/oapi-codegen/v2 v2.0.1-0.20231120160225-add3126ee845 + github.com/fergusstrange/embedded-postgres v1.25.0 github.com/getkin/kin-openapi v0.118.0 github.com/go-chi/chi/v5 v5.0.10 + github.com/go-chi/cors v1.2.1 + github.com/go-test/deep v1.1.0 + github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.4.0 + github.com/hnlq715/golang-lru v0.4.0 github.com/ipfs/go-log/v2 v2.5.1 + github.com/matoous/go-nanoid/v2 v2.0.0 + github.com/minio/minio-go/v7 v7.0.64 + github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/mapstructure v1.5.0 github.com/oapi-codegen/nethttp-middleware v1.0.1 + github.com/ory/dockertest/v3 v3.10.0 + github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 + github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.17.0 + github.com/puzpuzpuz/xsync v1.5.2 github.com/spf13/cobra v1.8.0 github.com/spf13/viper v1.17.0 + 
github.com/stretchr/testify v1.8.4 + github.com/thanhpk/randstr v1.0.6 github.com/uptrace/bun v1.1.16 github.com/uptrace/bun/dialect/pgdialect v1.1.16 github.com/uptrace/bun/driver/pgdriver v1.1.16 + github.com/uptrace/bun/extra/bundebug v1.1.16 go.uber.org/fx v1.20.1 + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 + golang.org/x/oauth2 v0.13.0 + google.golang.org/api v0.147.0 gopkg.in/yaml.v2 v2.4.0 ) require ( - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + cloud.google.com/go v0.110.8 // indirect + cloud.google.com/go/compute v1.23.0 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/iam v1.1.2 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.3 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.8 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.7 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.7 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.7 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.7 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.18.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.1 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.26.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/containerd/continuity v0.3.0 // indirect + github.com/docker/cli v23.0.6+incompatible // indirect + github.com/docker/docker v23.0.6+incompatible // indirect + github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/fatih/color v1.15.0 // indirect - github.com/fergusstrange/embedded-postgres v1.25.0 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/go-chi/cors v1.2.1 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/swag v0.22.4 // indirect - github.com/google/go-cmp v0.6.0 // indirect + github.com/go-sql-driver/mysql v1.7.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v5 v5.0.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.1 // indirect + github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/gorilla/mux v1.8.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect + github.com/imdario/mergo v0.3.15 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/invopop/yaml v0.2.0 // indirect 
github.com/jinzhu/inflection v1.0.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/lib/pq v1.10.9 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.19 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/minio/md5-simd v1.1.2 // indirect + github.com/minio/sha256-simd v1.0.1 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.0.2 // indirect + github.com/opencontainers/runc v1.1.7 // indirect github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/perimeterx/marshmallow v1.1.4 // indirect - github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 // indirect + github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.11.1 // indirect + github.com/rs/xid v1.5.0 // indirect github.com/sagikazarmark/locafero v0.3.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.10.0 // indirect github.com/spf13/cast v1.5.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/objx v0.5.0 // indirect - github.com/stretchr/testify v1.8.4 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc // indirect - github.com/uptrace/bun/extra/bundebug v1.1.16 // indirect github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect - go.uber.org/atomic v1.9.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.17.0 // indirect go.uber.org/multierr v1.9.0 // indirect go.uber.org/zap v1.23.0 // indirect - golang.org/x/crypto v0.13.0 // indirect - golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/mod v0.12.0 // indirect - golang.org/x/sys v0.12.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/sync v0.4.0 // indirect + golang.org/x/sys v0.13.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.13.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 
// indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c // indirect + google.golang.org/grpc v1.58.3 // indirect + google.golang.org/protobuf v1.31.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect mellium.im/sasl v0.3.1 // indirect diff --git a/go.sum b/go.sum index 40690bbe..27da620d 100644 --- a/go.sum +++ b/go.sum @@ -17,14 +17,22 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME= +cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/iam v1.1.2 h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4= +cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -35,12 +43,77 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.33.0 h1:PVrDOkIC8qQVa1P3SXGpQvfuJhN2LHOoyZvWs8D2X5M= +cloud.google.com/go/storage v1.33.0/go.mod h1:Hhh/dogNRGca7IWv1RC2YqEn0c0G77ctA/OxflYkiD8= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= 
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0 h1:d81/ng9rET2YqdVkVwkb6EXeRrLJIwyGnJcAlAWKwhs= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.0/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.2.0 h1:Ma67P/GGprNwsslzEH6+Kb8nybI8jpDTm4Wmzu2ReK8= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 h1:gggzg0SUMs6SQbEw+3LoSsYf9YMjkupeAnHMX8O9mmY= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0/go.mod h1:+6KLcKIVgxoBDMqMO/Nvy7bZ9a0nbU3I1DtFQK3YvB4= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1 h1:WpB/QDNLpMw72xHJc34BNNykqSOeEJDAWkhf0u12/Jk= +github.com/AzureAD/microsoft-authentication-library-for-go v1.1.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/aws/aws-sdk-go-v2 v1.23.4 h1:2P20ZjH0ouSAu/6yZep8oCmTReathLuEu6dwoqEgjts= +github.com/aws/aws-sdk-go-v2 v1.23.4/go.mod h1:t3szzKfP0NeRU27uBFczDivYJjsmSnqI8kIvKyWb9ds= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.3 h1:Zx9+31KyB8wQna6SXFWOewlgoY5uGdDAu6PTOEU3OQI= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.3/go.mod h1:zxbEJhRdKTH1nqS2qu6UJ7zGe25xaHxZXaC2CvuQFnA= +github.com/aws/aws-sdk-go-v2/config v1.25.10 h1:qw/e8emDtNufTkrAU86DlQ18DruMyyM7ttW6Lgwp4v0= +github.com/aws/aws-sdk-go-v2/config v1.25.10/go.mod h1:203YiAtb6XyoGxXMPsUVwEcuxCiTQY/r8P27IDjfvMc= +github.com/aws/aws-sdk-go-v2/credentials v1.16.8 h1:phw9nRLy/77bPk6Mfu2SHCOnHwfVB7WWrOa5rZIY2Fc= +github.com/aws/aws-sdk-go-v2/credentials v1.16.8/go.mod h1:MrS4SOin6adbO6wgWhdifyPiq+TX7fPPwyA/ZLC1F5M= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.8 h1:tQZLSPC2Zj2CqZHonLmWEvCsbpMX5tQvaYJWHadcPek= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.8/go.mod h1:5+YpvTHDFffykWr5qAGjqwoh8oVYZOddL3sSrEN7lws= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.3 h1:0Pw2ku539I0EugduMpJ+579WRc+38nv8rZhThWjsuYQ= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.3/go.mod h1:vQtGu6huTQkoEhNgkDeijtYm9Y8HgpQqvGeKUPoEunY= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.7 h1:eMqD7ku6WGdmcWWXPYun9m6yk6feSULLhJlAtN6rYG4= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.7/go.mod h1:0oBIfcDV6LScxEW0VgOqxT3e4aqKRp+SYhB9wAd5E3Q= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.7 h1:+XYhWhgWs5F3Zx8oa49CXzNvfXrItaDjZB/M172fcHQ= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.7/go.mod h1:L6tcSRyCGxcKfDWUrmv2jv8G1cLDU7d0FUpEFpG9bVE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 h1:uR9lXYjdPX0xY+NhvaJ4dD8rpSRz5VY81ccIIoNG+lw= 
+github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.7 h1:3VaUNB1LclLomv82VnP5QnxAfowG+Ro4m82+af9wjZ4= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.7/go.mod h1:D5i0c+qvEY0LV5F4elFZd+mYnvHQbufCLHNHoBfQR2g= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3 h1:e3PCNeEaev/ZF01cQyNZgmYE9oYYePIMJs2mWSKG514= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.3/go.mod h1:gIeeNyaL8tIEqZrzAnTeyhHcE0yysCtcaP+N9kxLZ+E= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.7 h1:Mft1tmIK1fkFS9l9sYVYiN+OdgXeOcQ9ZS3SxKOh3A4= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.7/go.mod h1:QWI83fhocxDaN3b74N8rrvET60CBaike5lQ+5sm3OcE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.7 h1:dU+ZyhvqMB/T/TxjGagHMCdyUiqaThRIaMu3YvKiSQI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.7/go.mod h1:SGORuNqoXyWfTvTp/gBGJfv8jRvW/+nha0XhnIXVI+o= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.7 h1:ybtGXm0qFVFi0hFUF7eFAVnL3ntl9MO7lrxhhGP7KYU= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.7/go.mod h1:BUyWJUKAnNqoEq1LfyQxy+Eh4U8Y3c5w2C6m21f3yvI= +github.com/aws/aws-sdk-go-v2/service/s3 v1.47.1 h1:0/W5F+LlXzKZ7KTsRcD8pugasVnsrjUWmhOsN/LdSFY= +github.com/aws/aws-sdk-go-v2/service/s3 v1.47.1/go.mod h1:TqThLn4bRCn/UYf960hNZgPPjmxc17fQcwmjfuG6D5k= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.1 h1:V40g2daNO3l1J94JYwqfkyvQMYXi5I25fs3fNQW8iDs= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.1/go.mod h1:0ZWQJP/mBOUxkCvZKybZNz1XmdUKSBxoF0dzgfxtvDs= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.1 h1:uQrj7SpUNC3r55vc1CDh3qV9wJC66lz546xM9dhSo5s= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.1/go.mod h1:oyaTk5xEAOuPXX1kCD7HmIeuLqdj3Bk5yGkqGXtGi14= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.1 h1:K33V7L0XDdb23FMOZySr8bon1jou5SHn1fiv7NJ1SUg= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.1/go.mod h1:YtXUl/sfnS06VksYhr855hTQf2HphfT1Xv/EwuzbPjg= +github.com/aws/smithy-go v1.18.1 h1:pOdBTUfXNazOlxLrgeYalVnuTpKreACHtc62xLwIB3c= +github.com/aws/smithy-go v1.18.1/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benburkert/dns v0.0.0-20190225204957-d356cf78cdfc h1:eyDlmf21vuKN61WoxV2cQLDH/PBDyyjIhUI4kT2o1yM= +github.com/benburkert/dns v0.0.0-20190225204957-d356cf78cdfc/go.mod h1:6ul4nJKqsreAIBK5lUkibcUn2YBU6CvDzlKDH+dtZsQ= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod 
h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -48,14 +121,28 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deepmap/oapi-codegen/v2 v2.0.1-0.20231120160225-add3126ee845 h1:QlRkcr1t+VcHkdk8WJDhuiI94RqmjSgtflB3Q+H8X2k= github.com/deepmap/oapi-codegen/v2 v2.0.1-0.20231120160225-add3126ee845/go.mod h1:pB9cROTwrn6Gj3Rtmcmp5fwV23znquC9tY1rR6+/R3s= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/docker/cli v23.0.6+incompatible h1:CScadyCJ2ZKUDpAMZta6vK8I+6/m60VIjGIV7Wg/Eu4= +github.com/docker/cli v23.0.6+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/docker v23.0.6+incompatible h1:aBD4np894vatVX99UTx/GyOUOK4uEcROwA3+bQhEcoU= +github.com/docker/docker v23.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -85,12 +172,21 @@ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= +github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= +github.com/go-sql-driver/mysql v1.7.0/go.mod 
h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= +github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -112,6 +208,9 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -122,13 +221,17 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible 
h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -140,11 +243,19 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.1 h1:SBWmZhjUDRorQxrN0nwzf+AHBxnbFjViHQS4P0yVpmQ= +github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= @@ -152,8 +263,12 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hnlq715/golang-lru v0.4.0 h1:gyo/wIvLE6Upf1wucAfwTjpR+BQ5Lli2766H2MnNPv0= +github.com/hnlq715/golang-lru v0.4.0/go.mod h1:RBkgDAtlu0SgTPvpb4VW2/RQnkCBMRD3Lr6B9RhsAS8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.15 
h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= +github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/invopop/yaml v0.1.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= @@ -163,11 +278,23 @@ github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= +github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -176,6 +303,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= @@ -184,24 +313,54 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN github.com/mailru/easyjson 
v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/matoous/go-nanoid v1.5.0/go.mod h1:zyD2a71IubI24efhpvkJz+ZwfwagzgSO6UNiFsZKN7U= +github.com/matoous/go-nanoid/v2 v2.0.0 h1:d19kur2QuLeHmJBkvYkFdhFBzLoo1XVm2GgTpL+9Tj0= +github.com/matoous/go-nanoid/v2 v2.0.0/go.mod h1:FtS4aGPVfEkxKxhdWPAspZpZSh1cOjtM7Ej/So3hR0g= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= +github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= +github.com/minio/minio-go/v7 v7.0.64 h1:Zdza8HwOzkld0ZG/og50w56fKi6AAyfqfifmasD9n2Q= +github.com/minio/minio-go/v7 v7.0.64/go.mod h1:R4WVUR6ZTedlCcGwZRauLMIKjgyaWxhs4Mqi/OMPmEc= +github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= +github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/oapi-codegen/nethttp-middleware v1.0.1 h1:ZWvwfnMU0eloHX1VEJmQscQm3741t0vCm0eSIie1NIo= github.com/oapi-codegen/nethttp-middleware v1.0.1/go.mod h1:P7xtAvpoqNB+5obR9qRCeefH7YlXWSK3KgPs/9WB8tE= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2 
h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.1.7 h1:y2EZDS8sNng4Ksf0GUYNhKbTShZJPJg1FiXJNH/uoCk= +github.com/opencontainers/runc v1.1.7/go.mod h1:CbUumNnWCuTGFukNXahoo/RFBZvDAgRh/smNYNOhA50= +github.com/ory/dockertest/v3 v3.10.0 h1:4K3z2VMe8Woe++invjaTB7VRyQXQy5UY+loujO4aNE4= +github.com/ory/dockertest/v3 v3.10.0/go.mod h1:nr57ZbRWMqfsdGdFNLHz5jjNdDb7VVFnzAeW1n5N1Lg= github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/perimeterx/marshmallow v1.1.4 h1:pZLDH9RjlLGGorbXhcaQLhfuV0pFMNfPO55FuFkxqLw= github.com/perimeterx/marshmallow v1.1.4/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -209,14 +368,28 @@ github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qR github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM= +github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= +github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= +github.com/puzpuzpuz/xsync v1.5.2 h1:yRAP4wqSOZG+/4pxJ08fPTwrfL0IzE/LKQ/cw509qGY= +github.com/puzpuzpuz/xsync v1.5.2/go.mod h1:K98BYhX3k1dQ2M63t1YNVDanbwUPmBCAhNmVrrxfiGg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= 
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ= github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= @@ -231,7 +404,6 @@ github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI= github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -245,6 +417,8 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/thanhpk/randstr v1.0.6 h1:psAOktJFD4vV9NEVb3qkhRSMvYh4ORRaj1+w/hn4B+o= +github.com/thanhpk/randstr v1.0.6/go.mod h1:M/H2P1eNLZzlDwAzpkkkUvoyNNMbzRGhESZuEQk3r0U= github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc h1:9lRDQMhESg+zvGYmW5DyG0UqvY96Bu5QYsTLvCHdrgo= github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs= github.com/ugorji/go v1.2.7 h1:qYhyWUUd6WbiM+C6JZAUkIJt/1WrjzNHY9+KCIjVqTo= @@ -263,6 +437,13 @@ github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9 github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema 
v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -276,16 +457,16 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= -go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= go.uber.org/fx v1.20.1 h1:zVwVQGS8zYvhh9Xxcu4w1M6ESyeMzebzj2NbSayZ4Mk= go.uber.org/fx v1.20.1/go.mod h1:iSYNbHf2y55acNCwCXKx7LbWb5WG1Bnue5RDXz1OREg= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= @@ -300,8 +481,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -369,11 +550,14 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -383,6 +567,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= +golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -394,6 +580,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -431,12 +619,16 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -488,6 +680,7 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -496,6 +689,7 @@ golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -505,6 +699,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors 
v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -524,12 +720,15 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.147.0 h1:Can3FaQo9LlVqxJCodNmeZW/ib3/qKAY3rFeXiHo5gc= +google.golang.org/api v0.147.0/go.mod h1:pQ/9j83DcmPd/5C9e2nFOdjjNkDZ1G+zkbK2uvdkJMs= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -567,6 +766,12 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0= +google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU= +google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c h1:jHkCUWkseRf+W+edG5hMzr/Uh1xkDREY4caybAq4dpY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231009173412-8bfb1ae86b6c/go.mod h1:4cYg8o5yUbm77w8ZX00LhMVNl/YVBFJRYWDc0uYWMs0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -583,6 +788,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= 
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= +google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -593,6 +800,10 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -609,6 +820,7 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
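GetDefaultClient is new in this change, but its definition sits outside the hunks shown above. For context, a hypothetical reconstruction of what such a helper would look like in package cmd, assuming it simply folds together the config load and client construction that the removed lines in cmd/version.go did inline (cfgFile is the package-level config flag the old code already used):

package cmd

import (
	"github.com/jiaozifs/jiaozifs/api"
	apiimpl "github.com/jiaozifs/jiaozifs/api/api_impl"
	"github.com/jiaozifs/jiaozifs/config"
)

// GetDefaultClient (hypothetical reconstruction): load the config file and
// build an API client against the configured listen address, which is
// exactly what the removed lines in versionCmd used to do inline.
func GetDefaultClient() (*api.Client, error) {
	cfg, err := config.LoadConfig(cfgFile)
	if err != nil {
		return nil, err
	}
	return api.NewClient(cfg.API.Listen + apiimpl.APIV1Prefix)
}

Centralizing this in one helper means every CLI subcommand resolves the server address the same way instead of repeating the load-and-concatenate dance.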
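The heart of the change is BlockStoreConfig in config/blockstore.go, which viper decodes through the mapstructure tags. A minimal sketch of how those tags resolve config keys; the structs below are trimmed copies of the ones in the diff, and the input map stands in for what viper would hand over after reading ~/.jiaozifs/config.toml (the values are illustrative, except the local path, which matches DefaultLocalBSPath):

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Trimmed copies of the structs introduced in config/blockstore.go;
// only a few fields are kept to show how the tags drive decoding.
type S3AuthInfo struct {
	CredentialsFile string `mapstructure:"credentials_file"`
}

type BlockStoreConfig struct {
	Type  string `mapstructure:"type"`
	Local *struct {
		Path string `mapstructure:"path"`
	} `mapstructure:"local"`
	S3 *struct {
		S3AuthInfo `mapstructure:",squash"` // squash lifts credentials_file to the s3 level
		Region     string `mapstructure:"region"`
	} `mapstructure:"s3"`
}

func main() {
	// Stand-in for the settings viper would read from the config file.
	raw := map[string]any{
		"type": "local",
		"local": map[string]any{
			"path": "~/.jiaozifs/blockstore", // DefaultLocalBSPath
		},
		"s3": map[string]any{
			"region":           "us-east-1",
			"credentials_file": "~/.aws/credentials",
		},
	}

	var cfg BlockStoreConfig
	if err := mapstructure.Decode(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Type, cfg.Local.Path, cfg.S3.Region, cfg.S3.CredentialsFile)
	// local ~/.jiaozifs/blockstore us-east-1 ~/.aws/credentials
}

The ",squash" tag on the embedded S3AuthInfo is why credential keys sit directly under the s3 section of the config file rather than in a nested auth block.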
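Two smaller behaviours round the change off: the command hook at the start of this section now pre-creates DefaultLocalBSPath ("~/.jiaozifs/blockstore") with 0755 permissions, matching the 0777-to-0755 tightening in config.InitConfig, and SecureString keeps S3 credentials out of logs. A standalone sketch of both, with an illustrative secret value (the main wrapper is not part of the change set):

package main

import (
	"fmt"
	"os"

	"github.com/mitchellh/go-homedir"
)

// SecureString, as added in config/blockstore.go: fmt calls String(),
// so accidentally logging a credential prints "[SECRET]" instead.
type SecureString string

func (SecureString) String() string        { return "[SECRET]" }
func (s SecureString) SecureValue() string { return string(s) }

func main() {
	// The same steps as the new command hook: expand the default
	// blockstore path under the home directory and create it with 0755.
	path, err := homedir.Expand("~/.jiaozifs/blockstore") // config.DefaultLocalBSPath
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if err := os.MkdirAll(path, 0755); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	secret := SecureString("wJalrXUtnFEMI...") // illustrative value
	fmt.Println(path)                          // e.g. /home/user/.jiaozifs/blockstore
	fmt.Println(secret)                        // [SECRET]
	fmt.Println(secret.SecureValue())          // raw value, for building a client
}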