
Add a storage layer for attachments (#11387)

* Add a storage layer for attachments

* Fix some bugs

* fix test

* Fix copyright header and lint

* Fix bug

* Add setting for minio and flags for migrate-storage

* Add documents

* fix lint

* Add test for minio store type on attachments

* fix test

* fix test

* Apply suggestions from code review

Co-authored-by: guillep2k <18600385+guillep2k@users.noreply.github.com>

* Add warning when storage migrated successfully

* Fix drone

* fix test

* rebase

* Fix test

* display the error on console

* Move minio test to amd64 since the minio docker image doesn't support arm64

* refactor the codes

* add trace

* Fix test

* remove log on xorm

* Fix download bug

* Add URL function to serve attachments directly from S3/Minio

* Add ability to enable/disable redirection in attachment configuration

* Fix typo

* don't change unrelated files

* Fix lint

* Fix build

* update go.mod and go.sum

* Use github.com/minio/minio-go/v6

* Remove unused function

* Upgrade minio to v7 and some other improvements

* fix lint

* Fix go mod

Co-authored-by: guillep2k <18600385+guillep2k@users.noreply.github.com>
Co-authored-by: Tyler <tystuyfzand@gmail.com>
Lunny Xiao authored on 2020-08-18 12:23:45 +08:00, committed by GitHub
parent 02fbe1e5dc · commit 62e6c9bc6c
GPG key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
330 changed files with 62099 additions and 331 deletions
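The new attachment storage layer builds on the minio-go v7 client vendored in the diffs below. As a rough, hypothetical sketch (not code from this commit; endpoint, bucket, and object names are placeholders), storing an attachment and serving it through a pre-signed URL with that client looks roughly like this:

package main

import (
	"context"
	"log"
	"strings"
	"time"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Hypothetical endpoint, bucket, and keys; in Gitea these come from configuration.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()
	body := strings.NewReader("attachment payload")

	// Store the attachment under its content-addressed path.
	if _, err := client.PutObject(ctx, "gitea", "attachments/ab/cd/abcd1234", body, int64(body.Len()),
		minio.PutObjectOptions{ContentType: "application/octet-stream"}); err != nil {
		log.Fatal(err)
	}

	// With redirection enabled, a pre-signed URL lets the browser fetch directly from S3/MinIO.
	u, err := client.PresignedGetObject(ctx, "gitea", "attachments/ab/cd/abcd1234", 5*time.Minute, nil)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("serve attachment from:", u)
}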

@@ -0,0 +1,214 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import (
"encoding/hex"
"encoding/xml"
"errors"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/minio/minio-go/v7/pkg/signer"
sha256 "github.com/minio/sha256-simd"
)
// AssumeRoleResponse contains the result of successful AssumeRole request.
type AssumeRoleResponse struct {
XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleResponse" json:"-"`
Result AssumeRoleResult `xml:"AssumeRoleResult"`
ResponseMetadata struct {
RequestID string `xml:"RequestId,omitempty"`
} `xml:"ResponseMetadata,omitempty"`
}
// AssumeRoleResult - Contains the response to a successful AssumeRole
// request, including temporary credentials that can be used to make
// MinIO API requests.
type AssumeRoleResult struct {
// The identifiers for the temporary security credentials that the operation
// returns.
AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
// The temporary security credentials, which include an access key ID, a secret
// access key, and a security (or session) token.
//
// Note: The size of the security token that STS APIs return is not fixed. We
// strongly recommend that you make no assumptions about the maximum size. As
// of this writing, the typical size is less than 4096 bytes, but that can vary.
// Also, future updates to AWS might require larger sizes.
Credentials struct {
AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
} `xml:",omitempty"`
// A percentage value that indicates the size of the policy in packed form.
// The service rejects any policy with a packed size greater than 100 percent,
// which means the policy exceeded the allowed space.
PackedPolicySize int `xml:",omitempty"`
}
// A STSAssumeRole retrieves credentials from MinIO service, and keeps track if
// those credentials are expired.
type STSAssumeRole struct {
Expiry
// Required http Client to use when connecting to MinIO STS service.
Client *http.Client
// STS endpoint to fetch STS credentials.
STSEndpoint string
// various options for this request.
Options STSAssumeRoleOptions
}
// STSAssumeRoleOptions collection of various input options
// to obtain AssumeRole credentials.
type STSAssumeRoleOptions struct {
// Mandatory inputs.
AccessKey string
SecretKey string
Location string // Optional, commonly needed with AWS STS.
DurationSeconds int // Optional, defaults to 1 hour.
// Optional, only valid if used with AWS STS.
RoleARN string
RoleSessionName string
}
// NewSTSAssumeRole returns a pointer to a new
// Credentials object wrapping the STSAssumeRole.
func NewSTSAssumeRole(stsEndpoint string, opts STSAssumeRoleOptions) (*Credentials, error) {
if stsEndpoint == "" {
return nil, errors.New("STS endpoint cannot be empty")
}
if opts.AccessKey == "" || opts.SecretKey == "" {
return nil, errors.New("AssumeRole credentials access/secretkey is mandatory")
}
return New(&STSAssumeRole{
Client: &http.Client{
Transport: http.DefaultTransport,
},
STSEndpoint: stsEndpoint,
Options: opts,
}), nil
}
const defaultDurationSeconds = 3600
// closeResponse closes a non-nil response with any response Body.
// It is a convenient wrapper to drain any remaining data on the response body.
//
// Subsequently this allows the golang http RoundTripper
// to re-use the same connection for future requests.
func closeResponse(resp *http.Response) {
// Callers should close resp.Body when done reading from it.
// If resp.Body is not closed, the Client's underlying RoundTripper
// (typically Transport) may not be able to re-use a persistent TCP
// connection to the server for a subsequent "keep-alive" request.
if resp != nil && resp.Body != nil {
// Drain any remaining Body and then close the connection.
// Without this closing connection would disallow re-using
// the same connection for future uses.
// - http://stackoverflow.com/a/17961593/4465767
io.Copy(ioutil.Discard, resp.Body)
resp.Body.Close()
}
}
func getAssumeRoleCredentials(clnt *http.Client, endpoint string, opts STSAssumeRoleOptions) (AssumeRoleResponse, error) {
v := url.Values{}
v.Set("Action", "AssumeRole")
v.Set("Version", "2011-06-15")
if opts.RoleARN != "" {
v.Set("RoleArn", opts.RoleARN)
}
if opts.RoleSessionName != "" {
v.Set("RoleSessionName", opts.RoleSessionName)
}
if opts.DurationSeconds > defaultDurationSeconds {
v.Set("DurationSeconds", strconv.Itoa(opts.DurationSeconds))
} else {
v.Set("DurationSeconds", strconv.Itoa(defaultDurationSeconds))
}
u, err := url.Parse(endpoint)
if err != nil {
return AssumeRoleResponse{}, err
}
u.Path = "/"
postBody := strings.NewReader(v.Encode())
hash := sha256.New()
if _, err = io.Copy(hash, postBody); err != nil {
return AssumeRoleResponse{}, err
}
postBody.Seek(0, 0)
req, err := http.NewRequest(http.MethodPost, u.String(), postBody)
if err != nil {
return AssumeRoleResponse{}, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(hash.Sum(nil)))
req = signer.SignV4STS(*req, opts.AccessKey, opts.SecretKey, opts.Location)
resp, err := clnt.Do(req)
if err != nil {
return AssumeRoleResponse{}, err
}
defer closeResponse(resp)
if resp.StatusCode != http.StatusOK {
return AssumeRoleResponse{}, errors.New(resp.Status)
}
a := AssumeRoleResponse{}
if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil {
return AssumeRoleResponse{}, err
}
return a, nil
}
// Retrieve retrieves credentials from the MinIO service.
// Error will be returned if the request fails.
func (m *STSAssumeRole) Retrieve() (Value, error) {
a, err := getAssumeRoleCredentials(m.Client, m.STSEndpoint, m.Options)
if err != nil {
return Value{}, err
}
// Expiry window is set to 10secs.
m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
return Value{
AccessKeyID: a.Result.Credentials.AccessKey,
SecretAccessKey: a.Result.Credentials.SecretKey,
SessionToken: a.Result.Credentials.SessionToken,
SignerType: SignatureV4,
}, nil
}
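For illustration, a minimal usage sketch of the AssumeRole provider above (not part of this diff; the STS endpoint and keys are placeholders):

package main

import (
	"log"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Hypothetical MinIO STS endpoint and long-lived keys.
	creds, err := credentials.NewSTSAssumeRole("https://sts.example.com", credentials.STSAssumeRoleOptions{
		AccessKey:       "ACCESS_KEY",
		SecretKey:       "SECRET_KEY",
		DurationSeconds: 3600, // optional, defaults to one hour
	})
	if err != nil {
		log.Fatal(err)
	}

	// Get() calls Retrieve() and caches the temporary credentials until they expire.
	v, err := creds.Get()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("temporary access key:", v.AccessKeyID)
}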

@@ -0,0 +1,89 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
// A Chain will search for a provider which returns credentials
// and cache that provider until Retrieve is called again.
//
// The Chain provides a way of chaining multiple providers together
// which will pick the first available using priority order of the
// Providers in the list.
//
// If none of the Providers retrieve valid credentials Value, ChainProvider's
// Retrieve() will return the no credentials value.
//
// If a Provider is found which returns valid credentials Value ChainProvider
// will cache that Provider for all calls to IsExpired(), until Retrieve is
// called again after IsExpired() is true.
//
// creds := credentials.NewChainCredentials(
// []credentials.Provider{
// &credentials.EnvAWSS3{},
// &credentials.EnvMinio{},
// })
//
// // Usage of ChainCredentials.
// mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
// if err != nil {
// log.Fatalln(err)
// }
//
type Chain struct {
Providers []Provider
curr Provider
}
// NewChainCredentials returns a pointer to a new Credentials object
// wrapping a chain of providers.
func NewChainCredentials(providers []Provider) *Credentials {
return New(&Chain{
Providers: append([]Provider{}, providers...),
})
}
// Retrieve returns the credentials value; it returns no credentials (anonymous)
// if no credentials provider returned any value.
//
// If a provider is found with credentials, it will be cached and any calls
// to IsExpired() will return the expired state of the cached provider.
func (c *Chain) Retrieve() (Value, error) {
for _, p := range c.Providers {
creds, _ := p.Retrieve()
// Always prioritize non-anonymous providers, if any.
if creds.AccessKeyID == "" && creds.SecretAccessKey == "" {
continue
}
c.curr = p
return creds, nil
}
// At this point we have exhausted all the providers and
// are left without any credentials; return anonymous.
return Value{
SignerType: SignatureAnonymous,
}, nil
}
// IsExpired will return the expired state of the currently cached provider
// if there is one. If there is no current provider, true will be returned.
func (c *Chain) IsExpired() bool {
if c.curr != nil {
return c.curr.IsExpired()
}
return true
}
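A short usage sketch of the chain provider above, picking the first provider that yields non-anonymous credentials (the ordering here is illustrative, not from this diff):

package main

import (
	"log"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Try AWS-style env vars first, then MinIO env vars, then the shared AWS credentials file.
	creds := credentials.NewChainCredentials([]credentials.Provider{
		&credentials.EnvAWS{},
		&credentials.EnvMinio{},
		&credentials.FileAWSCredentials{},
	})

	v, err := creds.Get()
	if err != nil {
		log.Fatal(err)
	}
	if v.SignerType.IsAnonymous() {
		log.Println("no provider returned credentials; falling back to anonymous access")
		return
	}
	log.Println("using credentials from the first matching provider:", v.AccessKeyID)
}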

@@ -0,0 +1,17 @@
{
"version": "8",
"hosts": {
"play": {
"url": "https://play.min.io",
"accessKey": "Q3AM3UQ867SPQQA43P2F",
"secretKey": "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
"api": "S3v2"
},
"s3": {
"url": "https://s3.amazonaws.com",
"accessKey": "accessKey",
"secretKey": "secret",
"api": "S3v4"
}
}
}

@@ -0,0 +1,175 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import (
"sync"
"time"
)
// A Value is the AWS credentials value for individual credential fields.
type Value struct {
// AWS Access key ID
AccessKeyID string
// AWS Secret Access Key
SecretAccessKey string
// AWS Session Token
SessionToken string
// Signature Type.
SignerType SignatureType
}
// A Provider is the interface for any component which will provide credentials
// Value. A provider is required to manage its own Expired state, and what
// being expired means.
type Provider interface {
// Retrieve returns nil if it successfully retrieved the value.
// Error is returned if the value was not obtainable, or is empty.
Retrieve() (Value, error)
// IsExpired returns if the credentials are no longer valid, and need
// to be retrieved.
IsExpired() bool
}
// An Expiry provides shared expiration logic to be used by credentials
// providers to implement expiry functionality.
//
// The best method to use this struct is as an anonymous field within the
// provider's struct.
//
// Example:
// type IAMCredentialProvider struct {
// Expiry
// ...
// }
type Expiry struct {
// The date/time when to expire on
expiration time.Time
// If set will be used by IsExpired to determine the current time.
// Defaults to time.Now if CurrentTime is not set.
CurrentTime func() time.Time
}
// SetExpiration sets the expiration IsExpired will check when called.
//
// If window is greater than 0 the expiration time will be reduced by the
// window value.
//
// Using a window is helpful to trigger credentials to expire sooner than
// the expiration time given to ensure no requests are made with expired
// tokens.
func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
e.expiration = expiration
if window > 0 {
e.expiration = e.expiration.Add(-window)
}
}
// IsExpired returns if the credentials are expired.
func (e *Expiry) IsExpired() bool {
if e.CurrentTime == nil {
e.CurrentTime = time.Now
}
return e.expiration.Before(e.CurrentTime())
}
// Credentials - A container for synchronous safe retrieval of credentials Value.
// Credentials will cache the credentials value until they expire. Once the value
// expires the next Get will attempt to retrieve valid credentials.
//
// Credentials is safe to use across multiple goroutines and will manage the
// synchronous state so the Providers do not need to implement their own
// synchronization.
//
// The first Credentials.Get() will always call Provider.Retrieve() to get the
// first instance of the credentials Value. All calls to Get() after that
// will return the cached credentials Value until IsExpired() returns true.
type Credentials struct {
sync.Mutex
creds Value
forceRefresh bool
provider Provider
}
// New returns a pointer to a new Credentials with the provider set.
func New(provider Provider) *Credentials {
return &Credentials{
provider: provider,
forceRefresh: true,
}
}
// Get returns the credentials value, or error if the credentials Value failed
// to be retrieved.
//
// Will return the cached credentials Value if it has not expired. If the
// credentials Value has expired the Provider's Retrieve() will be called
// to refresh the credentials.
//
// If Credentials.Expire() was called the credentials Value will be force
// expired, and the next call to Get() will cause them to be refreshed.
func (c *Credentials) Get() (Value, error) {
c.Lock()
defer c.Unlock()
if c.isExpired() {
creds, err := c.provider.Retrieve()
if err != nil {
return Value{}, err
}
c.creds = creds
c.forceRefresh = false
}
return c.creds, nil
}
// Expire expires the credentials and forces them to be retrieved on the
// next call to Get().
//
// This will override the Provider's expired state, and force Credentials
// to call the Provider's Retrieve().
func (c *Credentials) Expire() {
c.Lock()
defer c.Unlock()
c.forceRefresh = true
}
// IsExpired returns if the credentials are no longer valid, and need
// to be refreshed.
//
// If the Credentials were forced to be expired with Expire() this will
// reflect that override.
func (c *Credentials) IsExpired() bool {
c.Lock()
defer c.Unlock()
return c.isExpired()
}
// isExpired helper method wrapping the definition of expired credentials.
func (c *Credentials) isExpired() bool {
return c.forceRefresh || c.provider.IsExpired()
}
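To illustrate how the Provider interface and the embedded Expiry helper above fit together, here is a hypothetical provider (not part of this diff) that mints short-lived keys and relies on Credentials to cache them until the expiry window is hit:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

// tokenProvider is a made-up Provider that fabricates a key pair valid for one minute.
type tokenProvider struct {
	credentials.Expiry
}

func (p *tokenProvider) Retrieve() (credentials.Value, error) {
	// Expire 10 seconds early so callers refresh before the key actually dies.
	p.SetExpiration(time.Now().Add(time.Minute), 10*time.Second)
	return credentials.Value{
		AccessKeyID:     fmt.Sprintf("AK-%d", time.Now().Unix()),
		SecretAccessKey: "fake-secret",
		SignerType:      credentials.SignatureV4,
	}, nil
}

func main() {
	creds := credentials.New(&tokenProvider{})

	v1, _ := creds.Get() // the first Get always calls Retrieve
	v2, _ := creds.Get() // served from cache until IsExpired() flips to true
	log.Println(v1.AccessKeyID == v2.AccessKeyID) // true while the cached value is fresh
}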

@@ -0,0 +1,12 @@
[default]
aws_access_key_id = accessKey
aws_secret_access_key = secret
aws_session_token = token
[no_token]
aws_access_key_id = accessKey
aws_secret_access_key = secret
[with_colon]
aws_access_key_id: accessKey
aws_secret_access_key: secret

@@ -0,0 +1,62 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package credentials provides credential retrieval and management
// for S3 compatible object storage.
//
// By default the Credentials.Get() will cache the successful result of a
// Provider's Retrieve() until Provider.IsExpired() returns true. At which
// point Credentials will call Provider's Retrieve() to get new credential Value.
//
// The Provider is responsible for determining when credentials have expired.
// It is also important to note that Credentials will always call Retrieve the
// first time Credentials.Get() is called.
//
// Example of using the environment variable credentials.
//
// creds := NewFromEnv()
// // Retrieve the credentials value
// credValue, err := creds.Get()
// if err != nil {
// // handle error
// }
//
// Example of forcing credentials to expire and be refreshed on the next Get().
// This may be helpful to proactively expire credentials and refresh them sooner
// than they would naturally expire on their own.
//
// creds := NewFromIAM("")
// creds.Expire()
// credsValue, err := creds.Get()
// // New credentials will be retrieved instead of from cache.
//
//
// Custom Provider
//
// Each Provider built into this package also provides a helper method to generate
// a Credentials pointer setup with the provider. To use a custom Provider just
// create a type which satisfies the Provider interface and pass it to the
// NewCredentials method.
//
// type MyProvider struct{}
// func (m *MyProvider) Retrieve() (Value, error) {...}
// func (m *MyProvider) IsExpired() bool {...}
//
// creds := NewCredentials(&MyProvider{})
// credValue, err := creds.Get()
//
package credentials

@@ -0,0 +1,71 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import "os"
// An EnvAWS retrieves credentials from the environment variables of the
// running process. Environment credentials never expire.
//
// Environment variables used:
//
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY.
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY.
// * Secret Token: AWS_SESSION_TOKEN.
type EnvAWS struct {
retrieved bool
}
// NewEnvAWS returns a pointer to a new Credentials object
// wrapping the environment variable provider.
func NewEnvAWS() *Credentials {
return New(&EnvAWS{})
}
// Retrieve retrieves the keys from the environment.
func (e *EnvAWS) Retrieve() (Value, error) {
e.retrieved = false
id := os.Getenv("AWS_ACCESS_KEY_ID")
if id == "" {
id = os.Getenv("AWS_ACCESS_KEY")
}
secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
if secret == "" {
secret = os.Getenv("AWS_SECRET_KEY")
}
signerType := SignatureV4
if id == "" || secret == "" {
signerType = SignatureAnonymous
}
e.retrieved = true
return Value{
AccessKeyID: id,
SecretAccessKey: secret,
SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
SignerType: signerType,
}, nil
}
// IsExpired returns if the credentials have been retrieved.
func (e *EnvAWS) IsExpired() bool {
return !e.retrieved
}

@@ -0,0 +1,62 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import "os"
// An EnvMinio retrieves credentials from the environment variables of the
// running process. Environment credentials never expire.
//
// Environment variables used:
//
// * Access Key ID: MINIO_ACCESS_KEY.
// * Secret Access Key: MINIO_SECRET_KEY.
type EnvMinio struct {
retrieved bool
}
// NewEnvMinio returns a pointer to a new Credentials object
// wrapping the environment variable provider.
func NewEnvMinio() *Credentials {
return New(&EnvMinio{})
}
// Retrieve retrieves the keys from the environment.
func (e *EnvMinio) Retrieve() (Value, error) {
e.retrieved = false
id := os.Getenv("MINIO_ACCESS_KEY")
secret := os.Getenv("MINIO_SECRET_KEY")
signerType := SignatureV4
if id == "" || secret == "" {
signerType = SignatureAnonymous
}
e.retrieved = true
return Value{
AccessKeyID: id,
SecretAccessKey: secret,
SignerType: signerType,
}, nil
}
// IsExpired returns if the credentials have been retrieved.
func (e *EnvMinio) IsExpired() bool {
return !e.retrieved
}
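A small sketch exercising the environment providers above; the exported values are examples matching the variable names documented in the comments, not real credentials:

package main

import (
	"log"
	"os"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Example values only; in practice these come from the process environment.
	os.Setenv("MINIO_ACCESS_KEY", "minio-access")
	os.Setenv("MINIO_SECRET_KEY", "minio-secret")

	creds := credentials.NewEnvMinio()
	v, err := creds.Get()
	if err != nil {
		log.Fatal(err)
	}
	log.Println(v.AccessKeyID, v.SignerType) // the signer degrades to anonymous if the variables are unset
}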

@@ -0,0 +1,120 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import (
"os"
"path/filepath"
homedir "github.com/mitchellh/go-homedir"
ini "gopkg.in/ini.v1"
)
// A FileAWSCredentials retrieves credentials from the current user's home
// directory, and keeps track if those credentials are expired.
//
// Profile ini file example: $HOME/.aws/credentials
type FileAWSCredentials struct {
// Path to the shared credentials file.
//
// If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
// env value is empty will default to current user's home directory.
// Linux/OSX: "$HOME/.aws/credentials"
// Windows: "%USERPROFILE%\.aws\credentials"
Filename string
// AWS Profile to extract credentials from the shared credentials file. If empty
// will default to environment variable "AWS_PROFILE" or "default" if
// environment variable is also not set.
Profile string
// retrieved states if the credentials have been successfully retrieved.
retrieved bool
}
// NewFileAWSCredentials returns a pointer to a new Credentials object
// wrapping the Profile file provider.
func NewFileAWSCredentials(filename string, profile string) *Credentials {
return New(&FileAWSCredentials{
Filename: filename,
Profile: profile,
})
}
// Retrieve reads and extracts the shared credentials from the current
// users home directory.
func (p *FileAWSCredentials) Retrieve() (Value, error) {
if p.Filename == "" {
p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
if p.Filename == "" {
homeDir, err := homedir.Dir()
if err != nil {
return Value{}, err
}
p.Filename = filepath.Join(homeDir, ".aws", "credentials")
}
}
if p.Profile == "" {
p.Profile = os.Getenv("AWS_PROFILE")
if p.Profile == "" {
p.Profile = "default"
}
}
p.retrieved = false
iniProfile, err := loadProfile(p.Filename, p.Profile)
if err != nil {
return Value{}, err
}
// Default to empty string if not found.
id := iniProfile.Key("aws_access_key_id")
// Default to empty string if not found.
secret := iniProfile.Key("aws_secret_access_key")
// Default to empty string if not found.
token := iniProfile.Key("aws_session_token")
p.retrieved = true
return Value{
AccessKeyID: id.String(),
SecretAccessKey: secret.String(),
SessionToken: token.String(),
SignerType: SignatureV4,
}, nil
}
// IsExpired returns if the shared credentials have expired.
func (p *FileAWSCredentials) IsExpired() bool {
return !p.retrieved
}
// loadProfile loads from the file pointed to by the shared credentials filename for the given profile.
// The credentials retrieved from the profile will be returned or error. Error will be
// returned if it fails to read from the file, or the data is invalid.
func loadProfile(filename, profile string) (*ini.Section, error) {
config, err := ini.Load(filename)
if err != nil {
return nil, err
}
iniProfile, err := config.GetSection(profile)
if err != nil {
return nil, err
}
return iniProfile, nil
}
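Given a shared credentials file in the format of the sample shown earlier, a minimal sketch of loading a profile (path and profile name are placeholders, not from this diff):

package main

import (
	"log"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// An empty filename falls back to AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials;
	// "no_token" refers to a profile like the one in the sample file above.
	creds := credentials.NewFileAWSCredentials("", "no_token")

	v, err := creds.Get()
	if err != nil {
		log.Fatal(err) // e.g. the file or profile does not exist
	}
	log.Println("loaded profile key:", v.AccessKeyID)
}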

@@ -0,0 +1,135 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import (
"io/ioutil"
"os"
"path/filepath"
"runtime"
jsoniter "github.com/json-iterator/go"
homedir "github.com/mitchellh/go-homedir"
)
// A FileMinioClient retrieves credentials from the current user's home
// directory, and keeps track if those credentials are expired.
//
// Configuration file example: $HOME/.mc/config.json
type FileMinioClient struct {
// Path to the shared credentials file.
//
// If empty will look for "MINIO_SHARED_CREDENTIALS_FILE" env variable. If the
// env value is empty will default to current user's home directory.
// Linux/OSX: "$HOME/.mc/config.json"
// Windows: "%USERALIAS%\mc\config.json"
Filename string
// MinIO Alias to extract credentials from the shared credentials file. If empty
// will default to environment variable "MINIO_ALIAS" or "default" if
// environment variable is also not set.
Alias string
// retrieved states if the credentials have been successfully retrieved.
retrieved bool
}
// NewFileMinioClient returns a pointer to a new Credentials object
// wrapping the Alias file provider.
func NewFileMinioClient(filename string, alias string) *Credentials {
return New(&FileMinioClient{
Filename: filename,
Alias: alias,
})
}
// Retrieve reads and extracts the shared credentials from the current
// users home directory.
func (p *FileMinioClient) Retrieve() (Value, error) {
if p.Filename == "" {
if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok {
p.Filename = value
} else {
homeDir, err := homedir.Dir()
if err != nil {
return Value{}, err
}
p.Filename = filepath.Join(homeDir, ".mc", "config.json")
if runtime.GOOS == "windows" {
p.Filename = filepath.Join(homeDir, "mc", "config.json")
}
}
}
if p.Alias == "" {
p.Alias = os.Getenv("MINIO_ALIAS")
if p.Alias == "" {
p.Alias = "s3"
}
}
p.retrieved = false
hostCfg, err := loadAlias(p.Filename, p.Alias)
if err != nil {
return Value{}, err
}
p.retrieved = true
return Value{
AccessKeyID: hostCfg.AccessKey,
SecretAccessKey: hostCfg.SecretKey,
SignerType: parseSignatureType(hostCfg.API),
}, nil
}
// IsExpired returns if the shared credentials have expired.
func (p *FileMinioClient) IsExpired() bool {
return !p.retrieved
}
// hostConfig configuration of a host.
type hostConfig struct {
URL string `json:"url"`
AccessKey string `json:"accessKey"`
SecretKey string `json:"secretKey"`
API string `json:"api"`
}
// config config version.
type config struct {
Version string `json:"version"`
Hosts map[string]hostConfig `json:"hosts"`
}
// loadAlias loads from the file pointed to by the shared credentials filename for the given alias.
// The credentials retrieved from the alias will be returned or error. Error will be
// returned if it fails to read from the file.
func loadAlias(filename, alias string) (hostConfig, error) {
cfg := &config{}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
configBytes, err := ioutil.ReadFile(filename)
if err != nil {
return hostConfig{}, err
}
if err = json.Unmarshal(configBytes, cfg); err != nil {
return hostConfig{}, err
}
return cfg.Hosts[alias], nil
}
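And the analogous sketch for the mc-style config.json shown earlier, reading the credentials of a named alias (alias and path are illustrative):

package main

import (
	"log"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// An empty filename falls back to MINIO_SHARED_CREDENTIALS_FILE or ~/.mc/config.json;
	// "play" matches the alias from the sample config above.
	creds := credentials.NewFileMinioClient("", "play")

	v, err := creds.Get()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("alias uses signature:", v.SignerType) // "S3v2" for the sample "play" host
}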

@@ -0,0 +1,326 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import (
"bufio"
"errors"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"path"
"time"
jsoniter "github.com/json-iterator/go"
)
// DefaultExpiryWindow - Default expiry window.
// ExpiryWindow will allow the credentials to trigger refreshing
// prior to the credentials actually expiring. This is beneficial
// so race conditions with expiring credentials do not cause
// request to fail unexpectedly due to ExpiredTokenException exceptions.
const DefaultExpiryWindow = time.Second * 10 // 10 secs
// An IAM retrieves credentials from the EC2 service, and keeps track if
// those credentials are expired.
type IAM struct {
Expiry
// Required http Client to use when connecting to IAM metadata service.
Client *http.Client
// Custom endpoint to fetch IAM role credentials.
endpoint string
}
// IAM Roles for Amazon EC2
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
const (
defaultIAMRoleEndpoint = "http://169.254.169.254"
defaultECSRoleEndpoint = "http://169.254.170.2"
defaultSTSRoleEndpoint = "https://sts.amazonaws.com"
defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials/"
)
// NewIAM returns a pointer to a new Credentials object wrapping the IAM.
func NewIAM(endpoint string) *Credentials {
p := &IAM{
Client: &http.Client{
Transport: http.DefaultTransport,
},
endpoint: endpoint,
}
return New(p)
}
// Retrieve retrieves credentials from the EC2 service.
// Error will be returned if the request fails, or if the desired
// credentials cannot be extracted from the response.
func (m *IAM) Retrieve() (Value, error) {
var roleCreds ec2RoleCredRespBody
var err error
endpoint := m.endpoint
switch {
case len(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE")) > 0:
if len(endpoint) == 0 {
if len(os.Getenv("AWS_REGION")) > 0 {
endpoint = "https://sts." + os.Getenv("AWS_REGION") + ".amazonaws.com"
} else {
endpoint = defaultSTSRoleEndpoint
}
}
creds := &STSWebIdentity{
Client: m.Client,
stsEndpoint: endpoint,
roleARN: os.Getenv("AWS_ROLE_ARN"),
roleSessionName: os.Getenv("AWS_ROLE_SESSION_NAME"),
getWebIDTokenExpiry: func() (*WebIdentityToken, error) {
token, err := ioutil.ReadFile(os.Getenv("AWS_WEB_IDENTITY_TOKEN_FILE"))
if err != nil {
return nil, err
}
return &WebIdentityToken{Token: string(token)}, nil
},
}
stsWebIdentityCreds, err := creds.Retrieve()
if err == nil {
m.SetExpiration(creds.Expiration(), DefaultExpiryWindow)
}
return stsWebIdentityCreds, err
case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")) > 0:
if len(endpoint) == 0 {
endpoint = fmt.Sprintf("%s%s", defaultECSRoleEndpoint,
os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"))
}
roleCreds, err = getEcsTaskCredentials(m.Client, endpoint)
case len(os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")) > 0:
if len(endpoint) == 0 {
endpoint = os.Getenv("AWS_CONTAINER_CREDENTIALS_FULL_URI")
var ok bool
if ok, err = isLoopback(endpoint); !ok {
if err == nil {
err = fmt.Errorf("uri host is not a loopback address: %s", endpoint)
}
break
}
}
roleCreds, err = getEcsTaskCredentials(m.Client, endpoint)
default:
roleCreds, err = getCredentials(m.Client, endpoint)
}
if err != nil {
return Value{}, err
}
// Expiry window is set to 10secs.
m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow)
return Value{
AccessKeyID: roleCreds.AccessKeyID,
SecretAccessKey: roleCreds.SecretAccessKey,
SessionToken: roleCreds.Token,
SignerType: SignatureV4,
}, nil
}
// A ec2RoleCredRespBody provides the shape for unmarshaling credential
// request responses.
type ec2RoleCredRespBody struct {
// Success State
Expiration time.Time
AccessKeyID string
SecretAccessKey string
Token string
// Error state
Code string
Message string
// Unused params.
LastUpdated time.Time
Type string
}
// Get the final IAM role URL where the request will
// be sent to fetch the rolling access credentials.
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
func getIAMRoleURL(endpoint string) (*url.URL, error) {
if endpoint == "" {
endpoint = defaultIAMRoleEndpoint
}
u, err := url.Parse(endpoint)
if err != nil {
return nil, err
}
u.Path = defaultIAMSecurityCredsPath
return u, nil
}
// listRoleNames lists the credential role names associated
// with the current EC2 service. An error is returned if there are
// no credentials, or if making or receiving the request fails.
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
func listRoleNames(client *http.Client, u *url.URL) ([]string, error) {
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
return nil, err
}
resp, err := client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, errors.New(resp.Status)
}
credsList := []string{}
s := bufio.NewScanner(resp.Body)
for s.Scan() {
credsList = append(credsList, s.Text())
}
if err := s.Err(); err != nil {
return nil, err
}
return credsList, nil
}
func getEcsTaskCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
req, err := http.NewRequest(http.MethodGet, endpoint, nil)
if err != nil {
return ec2RoleCredRespBody{}, err
}
resp, err := client.Do(req)
if err != nil {
return ec2RoleCredRespBody{}, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return ec2RoleCredRespBody{}, errors.New(resp.Status)
}
respCreds := ec2RoleCredRespBody{}
if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
return ec2RoleCredRespBody{}, err
}
return respCreds, nil
}
// getCredentials - obtains the credentials from the IAM role name associated with
// the current EC2 service.
//
// If the credentials cannot be found, or there is an error
// reading the response an error will be returned.
func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) {
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
u, err := getIAMRoleURL(endpoint)
if err != nil {
return ec2RoleCredRespBody{}, err
}
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
roleNames, err := listRoleNames(client, u)
if err != nil {
return ec2RoleCredRespBody{}, err
}
if len(roleNames) == 0 {
return ec2RoleCredRespBody{}, errors.New("No IAM roles attached to this EC2 service")
}
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
// - An instance profile can contain only one IAM role. This limit cannot be increased.
roleName := roleNames[0]
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
// The following command retrieves the security credentials for an
// IAM role named `s3access`.
//
// $ curl http://169.254.169.254/latest/meta-data/iam/security-credentials/s3access
//
u.Path = path.Join(u.Path, roleName)
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
return ec2RoleCredRespBody{}, err
}
resp, err := client.Do(req)
if err != nil {
return ec2RoleCredRespBody{}, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return ec2RoleCredRespBody{}, errors.New(resp.Status)
}
respCreds := ec2RoleCredRespBody{}
if err := jsoniter.NewDecoder(resp.Body).Decode(&respCreds); err != nil {
return ec2RoleCredRespBody{}, err
}
if respCreds.Code != "Success" {
// If an error code was returned something failed requesting the role.
return ec2RoleCredRespBody{}, errors.New(respCreds.Message)
}
return respCreds, nil
}
// isLoopback identifies if a uri's host is on a loopback address
func isLoopback(uri string) (bool, error) {
u, err := url.Parse(uri)
if err != nil {
return false, err
}
host := u.Hostname()
if len(host) == 0 {
return false, fmt.Errorf("can't parse host from uri: %s", uri)
}
ips, err := net.LookupHost(host)
if err != nil {
return false, err
}
for _, ip := range ips {
if !net.ParseIP(ip).IsLoopback() {
return false, nil
}
}
return true, nil
}
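When running on EC2/ECS, the IAM provider above is typically constructed with an empty endpoint so it probes the default metadata services; a hedged sketch, assuming such an environment:

package main

import (
	"log"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// An empty endpoint lets the provider pick the EC2/ECS/web-identity path
	// based on the AWS_* environment variables and the default metadata endpoints.
	creds := credentials.NewIAM("")

	v, err := creds.Get()
	if err != nil {
		// Outside of EC2/ECS this fails: there is no metadata service to query.
		log.Fatal(err)
	}
	log.Println("role credentials include a session token:", v.SessionToken != "")
}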

@@ -0,0 +1,77 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import "strings"
// SignatureType is type of Authorization requested for a given HTTP request.
type SignatureType int
// Different types of supported signatures - default is SignatureV4 or SignatureDefault.
const (
// SignatureDefault is always set to v4.
SignatureDefault SignatureType = iota
SignatureV4
SignatureV2
SignatureV4Streaming
SignatureAnonymous // Anonymous signature signifies no signature.
)
// IsV2 - is signature SignatureV2?
func (s SignatureType) IsV2() bool {
return s == SignatureV2
}
// IsV4 - is signature SignatureV4?
func (s SignatureType) IsV4() bool {
return s == SignatureV4 || s == SignatureDefault
}
// IsStreamingV4 - is signature SignatureV4Streaming?
func (s SignatureType) IsStreamingV4() bool {
return s == SignatureV4Streaming
}
// IsAnonymous - is signature empty?
func (s SignatureType) IsAnonymous() bool {
return s == SignatureAnonymous
}
// String returns a humanized version of the signature type;
// strings returned here are case insensitive.
func (s SignatureType) String() string {
if s.IsV2() {
return "S3v2"
} else if s.IsV4() {
return "S3v4"
} else if s.IsStreamingV4() {
return "S3v4Streaming"
}
return "Anonymous"
}
func parseSignatureType(str string) SignatureType {
if strings.EqualFold(str, "S3v4") {
return SignatureV4
} else if strings.EqualFold(str, "S3v2") {
return SignatureV2
} else if strings.EqualFold(str, "S3v4Streaming") {
return SignatureV4Streaming
}
return SignatureAnonymous
}

@@ -0,0 +1,67 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
// A Static is a set of credentials which are set programmatically,
// and will never expire.
type Static struct {
Value
}
// NewStaticV2 returns a pointer to a new Credentials object
// wrapping a static credentials value provider, with the signature
// set to v2. If access and secret are not specified then, regardless
// of the signature type set, Value will return as anonymous.
func NewStaticV2(id, secret, token string) *Credentials {
return NewStatic(id, secret, token, SignatureV2)
}
// NewStaticV4 is similar to NewStaticV2 with similar considerations.
func NewStaticV4(id, secret, token string) *Credentials {
return NewStatic(id, secret, token, SignatureV4)
}
// NewStatic returns a pointer to a new Credentials object
// wrapping a static credentials value provider.
func NewStatic(id, secret, token string, signerType SignatureType) *Credentials {
return New(&Static{
Value: Value{
AccessKeyID: id,
SecretAccessKey: secret,
SessionToken: token,
SignerType: signerType,
},
})
}
// Retrieve returns the static credentials.
func (s *Static) Retrieve() (Value, error) {
if s.AccessKeyID == "" || s.SecretAccessKey == "" {
// Anonymous is not an error
return Value{SignerType: SignatureAnonymous}, nil
}
return s.Value, nil
}
// IsExpired returns if the credentials are expired.
//
// For Static, the credentials never expire.
func (s *Static) IsExpired() bool {
return false
}
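A tiny sketch of the static provider above, including the anonymous case described in Retrieve (key values are placeholders):

package main

import (
	"log"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	fixed := credentials.NewStaticV4("ACCESS_KEY", "SECRET_KEY", "")
	v, _ := fixed.Get()
	log.Println(v.SignerType) // S3v4; static credentials never expire

	// Empty access/secret keys degrade to anonymous rather than returning an error.
	anon := credentials.NewStaticV4("", "", "")
	v, _ = anon.Get()
	log.Println(v.SignerType.IsAnonymous()) // true
}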

@@ -0,0 +1,162 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import (
"encoding/xml"
"errors"
"fmt"
"net/http"
"net/url"
"time"
)
// AssumedRoleUser - The identifiers for the temporary security credentials that
// the operation returns. Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser
type AssumedRoleUser struct {
Arn string
AssumedRoleID string `xml:"AssumeRoleId"`
}
// AssumeRoleWithClientGrantsResponse contains the result of successful AssumeRoleWithClientGrants request.
type AssumeRoleWithClientGrantsResponse struct {
XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithClientGrantsResponse" json:"-"`
Result ClientGrantsResult `xml:"AssumeRoleWithClientGrantsResult"`
ResponseMetadata struct {
RequestID string `xml:"RequestId,omitempty"`
} `xml:"ResponseMetadata,omitempty"`
}
// ClientGrantsResult - Contains the response to a successful AssumeRoleWithClientGrants
// request, including temporary credentials that can be used to make MinIO API requests.
type ClientGrantsResult struct {
AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
Audience string `xml:",omitempty"`
Credentials struct {
AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
} `xml:",omitempty"`
PackedPolicySize int `xml:",omitempty"`
Provider string `xml:",omitempty"`
SubjectFromClientGrantsToken string `xml:",omitempty"`
}
// ClientGrantsToken - client grants token with expiry.
type ClientGrantsToken struct {
Token string
Expiry int
}
// A STSClientGrants retrieves credentials from MinIO service, and keeps track if
// those credentials are expired.
type STSClientGrants struct {
Expiry
// Required http Client to use when connecting to MinIO STS service.
Client *http.Client
// MinIO endpoint to fetch STS credentials.
stsEndpoint string
// getClientGrantsTokenExpiry is a function to retrieve tokens
// from the IDP. This function should return two values: one is
// the accessToken, which is a self-contained access token (JWT),
// and the second return value is the expiry associated with
// this token. This is a customer-provided function and
// is mandatory.
getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)
}
// NewSTSClientGrants returns a pointer to a new
// Credentials object wrapping the STSClientGrants.
func NewSTSClientGrants(stsEndpoint string, getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (*Credentials, error) {
if stsEndpoint == "" {
return nil, errors.New("STS endpoint cannot be empty")
}
if getClientGrantsTokenExpiry == nil {
return nil, errors.New("Client grants access token and expiry retrieval function should be defined")
}
return New(&STSClientGrants{
Client: &http.Client{
Transport: http.DefaultTransport,
},
stsEndpoint: stsEndpoint,
getClientGrantsTokenExpiry: getClientGrantsTokenExpiry,
}), nil
}
func getClientGrantsCredentials(clnt *http.Client, endpoint string,
getClientGrantsTokenExpiry func() (*ClientGrantsToken, error)) (AssumeRoleWithClientGrantsResponse, error) {
accessToken, err := getClientGrantsTokenExpiry()
if err != nil {
return AssumeRoleWithClientGrantsResponse{}, err
}
v := url.Values{}
v.Set("Action", "AssumeRoleWithClientGrants")
v.Set("Token", accessToken.Token)
v.Set("DurationSeconds", fmt.Sprintf("%d", accessToken.Expiry))
v.Set("Version", "2011-06-15")
u, err := url.Parse(endpoint)
if err != nil {
return AssumeRoleWithClientGrantsResponse{}, err
}
u.RawQuery = v.Encode()
req, err := http.NewRequest(http.MethodPost, u.String(), nil)
if err != nil {
return AssumeRoleWithClientGrantsResponse{}, err
}
resp, err := clnt.Do(req)
if err != nil {
return AssumeRoleWithClientGrantsResponse{}, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return AssumeRoleWithClientGrantsResponse{}, errors.New(resp.Status)
}
a := AssumeRoleWithClientGrantsResponse{}
if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil {
return AssumeRoleWithClientGrantsResponse{}, err
}
return a, nil
}
// Retrieve retrieves credentials from the MinIO service.
// Error will be returned if the request fails.
func (m *STSClientGrants) Retrieve() (Value, error) {
a, err := getClientGrantsCredentials(m.Client, m.stsEndpoint, m.getClientGrantsTokenExpiry)
if err != nil {
return Value{}, err
}
// Expiry window is set to 10secs.
m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
return Value{
AccessKeyID: a.Result.Credentials.AccessKey,
SecretAccessKey: a.Result.Credentials.SecretKey,
SessionToken: a.Result.Credentials.SessionToken,
SignerType: SignatureV4,
}, nil
}
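A hedged sketch of wiring the client-grants provider above to an identity provider; the token retrieval function here is a stub standing in for a real OAuth2 client-credentials exchange, and the endpoint is a placeholder:

package main

import (
	"log"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	getToken := func() (*credentials.ClientGrantsToken, error) {
		// In reality this would call the IDP's token endpoint and return its JWT plus expiry.
		return &credentials.ClientGrantsToken{Token: "stub.jwt.token", Expiry: 3600}, nil
	}

	creds, err := credentials.NewSTSClientGrants("https://minio.example.com", getToken)
	if err != nil {
		log.Fatal(err)
	}

	v, err := creds.Get() // exchanges the JWT for temporary MinIO credentials
	if err != nil {
		log.Fatal(err)
	}
	log.Println("temporary key:", v.AccessKeyID)
}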

@@ -0,0 +1,119 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import (
"encoding/xml"
"errors"
"net/http"
"net/url"
"time"
)
// AssumeRoleWithLDAPResponse contains the result of successful
// AssumeRoleWithLDAPIdentity request
type AssumeRoleWithLDAPResponse struct {
XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithLDAPIdentityResponse" json:"-"`
Result LDAPIdentityResult `xml:"AssumeRoleWithLDAPIdentityResult"`
ResponseMetadata struct {
RequestID string `xml:"RequestId,omitempty"`
} `xml:"ResponseMetadata,omitempty"`
}
// LDAPIdentityResult - contains credentials for a successful
// AssumeRoleWithLDAPIdentity request.
type LDAPIdentityResult struct {
Credentials struct {
AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
} `xml:",omitempty"`
SubjectFromToken string `xml:",omitempty"`
}
// LDAPIdentity retrieves credentials from MinIO
type LDAPIdentity struct {
Expiry
stsEndpoint string
ldapUsername, ldapPassword string
}
// NewLDAPIdentity returns new credentials object that uses LDAP
// Identity.
func NewLDAPIdentity(stsEndpoint, ldapUsername, ldapPassword string) (*Credentials, error) {
return New(&LDAPIdentity{
stsEndpoint: stsEndpoint,
ldapUsername: ldapUsername,
ldapPassword: ldapPassword,
}), nil
}
// Retrieve gets the credential by calling the MinIO STS API for
// LDAP on the configured stsEndpoint.
func (k *LDAPIdentity) Retrieve() (value Value, err error) {
u, kerr := url.Parse(k.stsEndpoint)
if kerr != nil {
err = kerr
return
}
clnt := &http.Client{Transport: http.DefaultTransport}
v := url.Values{}
v.Set("Action", "AssumeRoleWithLDAPIdentity")
v.Set("Version", "2011-06-15")
v.Set("LDAPUsername", k.ldapUsername)
v.Set("LDAPPassword", k.ldapPassword)
u.RawQuery = v.Encode()
req, kerr := http.NewRequest(http.MethodPost, u.String(), nil)
if kerr != nil {
err = kerr
return
}
resp, kerr := clnt.Do(req)
if kerr != nil {
err = kerr
return
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
err = errors.New(resp.Status)
return
}
r := AssumeRoleWithLDAPResponse{}
if err = xml.NewDecoder(resp.Body).Decode(&r); err != nil {
return
}
cr := r.Result.Credentials
k.SetExpiration(cr.Expiration, DefaultExpiryWindow)
return Value{
AccessKeyID: cr.AccessKey,
SecretAccessKey: cr.SecretKey,
SessionToken: cr.SessionToken,
SignerType: SignatureV4,
}, nil
}
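A minimal sketch of the LDAP identity provider above (endpoint and account are placeholders, not from this diff):

package main

import (
	"log"

	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	creds, err := credentials.NewLDAPIdentity("https://minio.example.com", "ldap-user", "ldap-password")
	if err != nil {
		log.Fatal(err)
	}

	v, err := creds.Get() // calls AssumeRoleWithLDAPIdentity on the configured STS endpoint
	if err != nil {
		log.Fatal(err)
	}
	log.Println("LDAP-derived key:", v.AccessKeyID)
}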

@@ -0,0 +1,181 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package credentials
import (
"encoding/xml"
"errors"
"fmt"
"net/http"
"net/url"
"strconv"
"time"
)
// AssumeRoleWithWebIdentityResponse contains the result of successful AssumeRoleWithWebIdentity request.
type AssumeRoleWithWebIdentityResponse struct {
XMLName xml.Name `xml:"https://sts.amazonaws.com/doc/2011-06-15/ AssumeRoleWithWebIdentityResponse" json:"-"`
Result WebIdentityResult `xml:"AssumeRoleWithWebIdentityResult"`
ResponseMetadata struct {
RequestID string `xml:"RequestId,omitempty"`
} `xml:"ResponseMetadata,omitempty"`
}
// WebIdentityResult - Contains the response to a successful AssumeRoleWithWebIdentity
// request, including temporary credentials that can be used to make MinIO API requests.
type WebIdentityResult struct {
AssumedRoleUser AssumedRoleUser `xml:",omitempty"`
Audience string `xml:",omitempty"`
Credentials struct {
AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"`
SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"`
Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"`
SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"`
} `xml:",omitempty"`
PackedPolicySize int `xml:",omitempty"`
Provider string `xml:",omitempty"`
SubjectFromWebIdentityToken string `xml:",omitempty"`
}
// WebIdentityToken - web identity token with expiry.
type WebIdentityToken struct {
Token string
Expiry int
}
// A STSWebIdentity retrieves credentials from MinIO service, and keeps track if
// those credentials are expired.
type STSWebIdentity struct {
Expiry
// Required http Client to use when connecting to MinIO STS service.
Client *http.Client
// MinIO endpoint to fetch STS credentials.
stsEndpoint string
// getWebIDTokenExpiry is a function which returns ID tokens
// from the IDP. This function should return two values: one
// is the ID token, which is a self-contained ID token (JWT),
// and the second return value is the expiry associated with
// this token.
// This is a customer-provided function and is mandatory.
getWebIDTokenExpiry func() (*WebIdentityToken, error)
// roleARN is the Amazon Resource Name (ARN) of the role that the caller is
// assuming.
roleARN string
// roleSessionName is the identifier for the assumed role session.
roleSessionName string
}
// NewSTSWebIdentity returns a pointer to a new
// Credentials object wrapping the STSWebIdentity.
func NewSTSWebIdentity(stsEndpoint string, getWebIDTokenExpiry func() (*WebIdentityToken, error)) (*Credentials, error) {
if stsEndpoint == "" {
return nil, errors.New("STS endpoint cannot be empty")
}
if getWebIDTokenExpiry == nil {
return nil, errors.New("Web ID token and expiry retrieval function should be defined")
}
return New(&STSWebIdentity{
Client: &http.Client{
Transport: http.DefaultTransport,
},
stsEndpoint: stsEndpoint,
getWebIDTokenExpiry: getWebIDTokenExpiry,
}), nil
}
func getWebIdentityCredentials(clnt *http.Client, endpoint, roleARN, roleSessionName string,
getWebIDTokenExpiry func() (*WebIdentityToken, error)) (AssumeRoleWithWebIdentityResponse, error) {
idToken, err := getWebIDTokenExpiry()
if err != nil {
return AssumeRoleWithWebIdentityResponse{}, err
}
v := url.Values{}
v.Set("Action", "AssumeRoleWithWebIdentity")
if len(roleARN) > 0 {
v.Set("RoleArn", roleARN)
if len(roleSessionName) == 0 {
roleSessionName = strconv.FormatInt(time.Now().UnixNano(), 10)
}
v.Set("RoleSessionName", roleSessionName)
}
v.Set("WebIdentityToken", idToken.Token)
if idToken.Expiry > 0 {
v.Set("DurationSeconds", fmt.Sprintf("%d", idToken.Expiry))
}
v.Set("Version", "2011-06-15")
u, err := url.Parse(endpoint)
if err != nil {
return AssumeRoleWithWebIdentityResponse{}, err
}
u.RawQuery = v.Encode()
req, err := http.NewRequest(http.MethodPost, u.String(), nil)
if err != nil {
return AssumeRoleWithWebIdentityResponse{}, err
}
resp, err := clnt.Do(req)
if err != nil {
return AssumeRoleWithWebIdentityResponse{}, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return AssumeRoleWithWebIdentityResponse{}, errors.New(resp.Status)
}
a := AssumeRoleWithWebIdentityResponse{}
if err = xml.NewDecoder(resp.Body).Decode(&a); err != nil {
return AssumeRoleWithWebIdentityResponse{}, err
}
return a, nil
}
// Retrieve retrieves credentials from the MinIO service.
// Error will be returned if the request fails.
func (m *STSWebIdentity) Retrieve() (Value, error) {
a, err := getWebIdentityCredentials(m.Client, m.stsEndpoint, m.roleARN, m.roleSessionName, m.getWebIDTokenExpiry)
if err != nil {
return Value{}, err
}
	// The expiry window is set to 10 seconds.
m.SetExpiration(a.Result.Credentials.Expiration, DefaultExpiryWindow)
return Value{
AccessKeyID: a.Result.Credentials.AccessKey,
SecretAccessKey: a.Result.Credentials.SecretKey,
SessionToken: a.Result.Credentials.SessionToken,
SignerType: SignatureV4,
}, nil
}
// Expiration returns the expiration time of the credentials
func (m *STSWebIdentity) Expiration() time.Time {
return m.expiration
}
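// A minimal usage sketch; the STS endpoint, the token file path and the error
// handling below are placeholders, not values defined by this package. The
// callback returns the raw OIDC JWT; Expiry may be left zero to let the server
// choose the session duration.
//
//	getToken := func() (*credentials.WebIdentityToken, error) {
//		data, err := ioutil.ReadFile("/path/to/web-identity-token.jwt") // hypothetical token path
//		if err != nil {
//			return nil, err
//		}
//		return &credentials.WebIdentityToken{Token: string(data)}, nil
//	}
//	creds, err := credentials.NewSTSWebIdentity("https://minio.example.com", getToken)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	v, err := creds.Get() // calls Retrieve() and caches the value until it expires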


@ -0,0 +1,198 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package encrypt
import (
"crypto/md5"
"encoding/base64"
"errors"
"net/http"
jsoniter "github.com/json-iterator/go"
"golang.org/x/crypto/argon2"
)
const (
// sseGenericHeader is the AWS SSE header used for SSE-S3 and SSE-KMS.
sseGenericHeader = "X-Amz-Server-Side-Encryption"
// sseKmsKeyID is the AWS SSE-KMS key id.
sseKmsKeyID = sseGenericHeader + "-Aws-Kms-Key-Id"
// sseEncryptionContext is the AWS SSE-KMS Encryption Context data.
sseEncryptionContext = sseGenericHeader + "-Encryption-Context"
// sseCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key.
sseCustomerAlgorithm = sseGenericHeader + "-Customer-Algorithm"
// sseCustomerKey is the AWS SSE-C encryption key HTTP header key.
sseCustomerKey = sseGenericHeader + "-Customer-Key"
// sseCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key.
sseCustomerKeyMD5 = sseGenericHeader + "-Customer-Key-MD5"
// sseCopyCustomerAlgorithm is the AWS SSE-C algorithm HTTP header key for CopyObject API.
sseCopyCustomerAlgorithm = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"
// sseCopyCustomerKey is the AWS SSE-C encryption key HTTP header key for CopyObject API.
sseCopyCustomerKey = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"
// sseCopyCustomerKeyMD5 is the AWS SSE-C encryption key MD5 HTTP header key for CopyObject API.
sseCopyCustomerKeyMD5 = "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5"
)
// PBKDF creates a SSE-C key from the provided password and salt.
// PBKDF is a password-based key derivation function
// which can be used to derive a high-entropy cryptographic
// key from a low-entropy password and a salt.
type PBKDF func(password, salt []byte) ServerSide
// DefaultPBKDF is the default PBKDF. It uses Argon2id with the
// recommended parameters from the RFC draft (1 pass, 64 MB memory, 4 threads).
var DefaultPBKDF PBKDF = func(password, salt []byte) ServerSide {
sse := ssec{}
copy(sse[:], argon2.IDKey(password, salt, 1, 64*1024, 4, 32))
return sse
}
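// A usage sketch, assuming a *minio.Client named client plus ctx, reader and
// size supplied by the caller; the bucket/object names and the password are
// placeholders. Here the salt is derived from the bucket/object path so each
// object gets a distinct key.
//
//	sse := encrypt.DefaultPBKDF([]byte("my secret password"), []byte("my-bucket/my-object"))
//	_, err := client.PutObject(ctx, "my-bucket", "my-object", reader, size,
//		minio.PutObjectOptions{ServerSideEncryption: sse})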
// Type is the server-side-encryption method. It represents one of
// the following encryption methods:
// - SSE-C: server-side-encryption with customer provided keys
// - KMS: server-side-encryption with managed keys
// - S3: server-side-encryption using S3 storage encryption
type Type string
const (
// SSEC represents server-side-encryption with customer provided keys
SSEC Type = "SSE-C"
// KMS represents server-side-encryption with managed keys
KMS Type = "KMS"
// S3 represents server-side-encryption using S3 storage encryption
S3 Type = "S3"
)
// ServerSide is a form of S3 server-side-encryption.
type ServerSide interface {
// Type returns the server-side-encryption method.
Type() Type
// Marshal adds encryption headers to the provided HTTP headers.
// It marks an HTTP request as server-side-encryption request
// and inserts the required data into the headers.
Marshal(h http.Header)
}
// NewSSE returns a server-side-encryption using S3 storage encryption.
// Using SSE-S3 the server will encrypt the object with server-managed keys.
func NewSSE() ServerSide { return s3{} }
// NewSSEKMS returns a new server-side-encryption using SSE-KMS and the provided Key Id and context.
func NewSSEKMS(keyID string, context interface{}) (ServerSide, error) {
if context == nil {
return kms{key: keyID, hasContext: false}, nil
}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
serializedContext, err := json.Marshal(context)
if err != nil {
return nil, err
}
return kms{key: keyID, context: serializedContext, hasContext: true}, nil
}
// NewSSEC returns a new server-side-encryption using SSE-C and the provided key.
// The key must be 32 bytes long.
func NewSSEC(key []byte) (ServerSide, error) {
if len(key) != 32 {
return nil, errors.New("encrypt: SSE-C key must be 256 bit long")
}
sse := ssec{}
copy(sse[:], key)
return sse, nil
}
// SSE transforms a SSE-C copy encryption into a SSE-C encryption.
// It is the inverse of SSECopy(...).
//
// If the provided sse is no SSE-C copy encryption SSE returns
// sse unmodified.
func SSE(sse ServerSide) ServerSide {
if sse == nil || sse.Type() != SSEC {
return sse
}
if sse, ok := sse.(ssecCopy); ok {
return ssec(sse)
}
return sse
}
// SSECopy transforms a SSE-C encryption into a SSE-C copy
// encryption. This is required for SSE-C key rotation or a SSE-C
// copy where the source and the destination should be encrypted.
//
// If the provided sse is no SSE-C encryption SSECopy returns
// sse unmodified.
func SSECopy(sse ServerSide) ServerSide {
if sse == nil || sse.Type() != SSEC {
return sse
}
if sse, ok := sse.(ssec); ok {
return ssecCopy(sse)
}
return sse
}
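// A small sketch of how the two directions marshal (the key bytes are a
// placeholder): SSECopy(sse) emits the copy-source customer-key headers,
// while the plain form emits the destination headers, which is what SSE-C
// key rotation over a server-side copy relies on.
//
//	key := bytes.Repeat([]byte{'k'}, 32) // placeholder 256-bit key
//	sse, _ := encrypt.NewSSEC(key)
//	src, dst := http.Header{}, http.Header{}
//	encrypt.SSECopy(sse).Marshal(src) // X-Amz-Copy-Source-Server-Side-Encryption-Customer-* headers
//	sse.Marshal(dst)                  // X-Amz-Server-Side-Encryption-Customer-* headers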
type ssec [32]byte
func (s ssec) Type() Type { return SSEC }
func (s ssec) Marshal(h http.Header) {
keyMD5 := md5.Sum(s[:])
h.Set(sseCustomerAlgorithm, "AES256")
h.Set(sseCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
h.Set(sseCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
}
type ssecCopy [32]byte
func (s ssecCopy) Type() Type { return SSEC }
func (s ssecCopy) Marshal(h http.Header) {
keyMD5 := md5.Sum(s[:])
h.Set(sseCopyCustomerAlgorithm, "AES256")
h.Set(sseCopyCustomerKey, base64.StdEncoding.EncodeToString(s[:]))
h.Set(sseCopyCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMD5[:]))
}
type s3 struct{}
func (s s3) Type() Type { return S3 }
func (s s3) Marshal(h http.Header) { h.Set(sseGenericHeader, "AES256") }
type kms struct {
key string
context []byte
hasContext bool
}
func (s kms) Type() Type { return KMS }
func (s kms) Marshal(h http.Header) {
h.Set(sseGenericHeader, "aws:kms")
if s.key != "" {
h.Set(sseKmsKeyID, s.key)
}
if s.hasContext {
h.Set(sseEncryptionContext, base64.StdEncoding.EncodeToString(s.context))
}
}


@ -0,0 +1,282 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package lifecycle contains all the lifecycle related data types and marshallers.
package lifecycle
import (
"encoding/xml"
"time"
)
// AbortIncompleteMultipartUpload structure, not supported yet on MinIO
type AbortIncompleteMultipartUpload struct {
XMLName xml.Name `xml:"AbortIncompleteMultipartUpload,omitempty" json:"-"`
DaysAfterInitiation ExpirationDays `xml:"DaysAfterInitiation,omitempty" json:"DaysAfterInitiation,omitempty"`
}
// IsDaysNull returns true if days field is null
func (n AbortIncompleteMultipartUpload) IsDaysNull() bool {
return n.DaysAfterInitiation == ExpirationDays(0)
}
// MarshalXML encodes the element only if DaysAfterInitiation is set to a non-zero value
func (n AbortIncompleteMultipartUpload) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if n.IsDaysNull() {
return nil
}
type abortIncompleteMultipartUploadWrapper AbortIncompleteMultipartUpload
return e.EncodeElement(abortIncompleteMultipartUploadWrapper(n), start)
}
// NoncurrentVersionExpiration - Specifies when noncurrent object versions expire.
// Upon expiration, server permanently deletes the noncurrent object versions.
// Set this lifecycle configuration action on a bucket that has versioning enabled
// (or suspended) to request server delete noncurrent object versions at a
// specific period in the object's lifetime.
type NoncurrentVersionExpiration struct {
XMLName xml.Name `xml:"NoncurrentVersionExpiration" json:"-"`
NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty"`
}
// NoncurrentVersionTransition structure, set this action to request the server to
// transition noncurrent object versions to a different storage class
// at a specific period in the object's lifetime.
type NoncurrentVersionTransition struct {
XMLName xml.Name `xml:"NoncurrentVersionTransition,omitempty" json:"-"`
StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
NoncurrentDays ExpirationDays `xml:"NoncurrentDays,omitempty" json:"NoncurrentDays,omitempty"`
}
// MarshalXML encodes the element only if NoncurrentDays is set to a non-zero value
func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if n.IsDaysNull() {
return nil
}
type noncurrentVersionExpirationWrapper NoncurrentVersionExpiration
return e.EncodeElement(noncurrentVersionExpirationWrapper(n), start)
}
// IsDaysNull returns true if days field is null
func (n NoncurrentVersionExpiration) IsDaysNull() bool {
return n.NoncurrentDays == ExpirationDays(0)
}
// MarshalXML is extended to leave out
// <NoncurrentVersionTransition></NoncurrentVersionTransition> tags
func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if n.NoncurrentDays == ExpirationDays(0) {
		return nil
	}
	// Encode through a wrapper type so EncodeElement does not call back into
	// this MarshalXML method and recurse indefinitely.
	type noncurrentVersionTransitionWrapper NoncurrentVersionTransition
	return e.EncodeElement(noncurrentVersionTransitionWrapper(n), start)
}
// Tag structure key/value pair representing an object tag to apply lifecycle configuration
type Tag struct {
XMLName xml.Name `xml:"Tag,omitempty" json:"-"`
Key string `xml:"Key,omitempty" json:"Key,omitempty"`
Value string `xml:"Value,omitempty" json:"Value,omitempty"`
}
// IsEmpty returns whether this tag is empty or not.
func (tag Tag) IsEmpty() bool {
return tag.Key == ""
}
// Transition structure - transition details of lifecycle configuration
type Transition struct {
XMLName xml.Name `xml:"Transition" json:"-"`
Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"`
StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"`
}
// IsDaysNull returns true if days field is null
func (t Transition) IsDaysNull() bool {
return t.Days == ExpirationDays(0)
}
// IsDateNull returns true if date field is null
func (t Transition) IsDateNull() bool {
return t.Date.Time.IsZero()
}
// IsNull returns true if both date and days fields are null
func (t Transition) IsNull() bool {
return t.IsDaysNull() && t.IsDateNull()
}
// MarshalXML encodes the transition only if it is non-null
func (t Transition) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error {
if t.IsNull() {
return nil
}
type transitionWrapper Transition
return en.EncodeElement(transitionWrapper(t), startElement)
}
// And And Rule for LifecycleTag, to be used in LifecycleRuleFilter
type And struct {
XMLName xml.Name `xml:"And,omitempty" json:"-"`
Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
Tags []Tag `xml:"Tag,omitempty" json:"Tags,omitempty"`
}
// IsEmpty returns true if Tags field is null
func (a And) IsEmpty() bool {
return len(a.Tags) == 0 && a.Prefix == ""
}
// Filter will be used in selecting rule(s) for lifecycle configuration
type Filter struct {
XMLName xml.Name `xml:"Filter" json:"-"`
And And `xml:"And,omitempty" json:"And,omitempty"`
Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
Tag Tag `xml:"Tag,omitempty" json:"-"`
}
// MarshalXML - produces the xml representation of the Filter struct
// only one of Prefix, And and Tag should be present in the output.
func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if err := e.EncodeToken(start); err != nil {
return err
}
switch {
case !f.And.IsEmpty():
if err := e.EncodeElement(f.And, xml.StartElement{Name: xml.Name{Local: "And"}}); err != nil {
return err
}
case !f.Tag.IsEmpty():
if err := e.EncodeElement(f.Tag, xml.StartElement{Name: xml.Name{Local: "Tag"}}); err != nil {
return err
}
default:
// Always print Prefix field when both And & Tag are empty
if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil {
return err
}
}
return e.EncodeToken(xml.EndElement{Name: start.Name})
}
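// For illustration, a Filter{Prefix: "logs/"} with empty And and Tag marshals to
//
//	<Filter><Prefix>logs/</Prefix></Filter>
//
// whereas a Filter carrying only a Tag emits just the <Tag> element and one
// carrying an And emits just the <And> element, matching the precedence above.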
// ExpirationDays is an integer type used to marshal/unmarshal Days in Expiration
type ExpirationDays int
// MarshalXML encodes number of days to expire if it is non-zero and
// encodes empty string otherwise
func (eDays ExpirationDays) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if eDays == 0 {
return nil
}
return e.EncodeElement(int(eDays), startElement)
}
// ExpirationDate is an embedded type containing time.Time to unmarshal
// Date in Expiration
type ExpirationDate struct {
time.Time
}
// MarshalXML encodes expiration date if it is non-zero and encodes
// empty string otherwise
func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if eDate.Time.IsZero() {
return nil
}
return e.EncodeElement(eDate.Format(time.RFC3339), startElement)
}
// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element.
type ExpireDeleteMarker bool
// MarshalXML encodes delete marker boolean into an XML form.
func (b ExpireDeleteMarker) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if !b {
return nil
}
type expireDeleteMarkerWrapper ExpireDeleteMarker
return e.EncodeElement(expireDeleteMarkerWrapper(b), startElement)
}
// Expiration structure - expiration details of lifecycle configuration
type Expiration struct {
XMLName xml.Name `xml:"Expiration,omitempty" json:"-"`
Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"`
Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"`
DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty"`
}
// IsDaysNull returns true if days field is null
func (e Expiration) IsDaysNull() bool {
return e.Days == ExpirationDays(0)
}
// IsDateNull returns true if date field is null
func (e Expiration) IsDateNull() bool {
return e.Date.Time.IsZero()
}
// IsNull returns true if both date and days fields are null
func (e Expiration) IsNull() bool {
return e.IsDaysNull() && e.IsDateNull()
}
// MarshalXML encodes the expiration only if it is non-null
func (e Expiration) MarshalXML(en *xml.Encoder, startElement xml.StartElement) error {
if e.IsNull() {
return nil
}
type expirationWrapper Expiration
return en.EncodeElement(expirationWrapper(e), startElement)
}
// Rule represents a single rule in lifecycle configuration
type Rule struct {
XMLName xml.Name `xml:"Rule,omitempty" json:"-"`
AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty" json:"AbortIncompleteMultipartUpload,omitempty"`
Expiration Expiration `xml:"Expiration,omitempty" json:"Expiration,omitempty"`
ID string `xml:"ID" json:"ID"`
RuleFilter Filter `xml:"Filter,omitempty" json:"Filter,omitempty"`
NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty" json:"NoncurrentVersionExpiration,omitempty"`
NoncurrentVersionTransition NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty" json:"NoncurrentVersionTransition,omitempty"`
Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
Status string `xml:"Status" json:"Status"`
Transition Transition `xml:"Transition,omitempty" json:"Transition,omitempty"`
}
// Configuration is a collection of Rule objects.
type Configuration struct {
XMLName xml.Name `xml:"LifecycleConfiguration,omitempty" json:"-"`
Rules []Rule `xml:"Rule"`
}
// Empty check if lifecycle configuration is empty
func (c *Configuration) Empty() bool {
if c == nil {
return true
}
return len(c.Rules) == 0
}
// NewConfiguration initializes a fresh lifecycle configuration
// for manipulation, such as setting and removing lifecycle rules
// and filters.
func NewConfiguration() *Configuration {
return &Configuration{}
}
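// A construction sketch (the bucket name and the client call that applies the
// configuration are assumptions about the surrounding client API): expire
// objects under the "tmp/" prefix after 30 days.
//
//	cfg := lifecycle.NewConfiguration()
//	cfg.Rules = []lifecycle.Rule{{
//		ID:         "expire-tmp",
//		Status:     "Enabled",
//		RuleFilter: lifecycle.Filter{Prefix: "tmp/"},
//		Expiration: lifecycle.Expiration{Days: lifecycle.ExpirationDays(30)},
//	}}
//	err := client.SetBucketLifecycle(ctx, "my-bucket", cfg)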


@ -0,0 +1,78 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package notification
// identity represents the user ID; this is a compliance field.
type identity struct {
PrincipalID string `json:"principalId"`
}
// event bucket metadata.
type bucketMeta struct {
Name string `json:"name"`
OwnerIdentity identity `json:"ownerIdentity"`
ARN string `json:"arn"`
}
// event object metadata.
type objectMeta struct {
Key string `json:"key"`
Size int64 `json:"size,omitempty"`
ETag string `json:"eTag,omitempty"`
ContentType string `json:"contentType,omitempty"`
UserMetadata map[string]string `json:"userMetadata,omitempty"`
VersionID string `json:"versionId,omitempty"`
Sequencer string `json:"sequencer"`
}
// event server specific metadata.
type eventMeta struct {
SchemaVersion string `json:"s3SchemaVersion"`
ConfigurationID string `json:"configurationId"`
Bucket bucketMeta `json:"bucket"`
Object objectMeta `json:"object"`
}
// sourceInfo represents information on the client that
// triggered the event notification.
type sourceInfo struct {
Host string `json:"host"`
Port string `json:"port"`
UserAgent string `json:"userAgent"`
}
// Event represents an Amazon S3 bucket notification event.
type Event struct {
EventVersion string `json:"eventVersion"`
EventSource string `json:"eventSource"`
AwsRegion string `json:"awsRegion"`
EventTime string `json:"eventTime"`
EventName string `json:"eventName"`
UserIdentity identity `json:"userIdentity"`
RequestParameters map[string]string `json:"requestParameters"`
ResponseElements map[string]string `json:"responseElements"`
S3 eventMeta `json:"s3"`
Source sourceInfo `json:"source"`
}
// Info - represents the collection of notification events, additionally
// also reports errors if any while listening on bucket notifications.
type Info struct {
Records []Event
Err error
}
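// A consumption sketch: assuming the client exposes a listen call that yields
// a channel of Info (the call name and its arguments below are assumptions;
// only Info and Event come from this file), events are drained like so:
//
//	for info := range client.ListenBucketNotification(ctx, "my-bucket", "", "", []string{"s3:ObjectCreated:*"}) {
//		if info.Err != nil {
//			log.Println(info.Err)
//			continue
//		}
//		for _, ev := range info.Records {
//			log.Println(ev.EventName, ev.S3.Bucket.Name, ev.S3.Object.Key)
//		}
//	}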


@ -0,0 +1,385 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package notification
import (
"encoding/xml"
"errors"
"fmt"
"github.com/minio/minio-go/v7/pkg/set"
)
// EventType is an S3 notification event associated with the bucket notification configuration
type EventType string
// The role of all event types is described in:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
const (
ObjectCreatedAll EventType = "s3:ObjectCreated:*"
ObjectCreatedPut = "s3:ObjectCreated:Put"
ObjectCreatedPost = "s3:ObjectCreated:Post"
ObjectCreatedCopy = "s3:ObjectCreated:Copy"
ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload"
ObjectAccessedGet = "s3:ObjectAccessed:Get"
ObjectAccessedHead = "s3:ObjectAccessed:Head"
ObjectAccessedAll = "s3:ObjectAccessed:*"
ObjectRemovedAll = "s3:ObjectRemoved:*"
ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
)
// FilterRule - child of S3Key, a tag in the notification xml which
// carries suffix/prefix filters
type FilterRule struct {
Name string `xml:"Name"`
Value string `xml:"Value"`
}
// S3Key - child of Filter, a tag in the notification xml which
// carries suffix/prefix filters
type S3Key struct {
FilterRules []FilterRule `xml:"FilterRule,omitempty"`
}
// Filter - a tag in the notification xml structure which carries
// suffix/prefix filters
type Filter struct {
S3Key S3Key `xml:"S3Key,omitempty"`
}
// Arn - holds ARN information that will be sent to the web service,
// ARN description can be found at http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
type Arn struct {
Partition string
Service string
Region string
AccountID string
Resource string
}
// NewArn creates new ARN based on the given partition, service, region, account id and resource
func NewArn(partition, service, region, accountID, resource string) Arn {
return Arn{Partition: partition,
Service: service,
Region: region,
AccountID: accountID,
Resource: resource}
}
// String returns the string format of the ARN
func (arn Arn) String() string {
return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource
}
// Config - represents one single notification configuration
// such as topic, queue or lambda configuration.
type Config struct {
ID string `xml:"Id,omitempty"`
Arn Arn `xml:"-"`
Events []EventType `xml:"Event"`
Filter *Filter `xml:"Filter,omitempty"`
}
// NewConfig creates one notification config and sets the given ARN
func NewConfig(arn Arn) Config {
return Config{Arn: arn, Filter: &Filter{}}
}
// AddEvents adds one or more events to the current notification config
func (t *Config) AddEvents(events ...EventType) {
t.Events = append(t.Events, events...)
}
// AddFilterSuffix sets the suffix configuration to the current notification config
func (t *Config) AddFilterSuffix(suffix string) {
if t.Filter == nil {
t.Filter = &Filter{}
}
newFilterRule := FilterRule{Name: "suffix", Value: suffix}
// Replace any suffix rule if existing and add to the list otherwise
for index := range t.Filter.S3Key.FilterRules {
if t.Filter.S3Key.FilterRules[index].Name == "suffix" {
t.Filter.S3Key.FilterRules[index] = newFilterRule
return
}
}
t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
}
// AddFilterPrefix sets the prefix configuration to the current notification config
func (t *Config) AddFilterPrefix(prefix string) {
if t.Filter == nil {
t.Filter = &Filter{}
}
newFilterRule := FilterRule{Name: "prefix", Value: prefix}
// Replace any prefix rule if existing and add to the list otherwise
for index := range t.Filter.S3Key.FilterRules {
if t.Filter.S3Key.FilterRules[index].Name == "prefix" {
t.Filter.S3Key.FilterRules[index] = newFilterRule
return
}
}
t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
}
// EqualEventTypeList tells whether a and b contain the same events
func EqualEventTypeList(a, b []EventType) bool {
if len(a) != len(b) {
return false
}
setA := set.NewStringSet()
for _, i := range a {
setA.Add(string(i))
}
setB := set.NewStringSet()
for _, i := range b {
setB.Add(string(i))
}
return setA.Difference(setB).IsEmpty()
}
// EqualFilterRuleList tells whether a and b contain the same filters
func EqualFilterRuleList(a, b []FilterRule) bool {
if len(a) != len(b) {
return false
}
setA := set.NewStringSet()
for _, i := range a {
setA.Add(fmt.Sprintf("%s-%s", i.Name, i.Value))
}
setB := set.NewStringSet()
for _, i := range b {
setB.Add(fmt.Sprintf("%s-%s", i.Name, i.Value))
}
return setA.Difference(setB).IsEmpty()
}
// Equal returns whether this `Config` is equal to another defined by the passed parameters
func (t *Config) Equal(events []EventType, prefix, suffix string) bool {
//Compare events
passEvents := EqualEventTypeList(t.Events, events)
//Compare filters
var newFilter []FilterRule
if prefix != "" {
newFilter = append(newFilter, FilterRule{Name: "prefix", Value: prefix})
}
if suffix != "" {
newFilter = append(newFilter, FilterRule{Name: "suffix", Value: suffix})
}
passFilters := EqualFilterRuleList(t.Filter.S3Key.FilterRules, newFilter)
// if it matches events and filters, mark the index for deletion
return passEvents && passFilters
}
// TopicConfig carries one single topic notification configuration
type TopicConfig struct {
Config
Topic string `xml:"Topic"`
}
// QueueConfig carries one single queue notification configuration
type QueueConfig struct {
Config
Queue string `xml:"Queue"`
}
// LambdaConfig carries one single cloudfunction notification configuration
type LambdaConfig struct {
Config
Lambda string `xml:"CloudFunction"`
}
// Configuration - the struct that represents the whole XML to be sent to the web service
type Configuration struct {
XMLName xml.Name `xml:"NotificationConfiguration"`
LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"`
TopicConfigs []TopicConfig `xml:"TopicConfiguration"`
QueueConfigs []QueueConfig `xml:"QueueConfiguration"`
}
// AddTopic adds a given topic config to the general bucket notification config
func (b *Configuration) AddTopic(topicConfig Config) bool {
newTopicConfig := TopicConfig{Config: topicConfig, Topic: topicConfig.Arn.String()}
for _, n := range b.TopicConfigs {
// If new config matches existing one
if n.Topic == newTopicConfig.Arn.String() && newTopicConfig.Filter == n.Filter {
existingConfig := set.NewStringSet()
for _, v := range n.Events {
existingConfig.Add(string(v))
}
newConfig := set.NewStringSet()
for _, v := range topicConfig.Events {
newConfig.Add(string(v))
}
if !newConfig.Intersection(existingConfig).IsEmpty() {
return false
}
}
}
b.TopicConfigs = append(b.TopicConfigs, newTopicConfig)
return true
}
// AddQueue adds a given queue config to the general bucket notification config
func (b *Configuration) AddQueue(queueConfig Config) bool {
newQueueConfig := QueueConfig{Config: queueConfig, Queue: queueConfig.Arn.String()}
for _, n := range b.QueueConfigs {
if n.Queue == newQueueConfig.Arn.String() && newQueueConfig.Filter == n.Filter {
existingConfig := set.NewStringSet()
for _, v := range n.Events {
existingConfig.Add(string(v))
}
newConfig := set.NewStringSet()
for _, v := range queueConfig.Events {
newConfig.Add(string(v))
}
if !newConfig.Intersection(existingConfig).IsEmpty() {
return false
}
}
}
b.QueueConfigs = append(b.QueueConfigs, newQueueConfig)
return true
}
// AddLambda adds a given lambda config to the general bucket notification config
func (b *Configuration) AddLambda(lambdaConfig Config) bool {
newLambdaConfig := LambdaConfig{Config: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
for _, n := range b.LambdaConfigs {
if n.Lambda == newLambdaConfig.Arn.String() && newLambdaConfig.Filter == n.Filter {
existingConfig := set.NewStringSet()
for _, v := range n.Events {
existingConfig.Add(string(v))
}
newConfig := set.NewStringSet()
for _, v := range lambdaConfig.Events {
newConfig.Add(string(v))
}
if !newConfig.Intersection(existingConfig).IsEmpty() {
return false
}
}
}
b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig)
return true
}
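// A setup sketch (the ARN fields, bucket name and the client call that applies
// the configuration are placeholders/assumptions): send ".jpg" object-created
// events to a single queue target.
//
//	arn := notification.NewArn("minio", "sqs", "", "1", "webhook")
//	queueCfg := notification.NewConfig(arn)
//	queueCfg.AddEvents(notification.ObjectCreatedAll)
//	queueCfg.AddFilterSuffix(".jpg")
//	var bucketCfg notification.Configuration
//	bucketCfg.AddQueue(queueCfg)
//	err := client.SetBucketNotification(ctx, "my-bucket", bucketCfg)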
// RemoveTopicByArn removes all topic configurations that match the exact specified ARN
func (b *Configuration) RemoveTopicByArn(arn Arn) {
var topics []TopicConfig
for _, topic := range b.TopicConfigs {
if topic.Topic != arn.String() {
topics = append(topics, topic)
}
}
b.TopicConfigs = topics
}
// ErrNoConfigMatch is returned when a notification configuration (sqs,sns,lambda) is not found when trying to delete
var ErrNoConfigMatch = errors.New("no notification configuration matched")
// RemoveTopicByArnEventsPrefixSuffix removes a topic configuration that exactly matches the specified ARN, events, prefix and suffix
func (b *Configuration) RemoveTopicByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
removeIndex := -1
for i, v := range b.TopicConfigs {
// if it matches events and filters, mark the index for deletion
if v.Topic == arn.String() && v.Config.Equal(events, prefix, suffix) {
removeIndex = i
break // since we have at most one matching config
}
}
if removeIndex >= 0 {
b.TopicConfigs = append(b.TopicConfigs[:removeIndex], b.TopicConfigs[removeIndex+1:]...)
return nil
}
return ErrNoConfigMatch
}
// RemoveQueueByArn removes all queue configurations that match the exact specified ARN
func (b *Configuration) RemoveQueueByArn(arn Arn) {
var queues []QueueConfig
for _, queue := range b.QueueConfigs {
if queue.Queue != arn.String() {
queues = append(queues, queue)
}
}
b.QueueConfigs = queues
}
// RemoveQueueByArnEventsPrefixSuffix removes a queue configuration that exactly matches the specified ARN, events, prefix and suffix
func (b *Configuration) RemoveQueueByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
removeIndex := -1
for i, v := range b.QueueConfigs {
// if it matches events and filters, mark the index for deletion
if v.Queue == arn.String() && v.Config.Equal(events, prefix, suffix) {
removeIndex = i
break // since we have at most one matching config
}
}
if removeIndex >= 0 {
b.QueueConfigs = append(b.QueueConfigs[:removeIndex], b.QueueConfigs[removeIndex+1:]...)
return nil
}
return ErrNoConfigMatch
}
// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN
func (b *Configuration) RemoveLambdaByArn(arn Arn) {
var lambdas []LambdaConfig
for _, lambda := range b.LambdaConfigs {
if lambda.Lambda != arn.String() {
lambdas = append(lambdas, lambda)
}
}
b.LambdaConfigs = lambdas
}
// RemoveLambdaByArnEventsPrefixSuffix removes a lambda configuration that exactly matches the specified ARN, events, prefix and suffix
func (b *Configuration) RemoveLambdaByArnEventsPrefixSuffix(arn Arn, events []EventType, prefix, suffix string) error {
removeIndex := -1
for i, v := range b.LambdaConfigs {
// if it matches events and filters, mark the index for deletion
if v.Lambda == arn.String() && v.Config.Equal(events, prefix, suffix) {
removeIndex = i
break // since we have at most one matching config
}
}
if removeIndex >= 0 {
b.LambdaConfigs = append(b.LambdaConfigs[:removeIndex], b.LambdaConfigs[removeIndex+1:]...)
return nil
}
return ErrNoConfigMatch
}


@ -0,0 +1,380 @@
/*
* MinIO Client (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package replication
import (
"bytes"
"encoding/xml"
"fmt"
"strconv"
"strings"
"unicode/utf8"
"github.com/rs/xid"
)
var errInvalidFilter = fmt.Errorf("Invalid filter")
// OptionType specifies operation to be performed on config
type OptionType string
const (
// AddOption specifies addition of rule to config
AddOption OptionType = "Add"
// SetOption specifies modification of existing rule to config
SetOption OptionType = "Set"
// RemoveOption specifies rule options are for removing a rule
RemoveOption OptionType = "Remove"
// ImportOption is for getting current config
ImportOption OptionType = "Import"
)
// Options represents options to set a replication configuration rule
type Options struct {
Op OptionType
ID string
Prefix string
RuleStatus string
Priority string
TagString string
StorageClass string
Arn string
}
// Tags returns a slice of tags for a rule
func (opts Options) Tags() []Tag {
var tagList []Tag
tagTokens := strings.Split(opts.TagString, "&")
for _, tok := range tagTokens {
if tok == "" {
break
}
		kv := strings.SplitN(tok, "=", 2)
		if len(kv) != 2 {
			// Skip malformed tokens that have no '=' separator instead of
			// panicking on the missing value below.
			continue
		}
		tagList = append(tagList, Tag{
			Key:   kv[0],
			Value: kv[1],
		})
}
return tagList
}
// Config - replication configuration specified in
// https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html
type Config struct {
XMLName xml.Name `xml:"ReplicationConfiguration" json:"-"`
Rules []Rule `xml:"Rule" json:"Rules"`
Role string `xml:"Role" json:"Role"`
}
// Empty returns true if config is not set
func (c *Config) Empty() bool {
return len(c.Rules) == 0
}
// AddRule adds a new rule to existing replication config. If a rule exists with the
// same ID, then the rule is replaced.
func (c *Config) AddRule(opts Options) error {
tags := opts.Tags()
andVal := And{
Tags: opts.Tags(),
}
filter := Filter{Prefix: opts.Prefix}
// only a single tag is set.
if opts.Prefix == "" && len(tags) == 1 {
filter.Tag = tags[0]
}
// both prefix and tag are present
if len(andVal.Tags) > 1 || opts.Prefix != "" {
filter.And = andVal
filter.And.Prefix = opts.Prefix
filter.Prefix = ""
}
if opts.ID == "" {
opts.ID = xid.New().String()
}
var status Status
// toggle rule status for edit option
switch opts.RuleStatus {
case "enable":
status = Enabled
case "disable":
status = Disabled
}
arnStr := opts.Arn
if opts.Arn == "" {
arnStr = c.Role
}
tokens := strings.Split(arnStr, ":")
if len(tokens) != 6 {
return fmt.Errorf("invalid format for replication Arn")
}
if c.Role == "" { // for new configurations
c.Role = opts.Arn
}
priority, err := strconv.Atoi(opts.Priority)
if err != nil {
return err
}
newRule := Rule{
ID: opts.ID,
Priority: priority,
Status: status,
Filter: filter,
Destination: Destination{
Bucket: fmt.Sprintf("arn:aws:s3:::%s", tokens[5]),
StorageClass: opts.StorageClass,
},
DeleteMarkerReplication: DeleteMarkerReplication{Status: Disabled},
}
ruleFound := false
for i, rule := range c.Rules {
if rule.Priority == newRule.Priority && rule.ID != newRule.ID {
return fmt.Errorf("Priority must be unique. Replication configuration already has a rule with this priority")
}
if rule.Destination.Bucket != newRule.Destination.Bucket {
return fmt.Errorf("The destination bucket must be same for all rules")
}
if rule.ID != newRule.ID {
continue
}
if opts.Priority == "" && rule.ID == newRule.ID {
// inherit priority from existing rule, required field on server
newRule.Priority = rule.Priority
}
if opts.RuleStatus == "" {
newRule.Status = rule.Status
}
c.Rules[i] = newRule
ruleFound = true
break
}
// validate rule after overlaying priority for pre-existing rule being disabled.
if err := newRule.Validate(); err != nil {
return err
}
if !ruleFound && opts.Op == SetOption {
return fmt.Errorf("Rule with ID %s not found in replication configuration", opts.ID)
}
if !ruleFound {
c.Rules = append(c.Rules, newRule)
}
return nil
}
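// A construction sketch (the target ARN and priority are placeholders; pushing
// the resulting Config to a bucket is assumed to go through the client's
// replication call): add a single enabled rule replicating keys under "docs/".
//
//	var cfg replication.Config
//	err := cfg.AddRule(replication.Options{
//		Op:         replication.AddOption,
//		Priority:   "1",
//		RuleStatus: "enable",
//		Prefix:     "docs/",
//		Arn:        "arn:minio:replication:us-east-1:1234:destination-bucket",
//	})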
// RemoveRule removes a rule from replication config.
func (c *Config) RemoveRule(opts Options) error {
var newRules []Rule
for _, rule := range c.Rules {
if rule.ID != opts.ID {
newRules = append(newRules, rule)
}
}
if len(newRules) == 0 {
return fmt.Errorf("Replication configuration should have at least one rule")
}
c.Rules = newRules
return nil
}
// Rule - a rule for replication configuration.
type Rule struct {
XMLName xml.Name `xml:"Rule" json:"-"`
ID string `xml:"ID,omitempty"`
Status Status `xml:"Status"`
Priority int `xml:"Priority"`
DeleteMarkerReplication DeleteMarkerReplication `xml:"DeleteMarkerReplication"`
Destination Destination `xml:"Destination"`
Filter Filter `xml:"Filter" json:"Filter"`
}
// Validate validates the rule for correctness
func (r Rule) Validate() error {
if err := r.validateID(); err != nil {
return err
}
if err := r.validateStatus(); err != nil {
return err
}
if err := r.validateFilter(); err != nil {
return err
}
if r.Priority < 0 && r.Status == Enabled {
return fmt.Errorf("Priority must be set for the rule")
}
return nil
}
// validateID - checks if ID is valid or not.
func (r Rule) validateID() error {
// cannot be longer than 255 characters
if len(r.ID) > 255 {
return fmt.Errorf("ID must be less than 255 characters")
}
return nil
}
// validateStatus - checks if status is valid or not.
func (r Rule) validateStatus() error {
// Status can't be empty
if len(r.Status) == 0 {
return fmt.Errorf("status cannot be empty")
}
// Status must be one of Enabled or Disabled
if r.Status != Enabled && r.Status != Disabled {
return fmt.Errorf("status must be set to either Enabled or Disabled")
}
return nil
}
func (r Rule) validateFilter() error {
if err := r.Filter.Validate(); err != nil {
return err
}
return nil
}
// Prefix - a rule can either have prefix under <filter></filter> or under
// <filter><and></and></filter>. This method returns the prefix from the
// location where it is available
func (r Rule) Prefix() string {
if r.Filter.Prefix != "" {
return r.Filter.Prefix
}
return r.Filter.And.Prefix
}
// Tags - a rule can either have tag under <filter></filter> or under
// <filter><and></and></filter>. This method returns all the tags from the
// rule in the format tag1=value1&tag2=value2
func (r Rule) Tags() string {
if len(r.Filter.And.Tags) != 0 {
var buf bytes.Buffer
for _, t := range r.Filter.And.Tags {
if buf.Len() > 0 {
buf.WriteString("&")
}
buf.WriteString(t.String())
}
return buf.String()
}
return ""
}
// Filter - a filter for a replication configuration Rule.
type Filter struct {
XMLName xml.Name `xml:"Filter" json:"-"`
Prefix string `json:"Prefix,omitempty"`
And And `xml:"And,omitempty" json:"And,omitempty"`
Tag Tag `xml:"Tag,omitempty" json:"Tag,omitempty"`
}
// Validate - validates the filter element
func (f Filter) Validate() error {
// A Filter must have exactly one of Prefix, Tag, or And specified.
if !f.And.isEmpty() {
if f.Prefix != "" {
return errInvalidFilter
}
if !f.Tag.IsEmpty() {
return errInvalidFilter
}
}
if f.Prefix != "" {
if !f.Tag.IsEmpty() {
return errInvalidFilter
}
}
if !f.Tag.IsEmpty() {
if err := f.Tag.Validate(); err != nil {
return err
}
}
return nil
}
// Tag - a tag for a replication configuration Rule filter.
type Tag struct {
XMLName xml.Name `json:"-"`
Key string `xml:"Key,omitempty" json:"Key,omitempty"`
Value string `xml:"Value,omitempty" json:"Value,omitempty"`
}
func (tag Tag) String() string {
return tag.Key + "=" + tag.Value
}
// IsEmpty returns whether this tag is empty or not.
func (tag Tag) IsEmpty() bool {
return tag.Key == ""
}
// Validate checks this tag.
func (tag Tag) Validate() error {
if len(tag.Key) == 0 || utf8.RuneCountInString(tag.Key) > 128 {
return fmt.Errorf("Invalid Tag Key")
}
if utf8.RuneCountInString(tag.Value) > 256 {
return fmt.Errorf("Invalid Tag Value")
}
return nil
}
// Destination - destination in ReplicationConfiguration.
type Destination struct {
XMLName xml.Name `xml:"Destination" json:"-"`
Bucket string `xml:"Bucket" json:"Bucket"`
StorageClass string `xml:"StorageClass,omitempty" json:"StorageClass,omitempty"`
}
// And - a tag to combine a prefix and multiple tags for replication configuration rule.
type And struct {
XMLName xml.Name `xml:"And,omitempty" json:"-"`
Prefix string `xml:"Prefix,omitempty" json:"Prefix,omitempty"`
Tags []Tag `xml:"Tag,omitempty" json:"Tags,omitempty"`
}
// isEmpty returns true if Tags field is null
func (a And) isEmpty() bool {
return len(a.Tags) == 0 && a.Prefix == ""
}
// Status represents Enabled/Disabled status
type Status string
// Supported status types
const (
Enabled Status = "Enabled"
Disabled Status = "Disabled"
)
// DeleteMarkerReplication - whether delete markers are replicated - https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html
type DeleteMarkerReplication struct {
Status Status `xml:"Status" json:"Status"` // should be set to "Disabled" by default
}
// IsEmpty returns true if DeleteMarkerReplication is not set
func (d DeleteMarkerReplication) IsEmpty() bool {
return len(d.Status) == 0
}


@ -0,0 +1,384 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3utils
import (
"bytes"
"encoding/hex"
"errors"
"net"
"net/url"
"regexp"
"sort"
"strings"
"unicode/utf8"
)
// Sentinel URL is the default url value which is invalid.
var sentinelURL = url.URL{}
// IsValidDomain validates if input string is a valid domain name.
func IsValidDomain(host string) bool {
// See RFC 1035, RFC 3696.
host = strings.TrimSpace(host)
if len(host) == 0 || len(host) > 255 {
return false
}
// host cannot start or end with "-"
if host[len(host)-1:] == "-" || host[:1] == "-" {
return false
}
// host cannot start or end with "_"
if host[len(host)-1:] == "_" || host[:1] == "_" {
return false
}
// host cannot start with a "."
if host[:1] == "." {
return false
}
// All non alphanumeric characters are invalid.
if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
return false
}
	// No need to regexp match, since the list is non-exhaustive.
	// We let it through here and let it fail later.
return true
}
// IsValidIP parses input string for ip address validity.
func IsValidIP(ip string) bool {
return net.ParseIP(ip) != nil
}
// IsVirtualHostSupported - verifies if bucketName can be part of
// virtual host. Currently only Amazon S3, Google Cloud Storage and
// Aliyun OSS would support this.
func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
if endpointURL == sentinelURL {
return false
}
// bucketName can be valid but '.' in the hostname will fail SSL
// certificate validation. So do not use host-style for such buckets.
if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
return false
}
// Return true for all other cases
return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL) || IsAliyunOSSEndpoint(endpointURL)
}
// Refer for region styles - https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
// amazonS3HostHyphen - regular expression used to determine if an arg is s3 host in hyphenated style.
var amazonS3HostHyphen = regexp.MustCompile(`^s3-(.*?).amazonaws.com$`)
// amazonS3HostDualStack - regular expression used to determine if an arg is s3 host dualstack.
var amazonS3HostDualStack = regexp.MustCompile(`^s3.dualstack.(.*?).amazonaws.com$`)
// amazonS3HostDot - regular expression used to determine if an arg is s3 host in . style.
var amazonS3HostDot = regexp.MustCompile(`^s3.(.*?).amazonaws.com$`)
// amazonS3ChinaHost - regular expression used to determine if the arg is s3 china host.
var amazonS3ChinaHost = regexp.MustCompile(`^s3.(cn.*?).amazonaws.com.cn$`)
// Regular expression used to determine if the arg is elb host.
var elbAmazonRegex = regexp.MustCompile(`elb(.*?).amazonaws.com$`)
// Regular expression used to determine if the arg is elb host in china.
var elbAmazonCnRegex = regexp.MustCompile(`elb(.*?).amazonaws.com.cn$`)
// GetRegionFromURL - returns a region from url host.
func GetRegionFromURL(endpointURL url.URL) string {
if endpointURL == sentinelURL {
return ""
}
if endpointURL.Host == "s3-external-1.amazonaws.com" {
return ""
}
if IsAmazonGovCloudEndpoint(endpointURL) {
return "us-gov-west-1"
}
// if elb's are used we cannot calculate which region it may be, just return empty.
if elbAmazonRegex.MatchString(endpointURL.Host) || elbAmazonCnRegex.MatchString(endpointURL.Host) {
return ""
}
parts := amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host)
if len(parts) > 1 {
return parts[1]
}
parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host)
if len(parts) > 1 {
return parts[1]
}
parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host)
if len(parts) > 1 {
return parts[1]
}
parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host)
if len(parts) > 1 {
return parts[1]
}
return ""
}
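// For example (both hosts are standard AWS S3 endpoints):
//
//	u, _ := url.Parse("https://s3.us-east-2.amazonaws.com")
//	s3utils.GetRegionFromURL(*u) // "us-east-2"
//	u, _ = url.Parse("https://s3-eu-west-1.amazonaws.com")
//	s3utils.GetRegionFromURL(*u) // "eu-west-1"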
// IsAliyunOSSEndpoint - Match if it is exactly Aliyun OSS endpoint.
func IsAliyunOSSEndpoint(endpointURL url.URL) bool {
return strings.HasSuffix(endpointURL.Host, "aliyuncs.com")
}
// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
func IsAmazonEndpoint(endpointURL url.URL) bool {
if endpointURL.Host == "s3-external-1.amazonaws.com" || endpointURL.Host == "s3.amazonaws.com" {
return true
}
return GetRegionFromURL(endpointURL) != ""
}
// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {
return false
}
return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" ||
IsAmazonFIPSGovCloudEndpoint(endpointURL))
}
// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint.
// See https://aws.amazon.com/compliance/fips.
func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {
return false
}
return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" ||
endpointURL.Host == "s3-fips.dualstack.us-gov-west-1.amazonaws.com"
}
// IsAmazonFIPSUSEastWestEndpoint - Match if it is exactly Amazon S3 FIPS US East/West endpoint.
// See https://aws.amazon.com/compliance/fips.
func IsAmazonFIPSUSEastWestEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {
return false
}
switch endpointURL.Host {
case "s3-fips.us-east-2.amazonaws.com":
case "s3-fips.dualstack.us-west-1.amazonaws.com":
case "s3-fips.dualstack.us-west-2.amazonaws.com":
case "s3-fips.dualstack.us-east-2.amazonaws.com":
case "s3-fips.dualstack.us-east-1.amazonaws.com":
case "s3-fips.us-west-1.amazonaws.com":
case "s3-fips.us-west-2.amazonaws.com":
case "s3-fips.us-east-1.amazonaws.com":
default:
return false
}
return true
}
// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint.
// See https://aws.amazon.com/compliance/fips.
func IsAmazonFIPSEndpoint(endpointURL url.URL) bool {
return IsAmazonFIPSUSEastWestEndpoint(endpointURL) || IsAmazonFIPSGovCloudEndpoint(endpointURL)
}
// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
func IsGoogleEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {
return false
}
return endpointURL.Host == "storage.googleapis.com"
}
// Expects ASCII-encoded strings - from the output of EncodePath
func percentEncodeSlash(s string) string {
return strings.Replace(s, "/", "%2F", -1)
}
// QueryEncode - encodes query values in their URL encoded form. In
// addition to the percent encoding performed by EncodePath() used
// here, it also percent encodes '/' (forward slash)
func QueryEncode(v url.Values) string {
if v == nil {
return ""
}
var buf bytes.Buffer
keys := make([]string, 0, len(v))
for k := range v {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
vs := v[k]
prefix := percentEncodeSlash(EncodePath(k)) + "="
for _, v := range vs {
if buf.Len() > 0 {
buf.WriteByte('&')
}
buf.WriteString(prefix)
buf.WriteString(percentEncodeSlash(EncodePath(v)))
}
}
return buf.String()
}
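// For example, keys are emitted in sorted order and '/' is percent-encoded:
//
//	v := url.Values{}
//	v.Set("prefix", "photos/2020")
//	v.Set("delimiter", "/")
//	s3utils.QueryEncode(v) // "delimiter=%2F&prefix=photos%2F2020"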
// TagDecode - decodes canonical tag into map of key and value.
func TagDecode(ctag string) map[string]string {
if ctag == "" {
return map[string]string{}
}
tags := strings.Split(ctag, "&")
tagMap := make(map[string]string, len(tags))
var err error
for _, tag := range tags {
kvs := strings.SplitN(tag, "=", 2)
if len(kvs) == 0 {
return map[string]string{}
}
if len(kvs) == 1 {
return map[string]string{}
}
tagMap[kvs[0]], err = url.PathUnescape(kvs[1])
if err != nil {
continue
}
}
return tagMap
}
// TagEncode - encodes tag values in their URL encoded form. In
// addition to the percent encoding performed by EncodePath() used
// here, it also percent encodes '/' (forward slash)
func TagEncode(tags map[string]string) string {
if tags == nil {
return ""
}
values := url.Values{}
for k, v := range tags {
values[k] = []string{v}
}
return QueryEncode(values)
}
// if the object name matches the reserved character set, there is no need to encode it
var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
// EncodePath encodes a string from its UTF-8 byte representation to percent-encoded (hex) escape sequences
//
// This is necessary since the regular url.Parse() and url.Encode() functions do not fully support UTF-8 -
// non-English characters cannot be parsed correctly due to the way url.Encode() is written
//
// This function, on the other hand, is a direct replacement for the url.Encode() technique and supports
// pretty much every UTF-8 character.
func EncodePath(pathName string) string {
if reservedObjectNames.MatchString(pathName) {
return pathName
}
var encodedPathname string
for _, s := range pathName {
if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
encodedPathname = encodedPathname + string(s)
continue
}
switch s {
case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
encodedPathname = encodedPathname + string(s)
continue
default:
			runeLen := utf8.RuneLen(s)
			if runeLen < 0 {
				// if utf8 cannot convert, return the same string as-is
				return pathName
			}
			u := make([]byte, runeLen)
utf8.EncodeRune(u, s)
for _, r := range u {
hex := hex.EncodeToString([]byte{r})
encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
}
}
}
return encodedPathname
}
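// For example, reserved characters pass through while other runes are
// percent-encoded byte by byte:
//
//	s3utils.EncodePath("photos/2020/café.jpg") // "photos/2020/caf%C3%A9.jpg"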
// We support '.' with bucket names but we fallback to using path
// style requests instead for such buckets.
var (
validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`)
validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
)
// Common checker for both stricter and basic validation.
func checkBucketNameCommon(bucketName string, strict bool) (err error) {
if strings.TrimSpace(bucketName) == "" {
return errors.New("Bucket name cannot be empty")
}
if len(bucketName) < 3 {
return errors.New("Bucket name cannot be shorter than 3 characters")
}
if len(bucketName) > 63 {
return errors.New("Bucket name cannot be longer than 63 characters")
}
if ipAddress.MatchString(bucketName) {
return errors.New("Bucket name cannot be an ip address")
}
if strings.Contains(bucketName, "..") || strings.Contains(bucketName, ".-") || strings.Contains(bucketName, "-.") {
return errors.New("Bucket name contains invalid characters")
}
if strict {
if !validBucketNameStrict.MatchString(bucketName) {
err = errors.New("Bucket name contains invalid characters")
}
return err
}
if !validBucketName.MatchString(bucketName) {
err = errors.New("Bucket name contains invalid characters")
}
return err
}
// CheckValidBucketName - checks if we have a valid input bucket name.
func CheckValidBucketName(bucketName string) (err error) {
return checkBucketNameCommon(bucketName, false)
}
// CheckValidBucketNameStrict - checks if we have a valid input bucket name.
// This is a stricter version.
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func CheckValidBucketNameStrict(bucketName string) (err error) {
return checkBucketNameCommon(bucketName, true)
}
// CheckValidObjectNamePrefix - checks if we have a valid input object name prefix.
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
func CheckValidObjectNamePrefix(objectName string) error {
if len(objectName) > 1024 {
return errors.New("Object name cannot be longer than 1024 characters")
}
if !utf8.ValidString(objectName) {
return errors.New("Object name with non UTF-8 strings are not supported")
}
return nil
}
// CheckValidObjectName - checks if we have a valid input object name.
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html
func CheckValidObjectName(objectName string) error {
if strings.TrimSpace(objectName) == "" {
return errors.New("Object name cannot be empty")
}
return CheckValidObjectNamePrefix(objectName)
}
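// For example:
//
//	s3utils.CheckValidBucketName("my-bucket")       // nil
//	s3utils.CheckValidBucketName("My_Bucket")       // nil (relaxed rules allow '_' and upper case)
//	s3utils.CheckValidBucketNameStrict("My_Bucket") // error (invalid characters under strict rules)
//	s3utils.CheckValidObjectName("")                // error (object name cannot be empty)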


@ -0,0 +1,200 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package set
import (
"fmt"
"sort"
jsoniter "github.com/json-iterator/go"
)
// StringSet - uses map as set of strings.
type StringSet map[string]struct{}
var json = jsoniter.ConfigCompatibleWithStandardLibrary
// ToSlice - returns StringSet as string slice.
func (set StringSet) ToSlice() []string {
keys := make([]string, 0, len(set))
for k := range set {
keys = append(keys, k)
}
sort.Strings(keys)
return keys
}
// IsEmpty - returns whether the set is empty or not.
func (set StringSet) IsEmpty() bool {
return len(set) == 0
}
// Add - adds string to the set.
func (set StringSet) Add(s string) {
set[s] = struct{}{}
}
// Remove - removes string in the set. It does nothing if string does not exist in the set.
func (set StringSet) Remove(s string) {
delete(set, s)
}
// Contains - checks if string is in the set.
func (set StringSet) Contains(s string) bool {
_, ok := set[s]
return ok
}
// FuncMatch - returns a new set containing each value that passes the match function.
// A 'matchFn' should accept an element of the set as its first argument and
// 'matchString' as its second argument. It may apply any logic to compare the
// two arguments and should return true to include the element in the output
// set; otherwise the element is ignored.
func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet {
nset := NewStringSet()
for k := range set {
if matchFn(k, matchString) {
nset.Add(k)
}
}
return nset
}
// ApplyFunc - returns a new set containing each value processed by 'applyFn'.
// An 'applyFn' should accept an element of the set as its argument and return
// a processed string; it may apply any logic to produce that string.
func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet {
nset := NewStringSet()
for k := range set {
nset.Add(applyFn(k))
}
return nset
}
// Equals - checks whether given set is equal to current set or not.
func (set StringSet) Equals(sset StringSet) bool {
// If length of set is not equal to length of given set, the
// set is not equal to given set.
if len(set) != len(sset) {
return false
}
// As both sets are equal in length, check each elements are equal.
for k := range set {
if _, ok := sset[k]; !ok {
return false
}
}
return true
}
// Intersection - returns the intersection with given set as new set.
func (set StringSet) Intersection(sset StringSet) StringSet {
nset := NewStringSet()
for k := range set {
if _, ok := sset[k]; ok {
nset.Add(k)
}
}
return nset
}
// Difference - returns the difference with given set as new set.
func (set StringSet) Difference(sset StringSet) StringSet {
nset := NewStringSet()
for k := range set {
if _, ok := sset[k]; !ok {
nset.Add(k)
}
}
return nset
}
// Union - returns the union with given set as new set.
func (set StringSet) Union(sset StringSet) StringSet {
nset := NewStringSet()
for k := range set {
nset.Add(k)
}
for k := range sset {
nset.Add(k)
}
return nset
}
// MarshalJSON - converts to JSON data.
func (set StringSet) MarshalJSON() ([]byte, error) {
return json.Marshal(set.ToSlice())
}
// UnmarshalJSON - parses JSON data and creates a new set with it.
// If 'data' contains a JSON string array, the set contains each string.
// If 'data' contains a JSON string, the set contains the string as one element.
// If 'data' contains any other JSON type, a JSON parse error is returned.
func (set *StringSet) UnmarshalJSON(data []byte) error {
sl := []string{}
var err error
if err = json.Unmarshal(data, &sl); err == nil {
*set = make(StringSet)
for _, s := range sl {
set.Add(s)
}
} else {
var s string
if err = json.Unmarshal(data, &s); err == nil {
*set = make(StringSet)
set.Add(s)
}
}
return err
}
// String - returns printable string of the set.
func (set StringSet) String() string {
return fmt.Sprintf("%s", set.ToSlice())
}
// NewStringSet - creates new string set.
func NewStringSet() StringSet {
return make(StringSet)
}
// CreateStringSet - creates new string set with given string values.
func CreateStringSet(sl ...string) StringSet {
set := make(StringSet)
for _, k := range sl {
set.Add(k)
}
return set
}
// CopyStringSet - returns copy of given set.
func CopyStringSet(set StringSet) StringSet {
nset := NewStringSet()
for k, v := range set {
nset[k] = v
}
return nset
}
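
For orientation, a minimal usage sketch of the StringSet API vendored above. This is illustrative only, not part of the vendored file, and it assumes the file lives in minio-go's pkg/set package.

package main

import (
	"fmt"
	"strings"

	"github.com/minio/minio-go/v7/pkg/set"
)

func main() {
	a := set.CreateStringSet("s3:GetObject", "s3:PutObject", "s3:ListBucket")
	b := set.CreateStringSet("s3:GetObject", "s3:DeleteObject")

	// Set algebra on action names; ToSlice returns sorted keys.
	fmt.Println(a.Intersection(b).ToSlice()) // [s3:GetObject]
	fmt.Println(a.Difference(b).ToSlice())   // [s3:ListBucket s3:PutObject]

	// FuncMatch filters with a caller-supplied predicate.
	puts := a.FuncMatch(func(elem, match string) bool {
		return strings.HasPrefix(elem, match)
	}, "s3:Put")
	fmt.Println(puts) // [s3:PutObject]
}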


@@ -0,0 +1,306 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package signer
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
)
// Reference for constants used below -
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming
const (
streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD"
emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
payloadChunkSize = 64 * 1024
chunkSigConstLen = 17 // ";chunk-signature="
signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2"
crlfLen = 2 // CRLF
)
// Request headers to be ignored while calculating seed signature for
// a request.
var ignoredStreamingHeaders = map[string]bool{
"Authorization": true,
"User-Agent": true,
"Content-Type": true,
}
// getSignedChunkLength - calculates the length of chunk metadata
func getSignedChunkLength(chunkDataSize int64) int64 {
return int64(len(fmt.Sprintf("%x", chunkDataSize))) +
chunkSigConstLen +
signatureStrLen +
crlfLen +
chunkDataSize +
crlfLen
}
// getStreamLength - calculates the length of the overall stream (data + metadata)
func getStreamLength(dataLen, chunkSize int64) int64 {
if dataLen <= 0 {
return 0
}
chunksCount := int64(dataLen / chunkSize)
remainingBytes := int64(dataLen % chunkSize)
streamLen := int64(0)
streamLen += chunksCount * getSignedChunkLength(chunkSize)
if remainingBytes > 0 {
streamLen += getSignedChunkLength(remainingBytes)
}
streamLen += getSignedChunkLength(0)
return streamLen
}
// buildChunkStringToSign - returns the string to sign given chunk data
// and previous signature.
func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string {
stringToSignParts := []string{
streamingPayloadHdr,
t.Format(iso8601DateFormat),
getScope(region, t, ServiceTypeS3),
previousSig,
emptySHA256,
hex.EncodeToString(sum256(chunkData)),
}
return strings.Join(stringToSignParts, "\n")
}
// prepareStreamingRequest - prepares a request with appropriate
// headers before computing the seed signature.
func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int64, timestamp time.Time) {
// Set x-amz-content-sha256 header.
req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm)
if sessionToken != "" {
req.Header.Set("X-Amz-Security-Token", sessionToken)
}
req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
// Set content length with streaming signature for each chunk included.
req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize))
req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10))
}
// buildChunkHeader - returns the chunk header.
// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n
func buildChunkHeader(chunkLen int64, signature string) []byte {
return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n")
}
// buildChunkSignature - returns chunk signature for a given chunk and previous signature.
func buildChunkSignature(chunkData []byte, reqTime time.Time, region,
previousSignature, secretAccessKey string) string {
chunkStringToSign := buildChunkStringToSign(reqTime, region,
previousSignature, chunkData)
signingKey := getSigningKey(secretAccessKey, region, reqTime, ServiceTypeS3)
return getSignature(signingKey, chunkStringToSign)
}
// setSeedSignature - computes and sets the seed signature for a given request.
func (s *StreamingReader) setSeedSignature(req *http.Request) {
// Get canonical request
canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders, getHashedPayload(*req))
// Get string to sign from canonical request.
stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest, ServiceTypeS3)
signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime, ServiceTypeS3)
// Calculate signature.
s.seedSignature = getSignature(signingKey, stringToSign)
}
// StreamingReader implements the chunked upload signature as a reader on
// top of req.Body's io.ReadCloser, emitting "chunk header;chunk data" pairs
// repeatedly until the stream is exhausted.
type StreamingReader struct {
accessKeyID string
secretAccessKey string
sessionToken string
region string
prevSignature string
seedSignature string
contentLen int64 // Content-Length from req header
baseReadCloser io.ReadCloser // underlying io.Reader
bytesRead int64 // bytes read from underlying io.Reader
buf bytes.Buffer // holds signed chunk
chunkBuf []byte // holds raw data read from req Body
chunkBufLen int // no. of bytes read so far into chunkBuf
done bool // done reading the underlying reader to EOF
reqTime time.Time
chunkNum int
totalChunks int
lastChunkSize int
}
// signChunk - signs a chunk of chunkLen size read from s.baseReadCloser.
func (s *StreamingReader) signChunk(chunkLen int) {
// Compute chunk signature for next header
signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime,
s.region, s.prevSignature, s.secretAccessKey)
// For next chunk signature computation
s.prevSignature = signature
// Write chunk header into streaming buffer
chunkHdr := buildChunkHeader(int64(chunkLen), signature)
s.buf.Write(chunkHdr)
// Write chunk data into streaming buffer
s.buf.Write(s.chunkBuf[:chunkLen])
// Write the chunk trailer.
s.buf.Write([]byte("\r\n"))
// Reset chunkBufLen for next chunk read.
s.chunkBufLen = 0
s.chunkNum++
}
// setStreamingAuthHeader - builds and sets authorization header value
// for streaming signature.
func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) {
credential := GetCredential(s.accessKeyID, s.region, s.reqTime, ServiceTypeS3)
authParts := []string{
signV4Algorithm + " Credential=" + credential,
"SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders),
"Signature=" + s.seedSignature,
}
// Set authorization header.
auth := strings.Join(authParts, ",")
req.Header.Set("Authorization", auth)
}
// StreamingSignV4 - provides chunked upload signatureV4 support by
// implementing io.Reader.
func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, sessionToken,
region string, dataLen int64, reqTime time.Time) *http.Request {
// Set headers needed for streaming signature.
prepareStreamingRequest(req, sessionToken, dataLen, reqTime)
if req.Body == nil {
req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
}
stReader := &StreamingReader{
baseReadCloser: req.Body,
accessKeyID: accessKeyID,
secretAccessKey: secretAccessKey,
sessionToken: sessionToken,
region: region,
reqTime: reqTime,
chunkBuf: make([]byte, payloadChunkSize),
contentLen: dataLen,
chunkNum: 1,
totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1,
lastChunkSize: int(dataLen % payloadChunkSize),
}
// Add the request headers required for chunk upload signing.
// Compute the seed signature.
stReader.setSeedSignature(req)
// Set the authorization header with the seed signature.
stReader.setStreamingAuthHeader(req)
// Set seed signature as prevSignature for subsequent
// streaming signing process.
stReader.prevSignature = stReader.seedSignature
req.Body = stReader
return req
}
// Read - performs chunked upload signing on the fly, exposing the signed
// stream through the io.Reader interface.
func (s *StreamingReader) Read(buf []byte) (int, error) {
switch {
// After the last chunk is read from underlying reader, we
// never re-fill s.buf.
case s.done:
	// s.buf will be (re-)filled with the next chunk when it holds fewer
	// bytes than asked for.
case s.buf.Len() < len(buf):
s.chunkBufLen = 0
for {
n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:])
// Usually we validate `err` first, but in this case
// we are validating n > 0 for the following reasons.
//
// 1. n > 0, err is one of io.EOF, nil (near end of stream)
// A Reader returning a non-zero number of bytes at the end
// of the input stream may return either err == EOF or err == nil
//
// 2. n == 0, err is io.EOF (actual end of stream)
//
// Callers should always process the n > 0 bytes returned
// before considering the error err.
if n1 > 0 {
s.chunkBufLen += n1
s.bytesRead += int64(n1)
if s.chunkBufLen == payloadChunkSize ||
(s.chunkNum == s.totalChunks-1 &&
s.chunkBufLen == s.lastChunkSize) {
// Sign the chunk and write it to s.buf.
s.signChunk(s.chunkBufLen)
break
}
}
if err != nil {
if err == io.EOF {
// No more data left in baseReader - last chunk.
// Done reading the last chunk from baseReader.
s.done = true
// bytes read from baseReader different than
// content length provided.
if s.bytesRead != s.contentLen {
return 0, fmt.Errorf("http: ContentLength=%d with Body length %d", s.contentLen, s.bytesRead)
}
// Sign the chunk and write it to s.buf.
s.signChunk(0)
break
}
return 0, err
}
}
}
return s.buf.Read(buf)
}
// Close - this method makes underlying io.ReadCloser's Close method available.
func (s *StreamingReader) Close() error {
return s.baseReadCloser.Close()
}
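
A caller-side sketch of how StreamingSignV4 is meant to be used; the endpoint, bucket, region, and credentials below are placeholders, not values taken from this patch.

package main

import (
	"bytes"
	"net/http"
	"time"

	"github.com/minio/minio-go/v7/pkg/signer"
)

// putObjectStreaming uploads payload with a streaming (chunked) signature.
func putObjectStreaming(payload []byte, accessKey, secretKey string) (*http.Response, error) {
	req, err := http.NewRequest(http.MethodPut, "https://play.min.io/mybucket/myobject", bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	// StreamingSignV4 sets the streaming headers and swaps req.Body for a
	// StreamingReader that emits signed 64 KiB chunks plus a final empty chunk.
	req = signer.StreamingSignV4(req, accessKey, secretKey, "", "us-east-1", int64(len(payload)), time.Now().UTC())
	return http.DefaultClient.Do(req)
}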


@@ -0,0 +1,317 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package signer
import (
"bytes"
"crypto/hmac"
"crypto/sha1"
"encoding/base64"
"fmt"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
"time"
"github.com/minio/minio-go/v7/pkg/s3utils"
)
// Signature and API related constants.
const (
signV2Algorithm = "AWS"
)
// Encode input URL path to URL encoded path.
func encodeURL2Path(req *http.Request, virtualHost bool) (path string) {
if virtualHost {
reqHost := getHostAddr(req)
dotPos := strings.Index(reqHost, ".")
if dotPos > -1 {
bucketName := reqHost[:dotPos]
path = "/" + bucketName
path += req.URL.Path
path = s3utils.EncodePath(path)
return
}
}
path = s3utils.EncodePath(req.URL.Path)
return
}
// PreSignV2 - presigns the request in the following style.
// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64, virtualHost bool) *http.Request {
// Presign is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
}
d := time.Now().UTC()
// Find epoch expires when the request will expire.
epochExpires := d.Unix() + expires
// Add expires header if not present.
if expiresStr := req.Header.Get("Expires"); expiresStr == "" {
req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10))
}
// Get presigned string to sign.
stringToSign := preStringToSignV2(req, virtualHost)
hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(stringToSign))
// Calculate signature.
signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
query := req.URL.Query()
// Handle specially for Google Cloud Storage.
if strings.Contains(getHostAddr(&req), ".storage.googleapis.com") {
query.Set("GoogleAccessId", accessKeyID)
} else {
query.Set("AWSAccessKeyId", accessKeyID)
}
// Fill in Expires for presigned query.
query.Set("Expires", strconv.FormatInt(epochExpires, 10))
// Encode query and save.
req.URL.RawQuery = s3utils.QueryEncode(query)
// Save signature finally.
req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature)
// Return.
return &req
}
// PostPresignSignatureV2 - presigned signature for PostPolicy
// request.
func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(policyBase64))
signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
return signature
}
// Authorization = "AWS" + " " + AWSAccessKeyId + ":" + Signature;
// Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) );
//
// StringToSign = HTTP-Verb + "\n" +
// Content-Md5 + "\n" +
// Content-Type + "\n" +
// Date + "\n" +
// CanonicalizedProtocolHeaders +
// CanonicalizedResource;
//
// CanonicalizedResource = [ "/" + Bucket ] +
// <HTTP-Request-URI, from the protocol name up to the query string> +
// [ subresource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
//
// CanonicalizedProtocolHeaders = <described below>
// SignV2 signs the request before Do() (AWS Signature Version 2).
func SignV2(req http.Request, accessKeyID, secretAccessKey string, virtualHost bool) *http.Request {
// Signature calculation is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
}
// Initial time.
d := time.Now().UTC()
// Add date if not present.
if date := req.Header.Get("Date"); date == "" {
req.Header.Set("Date", d.Format(http.TimeFormat))
}
// Calculate HMAC for secretAccessKey.
stringToSign := stringToSignV2(req, virtualHost)
hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(stringToSign))
// Prepare auth header.
authHeader := new(bytes.Buffer)
authHeader.WriteString(fmt.Sprintf("%s %s:", signV2Algorithm, accessKeyID))
encoder := base64.NewEncoder(base64.StdEncoding, authHeader)
encoder.Write(hm.Sum(nil))
encoder.Close()
// Set Authorization header.
req.Header.Set("Authorization", authHeader.String())
return &req
}
// From the Amazon docs:
//
// StringToSign = HTTP-Verb + "\n" +
// Content-Md5 + "\n" +
// Content-Type + "\n" +
// Expires + "\n" +
// CanonicalizedProtocolHeaders +
// CanonicalizedResource;
func preStringToSignV2(req http.Request, virtualHost bool) string {
buf := new(bytes.Buffer)
// Write standard headers.
writePreSignV2Headers(buf, req)
// Write canonicalized protocol headers if any.
writeCanonicalizedHeaders(buf, req)
// Write canonicalized Query resources if any.
writeCanonicalizedResource(buf, req, virtualHost)
return buf.String()
}
// writePreSignV2Headers - write preSign v2 required headers.
func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) {
buf.WriteString(req.Method + "\n")
buf.WriteString(req.Header.Get("Content-Md5") + "\n")
buf.WriteString(req.Header.Get("Content-Type") + "\n")
buf.WriteString(req.Header.Get("Expires") + "\n")
}
// From the Amazon docs:
//
// StringToSign = HTTP-Verb + "\n" +
// Content-Md5 + "\n" +
// Content-Type + "\n" +
// Date + "\n" +
// CanonicalizedProtocolHeaders +
// CanonicalizedResource;
func stringToSignV2(req http.Request, virtualHost bool) string {
buf := new(bytes.Buffer)
// Write standard headers.
writeSignV2Headers(buf, req)
// Write canonicalized protocol headers if any.
writeCanonicalizedHeaders(buf, req)
// Write canonicalized Query resources if any.
writeCanonicalizedResource(buf, req, virtualHost)
return buf.String()
}
// writeSignV2Headers - write signV2 required headers.
func writeSignV2Headers(buf *bytes.Buffer, req http.Request) {
buf.WriteString(req.Method + "\n")
buf.WriteString(req.Header.Get("Content-Md5") + "\n")
buf.WriteString(req.Header.Get("Content-Type") + "\n")
buf.WriteString(req.Header.Get("Date") + "\n")
}
// writeCanonicalizedHeaders - write canonicalized headers.
func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
var protoHeaders []string
vals := make(map[string][]string)
for k, vv := range req.Header {
// All the AMZ headers should be lowercase
lk := strings.ToLower(k)
if strings.HasPrefix(lk, "x-amz") {
protoHeaders = append(protoHeaders, lk)
vals[lk] = vv
}
}
sort.Strings(protoHeaders)
for _, k := range protoHeaders {
buf.WriteString(k)
buf.WriteByte(':')
for idx, v := range vals[k] {
if idx > 0 {
buf.WriteByte(',')
}
if strings.Contains(v, "\n") {
// TODO: "Unfold" long headers that
// span multiple lines (as allowed by
// RFC 2616, section 4.2) by replacing
// the folding white-space (including
// new-line) by a single space.
buf.WriteString(v)
} else {
buf.WriteString(v)
}
}
buf.WriteByte('\n')
}
}
// The AWS S3 Signature V2 calculation rule is given here:
// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign
// Whitelist of sub-resources that will be used in the query string for
// signature V2 calculation. The list must be kept alphabetically sorted.
var resourceList = []string{
"acl",
"delete",
"lifecycle",
"location",
"logging",
"notification",
"partNumber",
"policy",
"replication",
"requestPayment",
"response-cache-control",
"response-content-disposition",
"response-content-encoding",
"response-content-language",
"response-content-type",
"response-expires",
"torrent",
"uploadId",
"uploads",
"versionId",
"versioning",
"versions",
"website",
}
// From the Amazon docs:
//
// CanonicalizedResource = [ "/" + Bucket ] +
// <HTTP-Request-URI, from the protocol name up to the query string> +
// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, virtualHost bool) {
// Save request URL.
requestURL := req.URL
// Get encoded URL path.
buf.WriteString(encodeURL2Path(&req, virtualHost))
if requestURL.RawQuery != "" {
var n int
vals, _ := url.ParseQuery(requestURL.RawQuery)
		// Verify whether any sub-resource queries are present; if so,
		// canonicalize them.
for _, resource := range resourceList {
if vv, ok := vals[resource]; ok && len(vv) > 0 {
n++
// First element
switch n {
case 1:
buf.WriteByte('?')
// The rest
default:
buf.WriteByte('&')
}
buf.WriteString(resource)
// Request parameters
if len(vv[0]) > 0 {
buf.WriteByte('=')
buf.WriteString(vv[0])
}
}
}
}
}
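
A minimal sketch of presigning a GET URL with the V2 signer above; the bucket host, expiry, and credentials are placeholder assumptions.

package main

import (
	"net/http"

	"github.com/minio/minio-go/v7/pkg/signer"
)

// presignGetV2 returns a presigned V2 URL valid for 10 minutes.
func presignGetV2(accessKey, secretKey string) (string, error) {
	req, err := http.NewRequest(http.MethodGet, "https://mybucket.s3.amazonaws.com/myobject", nil)
	if err != nil {
		return "", err
	}
	// virtualHost=true because the bucket name is part of the host name.
	signed := signer.PreSignV2(*req, accessKey, secretKey, 600, true)
	return signed.URL.String(), nil
}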


@@ -0,0 +1,318 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package signer
import (
"bytes"
"encoding/hex"
"net/http"
"sort"
"strconv"
"strings"
"time"
"github.com/minio/minio-go/v7/pkg/s3utils"
)
// Signature and API related constants.
const (
signV4Algorithm = "AWS4-HMAC-SHA256"
iso8601DateFormat = "20060102T150405Z"
yyyymmdd = "20060102"
)
// Different service types
const (
ServiceTypeS3 = "s3"
ServiceTypeSTS = "sts"
)
///
/// Excerpts from @lsegal -
/// https://github.com/aws/aws-sdk-js/issues/659#issuecomment-120477258.
///
/// User-Agent:
///
/// This is ignored from signing because signing this causes
/// problems with generating pre-signed URLs (that are executed
/// by other agents) or when customers pass requests through
/// proxies, which may modify the user-agent.
///
///
/// Authorization:
///
/// Is skipped for obvious reasons
///
var v4IgnoredHeaders = map[string]bool{
"Authorization": true,
"User-Agent": true,
}
// getSigningKey derives the HMAC signing key used to calculate the final signature.
func getSigningKey(secret, loc string, t time.Time, serviceType string) []byte {
date := sumHMAC([]byte("AWS4"+secret), []byte(t.Format(yyyymmdd)))
location := sumHMAC(date, []byte(loc))
service := sumHMAC(location, []byte(serviceType))
signingKey := sumHMAC(service, []byte("aws4_request"))
return signingKey
}
// getSignature returns the final signature in hexadecimal form.
func getSignature(signingKey []byte, stringToSign string) string {
return hex.EncodeToString(sumHMAC(signingKey, []byte(stringToSign)))
}
// getScope generates a scope string from a specific date, an AWS region, and a
// service.
func getScope(location string, t time.Time, serviceType string) string {
scope := strings.Join([]string{
t.Format(yyyymmdd),
location,
serviceType,
"aws4_request",
}, "/")
return scope
}
// GetCredential generates a credential string.
func GetCredential(accessKeyID, location string, t time.Time, serviceType string) string {
scope := getScope(location, t, serviceType)
return accessKeyID + "/" + scope
}
// getHashedPayload gets the hexadecimal value of the SHA256 hash of
// the request payload.
func getHashedPayload(req http.Request) string {
hashedPayload := req.Header.Get("X-Amz-Content-Sha256")
if hashedPayload == "" {
// Presign does not have a payload, use S3 recommended value.
hashedPayload = unsignedPayload
}
return hashedPayload
}
// getCanonicalHeaders generates the list of request headers used for the
// signature.
func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string {
var headers []string
vals := make(map[string][]string)
for k, vv := range req.Header {
if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
continue // ignored header
}
headers = append(headers, strings.ToLower(k))
vals[strings.ToLower(k)] = vv
}
headers = append(headers, "host")
sort.Strings(headers)
var buf bytes.Buffer
// Save all the headers in canonical form <header>:<value> newline
// separated for each header.
for _, k := range headers {
buf.WriteString(k)
buf.WriteByte(':')
switch {
case k == "host":
buf.WriteString(getHostAddr(&req))
fallthrough
default:
for idx, v := range vals[k] {
if idx > 0 {
buf.WriteByte(',')
}
buf.WriteString(signV4TrimAll(v))
}
buf.WriteByte('\n')
}
}
return buf.String()
}
// getSignedHeaders generates all signed request headers,
// i.e. the lexically sorted, semicolon-separated list of lowercase
// request header names.
func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string {
var headers []string
for k := range req.Header {
if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok {
continue // Ignored header found continue.
}
headers = append(headers, strings.ToLower(k))
}
headers = append(headers, "host")
sort.Strings(headers)
return strings.Join(headers, ";")
}
// getCanonicalRequest generates a canonical request in the following style.
//
// canonicalRequest =
// <HTTPMethod>\n
// <CanonicalURI>\n
// <CanonicalQueryString>\n
// <CanonicalHeaders>\n
// <SignedHeaders>\n
// <HashedPayload>
func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool, hashedPayload string) string {
req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
canonicalRequest := strings.Join([]string{
req.Method,
s3utils.EncodePath(req.URL.Path),
req.URL.RawQuery,
getCanonicalHeaders(req, ignoredHeaders),
getSignedHeaders(req, ignoredHeaders),
hashedPayload,
}, "\n")
return canonicalRequest
}
// getStringToSignV4 generates the string to sign from the timestamp, scope, and canonical request.
func getStringToSignV4(t time.Time, location, canonicalRequest, serviceType string) string {
stringToSign := signV4Algorithm + "\n" + t.Format(iso8601DateFormat) + "\n"
stringToSign = stringToSign + getScope(location, t, serviceType) + "\n"
stringToSign = stringToSign + hex.EncodeToString(sum256([]byte(canonicalRequest)))
return stringToSign
}
// PreSignV4 presigns the request, in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
func PreSignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string, expires int64) *http.Request {
// Presign is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
}
// Initial time.
t := time.Now().UTC()
// Get credential string.
credential := GetCredential(accessKeyID, location, t, ServiceTypeS3)
// Get all signed headers.
signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
// Set URL query.
query := req.URL.Query()
query.Set("X-Amz-Algorithm", signV4Algorithm)
query.Set("X-Amz-Date", t.Format(iso8601DateFormat))
query.Set("X-Amz-Expires", strconv.FormatInt(expires, 10))
query.Set("X-Amz-SignedHeaders", signedHeaders)
query.Set("X-Amz-Credential", credential)
// Set session token if available.
if sessionToken != "" {
query.Set("X-Amz-Security-Token", sessionToken)
}
req.URL.RawQuery = query.Encode()
// Get canonical request.
canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, getHashedPayload(req))
// Get string to sign from canonical request.
stringToSign := getStringToSignV4(t, location, canonicalRequest, ServiceTypeS3)
	// Get the HMAC signing key.
signingKey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)
// Calculate signature.
signature := getSignature(signingKey, stringToSign)
// Add signature header to RawQuery.
req.URL.RawQuery += "&X-Amz-Signature=" + signature
return &req
}
// PostPresignSignatureV4 - presigned signature for PostPolicy
// requests.
func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
	// Get the signing key.
signingkey := getSigningKey(secretAccessKey, location, t, ServiceTypeS3)
// Calculate signature.
signature := getSignature(signingkey, policyBase64)
return signature
}
// SignV4STS - signature v4 for STS request.
func SignV4STS(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
return signV4(req, accessKeyID, secretAccessKey, "", location, ServiceTypeSTS)
}
// Internal function called for different service types.
func signV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location, serviceType string) *http.Request {
// Signature calculation is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
}
// Initial time.
t := time.Now().UTC()
// Set x-amz-date.
req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat))
// Set session token if available.
if sessionToken != "" {
req.Header.Set("X-Amz-Security-Token", sessionToken)
}
hashedPayload := getHashedPayload(req)
if serviceType == ServiceTypeSTS {
		// The content sha256 header is not sent with STS requests,
		// but the sha256 of the payload is still expected in the
		// signature calculation.
req.Header.Del("X-Amz-Content-Sha256")
}
// Get canonical request.
canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders, hashedPayload)
// Get string to sign from canonical request.
stringToSign := getStringToSignV4(t, location, canonicalRequest, serviceType)
// Get hmac signing key.
signingKey := getSigningKey(secretAccessKey, location, t, serviceType)
// Get credential string.
credential := GetCredential(accessKeyID, location, t, serviceType)
// Get all signed headers.
signedHeaders := getSignedHeaders(req, v4IgnoredHeaders)
// Calculate signature.
signature := getSignature(signingKey, stringToSign)
// If regular request, construct the final authorization header.
parts := []string{
signV4Algorithm + " Credential=" + credential,
"SignedHeaders=" + signedHeaders,
"Signature=" + signature,
}
// Set authorization header.
auth := strings.Join(parts, ", ")
req.Header.Set("Authorization", auth)
return &req
}
// SignV4 signs the request before Do(), in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
func SignV4(req http.Request, accessKeyID, secretAccessKey, sessionToken, location string) *http.Request {
return signV4(req, accessKeyID, secretAccessKey, sessionToken, location, ServiceTypeS3)
}
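
A sketch of signing a plain PUT with SignV4; the endpoint, region, and credentials are placeholders, and the unsigned-payload sentinel is used only to keep the example short.

package main

import (
	"net/http"
	"strings"

	"github.com/minio/minio-go/v7/pkg/signer"
)

// signedPutV4 signs and sends a small PUT request with Signature V4.
func signedPutV4(accessKey, secretKey string) (*http.Response, error) {
	req, err := http.NewRequest(http.MethodPut, "https://s3.us-east-1.amazonaws.com/mybucket/hello.txt", strings.NewReader("hello"))
	if err != nil {
		return nil, err
	}
	// getHashedPayload reads this header when building the canonical request;
	// "UNSIGNED-PAYLOAD" matches the unsignedPayload constant in utils.go.
	req.Header.Set("X-Amz-Content-Sha256", "UNSIGNED-PAYLOAD")
	signed := signer.SignV4(*req, accessKey, secretKey, "", "us-east-1")
	return http.DefaultClient.Do(signed)
}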


@@ -0,0 +1,59 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2015-2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package signer
import (
"crypto/hmac"
"net/http"
"strings"
"github.com/minio/sha256-simd"
)
// unsignedPayload - value to be set in the X-Amz-Content-Sha256 header when the request payload is not signed.
const unsignedPayload = "UNSIGNED-PAYLOAD"
// sum256 calculates the SHA256 sum of an input byte array.
func sum256(data []byte) []byte {
hash := sha256.New()
hash.Write(data)
return hash.Sum(nil)
}
// sumHMAC calculates the HMAC of the data using the given key.
func sumHMAC(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
// getHostAddr returns host header if available, otherwise returns host from URL
func getHostAddr(req *http.Request) string {
if req.Host != "" {
return req.Host
}
return req.URL.Host
}
// Trim leading and trailing spaces and replace sequential spaces with one space, following Trimall()
// in http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
func signV4TrimAll(input string) string {
// Compress adjacent spaces (a space is determined by
// unicode.IsSpace() internally here) to one space and return
return strings.Join(strings.Fields(input), " ")
}
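
Since signV4TrimAll is unexported, the quickest way to illustrate its whitespace folding is an in-package test sketch (a hypothetical file next to utils.go, not part of this patch):

package signer

import "testing"

// TestSignV4TrimAllSketch documents the canonicalization behaviour:
// runs of spaces and tabs collapse to a single space, edges are trimmed.
func TestSignV4TrimAllSketch(t *testing.T) {
	got := signV4TrimAll("  Multiple   spaces\tand\ttabs ")
	if got != "Multiple spaces and tabs" {
		t.Fatalf("unexpected canonical value: %q", got)
	}
}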

vendor/github.com/minio/minio-go/v7/pkg/sse/sse.go (generated, vendored, 66 lines)

@@ -0,0 +1,66 @@
/*
* MinIO Go Library for Amazon S3 Compatible Cloud Storage
* Copyright 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sse
import "encoding/xml"
// ApplySSEByDefault defines the default encryption configuration, KMS or SSE. To activate
// KMS, SSEAlgorithm needs to be set to "aws:kms".
// MinIO currently does not support KMS.
type ApplySSEByDefault struct {
KmsMasterKeyID string `xml:"KMSMasterKeyID,omitempty"`
SSEAlgorithm string `xml:"SSEAlgorithm"`
}
// Rule layer encapsulates default encryption configuration
type Rule struct {
Apply ApplySSEByDefault `xml:"ApplyServerSideEncryptionByDefault"`
}
// Configuration is the default encryption configuration structure
type Configuration struct {
XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"`
Rules []Rule `xml:"Rule"`
}
// NewConfigurationSSES3 initializes a new SSE-S3 configuration
func NewConfigurationSSES3() *Configuration {
return &Configuration{
Rules: []Rule{
{
Apply: ApplySSEByDefault{
SSEAlgorithm: "AES256",
},
},
},
}
}
// NewConfigurationSSEKMS initializes a new SSE-KMS configuration
func NewConfigurationSSEKMS(kmsMasterKey string) *Configuration {
return &Configuration{
Rules: []Rule{
{
Apply: ApplySSEByDefault{
KmsMasterKeyID: kmsMasterKey,
SSEAlgorithm: "aws:kms",
},
},
},
}
}
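
A small sketch of how the sse package above would be used to render the default SSE-S3 bucket-encryption configuration as XML:

package main

import (
	"encoding/xml"
	"fmt"

	"github.com/minio/minio-go/v7/pkg/sse"
)

func main() {
	// Build the default SSE-S3 (AES256) configuration and print its XML form.
	cfg := sse.NewConfigurationSSES3()
	out, err := xml.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}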

vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go (generated, vendored, 342 lines)

@@ -0,0 +1,342 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package tags
import (
"encoding/xml"
"io"
"net/url"
"strings"
"unicode/utf8"
)
// Error contains tag specific error.
type Error interface {
error
Code() string
}
type errTag struct {
code string
message string
}
// Code contains error code.
func (err errTag) Code() string {
return err.code
}
// Error contains error message.
func (err errTag) Error() string {
return err.message
}
var (
errTooManyObjectTags = &errTag{"BadRequest", "Tags cannot be more than 10"}
errTooManyTags = &errTag{"BadRequest", "Tags cannot be more than 50"}
errInvalidTagKey = &errTag{"InvalidTag", "The TagKey you have provided is invalid"}
errInvalidTagValue = &errTag{"InvalidTag", "The TagValue you have provided is invalid"}
errDuplicateTagKey = &errTag{"InvalidTag", "Cannot provide multiple Tags with the same key"}
)
// Tag comes with limitations as per
// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html and
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html#tag-restrictions
const (
maxKeyLength = 128
maxValueLength = 256
maxObjectTagCount = 10
maxTagCount = 50
)
func checkKey(key string) error {
if len(key) == 0 || utf8.RuneCountInString(key) > maxKeyLength || strings.Contains(key, "&") {
return errInvalidTagKey
}
return nil
}
func checkValue(value string) error {
if utf8.RuneCountInString(value) > maxValueLength || strings.Contains(value, "&") {
return errInvalidTagValue
}
return nil
}
// Tag denotes key and value.
type Tag struct {
Key string `xml:"Key"`
Value string `xml:"Value"`
}
func (tag Tag) String() string {
return tag.Key + "=" + tag.Value
}
// IsEmpty returns whether this tag is empty or not.
func (tag Tag) IsEmpty() bool {
return tag.Key == ""
}
// Validate checks this tag.
func (tag Tag) Validate() error {
if err := checkKey(tag.Key); err != nil {
return err
}
return checkValue(tag.Value)
}
// MarshalXML encodes to XML data.
func (tag Tag) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
if err := tag.Validate(); err != nil {
return err
}
type subTag Tag // to avoid recursively calling MarshalXML()
return e.EncodeElement(subTag(tag), start)
}
// UnmarshalXML decodes XML data to tag.
func (tag *Tag) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
type subTag Tag // to avoid recursively calling UnmarshalXML()
var st subTag
if err := d.DecodeElement(&st, &start); err != nil {
return err
}
if err := Tag(st).Validate(); err != nil {
return err
}
*tag = Tag(st)
return nil
}
// tagSet represents list of unique tags.
type tagSet struct {
tagMap map[string]string
isObject bool
}
func (tags tagSet) String() string {
s := []string{}
for key, value := range tags.tagMap {
s = append(s, key+"="+value)
}
return strings.Join(s, "&")
}
func (tags *tagSet) remove(key string) {
delete(tags.tagMap, key)
}
func (tags *tagSet) set(key, value string, failOnExist bool) error {
if failOnExist {
if _, found := tags.tagMap[key]; found {
return errDuplicateTagKey
}
}
if err := checkKey(key); err != nil {
return err
}
if err := checkValue(value); err != nil {
return err
}
if tags.isObject {
if len(tags.tagMap) == maxObjectTagCount {
return errTooManyObjectTags
}
} else if len(tags.tagMap) == maxTagCount {
return errTooManyTags
}
tags.tagMap[key] = value
return nil
}
func (tags tagSet) toMap() map[string]string {
m := make(map[string]string)
for key, value := range tags.tagMap {
m[key] = value
}
return m
}
// MarshalXML encodes to XML data.
func (tags tagSet) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
tagList := struct {
Tags []Tag `xml:"Tag"`
}{}
for key, value := range tags.tagMap {
tagList.Tags = append(tagList.Tags, Tag{key, value})
}
return e.EncodeElement(tagList, start)
}
// UnmarshalXML decodes XML data to tag list.
func (tags *tagSet) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
tagList := struct {
Tags []Tag `xml:"Tag"`
}{}
if err := d.DecodeElement(&tagList, &start); err != nil {
return err
}
if tags.isObject {
if len(tagList.Tags) > maxObjectTagCount {
return errTooManyObjectTags
}
} else if len(tagList.Tags) > maxTagCount {
return errTooManyTags
}
m := map[string]string{}
for _, tag := range tagList.Tags {
if _, found := m[tag.Key]; found {
return errDuplicateTagKey
}
m[tag.Key] = tag.Value
}
tags.tagMap = m
return nil
}
type tagging struct {
XMLName xml.Name `xml:"Tagging"`
TagSet *tagSet `xml:"TagSet"`
}
// Tags is list of tags of XML request/response as per
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html#API_GetBucketTagging_RequestBody
type Tags tagging
func (tags Tags) String() string {
return tags.TagSet.String()
}
// Remove removes a tag by its key.
func (tags *Tags) Remove(key string) {
tags.TagSet.remove(key)
}
// Set sets new tag.
func (tags *Tags) Set(key, value string) error {
return tags.TagSet.set(key, value, false)
}
// ToMap returns copy of tags.
func (tags Tags) ToMap() map[string]string {
return tags.TagSet.toMap()
}
// MapToObjectTags converts an input map of key and value into
// *Tags data structure with validation.
func MapToObjectTags(tagMap map[string]string) (*Tags, error) {
return NewTags(tagMap, true)
}
// MapToBucketTags converts an input map of key and value into
// *Tags data structure with validation.
func MapToBucketTags(tagMap map[string]string) (*Tags, error) {
return NewTags(tagMap, false)
}
// NewTags creates Tags from tagMap. If isObject is set, it validates against object tag limits.
func NewTags(tagMap map[string]string, isObject bool) (*Tags, error) {
tagging := &Tags{
TagSet: &tagSet{
tagMap: make(map[string]string),
isObject: isObject,
},
}
for key, value := range tagMap {
if err := tagging.TagSet.set(key, value, true); err != nil {
return nil, err
}
}
return tagging, nil
}
func unmarshalXML(reader io.Reader, isObject bool) (*Tags, error) {
tagging := &Tags{
TagSet: &tagSet{
tagMap: make(map[string]string),
isObject: isObject,
},
}
if err := xml.NewDecoder(reader).Decode(tagging); err != nil {
return nil, err
}
return tagging, nil
}
// ParseBucketXML decodes XML data of tags in reader specified in
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html#API_PutBucketTagging_RequestSyntax.
func ParseBucketXML(reader io.Reader) (*Tags, error) {
return unmarshalXML(reader, false)
}
// ParseObjectXML decodes XML data of tags in reader specified in
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html#API_PutObjectTagging_RequestSyntax
func ParseObjectXML(reader io.Reader) (*Tags, error) {
return unmarshalXML(reader, true)
}
// Parse decodes an HTTP query formatted string into tags, applying the limits selected by isObject.
// A query formatted string looks like "key1=value1&key2=value2".
func Parse(s string, isObject bool) (*Tags, error) {
values, err := url.ParseQuery(s)
if err != nil {
return nil, err
}
tagging := &Tags{
TagSet: &tagSet{
tagMap: make(map[string]string),
isObject: isObject,
},
}
for key := range values {
if err := tagging.TagSet.set(key, values.Get(key), true); err != nil {
return nil, err
}
}
return tagging, nil
}
// ParseObjectTags decodes HTTP query formatted string into tags. A query formatted string is like "key1=value1&key2=value2".
func ParseObjectTags(s string) (*Tags, error) {
return Parse(s, true)
}
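
Finally, a sketch of the tags package above: building validated object tags from a map and parsing them back from a query-formatted string. The tag keys and values are arbitrary examples, not values from this patch.

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/tags"
)

func main() {
	// Object tags are limited to 10 entries, 128-rune keys, and 256-rune values.
	t, err := tags.NewTags(map[string]string{"project": "attachments", "tier": "hot"}, true)
	if err != nil {
		panic(err)
	}
	fmt.Println(t.String()) // e.g. "project=attachments&tier=hot" (map order not guaranteed)

	// The same structure can be parsed back from a query-formatted string.
	parsed, err := tags.ParseObjectTags("project=attachments&tier=hot")
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.ToMap())
}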