4 changes: 2 additions & 2 deletions buildscripts/checkdeps.sh
@@ -74,11 +74,11 @@ check_minimum_version() {
 
 assert_is_supported_arch() {
     case "${ARCH}" in
-    x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64)
+    x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64 | riscv64)
         return
         ;;
     *)
-        echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64]"
+        echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64, riscv64]"
         exit 1
         ;;
     esac
2 changes: 1 addition & 1 deletion buildscripts/cross-compile.sh
@@ -9,7 +9,7 @@ function _init() {
     export CGO_ENABLED=0
 
     ## List of architectures and OS to test cross-compilation.
-    SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64"
+    SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64 linux/riscv64"
 }
 
 function _build() {
6 changes: 4 additions & 2 deletions cmd/admin-handlers-users.go
@@ -1827,16 +1827,18 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
             iamLogIf(ctx, err)
         } else if foundGroupDN == nil || !underBaseDN {
             err = errNoSuchGroup
+        } else {
+            entityName = foundGroupDN.NormDN
         }
-        entityName = foundGroupDN.NormDN
     } else {
         var foundUserDN *xldap.DNSearchResult
         if foundUserDN, err = globalIAMSys.LDAPConfig.GetValidatedDNForUsername(entityName); err != nil {
             iamLogIf(ctx, err)
         } else if foundUserDN == nil {
             err = errNoSuchUser
+        } else {
+            entityName = foundUserDN.NormDN
         }
-        entityName = foundUserDN.NormDN
     }
     if err != nil {
         writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
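The fix above moves the entityName assignment into an else branch so it only runs when the DN lookup actually returned a result; before, it executed unconditionally and dereferenced a nil pointer whenever the group or user did not exist. A minimal, standalone Go sketch of the before/after pattern (DNSearchResult here is a simplified stand-in for the xldap type, not MinIO's code):

    package main

    import "fmt"

    // DNSearchResult is a simplified stand-in for xldap.DNSearchResult.
    type DNSearchResult struct{ NormDN string }

    // lookup simulates a directory search that finds nothing:
    // nil result, nil error.
    func lookup(name string) (*DNSearchResult, error) {
        return nil, nil
    }

    func main() {
        entityName := "somegroup"
        found, err := lookup(entityName)
        if err != nil {
            fmt.Println("lookup error:", err)
        } else if found == nil {
            err = fmt.Errorf("no such group")
        } else {
            entityName = found.NormDN // safe: only dereferenced when non-nil
        }
        // Before the fix, this assignment sat unconditionally after the
        // if/else chain and panicked on a nil *DNSearchResult:
        //   entityName = found.NormDN
        fmt.Println(entityName, err)
    }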
32 changes: 29 additions & 3 deletions cmd/encryption-v1.go
@@ -1074,8 +1074,16 @@ func (o *ObjectInfo) metadataDecrypter(h http.Header) objectMetaDecryptFn {
             return input, nil
         }
         var key []byte
-        if k, err := crypto.SSEC.ParseHTTP(h); err == nil {
-            key = k[:]
+        if crypto.SSECopy.IsRequested(h) {
+            sseCopyKey, err := crypto.SSECopy.ParseHTTP(h)
+            if err != nil {
+                return nil, err
+            }
+            key = sseCopyKey[:]
+        } else {
+            if k, err := crypto.SSEC.ParseHTTP(h); err == nil {
+                key = k[:]
+            }
         }
         key, err := decryptObjectMeta(key, o.Bucket, o.Name, o.UserDefined)
         if err != nil {
@@ -1087,7 +1095,8 @@ func (o *ObjectInfo) metadataDecrypter(h http.Header) objectMetaDecryptFn {
     }
 }
 
-// decryptPartsChecksums will attempt to decode checksums and return it/them if set.
+// decryptPartsChecksums will attempt to decrypt and decode part checksums, and save
+// only the decrypted part checksum values on ObjectInfo directly.
 // If part > 0, the checksum for that part will be returned.
 func (o *ObjectInfo) decryptPartsChecksums(h http.Header) {
     data := o.Checksum
@@ -1112,6 +1121,23 @@ func (o *ObjectInfo) decryptPartsChecksums(h http.Header) {
     }
 }
 
+// decryptChecksum will attempt to decrypt ObjectInfo.Checksum and return the decrypted value.
+// An error is only returned if the checksum was encrypted and decryption failed.
+func (o *ObjectInfo) decryptChecksum(h http.Header) ([]byte, error) {
+    data := o.Checksum
+    if len(data) == 0 {
+        return data, nil
+    }
+    if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted {
+        decrypted, err := o.metadataDecrypter(h)("object-checksum", data)
+        if err != nil {
+            return nil, err
+        }
+        data = decrypted
+    }
+    return data, nil
+}
+
 // metadataEncryptFn provides an encryption function for metadata.
 // Will return nil, nil if unencrypted.
 func (o *ObjectInfo) metadataEncryptFn(headers http.Header) (objectMetaEncryptFn, error) {
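decryptChecksum builds directly on the metadataDecrypter closure above: the outer call binds the request headers and the object's bucket/name, and the returned objectMetaDecryptFn decrypts one named metadata blob. The call shape, pulled apart for clarity (a fragment of the logic above, not new API):

    decrypt := o.metadataDecrypter(h)                    // bound to o.Bucket / o.Name
    plain, err := decrypt("object-checksum", o.Checksum) // err only if decryption fails

CopyObjectHandler later in this PR consumes the result via hash.ChecksumFromBytes to decide how the destination object's checksum should be produced.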
12 changes: 11 additions & 1 deletion cmd/erasure-object.go
@@ -1470,7 +1470,17 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
             actualSize = n
         }
     }
-    if fi.Checksum == nil {
+    // If ServerSideChecksum is wanted for this object, it takes precedence
+    // over opts.WantChecksum.
+    if opts.WantServerSideChecksumType.IsSet() {
+        serverSideChecksum := r.RawServerSideChecksumResult()
+        if serverSideChecksum != nil {
+            fi.Checksum = serverSideChecksum.AppendTo(nil, nil)
+            if opts.EncryptFn != nil {
+                fi.Checksum = opts.EncryptFn("object-checksum", fi.Checksum)
+            }
+        }
+    } else if fi.Checksum == nil && opts.WantChecksum != nil {
         // Trailing headers checksums should now be filled.
         fi.Checksum = opts.WantChecksum.AppendTo(nil, nil)
         if opts.EncryptFn != nil {
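Note the ordering in the new branch: the server-side checksum is serialized with AppendTo first and only then sealed with opts.EncryptFn, so fi.Checksum stores the encrypted serialized form; decryptChecksum in encryption-v1.go reverses exactly these two steps. Schematically (names from this diff):

    sum := r.RawServerSideChecksumResult() // checksum of the raw, pre-encryption stream
    fi.Checksum = sum.AppendTo(nil, nil)   // serialize
    if opts.EncryptFn != nil {
        fi.Checksum = opts.EncryptFn("object-checksum", fi.Checksum) // seal
    }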
19 changes: 12 additions & 7 deletions cmd/erasure-server-pool.go
@@ -1340,12 +1340,15 @@ func (z *erasureServerPools) CopyObject(ctx context.Context, srcBucket, srcObjec
     }
 
     putOpts := ObjectOptions{
-        ServerSideEncryption: dstOpts.ServerSideEncryption,
-        UserDefined:          srcInfo.UserDefined,
-        Versioned:            dstOpts.Versioned,
-        VersionID:            dstOpts.VersionID,
-        MTime:                dstOpts.MTime,
-        NoLock:               true,
+        ServerSideEncryption:       dstOpts.ServerSideEncryption,
+        UserDefined:                srcInfo.UserDefined,
+        Versioned:                  dstOpts.Versioned,
+        VersionID:                  dstOpts.VersionID,
+        MTime:                      dstOpts.MTime,
+        NoLock:                     true,
+        EncryptFn:                  dstOpts.EncryptFn,
+        WantChecksum:               dstOpts.WantChecksum,
+        WantServerSideChecksumType: dstOpts.WantServerSideChecksumType,
     }
 
     return z.serverPools[poolIdx].PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts)
@@ -1709,7 +1712,9 @@ func (z *erasureServerPools) ListMultipartUploads(ctx context.Context, bucket, p
     }
 
     z.mpCache.Range(func(_ string, mp MultipartInfo) bool {
-        poolResult.Uploads = append(poolResult.Uploads, mp)
+        if mp.Bucket == bucket {
+            poolResult.Uploads = append(poolResult.Uploads, mp)
+        }
         return true
     })
     sort.Slice(poolResult.Uploads, func(i int, j int) bool {
13 changes: 8 additions & 5 deletions cmd/erasure-sets.go
@@ -868,11 +868,14 @@ func (s *erasureSets) CopyObject(ctx context.Context, srcBucket, srcObject, dstB
     }
 
     putOpts := ObjectOptions{
-        ServerSideEncryption: dstOpts.ServerSideEncryption,
-        UserDefined:          srcInfo.UserDefined,
-        Versioned:            dstOpts.Versioned,
-        VersionID:            dstOpts.VersionID,
-        MTime:                dstOpts.MTime,
+        ServerSideEncryption:       dstOpts.ServerSideEncryption,
+        UserDefined:                srcInfo.UserDefined,
+        Versioned:                  dstOpts.Versioned,
+        VersionID:                  dstOpts.VersionID,
+        MTime:                      dstOpts.MTime,
+        EncryptFn:                  dstOpts.EncryptFn,
+        WantChecksum:               dstOpts.WantChecksum,
+        WantServerSideChecksumType: dstOpts.WantServerSideChecksumType,
     }
 
     return dstSet.putObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts)
4 changes: 4 additions & 0 deletions cmd/logging.go
@@ -152,6 +152,10 @@ func encLogIf(ctx context.Context, err error, errKind ...interface{}) {
     logger.LogIf(ctx, "encryption", err, errKind...)
 }
 
+func encLogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) {
+    logger.LogOnceIf(ctx, "encryption", err, id, errKind...)
+}
+
 func storageLogIf(ctx context.Context, err error, errKind ...interface{}) {
     logger.LogIf(ctx, "storage", err, errKind...)
 }
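encLogOnceIf complements the existing encLogIf: logger.LogOnceIf deduplicates on the caller-supplied id, so a repeatedly failing path logs once instead of flooding the encryption log. The CopyObject path later in this PR calls it like so:

    encLogOnceIf(GlobalContext,
        fmt.Errorf("Unable to decryptChecksum for object: %s/%s, error: %w", srcBucket, srcObject, err),
        "copy-object-decrypt-checksums-"+srcBucket+srcObject)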
38 changes: 19 additions & 19 deletions cmd/metrics-v3-bucket-replication.go
@@ -49,61 +49,61 @@ const (
 var (
     bucketReplLastHrFailedBytesMD = NewGaugeMD(bucketReplLastHrFailedBytes,
         "Total number of bytes failed at least once to replicate in the last hour on a bucket",
-        bucketL)
+        bucketL, targetArnL)
     bucketReplLastHrFailedCountMD = NewGaugeMD(bucketReplLastHrFailedCount,
         "Total number of objects which failed replication in the last hour on a bucket",
-        bucketL)
+        bucketL, targetArnL)
     bucketReplLastMinFailedBytesMD = NewGaugeMD(bucketReplLastMinFailedBytes,
         "Total number of bytes failed at least once to replicate in the last full minute on a bucket",
-        bucketL)
+        bucketL, targetArnL)
     bucketReplLastMinFailedCountMD = NewGaugeMD(bucketReplLastMinFailedCount,
        "Total number of objects which failed replication in the last full minute on a bucket",
-        bucketL)
+        bucketL, targetArnL)
     bucketReplLatencyMsMD = NewGaugeMD(bucketReplLatencyMs,
         "Replication latency on a bucket in milliseconds",
         bucketL, operationL, rangeL, targetArnL)
     bucketReplProxiedDeleteTaggingRequestsTotalMD = NewCounterMD(bucketReplProxiedDeleteTaggingRequestsTotal,
         "Number of DELETE tagging requests proxied to replication target",
-        bucketL)
+        bucketL, targetArnL)
     bucketReplProxiedGetRequestsFailuresMD = NewCounterMD(bucketReplProxiedGetRequestsFailures,
         "Number of failures in GET requests proxied to replication target",
-        bucketL)
+        bucketL, targetArnL)
     bucketReplProxiedGetRequestsTotalMD = NewCounterMD(bucketReplProxiedGetRequestsTotal,
         "Number of GET requests proxied to replication target",
-        bucketL)
+        bucketL, targetArnL)
     bucketReplProxiedGetTaggingRequestsFailuresMD = NewCounterMD(bucketReplProxiedGetTaggingRequestsFailures,
         "Number of failures in GET tagging requests proxied to replication target",
-        bucketL)
+        bucketL, targetArnL)
     bucketReplProxiedGetTaggingRequestsTotalMD = NewCounterMD(bucketReplProxiedGetTaggingRequestsTotal,
         "Number of GET tagging requests proxied to replication target",
-        bucketL)
+        bucketL, targetArnL)
     bucketReplProxiedHeadRequestsFailuresMD = NewCounterMD(bucketReplProxiedHeadRequestsFailures,
         "Number of failures in HEAD requests proxied to replication target",
-        bucketL)
+        bucketL, targetArnL)
     bucketReplProxiedHeadRequestsTotalMD = NewCounterMD(bucketReplProxiedHeadRequestsTotal,
         "Number of HEAD requests proxied to replication target",
-        bucketL)
+        bucketL, targetArnL)
     bucketReplProxiedPutTaggingRequestsFailuresMD = NewCounterMD(bucketReplProxiedPutTaggingRequestsFailures,
         "Number of failures in PUT tagging requests proxied to replication target",
-        bucketL)
+        bucketL, targetArnL)
     bucketReplProxiedPutTaggingRequestsTotalMD = NewCounterMD(bucketReplProxiedPutTaggingRequestsTotal,
         "Number of PUT tagging requests proxied to replication target",
-        bucketL)
+        bucketL, targetArnL)
     bucketReplSentBytesMD = NewCounterMD(bucketReplSentBytes,
         "Total number of bytes replicated to the target",
-        bucketL)
+        bucketL, targetArnL)
     bucketReplSentCountMD = NewCounterMD(bucketReplSentCount,
         "Total number of objects replicated to the target",
-        bucketL)
+        bucketL, targetArnL)
     bucketReplTotalFailedBytesMD = NewCounterMD(bucketReplTotalFailedBytes,
         "Total number of bytes failed at least once to replicate since server start",
-        bucketL)
+        bucketL, targetArnL)
     bucketReplTotalFailedCountMD = NewCounterMD(bucketReplTotalFailedCount,
         "Total number of objects which failed replication since server start",
-        bucketL)
+        bucketL, targetArnL)
     bucketReplProxiedDeleteTaggingRequestsFailuresMD = NewCounterMD(bucketReplProxiedDeleteTaggingRequestsFailures,
         "Number of failures in DELETE tagging requests proxied to replication target",
-        bucketL)
+        bucketL, targetArnL)
 )
 
 // loadBucketReplicationMetrics - `BucketMetricsLoaderFn` for bucket replication metrics
@@ -121,11 +121,11 @@ func loadBucketReplicationMetrics(ctx context.Context, m MetricValues, c *metric
 
     bucketReplStats := globalReplicationStats.Load().getAllLatest(dataUsageInfo.BucketsUsage)
     for _, bucket := range buckets {
-        labels := []string{bucketL, bucket}
         if s, ok := bucketReplStats[bucket]; ok {
             stats := s.ReplicationStats
             if stats.hasReplicationUsage() {
                 for arn, stat := range stats.Stats {
+                    labels := []string{bucketL, bucket, targetArnL, arn}
                     m.Set(bucketReplLastHrFailedBytes, float64(stat.Failed.LastHour.Bytes), labels...)
                     m.Set(bucketReplLastHrFailedCount, float64(stat.Failed.LastHour.Count), labels...)
                     m.Set(bucketReplLastMinFailedBytes, float64(stat.Failed.LastMinute.Bytes), labels...)
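With targetArnL added, each per-bucket replication series fans out into one series per replication target, and the labels are now built inside the per-ARN loop. Assuming the v3 labels render as bucket and targetArn, scraped output would look roughly like this (illustrative names and values, not captured output):

    minio_bucket_replication_sent_bytes{bucket="photos",targetArn="arn:minio:replication::1a2b3c:photos"} 1.048576e+06
    minio_bucket_replication_sent_count{bucket="photos",targetArn="arn:minio:replication::1a2b3c:photos"} 42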
1 change: 1 addition & 0 deletions cmd/object-api-datatypes.go
@@ -654,6 +654,7 @@ type objectAttributesChecksum struct {
     ChecksumSHA1      string `xml:",omitempty"`
     ChecksumSHA256    string `xml:",omitempty"`
     ChecksumCRC64NVME string `xml:",omitempty"`
+    ChecksumType      string `xml:",omitempty"`
 }
 
 type objectAttributesParts struct {
2 changes: 2 additions & 0 deletions cmd/object-api-interface.go
@@ -86,6 +86,8 @@ type ObjectOptions struct {
 
     WantChecksum *hash.Checksum // x-amz-checksum-XXX checksum sent to PutObject/CompleteMultipartUpload.
 
+    WantServerSideChecksumType hash.ChecksumType // if set, we compute a server-side checksum of this type
+
     NoDecryption bool   // indicates if the stream must be decrypted.
     PreserveETag string // preserves this etag during a PUT call.
     NoLock       bool   // indicates to lower layers if the caller is expecting to hold locks.
10 changes: 10 additions & 0 deletions cmd/object-api-utils.go
@@ -1096,6 +1096,16 @@ func NewPutObjReader(rawReader *hash.Reader) *PutObjReader {
     return &PutObjReader{Reader: rawReader, rawReader: rawReader}
 }
 
+// RawServerSideChecksumResult returns the ServerSideChecksumResult from the
+// underlying rawReader, since the PutObjReader might be reading encrypted data,
+// in which case a checksum computed from it would be incorrect.
+func (p *PutObjReader) RawServerSideChecksumResult() *hash.Checksum {
+    if p.rawReader != nil {
+        return p.rawReader.ServerSideChecksumResult
+    }
+    return nil
+}
+
 func sealETag(encKey crypto.ObjectKey, md5CurrSum []byte) []byte {
     var emptyKey [32]byte
     if bytes.Equal(encKey[:], emptyKey[:]) {
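The doc comment above explains the why: with SSE enabled, PutObjReader.Reader yields ciphertext while rawReader sees the client's plaintext, so only the raw reader's checksum is meaningful to the client. A toy, self-contained illustration of the mismatch (XOR stands in for real encryption; none of this is MinIO's code):

    package main

    import (
        "fmt"
        "hash/crc32"
    )

    func main() {
        plaintext := []byte("hello object")

        // Checksum the "raw reader" view: the plaintext.
        rawSum := crc32.ChecksumIEEE(plaintext)

        // A stand-in encryption layer wrapped on top (purely illustrative).
        ciphertext := make([]byte, len(plaintext))
        for i, b := range plaintext {
            ciphertext[i] = b ^ 0x5A
        }
        outerSum := crc32.ChecksumIEEE(ciphertext)

        // The outer (encrypted) stream checksums differently, which is why
        // putObject asks RawServerSideChecksumResult for the inner reader's value.
        fmt.Printf("plaintext crc32: %08x, ciphertext crc32: %08x\n", rawSum, outerSum)
    }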
49 changes: 49 additions & 0 deletions cmd/object-handlers.go
@@ -641,6 +641,7 @@ func (api objectAPIHandlers) getObjectAttributesHandler(ctx context.Context, obj
             ChecksumSHA1:      strings.Split(chkSums["SHA1"], "-")[0],
             ChecksumSHA256:    strings.Split(chkSums["SHA256"], "-")[0],
             ChecksumCRC64NVME: strings.Split(chkSums["CRC64NVME"], "-")[0],
+            ChecksumType:      chkSums[xhttp.AmzChecksumType],
         }
     }
 }
@@ -1465,6 +1466,46 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
         targetSize, _ = srcInfo.DecryptedSize()
     }
 
+    // The client can request that a different type of checksum is computed server-side
+    // for the destination object using the x-amz-checksum-algorithm header.
+    headerChecksumType := hash.NewChecksumHeader(r.Header)
+    if headerChecksumType.IsSet() {
+        dstOpts.WantServerSideChecksumType = headerChecksumType.Base()
+        srcInfo.Reader.AddServerSideChecksumHasher(headerChecksumType)
+        dstOpts.WantChecksum = nil
+    } else {
+        // Check the source object for a checksum.
+        // If the checksum is not encrypted, decryptChecksum is a no-op and returns
+        // the already-unencrypted value.
+        srcChecksumDecrypted, err := srcInfo.decryptChecksum(r.Header)
+        if err != nil {
+            encLogOnceIf(GlobalContext,
+                fmt.Errorf("Unable to decryptChecksum for object: %s/%s, error: %w", srcBucket, srcObject, err),
+                "copy-object-decrypt-checksums-"+srcBucket+srcObject)
+        }
+
+        // The source object has a checksum set, so the destination needs one too.
+        if srcChecksumDecrypted != nil {
+            dstOpts.WantChecksum = hash.ChecksumFromBytes(srcChecksumDecrypted)
+
+            // When an object is copied from a multipart source, the destination is no
+            // longer multipart, so the checksum becomes full-object instead. Since the
+            // CopyObject API does not require the caller to send us this final checksum,
+            // we need to compute it server-side, with the same base type as the source.
+            if dstOpts.WantChecksum != nil && dstOpts.WantChecksum.Type.IsMultipartComposite() {
+                dstOpts.WantServerSideChecksumType = dstOpts.WantChecksum.Type.Base()
+                srcInfo.Reader.AddServerSideChecksumHasher(dstOpts.WantServerSideChecksumType)
+                dstOpts.WantChecksum = nil
+            }
+        } else {
+            // S3 behavior: copied objects with no source checksum and no requested
+            // destination checksum algorithm automatically gain a CRC-64NVME checksum.
+            dstOpts.WantServerSideChecksumType = hash.ChecksumCRC64NVME
+            srcInfo.Reader.AddServerSideChecksumHasher(dstOpts.WantServerSideChecksumType)
+            dstOpts.WantChecksum = nil
+        }
+    }
+
     if isTargetEncrypted {
         var encReader io.Reader
         kind, _ := crypto.IsRequested(r.Header)
@@ -1498,6 +1539,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
         if dstOpts.IndexCB != nil {
             dstOpts.IndexCB = compressionIndexEncrypter(objEncKey, dstOpts.IndexCB)
         }
+        dstOpts.EncryptFn = metadataEncrypter(objEncKey)
     }
 }
 
@@ -1633,6 +1675,13 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re
         return
     }
 
+    // After we've checked for an invalid copy (above), if a server-side checksum type
+    // is requested, we need to read the source to recompute the checksum.
+    if dstOpts.WantServerSideChecksumType.IsSet() {
+        srcInfo.metadataOnly = false
+    }
+
+    // Federation only.
     remoteCallRequired := isRemoteCopyRequired(ctx, srcBucket, dstBucket, objectAPI)
 
     var objInfo ObjectInfo
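Taken together, the CopyObject changes reduce to a three-way decision about the destination checksum. The sketch below is an illustrative distillation of the branches above, not code from the PR:

    package main

    import "fmt"

    type checksumType string

    const (
        checksumNone      checksumType = ""
        checksumComposite checksumType = "COMPOSITE" // multipart composite source
    )

    // decideDestChecksum restates CopyObjectHandler's logic: which algorithm (if
    // any) must be recomputed server-side, and whether the source checksum can
    // simply be inherited.
    func decideDestChecksum(headerAlgo string, src checksumType) (serverSide string, inherit bool) {
        switch {
        case headerAlgo != "":
            return headerAlgo, false // x-amz-checksum-algorithm wins
        case src == checksumNone:
            return "CRC64NVME", false // S3 default for checksum-less copies
        case src == checksumComposite:
            return "same base type as source", false // composite must become full-object
        default:
            return "", true // inherit the source checksum as-is
        }
    }

    func main() {
        fmt.Println(decideDestChecksum("SHA256", checksumNone)) // client request wins
        fmt.Println(decideDestChecksum("", checksumComposite))  // recompute full-object
    }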
1 change: 1 addition & 0 deletions cmd/warm-backend-s3.go
@@ -163,6 +163,7 @@ func newWarmBackendS3(conf madmin.TierS3, tier string) (*warmBackendS3, error) {
     Creds:     creds,
     Secure:    u.Scheme == "https",
     Transport: globalRemoteTargetTransport,
+    Region:    conf.Region,
 }
 client, err := minio.New(u.Host, opts)
 if err != nil {