diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index 807c83e4ca530..cd3f41530c233 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -1,14 +1,19 @@
 ---
 name: Bug report
-about: Create a report to help us improve
+about: Report a bug in MinIO (community edition is source-only)
 title: ''
 labels: community, triage
 assignees: ''
 
 ---
 
-## NOTE
-If this case is urgent, please subscribe to [Subnet](https://min.io/pricing) so that our 24/7 support team may help you faster.
+## IMPORTANT NOTES
+
+**Community Edition**: MinIO community edition is now source-only. Install via `go install github.com/minio/minio@latest`
+
+**Feature Requests**: We are no longer accepting feature requests for the community edition. For feature requests and enterprise support, please subscribe to [MinIO Enterprise Support](https://min.io/pricing).
+
+**Urgent Issues**: If this case is urgent or affects production, please subscribe to [SUBNET](https://min.io/pricing) for 24/7 enterprise support.
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index 95238236df67d..28ed32af2d61e 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -2,7 +2,7 @@ blank_issues_enabled: false
 contact_links:
   - name: MinIO Community Support
     url: https://slack.min.io
-    about: Join here for Community Support
-  - name: MinIO SUBNET Support
+    about: Community support via Slack - for questions and discussions
+  - name: MinIO Enterprise Support (SUBNET)
     url: https://min.io/pricing
-    about: Join here for Enterprise Support
+    about: Enterprise support with SLA - for production deployments and feature requests
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index 4a7b218c94c69..0000000000000
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: community, triage
-assignees: ''
-
----
-
-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the solution you'd like**
-A clear and concise description of what you want to happen.
-
-**Describe alternatives you've considered**
-A clear and concise description of any alternative solutions or features you've considered.
-
-**Additional context**
-Add any other context or screenshots about the feature request here.
diff --git a/.github/workflows/depsreview.yaml b/.github/workflows/depsreview.yaml
index f2605b7a7e2c7..b9d6d20fff4e7 100644
--- a/.github/workflows/depsreview.yaml
+++ b/.github/workflows/depsreview.yaml
@@ -9,6 +9,6 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: 'Checkout Repository'
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: 'Dependency Review'
-        uses: actions/dependency-review-action@v1
+        uses: actions/dependency-review-action@v4
diff --git a/.github/workflows/go-cross.yml b/.github/workflows/go-cross.yml
index 825280e137451..324af3e956eba 100644
--- a/.github/workflows/go-cross.yml
+++ b/.github/workflows/go-cross.yml
@@ -3,12 +3,11 @@ name: Crosscompile
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
-concurrency:
+concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref }}
   cancel-in-progress: true
 
@@ -21,11 +20,11 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
-      - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3
-      - uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
           go-version: ${{ matrix.go-version }}
           check-latest: true
diff --git a/.github/workflows/go-fips.yml b/.github/workflows/go-fips.yml
deleted file mode 100644
index e812d610ea1f4..0000000000000
--- a/.github/workflows/go-fips.yml
+++ /dev/null
@@ -1,60 +0,0 @@
-name: FIPS Build Test
-
-on:
-  pull_request:
-    branches:
-      - master
-      - next
-
-# This ensures that previous jobs for the PR are canceled when the PR is
-# updated.
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref }}
-  cancel-in-progress: true
-
-permissions:
-  contents: read
-
-jobs:
-  build:
-    name: Go BoringCrypto ${{ matrix.go-version }} on ${{ matrix.os }}
-    runs-on: ${{ matrix.os }}
-    strategy:
-      matrix:
-        go-version: [1.21.x]
-        os: [ubuntu-latest]
-    steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
-        with:
-          go-version: ${{ matrix.go-version }}
-
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-
-      - name: Setup dockerfile for build test
-        run: |
-          GO_VERSION=$(go version | cut -d ' ' -f 3 | sed 's/go//')
-          echo Detected go version $GO_VERSION
-          cat > Dockerfile.fips.test <&1 | grep FAILED | wc -l)
-failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
 
 if [ $failed_count_site1 -ne 0 ]; then
 	echo "failed with multipart on site1 uploads"
@@ -67,8 +62,8 @@ fi
 
 sleep 5
 
-failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
-failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
 
 ## we do not need to fail here, since we are going to test
 ## upgrading to master, healing and being able to recover
@@ -96,8 +91,8 @@ for i in $(seq 1 10); do
 	./mc admin heal -r --remove --json site2/ 2>&1 >/dev/null
 done
 
-failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
-failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
+failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
 
 if [ $failed_count_site1 -ne 0 ]; then
 	echo "failed with multipart on site1 uploads"
@@ -109,6 +104,43 @@ if [ $failed_count_site2 -ne 0 ]; then
 	exit 1
 fi
 
+# Add user group test
+./mc admin user add site1 site-replication-issue-user site-replication-issue-password
+./mc admin group add site1 site-replication-issue-group site-replication-issue-user
+
+max_wait_attempts=30
+wait_interval=5
+
+attempt=1
+while true; do
+	diff <(./mc admin group info site1 site-replication-issue-group) <(./mc admin group info site2 site-replication-issue-group)
+
+	if [[ $? -eq 0 ]]; then
+		echo "Outputs are consistent."
+		break
+	fi
+
+	remaining_attempts=$((max_wait_attempts - attempt))
+	if ((attempt >= max_wait_attempts)); then
+		echo "Outputs remain inconsistent after $max_wait_attempts attempts. Exiting with error."
+		exit 1
+	else
+		echo "Outputs are inconsistent. Waiting for $wait_interval seconds (attempt $attempt/$max_wait_attempts)."
+		sleep $wait_interval
+	fi
+
+	((attempt++))
+done
+
+status=$(./mc admin group info site1 site-replication-issue-group --json | jq .groupStatus | tr -d '"')
+
+if [[ $status == "enabled" ]]; then
+	echo "Success"
+else
+	echo "Expected status: enabled, actual status: $status"
+	exit 1
+fi
+
 cleanup
 
 ## change working directory
diff --git a/.github/workflows/replication.yaml b/.github/workflows/replication.yaml
index e6a674f5a3534..18c3277496b7b 100644
--- a/.github/workflows/replication.yaml
+++ b/.github/workflows/replication.yaml
@@ -3,8 +3,7 @@ name: MinIO advanced tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -22,11 +21,11 @@ jobs:
 
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.24.x]
 
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
           go-version: ${{ matrix.go-version }}
           check-latest: true
@@ -36,6 +35,19 @@ jobs:
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make test-decom
+
+      - name: Test ILM
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-ilm
+          make test-ilm-transition
+
+      - name: Test PBAC
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-pbac
+
       - name: Test Config File
         run: |
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
@@ -59,3 +71,9 @@ jobs:
           sudo sysctl net.ipv6.conf.all.disable_ipv6=0
           sudo sysctl net.ipv6.conf.default.disable_ipv6=0
           make test-versioning
+
+      - name: Test Multipart upload with failures
+        run: |
+          sudo sysctl net.ipv6.conf.all.disable_ipv6=0
+          sudo sysctl net.ipv6.conf.default.disable_ipv6=0
+          make test-multipart
diff --git a/.github/workflows/root-disable.yml b/.github/workflows/root-disable.yml
index bd78854a474a1..c08fb8b1fbd9e 100644
--- a/.github/workflows/root-disable.yml
+++ b/.github/workflows/root-disable.yml
@@ -3,8 +3,7 @@ name: Root lockdown tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -21,12 +20,12 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        go-version: [1.21.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
           go-version: ${{ matrix.go-version }}
           check-latest: true
diff --git a/.github/workflows/run-mint.sh b/.github/workflows/run-mint.sh
index 6aa2cd502c0cc..0bbc1cbaf6150 100755
--- a/.github/workflows/run-mint.sh
+++ b/.github/workflows/run-mint.sh
@@ -15,8 +15,11 @@ docker volume rm $(docker volume ls -f dangling=true) || true
 ## change working directory
 cd .github/workflows/mint
 
+## always pull latest
+docker pull docker.io/minio/mint:edge
+
 docker-compose -f minio-${MODE}.yaml up -d
-sleep 30s
+sleep 1m
 
 docker system prune -f || true
 docker volume prune -f || true
@@ -26,6 +29,9 @@ docker volume rm $(docker volume ls -q -f dangling=true) || true
 [ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio2
 [ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio6
 
+# Pause one node, to check that all S3 calls work while one node goes wrong
+[ "${MODE}" == "resiliency" ] && docker-compose -f minio-${MODE}.yaml pause minio4
+
 docker run --rm --net=mint_default \
 	--name="mint-${MODE}-${JOB_NAME}" \
 	-e SERVER_ENDPOINT="nginx:9000" \
@@ -35,6 +41,18 @@ docker run --rm --net=mint_default \
 	-e MINT_MODE="${MINT_MODE}" \
 	docker.io/minio/mint:edge
 
+# FIXME: enable this after fixing aws-sdk-java-v2 tests
+# # unpause the node, to check that all S3 calls work while one node goes wrong
+# [ "${MODE}" == "resiliency" ] && docker-compose -f minio-${MODE}.yaml unpause minio4
+# [ "${MODE}" == "resiliency" ] && docker run --rm --net=mint_default \
+#	--name="mint-${MODE}-${JOB_NAME}" \
+#	-e SERVER_ENDPOINT="nginx:9000" \
+#	-e ACCESS_KEY="${ACCESS_KEY}" \
+#	-e SECRET_KEY="${SECRET_KEY}" \
+#	-e ENABLE_HTTPS=0 \
+#	-e MINT_MODE="${MINT_MODE}" \
+#	docker.io/minio/mint:edge
+
 docker-compose -f minio-${MODE}.yaml down || true
 
 sleep 10s
diff --git a/.github/workflows/shfmt.yml b/.github/workflows/shfmt.yml
index 930847733cc5f..3e446306bdd55 100644
--- a/.github/workflows/shfmt.yml
+++ b/.github/workflows/shfmt.yml
@@ -3,8 +3,7 @@ name: Shell formatting checks
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 permissions:
   contents: read
@@ -14,7 +13,7 @@ jobs:
     name: runner / shfmt
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
       - uses: luizm/action-sh-checker@master
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/typos.yml b/.github/workflows/typos.yml
index c31e9daf370c6..7addb3143b61c 100644
--- a/.github/workflows/typos.yml
+++ b/.github/workflows/typos.yml
@@ -1,5 +1,5 @@
 ---
-name: Test GitHub Action
+name: Spelling
 on: [pull_request]
 
 jobs:
diff --git a/.github/workflows/upgrade-ci-cd.yaml b/.github/workflows/upgrade-ci-cd.yaml
index fc9dafe3fb605..d3e71ff593641 100644
--- a/.github/workflows/upgrade-ci-cd.yaml
+++ b/.github/workflows/upgrade-ci-cd.yaml
@@ -3,8 +3,7 @@ name: Upgrade old version tests
 on:
   pull_request:
     branches:
-      - master
-      - next
+      - master
 
 # This ensures that previous jobs for the PR are canceled when the PR is
 # updated.
@@ -21,12 +20,12 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
      matrix:
-        go-version: [1.21.x]
+        go-version: [1.24.x]
         os: [ubuntu-latest]
     steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-go@v3
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
           go-version: ${{ matrix.go-version }}
           check-latest: true
diff --git a/.github/workflows/vulncheck.yml b/.github/workflows/vulncheck.yml
index 1a5414f92e3de..5dfcde04bde5e 100644
--- a/.github/workflows/vulncheck.yml
+++ b/.github/workflows/vulncheck.yml
@@ -17,15 +17,15 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Set up Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
         with:
-          go-version: 1.21.8
-          check-latest: true
+          go-version: 1.24.x
+          cached: false
       - name: Get official govulncheck
         run: go install golang.org/x/vuln/cmd/govulncheck@latest
         shell: bash
       - name: Run govulncheck
-        run: govulncheck ./...
+        run: govulncheck -show verbose ./...
         shell: bash
diff --git a/.gitignore b/.gitignore
index ebbdebcefc975..3a51a0b284002 100644
--- a/.gitignore
+++ b/.gitignore
@@ -43,4 +43,13 @@ docs/debugging/inspect/inspect
 docs/debugging/pprofgoparser/pprofgoparser
 docs/debugging/reorder-disks/reorder-disks
 docs/debugging/populate-hard-links/populate-hardlinks
-docs/debugging/xattr/xattr
\ No newline at end of file
+docs/debugging/xattr/xattr
+hash-set
+healing-bin
+inspect
+pprofgoparser
+reorder-disks
+s3-check-md5
+s3-verify
+xattr
+xl-meta
diff --git a/.golangci.yml b/.golangci.yml
index 239de6e7ce751..0533d7cd9bede 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,36 +1,64 @@
-linters-settings:
-  gofumpt:
-    simplify: true
-
-  misspell:
-    locale: US
-
-  staticcheck:
-    checks: ['all', '-ST1005', '-ST1000', '-SA4000', '-SA9004', '-SA1019', '-SA1008', '-U1000', '-ST1016']
-
+version: "2"
 linters:
-  disable-all: true
+  default: none
   enable:
     - durationcheck
+    - forcetypeassert
     - gocritic
-    - gofumpt
-    - goimports
     - gomodguard
     - govet
     - ineffassign
     - misspell
     - revive
     - staticcheck
-    - tenv
-    - typecheck
     - unconvert
     - unused
-
+    - usetesting
+    - whitespace
+  settings:
+    misspell:
+      locale: US
+    staticcheck:
+      checks:
+        - all
+        - -SA1008
+        - -SA1019
+        - -SA4000
+        - -SA9004
+        - -ST1000
+        - -ST1005
+        - -ST1016
+        - -U1000
+  exclusions:
+    generated: lax
+    rules:
+      - linters:
+          - forcetypeassert
+        path: _test\.go
+      - path: (.+)\.go$
+        text: 'empty-block:'
+      - path: (.+)\.go$
+        text: 'unused-parameter:'
+      - path: (.+)\.go$
+        text: 'dot-imports:'
+      - path: (.+)\.go$
+        text: should have a package comment
+      - path: (.+)\.go$
+        text: error strings should not be capitalized or end with punctuation or a newline
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
 issues:
-  exclude-use-default: false
-  exclude:
-    - "empty-block:"
-    - "unused-parameter:"
-    - "dot-imports:"
-    - should have a package comment
-    - error strings should not be capitalized or end with punctuation or a newline
+  max-issues-per-linter: 100
+  max-same-issues: 100
+formatters:
+  enable:
+    - gofumpt
+    - goimports
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
diff --git a/.typos.toml b/.typos.toml
index a32263cad6740..3168e9dd9ce42 100644
--- a/.typos.toml
+++ b/.typos.toml
@@ -1,8 +1,5 @@
 [files]
-extend-exclude = [
-  ".git/",
-  "docs/",
-]
+extend-exclude = [".git/", "docs/", "CREDITS", "go.mod", "go.sum"]
 ignore-hidden = false
 
 [default]
@@ -12,20 +9,37 @@ extend-ignore-re = [
   "[0-9A-Za-z/+=]{64}",
"ZXJuZXQxDjAMBgNVBA-some-junk-Q4wDAYDVQQLEwVNaW5pbzEOMAwGA1UEAxMF", "eyJmb28iOiJiYXIifQ", + "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.*", + "MIIDBTCCAe2gAwIBAgIQWHw7h.*", 'http\.Header\{"X-Amz-Server-Side-Encryptio":', - 'sessionToken', + "ZoEoZdLlzVbOlT9rbhD7ZN7TLyiYXSAlB79uGEge", + "ERRO:", + "(?Rm)^.*(#|//)\\s*spellchecker:disable-line$", # ignore line ] [default.extend-words] "encrypter" = "encrypter" +"kms" = "kms" "requestor" = "requestor" [default.extend-identifiers] +"HashiCorp" = "HashiCorp" + +[type.go.extend-identifiers] "bui" = "bui" -"toi" = "toi" -"ot" = "ot" "dm2nd" = "dm2nd" -"HashiCorp" = "HashiCorp" +"ot" = "ot" "ParseND" = "ParseND" "ParseNDStream" = "ParseNDStream" +"pn" = "pn" "TestGetPartialObjectMisAligned" = "TestGetPartialObjectMisAligned" +"thr" = "thr" +"toi" = "toi" + +[type.go] +extend-ignore-identifiers-re = [ + # Variants of `typ` used to mean `type` in golang as it is otherwise a + # keyword - some of these (like typ1 -> type1) can be fixed, but probably + # not worth the effort. + "[tT]yp[0-9]*", +] diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 02e869344358e..c99df74406c8e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -12,8 +12,9 @@ Fork [MinIO upstream](https://github.com/minio/minio/fork) source repository to ```sh git clone https://github.com/minio/minio +cd minio go install -v -ls /go/bin/minio +ls $(go env GOPATH)/bin/minio ``` ### Set up git remote as ``upstream`` diff --git a/CREDITS b/CREDITS index 5cf3ab89230cb..c70aed83fad2d 100644 --- a/CREDITS +++ b/CREDITS @@ -85,8 +85,8 @@ SOFTWARE. ================================================================ -cloud.google.com/go -https://cloud.google.com/go +cel.dev/expr +https://cel.dev/expr ---------------------------------------------------------------- Apache License @@ -293,8 +293,8 @@ https://cloud.google.com/go ================================================================ -cloud.google.com/go/compute -https://cloud.google.com/go/compute +cloud.google.com/go +https://cloud.google.com/go ---------------------------------------------------------------- Apache License @@ -501,8 +501,8 @@ https://cloud.google.com/go/compute ================================================================ -cloud.google.com/go/compute/metadata -https://cloud.google.com/go/compute/metadata +cloud.google.com/go/auth +https://cloud.google.com/go/auth ---------------------------------------------------------------- Apache License @@ -709,8 +709,8 @@ https://cloud.google.com/go/compute/metadata ================================================================ -cloud.google.com/go/iam -https://cloud.google.com/go/iam +cloud.google.com/go/auth/oauth2adapt +https://cloud.google.com/go/auth/oauth2adapt ---------------------------------------------------------------- Apache License @@ -917,8 +917,8 @@ https://cloud.google.com/go/iam ================================================================ -cloud.google.com/go/storage -https://cloud.google.com/go/storage +cloud.google.com/go/compute/metadata +https://cloud.google.com/go/compute/metadata ---------------------------------------------------------------- Apache License @@ -1125,60 +1125,8 @@ https://cloud.google.com/go/storage ================================================================ -github.com/Azure/azure-pipeline-go -https://github.com/Azure/azure-pipeline-go ----------------------------------------------------------------- - MIT License - - Copyright (c) Microsoft Corporation. All rights reserved. 
- - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE -================================================================ - -github.com/Azure/azure-storage-blob-go -https://github.com/Azure/azure-storage-blob-go ----------------------------------------------------------------- - MIT License - - Copyright (c) Microsoft Corporation. All rights reserved. - - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included in all - copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - SOFTWARE -================================================================ - -github.com/Azure/go-autorest -https://github.com/Azure/go-autorest +cloud.google.com/go/iam +https://cloud.google.com/go/iam ---------------------------------------------------------------- Apache License @@ -1358,204 +1306,18 @@ https://github.com/Azure/go-autorest END OF TERMS AND CONDITIONS - Copyright 2015 Microsoft Corporation - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- -================================================================ - -github.com/Azure/go-autorest/autorest -https://github.com/Azure/go-autorest/autorest ----------------------------------------------------------------- - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. + APPENDIX: How to apply the Apache License to your work. - END OF TERMS AND CONDITIONS + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. - Copyright 2015 Microsoft Corporation + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -1571,8 +1333,8 @@ https://github.com/Azure/go-autorest/autorest ================================================================ -github.com/Azure/go-autorest/autorest/adal -https://github.com/Azure/go-autorest/autorest/adal +cloud.google.com/go/logging +https://cloud.google.com/go/logging ---------------------------------------------------------------- Apache License @@ -1752,7 +1514,18 @@ https://github.com/Azure/go-autorest/autorest/adal END OF TERMS AND CONDITIONS - Copyright 2015 Microsoft Corporation + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -1768,8 +1541,8 @@ https://github.com/Azure/go-autorest/autorest/adal ================================================================ -github.com/Azure/go-autorest/autorest/date -https://github.com/Azure/go-autorest/autorest/date +cloud.google.com/go/longrunning +https://cloud.google.com/go/longrunning ---------------------------------------------------------------- Apache License @@ -1949,7 +1722,18 @@ https://github.com/Azure/go-autorest/autorest/date END OF TERMS AND CONDITIONS - Copyright 2015 Microsoft Corporation + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -1965,8 +1749,8 @@ https://github.com/Azure/go-autorest/autorest/date ================================================================ -github.com/Azure/go-autorest/autorest/mocks -https://github.com/Azure/go-autorest/autorest/mocks +cloud.google.com/go/monitoring +https://cloud.google.com/go/monitoring ---------------------------------------------------------------- Apache License @@ -2146,7 +1930,18 @@ https://github.com/Azure/go-autorest/autorest/mocks END OF TERMS AND CONDITIONS - Copyright 2015 Microsoft Corporation + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -2162,8 +1957,8 @@ https://github.com/Azure/go-autorest/autorest/mocks ================================================================ -github.com/Azure/go-autorest/logger -https://github.com/Azure/go-autorest/logger +cloud.google.com/go/storage +https://cloud.google.com/go/storage ---------------------------------------------------------------- Apache License @@ -2343,7 +2138,18 @@ https://github.com/Azure/go-autorest/logger END OF TERMS AND CONDITIONS - Copyright 2015 Microsoft Corporation + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -2359,8 +2165,8 @@ https://github.com/Azure/go-autorest/logger ================================================================ -github.com/Azure/go-autorest/tracing -https://github.com/Azure/go-autorest/tracing +cloud.google.com/go/trace +https://cloud.google.com/go/trace ---------------------------------------------------------------- Apache License @@ -2540,7 +2346,18 @@ https://github.com/Azure/go-autorest/tracing END OF TERMS AND CONDITIONS - Copyright 2015 Microsoft Corporation + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -2556,12 +2373,45 @@ https://github.com/Azure/go-autorest/tracing ================================================================ -github.com/Azure/go-ntlmssp -https://github.com/Azure/go-ntlmssp +filippo.io/edwards25519 +https://filippo.io/edwards25519 ---------------------------------------------------------------- -The MIT License (MIT) +Copyright (c) 2009 The Go Authors. All rights reserved. -Copyright (c) 2016 Microsoft +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +github.com/Azure/azure-sdk-for-go/sdk/azcore +https://github.com/Azure/azure-sdk-for-go/sdk/azcore +---------------------------------------------------------------- +MIT License + +Copyright (c) Microsoft Corporation. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -2579,46 +2429,70 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +SOFTWARE ================================================================ -github.com/IBM/sarama -https://github.com/IBM/sarama +github.com/Azure/azure-sdk-for-go/sdk/azidentity +https://github.com/Azure/azure-sdk-for-go/sdk/azidentity ---------------------------------------------------------------- -# MIT License +MIT License -Copyright (c) 2013 Shopify +Copyright (c) Microsoft Corporation. -Copyright (c) 2023 IBM Corporation +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +================================================================ + +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache +https://github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache +---------------------------------------------------------------- +MIT License + +Copyright (c) Microsoft Corporation. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE ================================================================ -github.com/VividCortex/ewma -https://github.com/VividCortex/ewma +github.com/Azure/azure-sdk-for-go/sdk/internal +https://github.com/Azure/azure-sdk-for-go/sdk/internal ---------------------------------------------------------------- -The MIT License +MIT License -Copyright (c) 2013 VividCortex +Copyright (c) Microsoft Corporation. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -2627,25 +2501,25 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE ================================================================ -github.com/acarl005/stripansi -https://github.com/acarl005/stripansi +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage +https://github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage ---------------------------------------------------------------- MIT License -Copyright (c) 2018 Andrew Carlson +Copyright (c) Microsoft Corporation. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -2664,20 +2538,47 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +================================================================ + +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob +https://github.com/Azure/azure-sdk-for-go/sdk/storage/azblob +---------------------------------------------------------------- + MIT License + + Copyright (c) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE ================================================================ -github.com/alecthomas/participle -https://github.com/alecthomas/participle +github.com/Azure/go-ntlmssp +https://github.com/Azure/go-ntlmssp ---------------------------------------------------------------- -Copyright (C) 2017 Alec Thomas +The MIT License (MIT) -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: +Copyright (c) 2016 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. @@ -2692,41 +2593,62 @@ SOFTWARE. ================================================================ -github.com/alexbrainman/sspi -https://github.com/alexbrainman/sspi +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache +https://github.com/AzureAD/microsoft-authentication-extensions-for-go/cache ---------------------------------------------------------------- -Copyright (c) 2012 The Go Authors. All rights reserved. + MIT License -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Copyright (c) Microsoft Corporation. - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE ================================================================ -github.com/apache/thrift -https://github.com/apache/thrift +github.com/AzureAD/microsoft-authentication-library-for-go +https://github.com/AzureAD/microsoft-authentication-library-for-go +---------------------------------------------------------------- + MIT License + + Copyright (c) Microsoft Corporation. + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE + +================================================================ + +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp +https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp ---------------------------------------------------------------- Apache License @@ -2931,431 +2853,12 @@ https://github.com/apache/thrift See the License for the specific language governing permissions and limitations under the License. --------------------------------------------------- -SOFTWARE DISTRIBUTED WITH THRIFT: - -The Apache Thrift software includes a number of subcomponents with -separate copyright notices and license terms. Your use of the source -code for the these subcomponents is subject to the terms and -conditions of the following licenses. 
- --------------------------------------------------- -Portions of the following files are licensed under the MIT License: - - lib/erl/src/Makefile.am - -Please see doc/otp-base-license.txt for the full terms of this license. - --------------------------------------------------- -For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: - -# Copyright (c) 2007 Thomas Porschberg -# -# Copying and distribution of this file, with or without -# modification, are permitted in any medium without royalty provided -# the copyright notice and this notice are preserved. - --------------------------------------------------- -For the lib/nodejs/lib/thrift/json_parse.js: - -/* - json_parse.js - 2015-05-02 - Public Domain. - NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. - -*/ -(By Douglas Crockford ) - --------------------------------------------------- -For lib/cpp/src/thrift/windows/SocketPair.cpp - -/* socketpair.c - * Copyright 2007 by Nathan C. Myers ; some rights reserved. - * This code is Free Software. It may be copied freely, in original or - * modified form, subject only to the restrictions that (1) the author is - * relieved from all responsibilities for any use for any purpose, and (2) - * this copyright notice must be retained, unchanged, in its entirety. If - * for any reason the author might be held responsible for any consequences - * of copying or use, license is withheld. - */ - - --------------------------------------------------- -For lib/py/compat/win32/stdint.h - -// ISO C9x compliant stdint.h for Microsoft Visual Studio -// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 -// -// Copyright (c) 2006-2008 Alexander Chemeris -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// 1. Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// 2. Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// 3. The name of the author may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED -// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// -/////////////////////////////////////////////////////////////////////////////// - - --------------------------------------------------- -Codegen template in t_html_generator.h - -* Bootstrap v2.0.3 -* -* Copyright 2012 Twitter, Inc -* Licensed under the Apache License v2.0 -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Designed and built with all the love in the world @twitter by @mdo and @fat. 
- ---------------------------------------------------- -For t_cl_generator.cc - - * Copyright (c) 2008- Patrick Collison - * Copyright (c) 2006- Facebook - ---------------------------------------------------- - -================================================================ - -github.com/armon/go-metrics -https://github.com/armon/go-metrics ----------------------------------------------------------------- -The MIT License (MIT) - -Copyright (c) 2013 Armon Dadgar - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -================================================================ - -github.com/asaskevich/govalidator -https://github.com/asaskevich/govalidator ----------------------------------------------------------------- -The MIT License (MIT) - -Copyright (c) 2014-2020 Alex Saskevich - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
-================================================================ - -github.com/aymanbagabas/go-osc52/v2 -https://github.com/aymanbagabas/go-osc52/v2 ----------------------------------------------------------------- -MIT License - -Copyright (c) 2022 Ayman Bagabas - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -================================================================ - -github.com/bcicen/jstream -https://github.com/bcicen/jstream ----------------------------------------------------------------- -The MIT License (MIT) - -Copyright (c) 2018 Bradley Cicenas - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -================================================================ - -github.com/beevik/ntp -https://github.com/beevik/ntp ----------------------------------------------------------------- -Copyright © 2015-2023 Brett Vickers. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. 
- -THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -================================================================ - -github.com/beorn7/perks -https://github.com/beorn7/perks ----------------------------------------------------------------- -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -================================================================ - -github.com/buger/jsonparser -https://github.com/buger/jsonparser ----------------------------------------------------------------- -MIT License - -Copyright (c) 2016 Leonid Bugaev - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- -================================================================ - -github.com/cespare/xxhash/v2 -https://github.com/cespare/xxhash/v2 ----------------------------------------------------------------- -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -================================================================ - -github.com/charmbracelet/bubbles -https://github.com/charmbracelet/bubbles ----------------------------------------------------------------- -MIT License - -Copyright (c) 2020-2023 Charmbracelet, Inc - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -================================================================ - -github.com/charmbracelet/bubbletea -https://github.com/charmbracelet/bubbletea ----------------------------------------------------------------- -MIT License - -Copyright (c) 2020-2023 Charmbracelet, Inc - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -================================================================ - -github.com/charmbracelet/lipgloss -https://github.com/charmbracelet/lipgloss ----------------------------------------------------------------- -MIT License - -Copyright (c) 2021-2023 Charmbracelet, Inc - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ================================================================ -github.com/cheggaaa/pb -https://github.com/cheggaaa/pb +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric +https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric ---------------------------------------------------------------- -Copyright (c) 2012-2015, Sergey Cherepanov -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -================================================================ - -github.com/cncf/xds/go -https://github.com/cncf/xds/go ----------------------------------------------------------------- Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -3560,13 +3063,13 @@ https://github.com/cncf/xds/go ================================================================ -github.com/containerd/console -https://github.com/containerd/console +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock +https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock ---------------------------------------------------------------- Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -3741,13 +3244,24 @@ https://github.com/containerd/console END OF TERMS AND CONDITIONS - Copyright The containerd Authors + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -3757,9 +3271,10 @@ https://github.com/containerd/console ================================================================ -github.com/coredns/coredns -https://github.com/coredns/coredns +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping +https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping ---------------------------------------------------------------- + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -3940,7 +3455,7 @@ https://github.com/coredns/coredns APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. 
We also recommend that a @@ -3948,7 +3463,7 @@ https://github.com/coredns/coredns same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2016-2020 The CoreDNS authors and contributors + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -3964,216 +3479,150 @@ https://github.com/coredns/coredns ================================================================ -github.com/coreos/go-oidc -https://github.com/coreos/go-oidc +github.com/IBM/sarama +https://github.com/IBM/sarama ---------------------------------------------------------------- -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +# MIT License - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +Copyright (c) 2013 Shopify - 1. Definitions. +Copyright (c) 2023 IBM Corporation - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. +================================================================ - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. +github.com/VividCortex/ewma +https://github.com/VividCortex/ewma +---------------------------------------------------------------- +The MIT License - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
+Copyright (c) 2013 VividCortex - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and +================================================================ - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. +github.com/acarl005/stripansi +https://github.com/acarl005/stripansi +---------------------------------------------------------------- +MIT License - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. +Copyright (c) 2018 Andrew Carlson - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. 
- Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. +================================================================ - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. 
However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. +github.com/alecthomas/participle +https://github.com/alecthomas/participle +---------------------------------------------------------------- +Copyright (C) 2017 Alec Thomas - END OF TERMS AND CONDITIONS +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: - APPENDIX: How to apply the Apache License to your work. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - Copyright {yyyy} {name of copyright owner} +================================================================ - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +github.com/alexbrainman/sspi +https://github.com/alexbrainman/sspi +---------------------------------------------------------------- +Copyright (c) 2012 The Go Authors. All rights reserved. - http://www.apache.org/licenses/LICENSE-2.0 +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================ -github.com/coreos/go-semver -https://github.com/coreos/go-semver +github.com/apache/thrift +https://github.com/apache/thrift ---------------------------------------------------------------- Apache License @@ -4378,207 +3827,483 @@ https://github.com/coreos/go-semver See the License for the specific language governing permissions and limitations under the License. -================================================================ +-------------------------------------------------- +SOFTWARE DISTRIBUTED WITH THRIFT: -github.com/coreos/go-systemd/v22 -https://github.com/coreos/go-systemd/v22 ----------------------------------------------------------------- -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ +The Apache Thrift software includes a number of subcomponents with +separate copyright notices and license terms. Your use of the source +code for the these subcomponents is subject to the terms and +conditions of the following licenses. -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +-------------------------------------------------- +Portions of the following files are licensed under the MIT License: -1. Definitions. + lib/erl/src/Makefile.am -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. +Please see doc/otp-base-license.txt for the full terms of this license. -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. +-------------------------------------------------- +For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. +# Copyright (c) 2007 Thomas Porschberg +# +# Copying and distribution of this file, with or without +# modification, are permitted in any medium without royalty provided +# the copyright notice and this notice are preserved. -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. 
+-------------------------------------------------- +For the lib/nodejs/lib/thrift/json_parse.js: -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. +/* + json_parse.js + 2015-05-02 + Public Domain. + NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. +*/ +(By Douglas Crockford ) -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). +-------------------------------------------------- +For lib/cpp/src/thrift/windows/SocketPair.cpp -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. +/* socketpair.c + * Copyright 2007 by Nathan C. Myers ; some rights reserved. + * This code is Free Software. It may be copied freely, in original or + * modified form, subject only to the restrictions that (1) the author is + * relieved from all responsibilities for any use for any purpose, and (2) + * this copyright notice must be retained, unchanged, in its entirety. If + * for any reason the author might be held responsible for any consequences + * of copying or use, license is withheld. + */ -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. +-------------------------------------------------- +For lib/py/compat/win32/stdint.h -2. Grant of Copyright License. +// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2008 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. 
Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. The name of the author may be used to endorse or promote products +// derived from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////////// -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. -3. Grant of Patent License. +-------------------------------------------------- +Codegen template in t_html_generator.h -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. +* Bootstrap v2.0.3 +* +* Copyright 2012 Twitter, Inc +* Licensed under the Apache License v2.0 +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Designed and built with all the love in the world @twitter by @mdo and @fat. -4. Redistribution. 
+--------------------------------------------------- +For t_cl_generator.cc -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: + * Copyright (c) 2008- Patrick Collison + * Copyright (c) 2006- Facebook -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. +--------------------------------------------------- -5. Submission of Contributions. +================================================================ -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. +github.com/armon/go-metrics +https://github.com/armon/go-metrics +---------------------------------------------------------------- +The MIT License (MIT) -6. Trademarks. +Copyright (c) 2013 Armon Dadgar -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. 
+Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: -7. Disclaimer of Warranty. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -8. Limitation of Liability. +================================================================ -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. +github.com/asaskevich/govalidator +https://github.com/asaskevich/govalidator +---------------------------------------------------------------- +The MIT License (MIT) -9. Accepting Warranty or Additional Liability. +Copyright (c) 2014-2020 Alex Saskevich -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -END OF TERMS AND CONDITIONS +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. -APPENDIX: How to apply the Apache License to your work +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +================================================================ -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. +github.com/aymanbagabas/go-osc52/v2 +https://github.com/aymanbagabas/go-osc52/v2 +---------------------------------------------------------------- +MIT License - Copyright [yyyy] [name of copyright owner] +Copyright (c) 2022 Ayman Bagabas - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - http://www.apache.org/licenses/LICENSE-2.0 +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
================================================================ -github.com/cosnicolaou/pbzip2 -https://github.com/cosnicolaou/pbzip2 +github.com/beevik/ntp +https://github.com/beevik/ntp +---------------------------------------------------------------- +Copyright © 2015-2023 Brett Vickers. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +github.com/beorn7/perks +https://github.com/beorn7/perks +---------------------------------------------------------------- +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +================================================================ + +github.com/buger/jsonparser +https://github.com/buger/jsonparser +---------------------------------------------------------------- +MIT License + +Copyright (c) 2016 Leonid Bugaev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/cespare/xxhash/v2 +https://github.com/cespare/xxhash/v2 +---------------------------------------------------------------- +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +================================================================ + +github.com/charmbracelet/bubbles +https://github.com/charmbracelet/bubbles +---------------------------------------------------------------- +MIT License + +Copyright (c) 2020-2023 Charmbracelet, Inc + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/charmbracelet/bubbletea +https://github.com/charmbracelet/bubbletea +---------------------------------------------------------------- +MIT License + +Copyright (c) 2020-2023 Charmbracelet, Inc + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/charmbracelet/lipgloss +https://github.com/charmbracelet/lipgloss +---------------------------------------------------------------- +MIT License + +Copyright (c) 2021-2023 Charmbracelet, Inc + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/charmbracelet/x/ansi +https://github.com/charmbracelet/x/ansi +---------------------------------------------------------------- +MIT License + +Copyright (c) 2023 Charmbracelet, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/charmbracelet/x/exp/golden +https://github.com/charmbracelet/x/exp/golden +---------------------------------------------------------------- +MIT License + +Copyright (c) 2023 Charmbracelet, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/charmbracelet/x/term +https://github.com/charmbracelet/x/term +---------------------------------------------------------------- +MIT License + +Copyright (c) 2023 Charmbracelet, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/cheggaaa/pb +https://github.com/cheggaaa/pb +---------------------------------------------------------------- +Copyright (c) 2012-2015, Sergey Cherepanov +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +================================================================ + +github.com/cncf/xds/go +https://github.com/cncf/xds/go ---------------------------------------------------------------- Apache License Version 2.0, January 2004 @@ -4784,184 +4509,12 @@ https://github.com/cosnicolaou/pbzip2 ================================================================ -github.com/davecgh/go-spew -https://github.com/davecgh/go-spew ----------------------------------------------------------------- -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -================================================================ - -github.com/dchest/siphash -https://github.com/dchest/siphash ----------------------------------------------------------------- -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. 
DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. - -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. - -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. 
To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. 
- Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. - -================================================================ - -github.com/decred/dcrd/dcrec/secp256k1/v4 -https://github.com/decred/dcrd/dcrec/secp256k1/v4 ----------------------------------------------------------------- -ISC License - -Copyright (c) 2013-2017 The btcsuite developers -Copyright (c) 2015-2020 The Decred developers -Copyright (c) 2017 The Lightning Network Developers - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -================================================================ - -github.com/docker/go-units -https://github.com/docker/go-units +github.com/coreos/go-oidc/v3 +https://github.com/coreos/go-oidc/v3 ---------------------------------------------------------------- - - Apache License +Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -5136,13 +4689,24 @@ https://github.com/docker/go-units END OF TERMS AND CONDITIONS - Copyright 2015 Docker, Inc. + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -5150,426 +4714,20 @@ https://github.com/docker/go-units See the License for the specific language governing permissions and limitations under the License. 
+ ================================================================ -github.com/dustin/go-humanize -https://github.com/dustin/go-humanize +github.com/coreos/go-semver +https://github.com/coreos/go-semver ---------------------------------------------------------------- -Copyright (c) 2005-2008 Dustin Sallings -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - - -================================================================ - -github.com/eapache/go-resiliency -https://github.com/eapache/go-resiliency ----------------------------------------------------------------- -The MIT License (MIT) - -Copyright (c) 2014 Evan Huus - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- - -================================================================ - -github.com/eapache/go-xerial-snappy -https://github.com/eapache/go-xerial-snappy ----------------------------------------------------------------- -The MIT License (MIT) - -Copyright (c) 2016 Evan Huus - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -================================================================ - -github.com/eapache/queue -https://github.com/eapache/queue ----------------------------------------------------------------- -The MIT License (MIT) - -Copyright (c) 2014 Evan Huus - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -================================================================ - -github.com/eclipse/paho.mqtt.golang -https://github.com/eclipse/paho.mqtt.golang ----------------------------------------------------------------- -Eclipse Public License - v 2.0 (EPL-2.0) - -This program and the accompanying materials -are made available under the terms of the Eclipse Public License v2.0 -and Eclipse Distribution License v1.0 which accompany this distribution. - -The Eclipse Public License is available at - https://www.eclipse.org/legal/epl-2.0/ -and the Eclipse Distribution License is available at - http://www.eclipse.org/org/documents/edl-v10.php. - -For an explanation of what dual-licensing means to you, see: -https://www.eclipse.org/legal/eplfaq.php#DUALLIC - -**** -The epl-2.0 is copied below in order to pass the pkg.go.dev license check (https://pkg.go.dev/license-policy). 
-**** -Eclipse Public License - v 2.0 - - THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE - PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION - OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. - -1. DEFINITIONS - -"Contribution" means: - - a) in the case of the initial Contributor, the initial content - Distributed under this Agreement, and - - b) in the case of each subsequent Contributor: - i) changes to the Program, and - ii) additions to the Program; - where such changes and/or additions to the Program originate from - and are Distributed by that particular Contributor. A Contribution - "originates" from a Contributor if it was added to the Program by - such Contributor itself or anyone acting on such Contributor's behalf. - Contributions do not include changes or additions to the Program that - are not Modified Works. - -"Contributor" means any person or entity that Distributes the Program. - -"Licensed Patents" mean patent claims licensable by a Contributor which -are necessarily infringed by the use or sale of its Contribution alone -or when combined with the Program. - -"Program" means the Contributions Distributed in accordance with this -Agreement. - -"Recipient" means anyone who receives the Program under this Agreement -or any Secondary License (as applicable), including Contributors. - -"Derivative Works" shall mean any work, whether in Source Code or other -form, that is based on (or derived from) the Program and for which the -editorial revisions, annotations, elaborations, or other modifications -represent, as a whole, an original work of authorship. - -"Modified Works" shall mean any work in Source Code or other form that -results from an addition to, deletion from, or modification of the -contents of the Program, including, for purposes of clarity any new file -in Source Code form that contains any contents of the Program. Modified -Works shall not include works that contain only declarations, -interfaces, types, classes, structures, or files of the Program solely -in each case in order to link to, bind by name, or subclass the Program -or Modified Works thereof. - -"Distribute" means the acts of a) distributing or b) making available -in any manner that enables the transfer of a copy. - -"Source Code" means the form of a Program preferred for making -modifications, including but not limited to software source code, -documentation source, and configuration files. - -"Secondary License" means either the GNU General Public License, -Version 2.0, or any later versions of that license, including any -exceptions or additional permissions as identified by the initial -Contributor. - -2. GRANT OF RIGHTS - - a) Subject to the terms of this Agreement, each Contributor hereby - grants Recipient a non-exclusive, worldwide, royalty-free copyright - license to reproduce, prepare Derivative Works of, publicly display, - publicly perform, Distribute and sublicense the Contribution of such - Contributor, if any, and such Derivative Works. - - b) Subject to the terms of this Agreement, each Contributor hereby - grants Recipient a non-exclusive, worldwide, royalty-free patent - license under Licensed Patents to make, use, sell, offer to sell, - import and otherwise transfer the Contribution of such Contributor, - if any, in Source Code or other form. 
This patent license shall - apply to the combination of the Contribution and the Program if, at - the time the Contribution is added by the Contributor, such addition - of the Contribution causes such combination to be covered by the - Licensed Patents. The patent license shall not apply to any other - combinations which include the Contribution. No hardware per se is - licensed hereunder. - - c) Recipient understands that although each Contributor grants the - licenses to its Contributions set forth herein, no assurances are - provided by any Contributor that the Program does not infringe the - patent or other intellectual property rights of any other entity. - Each Contributor disclaims any liability to Recipient for claims - brought by any other entity based on infringement of intellectual - property rights or otherwise. As a condition to exercising the - rights and licenses granted hereunder, each Recipient hereby - assumes sole responsibility to secure any other intellectual - property rights needed, if any. For example, if a third party - patent license is required to allow Recipient to Distribute the - Program, it is Recipient's responsibility to acquire that license - before distributing the Program. - - d) Each Contributor represents that to its knowledge it has - sufficient copyright rights in its Contribution, if any, to grant - the copyright license set forth in this Agreement. - - e) Notwithstanding the terms of any Secondary License, no - Contributor makes additional grants to any Recipient (other than - those set forth in this Agreement) as a result of such Recipient's - receipt of the Program under the terms of a Secondary License - (if permitted under the terms of Section 3). - -3. REQUIREMENTS - -3.1 If a Contributor Distributes the Program in any form, then: - - a) the Program must also be made available as Source Code, in - accordance with section 3.2, and the Contributor must accompany - the Program with a statement that the Source Code for the Program - is available under this Agreement, and informs Recipients how to - obtain it in a reasonable manner on or through a medium customarily - used for software exchange; and - - b) the Contributor may Distribute the Program under a license - different than this Agreement, provided that such license: - i) effectively disclaims on behalf of all other Contributors all - warranties and conditions, express and implied, including - warranties or conditions of title and non-infringement, and - implied warranties or conditions of merchantability and fitness - for a particular purpose; - - ii) effectively excludes on behalf of all other Contributors all - liability for damages, including direct, indirect, special, - incidental and consequential damages, such as lost profits; - - iii) does not attempt to limit or alter the recipients' rights - in the Source Code under section 3.2; and - - iv) requires any subsequent distribution of the Program by any - party to be under a license that satisfies the requirements - of this section 3. 
- -3.2 When the Program is Distributed as Source Code: - - a) it must be made available under this Agreement, or if the - Program (i) is combined with other material in a separate file or - files made available under a Secondary License, and (ii) the initial - Contributor attached to the Source Code the notice described in - Exhibit A of this Agreement, then the Program may be made available - under the terms of such Secondary Licenses, and - - b) a copy of this Agreement must be included with each copy of - the Program. - -3.3 Contributors may not remove or alter any copyright, patent, -trademark, attribution notices, disclaimers of warranty, or limitations -of liability ("notices") contained within the Program from any copy of -the Program which they Distribute, provided that Contributors may add -their own appropriate notices. - -4. COMMERCIAL DISTRIBUTION - -Commercial distributors of software may accept certain responsibilities -with respect to end users, business partners and the like. While this -license is intended to facilitate the commercial use of the Program, -the Contributor who includes the Program in a commercial product -offering should do so in a manner which does not create potential -liability for other Contributors. Therefore, if a Contributor includes -the Program in a commercial product offering, such Contributor -("Commercial Contributor") hereby agrees to defend and indemnify every -other Contributor ("Indemnified Contributor") against any losses, -damages and costs (collectively "Losses") arising from claims, lawsuits -and other legal actions brought by a third party against the Indemnified -Contributor to the extent caused by the acts or omissions of such -Commercial Contributor in connection with its distribution of the Program -in a commercial product offering. The obligations in this section do not -apply to any claims or Losses relating to any actual or alleged -intellectual property infringement. In order to qualify, an Indemnified -Contributor must: a) promptly notify the Commercial Contributor in -writing of such claim, and b) allow the Commercial Contributor to control, -and cooperate with the Commercial Contributor in, the defense and any -related settlement negotiations. The Indemnified Contributor may -participate in any such claim at its own expense. - -For example, a Contributor might include the Program in a commercial -product offering, Product X. That Contributor is then a Commercial -Contributor. If that Commercial Contributor then makes performance -claims, or offers warranties related to Product X, those performance -claims and warranties are such Commercial Contributor's responsibility -alone. Under this section, the Commercial Contributor would have to -defend claims against the other Contributors related to those performance -claims and warranties, and if a court requires any other Contributor to -pay any damages as a result, the Commercial Contributor must pay -those damages. - -5. NO WARRANTY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT -PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS" -BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR -IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF -TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR -PURPOSE. 
Each Recipient is solely responsible for determining the -appropriateness of using and distributing the Program and assumes all -risks associated with its exercise of rights under this Agreement, -including but not limited to the risks and costs of program errors, -compliance with applicable laws, damage to or loss of data, programs -or equipment, and unavailability or interruption of operations. - -6. DISCLAIMER OF LIABILITY - -EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT -PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS -SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST -PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE -EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - -7. GENERAL - -If any provision of this Agreement is invalid or unenforceable under -applicable law, it shall not affect the validity or enforceability of -the remainder of the terms of this Agreement, and without further -action by the parties hereto, such provision shall be reformed to the -minimum extent necessary to make such provision valid and enforceable. - -If Recipient institutes patent litigation against any entity -(including a cross-claim or counterclaim in a lawsuit) alleging that the -Program itself (excluding combinations of the Program with other software -or hardware) infringes such Recipient's patent(s), then such Recipient's -rights granted under Section 2(b) shall terminate as of the date such -litigation is filed. - -All Recipient's rights under this Agreement shall terminate if it -fails to comply with any of the material terms or conditions of this -Agreement and does not cure such failure in a reasonable period of -time after becoming aware of such noncompliance. If all Recipient's -rights under this Agreement terminate, Recipient agrees to cease use -and distribution of the Program as soon as reasonably practicable. -However, Recipient's obligations under this Agreement and any licenses -granted by Recipient relating to the Program shall continue and survive. - -Everyone is permitted to copy and distribute copies of this Agreement, -but in order to avoid inconsistency the Agreement is copyrighted and -may only be modified in the following manner. The Agreement Steward -reserves the right to publish new versions (including revisions) of -this Agreement from time to time. No one other than the Agreement -Steward has the right to modify this Agreement. The Eclipse Foundation -is the initial Agreement Steward. The Eclipse Foundation may assign the -responsibility to serve as the Agreement Steward to a suitable separate -entity. Each new version of the Agreement will be given a distinguishing -version number. The Program (including Contributions) may always be -Distributed subject to the version of the Agreement under which it was -received. In addition, after a new version of the Agreement is published, -Contributor may elect to Distribute the Program (including its -Contributions) under the new version. - -Except as expressly stated in Sections 2(a) and 2(b) above, Recipient -receives no rights or licenses to the intellectual property of any -Contributor under this Agreement, whether expressly, by implication, -estoppel or otherwise. 
All rights in the Program not expressly granted -under this Agreement are reserved. Nothing in this Agreement is intended -to be enforceable by any entity that is not a Contributor or Recipient. -No third-party beneficiary rights are created under this Agreement. - -Exhibit A - Form of Secondary Licenses Notice - -"This Source Code may also be made available under the following -Secondary Licenses when the conditions for such availability set forth -in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), -version(s), and exceptions or additional permissions here}." - - Simply including a copy of this Agreement, including this Exhibit A - is not sufficient to license the Source Code under Secondary Licenses. - - If it is not possible or desirable to put the notice in a particular - file, then You may include the notice in a location (such as a LICENSE - file in a relevant directory) where a recipient would be likely to - look for such a notice. - - You may add additional accurate notices of copyright ownership. - -================================================================ - -github.com/elastic/go-elasticsearch/v7 -https://github.com/elastic/go-elasticsearch/v7 ----------------------------------------------------------------- - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. + 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. @@ -5751,7 +4909,7 @@ https://github.com/elastic/go-elasticsearch/v7 same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2018 Elasticsearch BV + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -5767,81 +4925,277 @@ https://github.com/elastic/go-elasticsearch/v7 ================================================================ -github.com/envoyproxy/protoc-gen-validate -https://github.com/envoyproxy/protoc-gen-validate +github.com/coreos/go-systemd/v22 +https://github.com/coreos/go-systemd/v22 ---------------------------------------------------------------- +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +1. Definitions. - 1. Definitions. +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. 
+For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
+"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. +2. Grant of Copyright License. - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. 
If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +================================================================ + +github.com/cosnicolaou/pbzip2 +https://github.com/cosnicolaou/pbzip2 +---------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, @@ -5975,136 +5329,183 @@ https://github.com/envoyproxy/protoc-gen-validate ================================================================ -github.com/fatih/color -https://github.com/fatih/color +github.com/davecgh/go-spew +https://github.com/davecgh/go-spew ---------------------------------------------------------------- -The MIT License (MIT) - -Copyright (c) 2013 Fatih Arslan +ISC License -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: +Copyright (c) 2012-2016 Dave Collins -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ================================================================ -github.com/fatih/structs -https://github.com/fatih/structs +github.com/dchest/siphash +https://github.com/dchest/siphash ---------------------------------------------------------------- -The MIT License (MIT) - -Copyright (c) 2014 Fatih Arslan +Creative Commons Legal Code -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +CC0 1.0 Universal -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE + LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN + ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS + INFORMATION ON AN "AS-IS" BASIS. 
CREATIVE COMMONS MAKES NO WARRANTIES + REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS + PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM + THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED + HEREUNDER. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -================================================================ +Statement of Purpose -github.com/felixge/fgprof -https://github.com/felixge/fgprof ----------------------------------------------------------------- -The MIT License (MIT) -Copyright © 2020 Felix Geisendörfer +The laws of most jurisdictions throughout the world automatically confer +exclusive Copyright and Related Rights (defined below) upon the creator +and subsequent owner(s) (each and all, an "owner") of an original work of +authorship and/or a database (each, a "Work"). -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +Certain owners wish to permanently relinquish those rights to a Work for +the purpose of contributing to a commons of creative, cultural and +scientific works ("Commons") that the public can reliably and without fear +of later claims of infringement build upon, modify, incorporate in other +works, reuse and redistribute as freely as possible in any form whatsoever +and for any purposes, including without limitation commercial purposes. +These owners may contribute to the Commons to promote the ideal of a free +culture and the further production of creative, cultural and scientific +works, or to gain reputation or greater distribution for their Work in +part through the use and efforts of others. -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +For these and/or other purposes and motivations, and without any +expectation of additional consideration or compensation, the person +associating CC0 with a Work (the "Affirmer"), to the extent that he or she +is an owner of Copyright and Related Rights in the Work, voluntarily +elects to apply CC0 to the Work and publicly distribute the Work under its +terms, with knowledge of his or her Copyright and Related Rights in the +Work and the meaning and intended legal effect of CC0 on those rights. -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +1. Copyright and Related Rights. 
A Work made available under CC0 may be +protected by copyright and related or neighboring rights ("Copyright and +Related Rights"). Copyright and Related Rights include, but are not +limited to, the following: -================================================================ + i. the right to reproduce, adapt, distribute, perform, display, + communicate, and translate a Work; + ii. moral rights retained by the original author(s) and/or performer(s); +iii. publicity and privacy rights pertaining to a person's image or + likeness depicted in a Work; + iv. rights protecting against unfair competition in regards to a Work, + subject to the limitations in paragraph 4(a), below; + v. rights protecting the extraction, dissemination, use and reuse of data + in a Work; + vi. database rights (such as those arising under Directive 96/9/EC of the + European Parliament and of the Council of 11 March 1996 on the legal + protection of databases, and under any national implementation + thereof, including any amended or successor version of such + directive); and +vii. other similar, equivalent or corresponding rights throughout the + world based on applicable law or treaty, and any national + implementations thereof. -github.com/felixge/httpsnoop -https://github.com/felixge/httpsnoop ----------------------------------------------------------------- -Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) +2. Waiver. To the greatest extent permitted by, but not in contravention +of, applicable law, Affirmer hereby overtly, fully, permanently, +irrevocably and unconditionally waives, abandons, and surrenders all of +Affirmer's Copyright and Related Rights and associated claims and causes +of action, whether now known or unknown (including existing as well as +future claims and causes of action), in the Work (i) in all territories +worldwide, (ii) for the maximum duration provided by applicable law or +treaty (including future time extensions), (iii) in any current or future +medium and for any number of copies, and (iv) for any purpose whatsoever, +including without limitation commercial, advertising or promotional +purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each +member of the public at large and to the detriment of Affirmer's heirs and +successors, fully intending that such Waiver shall not be subject to +revocation, rescission, cancellation, termination, or any other legal or +equitable action to disrupt the quiet enjoyment of the Work by the public +as contemplated by Affirmer's express Statement of Purpose. - Permission is hereby granted, free of charge, to any person obtaining a copy - of this software and associated documentation files (the "Software"), to deal - in the Software without restriction, including without limitation the rights - to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - copies of the Software, and to permit persons to whom the Software is - furnished to do so, subject to the following conditions: +3. Public License Fallback. Should any part of the Waiver for any reason +be judged legally invalid or ineffective under applicable law, then the +Waiver shall be preserved to the maximum extent permitted taking into +account Affirmer's express Statement of Purpose. 
In addition, to the +extent the Waiver is so judged Affirmer hereby grants to each affected +person a royalty-free, non transferable, non sublicensable, non exclusive, +irrevocable and unconditional license to exercise Affirmer's Copyright and +Related Rights in the Work (i) in all territories worldwide, (ii) for the +maximum duration provided by applicable law or treaty (including future +time extensions), (iii) in any current or future medium and for any number +of copies, and (iv) for any purpose whatsoever, including without +limitation commercial, advertising or promotional purposes (the +"License"). The License shall be deemed effective as of the date CC0 was +applied by Affirmer to the Work. Should any part of the License for any +reason be judged legally invalid or ineffective under applicable law, such +partial invalidity or ineffectiveness shall not invalidate the remainder +of the License, and in such case Affirmer hereby affirms that he or she +will not (i) exercise any of his or her remaining Copyright and Related +Rights in the Work or (ii) assert any associated claims and causes of +action with respect to the Work, in either case contrary to Affirmer's +express Statement of Purpose. - The above copyright notice and this permission notice shall be included in - all copies or substantial portions of the Software. +4. Limitations and Disclaimers. - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - THE SOFTWARE. + a. No trademark or patent rights held by Affirmer are waived, abandoned, + surrendered, licensed or otherwise affected by this document. + b. Affirmer offers the Work as-is and makes no representations or + warranties of any kind concerning the Work, express, implied, + statutory or otherwise, including without limitation warranties of + title, merchantability, fitness for a particular purpose, non + infringement, or the absence of latent or other defects, accuracy, or + the present or absence of errors, whether or not discoverable, all to + the greatest extent permissible under applicable law. + c. Affirmer disclaims responsibility for clearing rights of other persons + that may apply to the Work or any use thereof, including without + limitation any person's Copyright and Related Rights in the Work. + Further, Affirmer disclaims responsibility for obtaining any necessary + consents, permissions or other rights required for any use of the + Work. + d. Affirmer understands and acknowledges that Creative Commons is not a + party to this document and has no duty or obligation with respect to + this CC0 or use of the Work. ================================================================ -github.com/fortytw2/leaktest -https://github.com/fortytw2/leaktest +github.com/decred/dcrd/dcrec/secp256k1/v4 +https://github.com/decred/dcrd/dcrec/secp256k1/v4 ---------------------------------------------------------------- -Copyright (c) 2012 The Go Authors. All rights reserved. 
+ISC License -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +Copyright (c) 2013-2017 The btcsuite developers +Copyright (c) 2015-2024 The Decred developers +Copyright (c) 2017 The Lightning Network Developers - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ================================================================ -github.com/frankban/quicktest -https://github.com/frankban/quicktest +github.com/dgryski/go-rendezvous +https://github.com/dgryski/go-rendezvous ---------------------------------------------------------------- -MIT License +The MIT License (MIT) -Copyright (c) 2017 Canonical Ltd. +Copyright (c) 2017-2020 Damian Gryski Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -6113,26 +5514,26 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. ================================================================ -github.com/fraugster/parquet-go -https://github.com/fraugster/parquet-go +github.com/docker/go-units +https://github.com/docker/go-units ---------------------------------------------------------------- Apache License Version 2.0, January 2004 - http://www.apache.org/licenses/ + https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -6307,24 +5708,13 @@ https://github.com/fraugster/parquet-go END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] + Copyright 2015 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -6334,38 +5724,445 @@ https://github.com/fraugster/parquet-go ================================================================ -github.com/gdamore/encoding -https://github.com/gdamore/encoding +github.com/dustin/go-humanize +https://github.com/dustin/go-humanize ---------------------------------------------------------------- +Copyright (c) 2005-2008 Dustin Sallings - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. - 1. Definitions. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
- "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. + - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. +================================================================ - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. +github.com/eapache/go-resiliency +https://github.com/eapache/go-resiliency +---------------------------------------------------------------- +The MIT License (MIT) - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. +Copyright (c) 2014 Evan Huus - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +================================================================ + +github.com/eapache/go-xerial-snappy +https://github.com/eapache/go-xerial-snappy +---------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2016 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/eapache/queue +https://github.com/eapache/queue +---------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2014 Evan Huus + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +================================================================ + +github.com/eclipse/paho.mqtt.golang +https://github.com/eclipse/paho.mqtt.golang +---------------------------------------------------------------- +Eclipse Public License - v 2.0 (EPL-2.0) + +This program and the accompanying materials +are made available under the terms of the Eclipse Public License v2.0 +and Eclipse Distribution License v1.0 which accompany this distribution. + +The Eclipse Public License is available at + https://www.eclipse.org/legal/epl-2.0/ +and the Eclipse Distribution License is available at + http://www.eclipse.org/org/documents/edl-v10.php. + +For an explanation of what dual-licensing means to you, see: +https://www.eclipse.org/legal/eplfaq.php#DUALLIC + +**** +The epl-2.0 is copied below in order to pass the pkg.go.dev license check (https://pkg.go.dev/license-policy). +**** +Eclipse Public License - v 2.0 + + THE ACCOMPANYING PROGRAM IS PROVIDED UNDER THE TERMS OF THIS ECLIPSE + PUBLIC LICENSE ("AGREEMENT"). ANY USE, REPRODUCTION OR DISTRIBUTION + OF THE PROGRAM CONSTITUTES RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT. + +1. DEFINITIONS + +"Contribution" means: + + a) in the case of the initial Contributor, the initial content + Distributed under this Agreement, and + + b) in the case of each subsequent Contributor: + i) changes to the Program, and + ii) additions to the Program; + where such changes and/or additions to the Program originate from + and are Distributed by that particular Contributor. A Contribution + "originates" from a Contributor if it was added to the Program by + such Contributor itself or anyone acting on such Contributor's behalf. + Contributions do not include changes or additions to the Program that + are not Modified Works. + +"Contributor" means any person or entity that Distributes the Program. 
+ +"Licensed Patents" mean patent claims licensable by a Contributor which +are necessarily infringed by the use or sale of its Contribution alone +or when combined with the Program. + +"Program" means the Contributions Distributed in accordance with this +Agreement. + +"Recipient" means anyone who receives the Program under this Agreement +or any Secondary License (as applicable), including Contributors. + +"Derivative Works" shall mean any work, whether in Source Code or other +form, that is based on (or derived from) the Program and for which the +editorial revisions, annotations, elaborations, or other modifications +represent, as a whole, an original work of authorship. + +"Modified Works" shall mean any work in Source Code or other form that +results from an addition to, deletion from, or modification of the +contents of the Program, including, for purposes of clarity any new file +in Source Code form that contains any contents of the Program. Modified +Works shall not include works that contain only declarations, +interfaces, types, classes, structures, or files of the Program solely +in each case in order to link to, bind by name, or subclass the Program +or Modified Works thereof. + +"Distribute" means the acts of a) distributing or b) making available +in any manner that enables the transfer of a copy. + +"Source Code" means the form of a Program preferred for making +modifications, including but not limited to software source code, +documentation source, and configuration files. + +"Secondary License" means either the GNU General Public License, +Version 2.0, or any later versions of that license, including any +exceptions or additional permissions as identified by the initial +Contributor. + +2. GRANT OF RIGHTS + + a) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free copyright + license to reproduce, prepare Derivative Works of, publicly display, + publicly perform, Distribute and sublicense the Contribution of such + Contributor, if any, and such Derivative Works. + + b) Subject to the terms of this Agreement, each Contributor hereby + grants Recipient a non-exclusive, worldwide, royalty-free patent + license under Licensed Patents to make, use, sell, offer to sell, + import and otherwise transfer the Contribution of such Contributor, + if any, in Source Code or other form. This patent license shall + apply to the combination of the Contribution and the Program if, at + the time the Contribution is added by the Contributor, such addition + of the Contribution causes such combination to be covered by the + Licensed Patents. The patent license shall not apply to any other + combinations which include the Contribution. No hardware per se is + licensed hereunder. + + c) Recipient understands that although each Contributor grants the + licenses to its Contributions set forth herein, no assurances are + provided by any Contributor that the Program does not infringe the + patent or other intellectual property rights of any other entity. + Each Contributor disclaims any liability to Recipient for claims + brought by any other entity based on infringement of intellectual + property rights or otherwise. As a condition to exercising the + rights and licenses granted hereunder, each Recipient hereby + assumes sole responsibility to secure any other intellectual + property rights needed, if any. 
For example, if a third party + patent license is required to allow Recipient to Distribute the + Program, it is Recipient's responsibility to acquire that license + before distributing the Program. + + d) Each Contributor represents that to its knowledge it has + sufficient copyright rights in its Contribution, if any, to grant + the copyright license set forth in this Agreement. + + e) Notwithstanding the terms of any Secondary License, no + Contributor makes additional grants to any Recipient (other than + those set forth in this Agreement) as a result of such Recipient's + receipt of the Program under the terms of a Secondary License + (if permitted under the terms of Section 3). + +3. REQUIREMENTS + +3.1 If a Contributor Distributes the Program in any form, then: + + a) the Program must also be made available as Source Code, in + accordance with section 3.2, and the Contributor must accompany + the Program with a statement that the Source Code for the Program + is available under this Agreement, and informs Recipients how to + obtain it in a reasonable manner on or through a medium customarily + used for software exchange; and + + b) the Contributor may Distribute the Program under a license + different than this Agreement, provided that such license: + i) effectively disclaims on behalf of all other Contributors all + warranties and conditions, express and implied, including + warranties or conditions of title and non-infringement, and + implied warranties or conditions of merchantability and fitness + for a particular purpose; + + ii) effectively excludes on behalf of all other Contributors all + liability for damages, including direct, indirect, special, + incidental and consequential damages, such as lost profits; + + iii) does not attempt to limit or alter the recipients' rights + in the Source Code under section 3.2; and + + iv) requires any subsequent distribution of the Program by any + party to be under a license that satisfies the requirements + of this section 3. + +3.2 When the Program is Distributed as Source Code: + + a) it must be made available under this Agreement, or if the + Program (i) is combined with other material in a separate file or + files made available under a Secondary License, and (ii) the initial + Contributor attached to the Source Code the notice described in + Exhibit A of this Agreement, then the Program may be made available + under the terms of such Secondary Licenses, and + + b) a copy of this Agreement must be included with each copy of + the Program. + +3.3 Contributors may not remove or alter any copyright, patent, +trademark, attribution notices, disclaimers of warranty, or limitations +of liability ("notices") contained within the Program from any copy of +the Program which they Distribute, provided that Contributors may add +their own appropriate notices. + +4. COMMERCIAL DISTRIBUTION + +Commercial distributors of software may accept certain responsibilities +with respect to end users, business partners and the like. While this +license is intended to facilitate the commercial use of the Program, +the Contributor who includes the Program in a commercial product +offering should do so in a manner which does not create potential +liability for other Contributors. 
Therefore, if a Contributor includes +the Program in a commercial product offering, such Contributor +("Commercial Contributor") hereby agrees to defend and indemnify every +other Contributor ("Indemnified Contributor") against any losses, +damages and costs (collectively "Losses") arising from claims, lawsuits +and other legal actions brought by a third party against the Indemnified +Contributor to the extent caused by the acts or omissions of such +Commercial Contributor in connection with its distribution of the Program +in a commercial product offering. The obligations in this section do not +apply to any claims or Losses relating to any actual or alleged +intellectual property infringement. In order to qualify, an Indemnified +Contributor must: a) promptly notify the Commercial Contributor in +writing of such claim, and b) allow the Commercial Contributor to control, +and cooperate with the Commercial Contributor in, the defense and any +related settlement negotiations. The Indemnified Contributor may +participate in any such claim at its own expense. + +For example, a Contributor might include the Program in a commercial +product offering, Product X. That Contributor is then a Commercial +Contributor. If that Commercial Contributor then makes performance +claims, or offers warranties related to Product X, those performance +claims and warranties are such Commercial Contributor's responsibility +alone. Under this section, the Commercial Contributor would have to +defend claims against the other Contributors related to those performance +claims and warranties, and if a court requires any other Contributor to +pay any damages as a result, the Commercial Contributor must pay +those damages. + +5. NO WARRANTY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT +PERMITTED BY APPLICABLE LAW, THE PROGRAM IS PROVIDED ON AN "AS IS" +BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR +IMPLIED INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OR CONDITIONS OF +TITLE, NON-INFRINGEMENT, MERCHANTABILITY OR FITNESS FOR A PARTICULAR +PURPOSE. Each Recipient is solely responsible for determining the +appropriateness of using and distributing the Program and assumes all +risks associated with its exercise of rights under this Agreement, +including but not limited to the risks and costs of program errors, +compliance with applicable laws, damage to or loss of data, programs +or equipment, and unavailability or interruption of operations. + +6. DISCLAIMER OF LIABILITY + +EXCEPT AS EXPRESSLY SET FORTH IN THIS AGREEMENT, AND TO THE EXTENT +PERMITTED BY APPLICABLE LAW, NEITHER RECIPIENT NOR ANY CONTRIBUTORS +SHALL HAVE ANY LIABILITY FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING WITHOUT LIMITATION LOST +PROFITS), HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OR DISTRIBUTION OF THE PROGRAM OR THE +EXERCISE OF ANY RIGHTS GRANTED HEREUNDER, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + +7. GENERAL + +If any provision of this Agreement is invalid or unenforceable under +applicable law, it shall not affect the validity or enforceability of +the remainder of the terms of this Agreement, and without further +action by the parties hereto, such provision shall be reformed to the +minimum extent necessary to make such provision valid and enforceable. 
+ +If Recipient institutes patent litigation against any entity +(including a cross-claim or counterclaim in a lawsuit) alleging that the +Program itself (excluding combinations of the Program with other software +or hardware) infringes such Recipient's patent(s), then such Recipient's +rights granted under Section 2(b) shall terminate as of the date such +litigation is filed. + +All Recipient's rights under this Agreement shall terminate if it +fails to comply with any of the material terms or conditions of this +Agreement and does not cure such failure in a reasonable period of +time after becoming aware of such noncompliance. If all Recipient's +rights under this Agreement terminate, Recipient agrees to cease use +and distribution of the Program as soon as reasonably practicable. +However, Recipient's obligations under this Agreement and any licenses +granted by Recipient relating to the Program shall continue and survive. + +Everyone is permitted to copy and distribute copies of this Agreement, +but in order to avoid inconsistency the Agreement is copyrighted and +may only be modified in the following manner. The Agreement Steward +reserves the right to publish new versions (including revisions) of +this Agreement from time to time. No one other than the Agreement +Steward has the right to modify this Agreement. The Eclipse Foundation +is the initial Agreement Steward. The Eclipse Foundation may assign the +responsibility to serve as the Agreement Steward to a suitable separate +entity. Each new version of the Agreement will be given a distinguishing +version number. The Program (including Contributions) may always be +Distributed subject to the version of the Agreement under which it was +received. In addition, after a new version of the Agreement is published, +Contributor may elect to Distribute the Program (including its +Contributions) under the new version. + +Except as expressly stated in Sections 2(a) and 2(b) above, Recipient +receives no rights or licenses to the intellectual property of any +Contributor under this Agreement, whether expressly, by implication, +estoppel or otherwise. All rights in the Program not expressly granted +under this Agreement are reserved. Nothing in this Agreement is intended +to be enforceable by any entity that is not a Contributor or Recipient. +No third-party beneficiary rights are created under this Agreement. + +Exhibit A - Form of Secondary Licenses Notice + +"This Source Code may also be made available under the following +Secondary Licenses when the conditions for such availability set forth +in the Eclipse Public License, v. 2.0 are satisfied: {name license(s), +version(s), and exceptions or additional permissions here}." + + Simply including a copy of this Agreement, including this Exhibit A + is not sufficient to license the Source Code under Secondary Licenses. + + If it is not possible or desirable to put the notice in a particular + file, then You may include the notice in a location (such as a LICENSE + file in a relevant directory) where a recipient would be likely to + look for such a notice. + + You may add additional accurate notices of copyright ownership. + +================================================================ + +github.com/elastic/go-elasticsearch/v7 +https://github.com/elastic/go-elasticsearch/v7 +---------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. 
Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but @@ -6526,7 +6323,7 @@ https://github.com/gdamore/encoding same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2018 Elasticsearch BV Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -6542,10 +6339,9 @@ https://github.com/gdamore/encoding ================================================================ -github.com/gdamore/tcell/v2 -https://github.com/gdamore/tcell/v2 +github.com/envoyproxy/go-control-plane +https://github.com/envoyproxy/go-control-plane ---------------------------------------------------------------- - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -6726,7 +6522,7 @@ https://github.com/gdamore/tcell/v2 APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -6734,7 +6530,7 @@ https://github.com/gdamore/tcell/v2 same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -6750,64 +6546,8 @@ https://github.com/gdamore/tcell/v2 ================================================================ -github.com/go-asn1-ber/asn1-ber -https://github.com/go-asn1-ber/asn1-ber ----------------------------------------------------------------- -The MIT License (MIT) - -Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com) -Portions copyright (c) 2015-2016 go-asn1-ber Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -================================================================ - -github.com/go-ldap/ldap/v3 -https://github.com/go-ldap/ldap/v3 ----------------------------------------------------------------- -The MIT License (MIT) - -Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com) -Portions copyright (c) 2015-2016 go-ldap Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- -================================================================ - -github.com/go-logr/logr -https://github.com/go-logr/logr +github.com/envoyproxy/go-control-plane/envoy +https://github.com/envoyproxy/go-control-plane/envoy ---------------------------------------------------------------- Apache License Version 2.0, January 2004 @@ -7013,8 +6753,8 @@ https://github.com/go-logr/logr ================================================================ -github.com/go-logr/stdr -https://github.com/go-logr/stdr +github.com/envoyproxy/go-control-plane/ratelimit +https://github.com/envoyproxy/go-control-plane/ratelimit ---------------------------------------------------------------- Apache License Version 2.0, January 2004 @@ -7196,7 +6936,7 @@ https://github.com/go-logr/stdr APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -7204,7 +6944,7 @@ https://github.com/go-logr/stdr same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -7220,35 +6960,8 @@ https://github.com/go-logr/stdr ================================================================ -github.com/go-ole/go-ole -https://github.com/go-ole/go-ole ----------------------------------------------------------------- -The MIT License (MIT) - -Copyright © 2013-2017 Yasuhiro Matsumoto, - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the “Software”), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- -================================================================ - -github.com/go-openapi/analysis -https://github.com/go-openapi/analysis +github.com/envoyproxy/protoc-gen-validate +https://github.com/envoyproxy/protoc-gen-validate ---------------------------------------------------------------- Apache License @@ -7455,8 +7168,159 @@ https://github.com/go-openapi/analysis ================================================================ -github.com/go-openapi/errors -https://github.com/go-openapi/errors +github.com/erikgeiser/coninput +https://github.com/erikgeiser/coninput +---------------------------------------------------------------- +MIT License + +Copyright (c) 2021 Erik G. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/fatih/color +https://github.com/fatih/color +---------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +================================================================ + +github.com/fatih/structs +https://github.com/fatih/structs +---------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2014 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +================================================================ + +github.com/felixge/fgprof +https://github.com/felixge/fgprof +---------------------------------------------------------------- +The MIT License (MIT) +Copyright © 2020 Felix Geisendörfer + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +================================================================ + +github.com/felixge/httpsnoop +https://github.com/felixge/httpsnoop +---------------------------------------------------------------- +Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + +================================================================ + +github.com/fortytw2/leaktest +https://github.com/fortytw2/leaktest +---------------------------------------------------------------- +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +github.com/fraugster/parquet-go +https://github.com/fraugster/parquet-go ---------------------------------------------------------------- Apache License @@ -7663,8 +7527,233 @@ https://github.com/go-openapi/errors ================================================================ -github.com/go-openapi/jsonpointer -https://github.com/go-openapi/jsonpointer +github.com/go-asn1-ber/asn1-ber +https://github.com/go-asn1-ber/asn1-ber +---------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com) +Portions copyright (c) 2015-2016 go-asn1-ber Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/go-ini/ini +https://github.com/go-ini/ini +---------------------------------------------------------------- +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. 
For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. 
You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright 2014 Unknwon + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +================================================================ + +github.com/go-jose/go-jose/v4 +https://github.com/go-jose/go-jose/v4 ---------------------------------------------------------------- Apache License @@ -7871,10 +7960,37 @@ https://github.com/go-openapi/jsonpointer ================================================================ -github.com/go-openapi/jsonreference -https://github.com/go-openapi/jsonreference +github.com/go-ldap/ldap/v3 +https://github.com/go-ldap/ldap/v3 ---------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2011-2015 Michael Mitton (mmitton@gmail.com) +Portions copyright (c) 2015-2024 go-ldap Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/go-logr/logr +https://github.com/go-logr/logr +---------------------------------------------------------------- Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -8055,7 +8171,7 @@ https://github.com/go-openapi/jsonreference APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -8063,7 +8179,7 @@ https://github.com/go-openapi/jsonreference same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -8079,10 +8195,9 @@ https://github.com/go-openapi/jsonreference ================================================================ -github.com/go-openapi/loads -https://github.com/go-openapi/loads +github.com/go-logr/stdr +https://github.com/go-logr/stdr ---------------------------------------------------------------- - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -8287,8 +8402,35 @@ https://github.com/go-openapi/loads ================================================================ -github.com/go-openapi/runtime -https://github.com/go-openapi/runtime +github.com/go-ole/go-ole +https://github.com/go-ole/go-ole +---------------------------------------------------------------- +The MIT License (MIT) + +Copyright © 2013-2017 Yasuhiro Matsumoto, + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ +================================================================ + +github.com/go-openapi/analysis +https://github.com/go-openapi/analysis ---------------------------------------------------------------- Apache License @@ -8495,8 +8637,8 @@ https://github.com/go-openapi/runtime ================================================================ -github.com/go-openapi/spec -https://github.com/go-openapi/spec +github.com/go-openapi/errors +https://github.com/go-openapi/errors ---------------------------------------------------------------- Apache License @@ -8703,8 +8845,8 @@ https://github.com/go-openapi/spec ================================================================ -github.com/go-openapi/strfmt -https://github.com/go-openapi/strfmt +github.com/go-openapi/jsonpointer +https://github.com/go-openapi/jsonpointer ---------------------------------------------------------------- Apache License @@ -8911,8 +9053,8 @@ https://github.com/go-openapi/strfmt ================================================================ -github.com/go-openapi/swag -https://github.com/go-openapi/swag +github.com/go-openapi/jsonreference +https://github.com/go-openapi/jsonreference ---------------------------------------------------------------- Apache License @@ -9119,8 +9261,8 @@ https://github.com/go-openapi/swag ================================================================ -github.com/go-openapi/validate -https://github.com/go-openapi/validate +github.com/go-openapi/loads +https://github.com/go-openapi/loads ---------------------------------------------------------------- Apache License @@ -9327,268 +9469,1308 @@ https://github.com/go-openapi/validate ================================================================ -github.com/go-sql-driver/mysql -https://github.com/go-sql-driver/mysql +github.com/go-openapi/runtime +https://github.com/go-openapi/runtime ---------------------------------------------------------------- -Mozilla Public License Version 2.0 -================================== -1. Definitions --------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -1.1. "Contributor" - means each individual or legal entity that creates, contributes to - the creation of, or owns Covered Software. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -1.2. "Contributor Version" - means the combination of the Contributions of others (if any) used - by a Contributor and that particular Contributor's Contribution. + 1. Definitions. -1.3. "Contribution" - means Covered Software of a particular Contributor. + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. -1.4. "Covered Software" - means Source Code Form to which the initial Contributor has attached - the notice in Exhibit A, the Executable Form of such Source Code - Form, and Modifications of such Source Code Form, in each case - including portions thereof. + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. -1.5. "Incompatible With Secondary Licenses" - means + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. - (a) that the initial Contributor has attached the notice described - in Exhibit B to the Covered Software; or + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. - (b) that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the - terms of a Secondary License. + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. -1.6. "Executable Form" - means any form of the work other than Source Code Form. + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. -1.7. "Larger Work" - means a work that combines Covered Software with other material, in - a separate file or files, that is not Covered Software. + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). -1.8. "License" - means this document. + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. -1.9. "Licensable" - means having the right to grant, to the maximum extent possible, - whether at the time of the initial grant or subsequently, any and - all of the rights conveyed by this License. + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." -1.10. "Modifications" - means any of the following: + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. 
- (a) any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered - Software; or + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. - (b) any new file in Source Code Form that contains any Covered - Software. + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. -1.11. "Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the - License, by the making, using, selling, offering for sale, having - made, import, or transfer of either its Contributions or its - Contributor Version. + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: -1.12. "Secondary License" - means either the GNU General Public License, Version 2.0, the GNU - Lesser General Public License, Version 2.1, the GNU Affero General - Public License, Version 3.0, or any later versions of those - licenses. + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and -1.13. "Source Code Form" - means the form of the work preferred for making modifications. + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and -1.14. "You" (or "Your") - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that - controls, is controlled by, or is under common control with You. For - purposes of this definition, "control" means (a) the power, direct - or indirect, to cause the direction or management of such entity, - whether by contract or otherwise, or (b) ownership of more than - fifty percent (50%) of the outstanding shares or beneficial - ownership of such entity. + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and -2. 
License Grants and Conditions --------------------------------- + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. -2.1. Grants + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. -Each Contributor hereby grants You a world-wide, royalty-free, -non-exclusive license: + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. -(a) under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. -(b) under Patent Claims of such Contributor to make, use, sell, offer - for sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. -2.2. Effective Date + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. -The licenses granted in Section 2.1 with respect to any Contribution -become effective for each Contribution on the date the Contributor first -distributes such Contribution. + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. -2.3. Limitations on Grant Scope + END OF TERMS AND CONDITIONS -The licenses granted in this Section 2 are the only rights granted under -this License. No additional rights or licenses will be implied from the -distribution or licensing of Covered Software under this License. -Notwithstanding Section 2.1(b) above, no patent license is granted by a -Contributor: + APPENDIX: How to apply the Apache License to your work. -(a) for any code that a Contributor has removed from Covered Software; - or + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. -(b) for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or + Copyright [yyyy] [name of copyright owner] -(c) under Patent Claims infringed by Covered Software in the absence of - its Contributions. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -This License does not grant any rights in the trademarks, service marks, -or logos of any Contributor (except as may be necessary to comply with -the notice requirements in Section 3.4). + http://www.apache.org/licenses/LICENSE-2.0 -2.4. Subsequent Licenses + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. -No Contributor makes additional grants as a result of Your choice to -distribute the Covered Software under a subsequent version of this -License (see Section 10.2) or under the terms of a Secondary License (if -permitted under the terms of Section 3.3). +================================================================ -2.5. Representation +github.com/go-openapi/spec +https://github.com/go-openapi/spec +---------------------------------------------------------------- -Each Contributor represents that the Contributor believes its -Contributions are its original creation(s) or it has sufficient rights -to grant the rights to its Contributions conveyed by this License. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -2.6. Fair Use + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -This License is not intended to limit any rights You have under -applicable copyright doctrines of fair use, fair dealing, or other -equivalents. + 1. Definitions. -2.7. Conditions + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. -Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted -in Section 2.1. + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. -3. Responsibilities -------------------- + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. -3.1. Distribution of Source Form + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. -All distribution of Covered Software in Source Code Form, including any -Modifications that You create or to which You contribute, must be under -the terms of this License. You must inform recipients that the Source -Code Form of the Covered Software is governed by the terms of this -License, and how they can obtain a copy of this License. You may not -attempt to alter or restrict the recipients' rights in the Source Code -Form. + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. -3.2. Distribution of Executable Form + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. -If You distribute Covered Software in Executable Form then: + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
-(a) such Covered Software must also be made available in Source Code - Form, as described in Section 3.1, and You must inform recipients of - the Executable Form how they can obtain a copy of such Source Code - Form by reasonable means in a timely manner, at a charge no more - than the cost of distribution to the recipient; and + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. -(b) You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter - the recipients' rights in the Source Code Form under this License. + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." -3.3. Distribution of a Larger Work + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. -You may create and distribute a Larger Work under terms of Your choice, -provided that You also comply with the requirements of this License for -the Covered Software. If the Larger Work is a combination of Covered -Software with a work governed by one or more Secondary Licenses, and the -Covered Software is not Incompatible With Secondary Licenses, this -License permits You to additionally distribute such Covered Software -under the terms of such Secondary License(s), so that the recipient of -the Larger Work may, at their option, further distribute the Covered -Software under the terms of either this License or such Secondary -License(s). + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. -3.4. Notices + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. -You may not remove or alter the substance of any license notices -(including copyright notices, patent notices, disclaimers of warranty, -or limitations of liability) contained within the Source Code Form of -the Covered Software, except that You may alter any license notices to -the extent required to remedy known factual inaccuracies. + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: -3.5. Application of Additional Terms + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and -You may choose to offer, and to charge a fee for, warranty, support, -indemnity or liability obligations to one or more recipients of Covered -Software. However, You may do so only on Your own behalf, and not on -behalf of any Contributor. You must make it absolutely clear that any -such warranty, support, indemnity, or liability obligation is offered by -You alone, and You hereby agree to indemnify every Contributor for any -liability incurred by such Contributor as a result of warranty, support, -indemnity or liability terms You offer. You may include additional -disclaimers of warranty and limitations of liability specific to any -jurisdiction. + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and -4. Inability to Comply Due to Statute or Regulation ---------------------------------------------------- + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and -If it is impossible for You to comply with any of the terms of this -License with respect to some or all of the Covered Software due to -statute, judicial order, or regulation then You must: (a) comply with -the terms of this License to the maximum extent possible; and (b) -describe the limitations and the code they affect. Such description must -be placed in a text file included with all distributions of the Covered -Software under this License. Except to the extent prohibited by statute -or regulation, such description must be sufficiently detailed for a -recipient of ordinary skill to be able to understand it. 
+ (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. -5. Termination --------------- + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. -5.1. The rights granted under this License will terminate automatically -if You fail to comply with any of its terms. However, if You become -compliant, then the rights granted under this License from a particular -Contributor are reinstated (a) provisionally, unless and until such -Contributor explicitly and finally terminates Your grants, and (b) on an -ongoing basis, if such Contributor fails to notify You of the -non-compliance by some reasonable means prior to 60 days after You have -come back into compliance. Moreover, Your grants from a particular -Contributor are reinstated on an ongoing basis if such Contributor -notifies You of the non-compliance by some reasonable means, this is the -first time You have received notice of non-compliance with this License -from such Contributor, and You become compliant prior to 30 days after -Your receipt of the notice. + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. -5.2. If You initiate litigation against any entity by asserting a patent -infringement claim (excluding declaratory judgment actions, -counter-claims, and cross-claims) alleging that a Contributor Version -directly or indirectly infringes any patent, then the rights granted to -You by any and all Contributors for the Covered Software under Section -2.1 of this License shall terminate. + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. -5.3. 
In the event of termination under Sections 5.1 or 5.2 above, all -end user license agreements (excluding distributors and resellers) which -have been validly granted by You or Your distributors under this License -prior to termination shall survive termination. + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +================================================================ + +github.com/go-openapi/strfmt +https://github.com/go-openapi/strfmt +---------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +================================================================ + +github.com/go-openapi/swag +https://github.com/go-openapi/swag +---------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +================================================================ + +github.com/go-openapi/validate +https://github.com/go-openapi/validate +---------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +================================================================ + +github.com/go-sql-driver/mysql +https://github.com/go-sql-driver/mysql +---------------------------------------------------------------- +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. 
************************************************************************ * * @@ -9706,31 +10888,6 @@ Exhibit B - "Incompatible With Secondary Licenses" Notice ================================================================ -github.com/go-task/slim-sprig -https://github.com/go-task/slim-sprig ----------------------------------------------------------------- -Copyright (C) 2013-2020 Masterminds - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - -================================================================ - github.com/gobwas/httphead https://github.com/gobwas/httphead ---------------------------------------------------------------- @@ -9893,6 +11050,21 @@ The above copyright notice and this permission notice shall be included in all c THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +================================================================ + +github.com/golang-jwt/jwt/v5 +https://github.com/golang-jwt/jwt/v5 +---------------------------------------------------------------- +Copyright (c) 2012 Dave Grijalva +Copyright (c) 2021 golang-jwt maintainers + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + ================================================================ github.com/golang/groupcache @@ -11207,11 +12379,252 @@ https://github.com/google/shlex ================================================================ -github.com/google/uuid -https://github.com/google/uuid +github.com/google/uuid +https://github.com/google/uuid +---------------------------------------------------------------- +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +github.com/googleapis/enterprise-certificate-proxy +https://github.com/googleapis/enterprise-certificate-proxy +---------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +================================================================ + +github.com/googleapis/gax-go/v2 +https://github.com/googleapis/gax-go/v2 ---------------------------------------------------------------- -Copyright (c) 2009,2014 Google Inc. All rights reserved. - +Copyright 2016, Google Inc. +All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: @@ -11240,230 +12653,22 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================ -github.com/googleapis/enterprise-certificate-proxy -https://github.com/googleapis/enterprise-certificate-proxy +github.com/gorilla/mux +https://github.com/gorilla/mux ---------------------------------------------------------------- +Copyright (c) 2023 The Gorilla Authors. All rights reserved. 
- Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -================================================================ - -github.com/googleapis/gax-go/v2 -https://github.com/googleapis/gax-go/v2 ----------------------------------------------------------------- -Copyright 2016, Google Inc. -All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. @@ -11484,32 +12689,27 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. github.com/gorilla/websocket https://github.com/gorilla/websocket ---------------------------------------------------------------- -Copyright (c) 2023 The Gorilla Authors. All rights reserved. +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================ @@ -11877,9 +13077,7 @@ Exhibit B - “Incompatible With Secondary Licenses” Notice github.com/hashicorp/go-hclog https://github.com/hashicorp/go-hclog ---------------------------------------------------------------- -MIT License - -Copyright (c) 2017 HashiCorp +Copyright (c) 2017 HashiCorp, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -13440,326 +14638,980 @@ Mozilla Public License, version 2.0 means any form of the work other than Source Code Form. -1.7. “Larger Work” +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. “Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. 
“You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. 
You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. + + +================================================================ + +github.com/inconshreveable/mousetrap +https://github.com/inconshreveable/mousetrap +---------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. -1.8. “License” + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. - means this document. + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. -1.9. “Licensable” + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. + END OF TERMS AND CONDITIONS -1.10. “Modifications” + APPENDIX: How to apply the Apache License to your work. - means any of the following: + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or + Copyright 2022 Alan Shreve (@inconshreveable) - b. any new file in Source Code Form that contains any Covered Software. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -1.11. 
“Patent Claims” of a Contributor + http://www.apache.org/licenses/LICENSE-2.0 - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -1.12. “Secondary License” +================================================================ - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. +github.com/jcmturner/aescts/v2 +https://github.com/jcmturner/aescts/v2 +---------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -1.13. “Source Code Form” + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - means the form of the work preferred for making modifications. + 1. Definitions. -1.14. “You” (or “Your”) + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. -2. License Grants and Conditions + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. -2.1. Grants + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. - a. 
under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. -2.2. Effective Date + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. -2.3. Limitations on Grant Scope + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. - a. for any code that a Contributor has removed from Covered Software; or + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and -2.4. Subsequent Licenses + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). 
+ You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. -2.5. Representation + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. -2.6. Fair Use + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. -2.7. Conditions + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
- Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. + END OF TERMS AND CONDITIONS + APPENDIX: How to apply the Apache License to your work. -3. Responsibilities + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. -3.1. Distribution of Source Form + Copyright {yyyy} {name of copyright owner} - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -3.2. Distribution of Executable Form + http://www.apache.org/licenses/LICENSE-2.0 - If You distribute Covered Software in Executable Form then: + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and +================================================================ - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. +github.com/jcmturner/dnsutils/v2 +https://github.com/jcmturner/dnsutils/v2 +---------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -3.3. Distribution of a Larger Work + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). + 1. Definitions. -3.4. 
Notices + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. -3.5. Application of Additional Terms + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. -4. Inability to Comply Due to Statute or Regulation + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. -5. Termination + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. 
However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. -6. Disclaimer of Warranty + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. 
- Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. -7. Limitation of Liability + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and -8. Litigation + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. 
+ (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and -9. Miscellaneous + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. -10. Versions of the License + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. -10.1. New Versions + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. - Mozilla Foundation is the license steward. 
Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. -10.2. Effect of New Versions + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. + END OF TERMS AND CONDITIONS -10.3. Modified Versions + APPENDIX: How to apply the Apache License to your work. - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. + Copyright [yyyy] [name of copyright owner] -Exhibit A - Source Code Form License Notice + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. 
If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. + http://www.apache.org/licenses/LICENSE-2.0 -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -You may add additional accurate notices of copyright ownership. +================================================================ -Exhibit B - “Incompatible With Secondary Licenses” Notice +github.com/jcmturner/gofork +https://github.com/jcmturner/gofork +---------------------------------------------------------------- +Copyright (c) 2009 The Go Authors. All rights reserved. - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================ -github.com/inconshreveable/mousetrap -https://github.com/inconshreveable/mousetrap +github.com/jcmturner/goidentity/v6 +https://github.com/jcmturner/goidentity/v6 ---------------------------------------------------------------- Apache License Version 2.0, January 2004 @@ -13941,7 +15793,7 @@ https://github.com/inconshreveable/mousetrap APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. 
We also recommend that a @@ -13949,7 +15801,7 @@ https://github.com/inconshreveable/mousetrap same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2022 Alan Shreve (@inconshreveable) + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13965,8 +15817,8 @@ https://github.com/inconshreveable/mousetrap ================================================================ -github.com/jcmturner/aescts/v2 -https://github.com/jcmturner/aescts/v2 +github.com/jcmturner/gokrb5/v8 +https://github.com/jcmturner/gokrb5/v8 ---------------------------------------------------------------- Apache License Version 2.0, January 2004 @@ -14172,8 +16024,8 @@ https://github.com/jcmturner/aescts/v2 ================================================================ -github.com/jcmturner/dnsutils/v2 -https://github.com/jcmturner/dnsutils/v2 +github.com/jcmturner/rpc/v2 +https://github.com/jcmturner/rpc/v2 ---------------------------------------------------------------- Apache License Version 2.0, January 2004 @@ -14354,35 +16206,374 @@ https://github.com/jcmturner/dnsutils/v2 APPENDIX: How to apply the Apache License to your work. - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +================================================================ + +github.com/jedib0t/go-pretty/v6 +https://github.com/jedib0t/go-pretty/v6 +---------------------------------------------------------------- +MIT License + +Copyright (c) 2018 jedib0t + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/jessevdk/go-flags +https://github.com/jessevdk/go-flags +---------------------------------------------------------------- +Copyright (c) 2012 Jesse van den Kieboom. All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + * Neither the name of Google Inc. nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +================================================================ + +github.com/josharian/intern +https://github.com/josharian/intern +---------------------------------------------------------------- +MIT License + +Copyright (c) 2019 Josh Bleecher Snyder + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/json-iterator/go +https://github.com/json-iterator/go +---------------------------------------------------------------- +MIT License + +Copyright (c) 2016 json-iterator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/juju/ratelimit +https://github.com/juju/ratelimit +---------------------------------------------------------------- +All files in this repository are licensed as follows. If you contribute +to this repository, it is assumed that you license your contribution +under the same license unless you state otherwise. + +All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. + +This software is licensed under the LGPLv3, included below. 
+ +As a special exception to the GNU Lesser General Public License version 3 +("LGPL3"), the copyright holders of this Library give you permission to +convey to a third party a Combined Work that links statically or dynamically +to this Library without providing any Minimal Corresponding Source or +Minimal Application Code as set out in 4d or providing the installation +information set out in section 4e, provided that you comply with the other +provisions of LGPL3 and provided that you meet, for the Application the +terms and conditions of the license(s) which apply to the Application. + +Except as stated in this special exception, the provisions of LGPL3 will +continue to comply in full to this Library. If you modify this Library, you +may apply this exception to your version of this Library, but you are not +obliged to do so. If you do not wish to do so, delete this exception +statement from your version. This exception does not (and cannot) modify any +license terms which apply to the Application, with which you must still +comply. + + + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. + + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. 
+ + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. + + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. (If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. 
If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. + + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. + +================================================================ + +github.com/keybase/go-keychain +https://github.com/keybase/go-keychain +---------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2015 Keybase - Copyright [yyyy] [name of copyright owner] +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - http://www.apache.org/licenses/LICENSE-2.0 +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. ================================================================ -github.com/jcmturner/gofork -https://github.com/jcmturner/gofork +github.com/klauspost/compress +https://github.com/klauspost/compress ---------------------------------------------------------------- -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2019 Klaus Post. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -14410,11 +16601,10 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -================================================================ +------------------ + +Files: gzhttp/* -github.com/jcmturner/goidentity/v6 -https://github.com/jcmturner/goidentity/v6 ----------------------------------------------------------------- Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -14595,7 +16785,7 @@ https://github.com/jcmturner/goidentity/v6 APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -14603,7 +16793,7 @@ https://github.com/jcmturner/goidentity/v6 same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright 2016-2017 The New York Times Company Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14617,218 +16807,291 @@ https://github.com/jcmturner/goidentity/v6 See the License for the specific language governing permissions and limitations under the License. +------------------ + +Files: s2/cmd/internal/readahead/* + +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +--------------------- +Files: snappy/* +Files: internal/snapref/* + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +----------------- + +Files: s2/cmd/internal/filepathx/* + +Copyright 2016 The filepathx Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ ================================================================ -github.com/jcmturner/gokrb5/v8 -https://github.com/jcmturner/gokrb5/v8 +github.com/klauspost/cpuid/v2 +https://github.com/klauspost/cpuid/v2 ---------------------------------------------------------------- - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +The MIT License (MIT) - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +Copyright (c) 2015 Klaus Post - 1. Definitions. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. +================================================================ - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. +github.com/klauspost/filepathx +https://github.com/klauspost/filepathx +---------------------------------------------------------------- +Copyright 2016 The filepathx Authors - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." +================================================================ - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. +github.com/klauspost/pgzip +https://github.com/klauspost/pgzip +---------------------------------------------------------------- +The MIT License (MIT) - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. +Copyright (c) 2014 Klaus Post - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and +================================================================ - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. +github.com/klauspost/readahead +https://github.com/klauspost/readahead +---------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2015 Klaus Post + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. +================================================================ - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. +github.com/klauspost/reedsolomon +https://github.com/klauspost/reedsolomon +---------------------------------------------------------------- +The MIT License (MIT) - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. +Copyright (c) 2015 Klaus Post +Copyright (c) 2015 Backblaze - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. +================================================================ - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +github.com/kr/fs +https://github.com/kr/fs +---------------------------------------------------------------- +Copyright (c) 2012 The Go Authors. All rights reserved. 
- Copyright {yyyy} {name of copyright owner} +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. - http://www.apache.org/licenses/LICENSE-2.0 +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +================================================================ + +github.com/kr/pretty +https://github.com/kr/pretty +---------------------------------------------------------------- +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
================================================================ -github.com/jcmturner/rpc/v2 -https://github.com/jcmturner/rpc/v2 +github.com/kr/text +https://github.com/kr/text +---------------------------------------------------------------- +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +================================================================ + +github.com/kylelemons/godebug +https://github.com/kylelemons/godebug ---------------------------------------------------------------- + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -15017,28 +17280,82 @@ https://github.com/jcmturner/rpc/v2 same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +================================================================ + +github.com/lestrrat-go/blackmagic +https://github.com/lestrrat-go/blackmagic +---------------------------------------------------------------- +MIT License + +Copyright (c) 2021 lestrrat-go + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/lestrrat-go/httpcc +https://github.com/lestrrat-go/httpcc +---------------------------------------------------------------- +MIT License + +Copyright (c) 2020 lestrrat-go - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - http://www.apache.org/licenses/LICENSE-2.0 +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. ================================================================ -github.com/jedib0t/go-pretty/v6 -https://github.com/jedib0t/go-pretty/v6 +github.com/lestrrat-go/httprc +https://github.com/lestrrat-go/httprc ---------------------------------------------------------------- MIT License -Copyright (c) 2018 jedib0t +Copyright (c) 2022 lestrrat Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -15060,44 +17377,39 @@ SOFTWARE. ================================================================ -github.com/jessevdk/go-flags -https://github.com/jessevdk/go-flags +github.com/lestrrat-go/iter +https://github.com/lestrrat-go/iter ---------------------------------------------------------------- -Copyright (c) 2012 Jesse van den Kieboom. All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +MIT License - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following disclaimer - in the documentation and/or other materials provided with the - distribution. - * Neither the name of Google Inc. 
nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. +Copyright (c) 2020 lestrrat-go -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. ================================================================ -github.com/josharian/intern -https://github.com/josharian/intern +github.com/lestrrat-go/jwx/v2 +https://github.com/lestrrat-go/jwx/v2 ---------------------------------------------------------------- -MIT License +The MIT License (MIT) -Copyright (c) 2019 Josh Bleecher Snyder +Copyright (c) 2015 lestrrat Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -15117,14 +17429,15 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + ================================================================ -github.com/json-iterator/go -https://github.com/json-iterator/go +github.com/lestrrat-go/option +https://github.com/lestrrat-go/option ---------------------------------------------------------------- MIT License -Copyright (c) 2016 json-iterator +Copyright (c) 2021 lestrrat-go Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -15146,239 +17459,209 @@ SOFTWARE. ================================================================ -github.com/juju/ratelimit -https://github.com/juju/ratelimit +github.com/lib/pq +https://github.com/lib/pq ---------------------------------------------------------------- -All files in this repository are licensed as follows. 
If you contribute -to this repository, it is assumed that you license your contribution -under the same license unless you state otherwise. +Copyright (c) 2011-2013, 'pq' Contributors +Portions Copyright (C) 2011 Blake Mizerany -All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file. +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -This software is licensed under the LGPLv3, included below. +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -As a special exception to the GNU Lesser General Public License version 3 -("LGPL3"), the copyright holders of this Library give you permission to -convey to a third party a Combined Work that links statically or dynamically -to this Library without providing any Minimal Corresponding Source or -Minimal Application Code as set out in 4d or providing the installation -information set out in section 4e, provided that you comply with the other -provisions of LGPL3 and provided that you meet, for the Application the -terms and conditions of the license(s) which apply to the Application. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -Except as stated in this special exception, the provisions of LGPL3 will -continue to comply in full to this Library. If you modify this Library, you -may apply this exception to your version of this Library, but you are not -obliged to do so. If you do not wish to do so, delete this exception -statement from your version. This exception does not (and cannot) modify any -license terms which apply to the Application, with which you must still -comply. +================================================================ +github.com/lithammer/shortuuid/v4 +https://github.com/lithammer/shortuuid/v4 +---------------------------------------------------------------- +The MIT License (MIT) - GNU LESSER GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 +Copyright (c) 2018 Peter Lithammer - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
- This version of the GNU Lesser General Public License incorporates -the terms and conditions of version 3 of the GNU General Public -License, supplemented by the additional permissions listed below. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - 0. Additional Definitions. +================================================================ - As used herein, "this License" refers to version 3 of the GNU Lesser -General Public License, and the "GNU GPL" refers to version 3 of the GNU -General Public License. +github.com/lucasb-eyer/go-colorful +https://github.com/lucasb-eyer/go-colorful +---------------------------------------------------------------- +Copyright (c) 2013 Lucas Beyer - "The Library" refers to a covered work governed by this License, -other than an Application or a Combined Work as defined below. +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - An "Application" is any work that makes use of an interface provided -by the Library, but which is not otherwise based on the Library. -Defining a subclass of a class defined by the Library is deemed a mode -of using an interface provided by the Library. +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - A "Combined Work" is a work produced by combining or linking an -Application with the Library. The particular version of the Library -with which the Combined Work was made is also called the "Linked -Version". +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - The "Minimal Corresponding Source" for a Combined Work means the -Corresponding Source for the Combined Work, excluding any source code -for portions of the Combined Work that, considered in isolation, are -based on the Application, and not on the Linked Version. +================================================================ - The "Corresponding Application Code" for a Combined Work means the -object code and/or source code for the Application, including any data -and utility programs needed for reproducing the Combined Work from the -Application, but excluding the System Libraries of the Combined Work. +github.com/lufia/plan9stats +https://github.com/lufia/plan9stats +---------------------------------------------------------------- +BSD 3-Clause License - 1. Exception to Section 3 of the GNU GPL. 
+Copyright (c) 2019, KADOTA, Kyohei +All rights reserved. - You may convey a covered work under sections 3 and 4 of this License -without being bound by section 3 of the GNU GPL. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: - 2. Conveying Modified Versions. +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. - If you modify a copy of the Library, and, in your modifications, a -facility refers to a function or data to be supplied by an Application -that uses the facility (other than as an argument passed when the -facility is invoked), then you may convey a copy of the modified -version: +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. - a) under this License, provided that you make a good faith effort to - ensure that, in the event an Application does not supply the - function or data, the facility still operates, and performs - whatever part of its purpose remains meaningful, or +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. - b) under the GNU GPL, with none of the additional permissions of - this License applicable to that copy. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - 3. Object Code Incorporating Material from Library Header Files. +================================================================ - The object code form of an Application may incorporate material from -a header file that is part of the Library. You may convey such object -code under terms of your choice, provided that, if the incorporated -material is not limited to numerical parameters, data structure -layouts and accessors, or small macros, inline functions and templates -(ten or fewer lines in length), you do both of the following: +github.com/mailru/easyjson +https://github.com/mailru/easyjson +---------------------------------------------------------------- +Copyright (c) 2016 Mail.Ru Group - a) Give prominent notice with each copy of the object code that the - Library is used in it and that the Library and its use are - covered by this License. 
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - b) Accompany the object code with a copy of the GNU GPL and this license - document. +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - 4. Combined Works. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - You may convey a Combined Work under terms of your choice that, -taken together, effectively do not restrict modification of the -portions of the Library contained in the Combined Work and reverse -engineering for debugging such modifications, if you also do each of -the following: +================================================================ - a) Give prominent notice with each copy of the Combined Work that - the Library is used in it and that the Library and its use are - covered by this License. +github.com/mattn/go-colorable +https://github.com/mattn/go-colorable +---------------------------------------------------------------- +The MIT License (MIT) - b) Accompany the Combined Work with a copy of the GNU GPL and this license - document. +Copyright (c) 2016 Yasuhiro Matsumoto - c) For a Combined Work that displays copyright notices during - execution, include the copyright notice for the Library among - these notices, as well as a reference directing the user to the - copies of the GNU GPL and this license document. +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - d) Do one of the following: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - 0) Convey the Minimal Corresponding Source under the terms of this - License, and the Corresponding Application Code in a form - suitable for, and under terms that permit, the user to - recombine or relink the Application with a modified version of - the Linked Version to produce a modified Combined Work, in the - manner specified by section 6 of the GNU GPL for conveying - Corresponding Source. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - 1) Use a suitable shared library mechanism for linking with the - Library. A suitable mechanism is one that (a) uses at run time - a copy of the Library already present on the user's computer - system, and (b) will operate properly with a modified version - of the Library that is interface-compatible with the Linked - Version. +================================================================ - e) Provide Installation Information, but only if you would otherwise - be required to provide such information under section 6 of the - GNU GPL, and only to the extent that such information is - necessary to install and execute a modified version of the - Combined Work produced by recombining or relinking the - Application with a modified version of the Linked Version. (If - you use option 4d0, the Installation Information must accompany - the Minimal Corresponding Source and Corresponding Application - Code. If you use option 4d1, you must provide the Installation - Information in the manner specified by section 6 of the GNU GPL - for conveying Corresponding Source.) +github.com/mattn/go-ieproxy +https://github.com/mattn/go-ieproxy +---------------------------------------------------------------- +MIT License - 5. Combined Libraries. +Copyright (c) 2014 mattn +Copyright (c) 2017 oliverpool +Copyright (c) 2019 Adele Reed - You may place library facilities that are a work based on the -Library side by side in a single library together with other library -facilities that are not Applications and are not covered by this -License, and convey such a combined library under terms of your -choice, if you do both of the following: +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - a) Accompany the combined library with a copy of the same work based - on the Library, uncombined with any other library facilities, - conveyed under the terms of this License. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - b) Give prominent notice with the combined library that part of it - is a work based on the Library, and explaining where to find the - accompanying uncombined form of the same work. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - 6. Revised Versions of the GNU Lesser General Public License. 
+================================================================ - The Free Software Foundation may publish revised and/or new versions -of the GNU Lesser General Public License from time to time. Such new -versions will be similar in spirit to the present version, but may -differ in detail to address new problems or concerns. +github.com/mattn/go-isatty +https://github.com/mattn/go-isatty +---------------------------------------------------------------- +Copyright (c) Yasuhiro MATSUMOTO - Each version is given a distinguishing version number. If the -Library as you received it specifies that a certain numbered version -of the GNU Lesser General Public License "or any later version" -applies to it, you have the option of following the terms and -conditions either of that published version or of any later version -published by the Free Software Foundation. If the Library as you -received it does not specify a version number of the GNU Lesser -General Public License, you may choose any version of the GNU Lesser -General Public License ever published by the Free Software Foundation. +MIT License (Expat) - If the Library as you received it specifies that a proxy can decide -whether future versions of the GNU Lesser General Public License shall -apply, that proxy's public statement of acceptance of any version is -permanent authorization for you to choose that version for the -Library. +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================ -github.com/klauspost/compress -https://github.com/klauspost/compress +github.com/mattn/go-runewidth +https://github.com/mattn/go-runewidth ---------------------------------------------------------------- -Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2019 Klaus Post. All rights reserved. +The MIT License (MIT) -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +Copyright (c) 2016 Yasuhiro Matsumoto - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
+Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. ------------------- +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. -Files: gzhttp/* +================================================================ +github.com/matttproud/golang_protobuf_extensions +https://github.com/matttproud/golang_protobuf_extensions +---------------------------------------------------------------- Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -15554,152 +17837,76 @@ Files: gzhttp/* incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016-2017 The New York Times Company - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- ------------------- - -Files: s2/cmd/internal/readahead/* - -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------- -Files: snappy/* -Files: internal/snapref/* - -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------ - -Files: s2/cmd/internal/filepathx/* - -Copyright 2016 The filepathx Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + END OF TERMS AND CONDITIONS -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + APPENDIX: How to apply the Apache License to your work. 
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. -================================================================ + Copyright {yyyy} {name of copyright owner} -github.com/klauspost/cpuid/v2 -https://github.com/klauspost/cpuid/v2 ----------------------------------------------------------------- -The MIT License (MIT) + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -Copyright (c) 2015 Klaus Post + http://www.apache.org/licenses/LICENSE-2.0 -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +================================================================ -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +github.com/miekg/dns +https://github.com/miekg/dns +---------------------------------------------------------------- +BSD 3-Clause License +Copyright (c) 2009, The Go Authors. Extensions copyright (c) 2011, Miek Gieben. +All rights reserved. -================================================================ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: -github.com/klauspost/filepathx -https://github.com/klauspost/filepathx ----------------------------------------------------------------- -Copyright 2016 The filepathx Authors +1. 
Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================ -github.com/klauspost/pgzip -https://github.com/klauspost/pgzip +github.com/minio/cli +https://github.com/minio/cli ---------------------------------------------------------------- -The MIT License (MIT) +MIT License -Copyright (c) 2014 Klaus Post +Copyright (c) 2016 Jeremy Saenz & Contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -15719,515 +17926,679 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- ================================================================ -github.com/klauspost/readahead -https://github.com/klauspost/readahead +github.com/minio/console +https://github.com/minio/console ---------------------------------------------------------------- -The MIT License (MIT) - -Copyright (c) 2015 Klaus Post - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 -================================================================ + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. -github.com/klauspost/reedsolomon -https://github.com/klauspost/reedsolomon ----------------------------------------------------------------- -The MIT License (MIT) + Preamble -Copyright (c) 2015 Klaus Post -Copyright (c) 2015 Backblaze + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. -================================================================ + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. -github.com/kr/fs -https://github.com/kr/fs ----------------------------------------------------------------- -Copyright (c) 2012 The Go Authors. All rights reserved. + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + The precise terms and conditions for copying, distribution and +modification follow. - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + TERMS AND CONDITIONS -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + 0. Definitions. -================================================================ + "This License" refers to version 3 of the GNU Affero General Public License. -github.com/kr/pretty -https://github.com/kr/pretty ----------------------------------------------------------------- -Copyright 2012 Keith Rarick + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. + A "covered work" means either the unmodified Program or a work based +on the Program. -================================================================ + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. -github.com/kr/text -https://github.com/kr/text ----------------------------------------------------------------- -Copyright 2012 Keith Rarick + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
-Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. + 1. Source Code. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. -================================================================ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. -github.com/lestrrat-go/backoff/v2 -https://github.com/lestrrat-go/backoff/v2 ----------------------------------------------------------------- -MIT License + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. -Copyright (c) 2018 lestrrat + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. 
For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + The Corresponding Source for a work in source code form is that +same work. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + 2. Basic Permissions. -================================================================ + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. -github.com/lestrrat-go/blackmagic -https://github.com/lestrrat-go/blackmagic ----------------------------------------------------------------- -MIT License + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. -Copyright (c) 2021 lestrrat-go + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. 
-Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. -================================================================ + 4. Conveying Verbatim Copies. -github.com/lestrrat-go/httpcc -https://github.com/lestrrat-go/httpcc ----------------------------------------------------------------- -MIT License + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. -Copyright (c) 2020 lestrrat-go + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + 5. Conveying Modified Source Versions. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
+ You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. -================================================================ + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". -github.com/lestrrat-go/iter -https://github.com/lestrrat-go/iter ----------------------------------------------------------------- -MIT License + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. -Copyright (c) 2020 lestrrat-go + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + 6. Conveying Non-Source Forms. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. -================================================================ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. -github.com/lestrrat-go/jwx -https://github.com/lestrrat-go/jwx ----------------------------------------------------------------- -The MIT License (MIT) + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. -Copyright (c) 2015 lestrrat + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. 
For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. -================================================================ + 7. Additional Terms. 
-github.com/lestrrat-go/option -https://github.com/lestrrat-go/option ----------------------------------------------------------------- -MIT License + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. -Copyright (c) 2021 lestrrat-go + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
+ b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or -================================================================ + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or -github.com/lib/pq -https://github.com/lib/pq ----------------------------------------------------------------- -Copyright (c) 2011-2013, 'pq' Contributors -Portions Copyright (C) 2011 Blake Mizerany + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. -================================================================ + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. -github.com/lithammer/shortuuid/v4 -https://github.com/lithammer/shortuuid/v4 ----------------------------------------------------------------- -The MIT License (MIT) + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. -Copyright (c) 2018 Peter Lithammer + 8. Termination. 
-Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. -================================================================ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. -github.com/lucasb-eyer/go-colorful -https://github.com/lucasb-eyer/go-colorful ----------------------------------------------------------------- -Copyright (c) 2013 Lucas Beyer + 9. Acceptance Not Required for Having Copies. -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. 
These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + 10. Automatic Licensing of Downstream Recipients. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. -================================================================ + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. -github.com/lufia/plan9stats -https://github.com/lufia/plan9stats ----------------------------------------------------------------- -BSD 3-Clause License + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. -Copyright (c) 2019, KADOTA, Kyohei -All rights reserved. + 11. Patents. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. -2. 
Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. -================================================================ + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. 
-github.com/mailru/easyjson -https://github.com/mailru/easyjson ----------------------------------------------------------------- -Copyright (c) 2016 Mail.Ru Group + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + 12. No Surrender of Others' Freedom. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. -================================================================ + 13. Remote Network Interaction; Use with the GNU General Public License. 
-github.com/mattn/go-colorable -https://github.com/mattn/go-colorable ----------------------------------------------------------------- -The MIT License (MIT) + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. -Copyright (c) 2016 Yasuhiro Matsumoto + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + 14. Revised Versions of this License. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. -================================================================ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. 
-github.com/mattn/go-ieproxy -https://github.com/mattn/go-ieproxy ----------------------------------------------------------------- -MIT License + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. -Copyright (c) 2014 mattn -Copyright (c) 2017 oliverpool -Copyright (c) 2019 Adele Reed + 15. Disclaimer of Warranty. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + 16. Limitation of Liability. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. -================================================================ + 17. Interpretation of Sections 15 and 16. -github.com/mattn/go-isatty -https://github.com/mattn/go-isatty ----------------------------------------------------------------- -Copyright (c) Yasuhiro MATSUMOTO + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
-MIT License (Expat) + END OF TERMS AND CONDITIONS -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + How to Apply These Terms to Your New Programs -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. -================================================================ + + Copyright (C) <year> <name of author> -github.com/mattn/go-runewidth -https://github.com/mattn/go-runewidth ----------------------------------------------------------------- -The MIT License (MIT) + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. -Copyright (c) 2016 Yasuhiro Matsumoto + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +Also add information on how to contact you by electronic and paper mail. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<https://www.gnu.org/licenses/>. ================================================================ -github.com/matttproud/golang_protobuf_extensions -https://github.com/matttproud/golang_protobuf_extensions +github.com/minio/crc64nvme +https://github.com/minio/crc64nvme ---------------------------------------------------------------- + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -16408,7 +18779,7 @@ https://github.com/matttproud/golang_protobuf_extensions APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -16416,7 +18787,7 @@ https://github.com/matttproud/golang_protobuf_extensions same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16432,47 +18803,13 @@ https://github.com/matttproud/golang_protobuf_extensions ================================================================ -github.com/miekg/dns -https://github.com/miekg/dns ----------------------------------------------------------------- -BSD 3-Clause License - -Copyright (c) 2009, The Go Authors. Extensions copyright (c) 2011, Miek Gieben. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission.
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -================================================================ - -github.com/minio/cli -https://github.com/minio/cli +github.com/minio/dnscache +https://github.com/minio/dnscache ---------------------------------------------------------------- MIT License -Copyright (c) 2016 Jeremy Saenz & Contributors +Copyright (c) 2023 MinIO, Inc. +Copyright (c) 2018 Olivier Poitrey Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -16494,8 +18831,8 @@ SOFTWARE. ================================================================ -github.com/minio/console -https://github.com/minio/console +github.com/minio/dperf +https://github.com/minio/dperf ---------------------------------------------------------------- GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 @@ -17132,8 +19469,8 @@ the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify - it under the terms of the GNU Affero General Public License as published by - the Free Software Foundation, either version 3 of the License, or + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, @@ -17161,36 +19498,216 @@ For more information on this, and how to apply and follow the GNU AGPL, see ================================================================ -github.com/minio/dnscache -https://github.com/minio/dnscache +github.com/minio/highwayhash +https://github.com/minio/highwayhash ---------------------------------------------------------------- -MIT License -Copyright (c) 2023 MinIO, Inc. -Copyright (c) 2018 Olivier Poitrey + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + 1. Definitions. 
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. 
Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. ================================================================ -github.com/minio/dperf -https://github.com/minio/dperf +github.com/minio/kms-go/kes +https://github.com/minio/kms-go/kes ---------------------------------------------------------------- GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 @@ -17856,216 +20373,8 @@ For more information on this, and how to apply and follow the GNU AGPL, see ================================================================ -github.com/minio/highwayhash -https://github.com/minio/highwayhash ----------------------------------------------------------------- - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -================================================================ - -github.com/minio/kms-go/kes -https://github.com/minio/kms-go/kes +github.com/minio/kms-go/kms +https://github.com/minio/kms-go/kms ---------------------------------------------------------------- GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 @@ -20484,7 +22793,7 @@ https://github.com/minio/minio-go/v7 github.com/minio/mux https://github.com/minio/mux ---------------------------------------------------------------- -Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. +Copyright (c) 2023 The Gorilla Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -20514,8 +22823,8 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================ -github.com/minio/pkg/v2 -https://github.com/minio/pkg/v2 +github.com/minio/pkg/v3 +https://github.com/minio/pkg/v3 ---------------------------------------------------------------- GNU AFFERO GENERAL PUBLIC LICENSE Version 3, 19 November 2007 @@ -21389,214 +23698,6 @@ https://github.com/minio/selfupdate ================================================================ -github.com/minio/sha256-simd -https://github.com/minio/sha256-simd ----------------------------------------------------------------- - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- -================================================================ - github.com/minio/simdjson-go https://github.com/minio/simdjson-go ---------------------------------------------------------------- @@ -22750,33 +24851,6 @@ https://github.com/modern-go/reflect2 ================================================================ -github.com/montanaflynn/stats -https://github.com/montanaflynn/stats ----------------------------------------------------------------- -The MIT License (MIT) - -Copyright (c) 2014-2023 Montana Flynn (https://montanaflynn.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -================================================================ - github.com/muesli/ansi https://github.com/muesli/ansi ---------------------------------------------------------------- @@ -22885,6 +24959,43 @@ SOFTWARE. ================================================================ +github.com/munnerz/goautoneg +https://github.com/munnerz/goautoneg +---------------------------------------------------------------- +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + github.com/nats-io/jwt/v2 https://github.com/nats-io/jwt/v2 ---------------------------------------------------------------- @@ -24123,240 +26234,213 @@ https://github.com/nats-io/nuid WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - -================================================================ - -github.com/nats-io/stan.go -https://github.com/nats-io/stan.go ----------------------------------------------------------------- - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + ================================================================ -github.com/navidys/tvxwidgets -https://github.com/navidys/tvxwidgets +github.com/nats-io/stan.go +https://github.com/nats-io/stan.go ---------------------------------------------------------------- -MIT License + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -Copyright (c) 2021 Navid Yaghoobi + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + 1. Definitions. 
-The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. ================================================================ github.com/ncw/directio @@ -24640,58 +26724,6 @@ THE SOFTWARE. ================================================================ -github.com/onsi/ginkgo/v2 -https://github.com/onsi/ginkgo/v2 ----------------------------------------------------------------- -Copyright (c) 2013-2014 Onsi Fakhouri - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -================================================================ - -github.com/onsi/gomega -https://github.com/onsi/gomega ----------------------------------------------------------------- -Copyright (c) 2013-2014 Onsi Fakhouri - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -================================================================ - github.com/philhofer/fwd https://github.com/philhofer/fwd ---------------------------------------------------------------- @@ -24704,8 +26736,8 @@ The above copyright notice and this permission notice shall be included in all c THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================ -github.com/pierrec/lz4 -https://github.com/pierrec/lz4 +github.com/pierrec/lz4/v4 +https://github.com/pierrec/lz4/v4 ---------------------------------------------------------------- Copyright (c) 2015, Pierre Curto All rights reserved. @@ -24738,10 +26770,10 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================ -github.com/pierrec/lz4/v4 -https://github.com/pierrec/lz4/v4 +github.com/pkg/browser +https://github.com/pkg/browser ---------------------------------------------------------------- -Copyright (c) 2015, Pierre Curto +Copyright (c) 2014, Dave Cheney All rights reserved. Redistribution and use in source and binary forms, with or without @@ -24754,10 +26786,6 @@ modification, are permitted provided that the following conditions are met: this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. -* Neither the name of xxHash nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE @@ -24769,7 +26797,6 @@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ================================================================ github.com/pkg/errors @@ -24847,6 +26874,41 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================ +github.com/planetscale/vtprotobuf +https://github.com/planetscale/vtprotobuf +---------------------------------------------------------------- +Copyright (c) 2021, PlanetScale Inc. All rights reserved. +Copyright (c) 2013, The GoGo Authors. All rights reserved. +Copyright (c) 2018 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + github.com/pmezard/go-difflib https://github.com/pmezard/go-difflib ---------------------------------------------------------------- @@ -24935,10 +26997,216 @@ SOFTWARE. ================================================================ -github.com/pquerna/cachecontrol -https://github.com/pquerna/cachecontrol +github.com/prometheus/client_golang +https://github.com/prometheus/client_golang ---------------------------------------------------------------- + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ +================================================================ + +github.com/prometheus/client_model +https://github.com/prometheus/client_model +---------------------------------------------------------------- Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -25143,8 +27411,8 @@ https://github.com/pquerna/cachecontrol ================================================================ -github.com/prometheus/client_golang -https://github.com/prometheus/client_golang +github.com/prometheus/common +https://github.com/prometheus/common ---------------------------------------------------------------- Apache License Version 2.0, January 2004 @@ -25350,8 +27618,8 @@ https://github.com/prometheus/client_golang ================================================================ -github.com/prometheus/client_model -https://github.com/prometheus/client_model +github.com/prometheus/procfs +https://github.com/prometheus/procfs ---------------------------------------------------------------- Apache License Version 2.0, January 2004 @@ -25557,8 +27825,8 @@ https://github.com/prometheus/client_model ================================================================ -github.com/prometheus/common -https://github.com/prometheus/common +github.com/prometheus/prom2json +https://github.com/prometheus/prom2json ---------------------------------------------------------------- Apache License Version 2.0, January 2004 @@ -25764,8 +28032,8 @@ https://github.com/prometheus/common ================================================================ -github.com/prometheus/procfs -https://github.com/prometheus/procfs +github.com/prometheus/prometheus +https://github.com/prometheus/prometheus ---------------------------------------------------------------- Apache License Version 2.0, January 2004 @@ -25971,8 +28239,8 @@ https://github.com/prometheus/procfs ================================================================ -github.com/prometheus/prom2json -https://github.com/prometheus/prom2json +github.com/puzpuzpuz/xsync/v3 +https://github.com/puzpuzpuz/xsync/v3 ---------------------------------------------------------------- Apache License Version 2.0, January 2004 @@ -26244,30 +28512,34 @@ official policies, either expressed or implied, of Richard Crowley. ================================================================ -github.com/rivo/tview -https://github.com/rivo/tview +github.com/redis/go-redis/v9 +https://github.com/redis/go-redis/v9 ---------------------------------------------------------------- -MIT License - -Copyright (c) 2018 Oliver Kuederle +Copyright (c) 2013 The github.com/redis/go-redis Authors. +All rights reserved. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================ @@ -26586,131 +28858,527 @@ https://github.com/safchain/ethtool incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - END OF TERMS AND CONDITIONS + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright (c) 2015 The Ethtool Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + +================================================================ + +github.com/secure-io/sio-go +https://github.com/secure-io/sio-go +---------------------------------------------------------------- +MIT License + +Copyright (c) 2019 SecureIO + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/segmentio/asm +https://github.com/segmentio/asm +---------------------------------------------------------------- +MIT License + +Copyright (c) 2021 Segment + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +================================================================ + +github.com/shirou/gopsutil/v3 +https://github.com/shirou/gopsutil/v3 +---------------------------------------------------------------- +gopsutil is distributed under BSD license reproduced below. + +Copyright (c) 2014, WAKAYAMA Shirou +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ * Neither the name of the gopsutil authors nor the names of its contributors + may be used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +------- +internal/common/binary.go in the gopsutil is copied and modified from golang/encoding/binary.go. + + + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +================================================================ + +github.com/shoenig/go-m1cpu +https://github.com/shoenig/go-m1cpu +---------------------------------------------------------------- +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. 
that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. 
Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. - APPENDIX: How to apply the Apache License to your work. +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. - Copyright {yyyy} {name of copyright owner} +6. Disclaimer of Warranty - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. - http://www.apache.org/licenses/LICENSE-2.0 +7. Limitation of Liability - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. +8. Litigation -================================================================ + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. -github.com/secure-io/sio-go -https://github.com/secure-io/sio-go ----------------------------------------------------------------- -MIT License +9. Miscellaneous -Copyright (c) 2019 SecureIO + This License represents the complete agreement concerning the subject + matter hereof. 
If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +10. Versions of the License -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +10.1. New Versions -================================================================ + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. -github.com/shirou/gopsutil/v3 -https://github.com/shirou/gopsutil/v3 ----------------------------------------------------------------- -gopsutil is distributed under BSD license reproduced below. +10.2. Effect of New Versions -Copyright (c) 2014, WAKAYAMA Shirou -All rights reserved. + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: +10.3. Modified Versions - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of the gopsutil authors nor the names of its contributors - may be used to endorse or promote products derived from this software without - specific prior written permission. + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. +Exhibit A - Source Code Form License Notice -------- -internal/common/binary.go in the gopsutil is copied and modified from golang/encoding/binary.go. + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. +You may add additional accurate notices of copyright ownership. -Copyright (c) 2009 The Go Authors. All rights reserved. +Exhibit B - "Incompatible With Secondary Licenses" Notice -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
================================================================ -github.com/shoenig/go-m1cpu -https://github.com/shoenig/go-m1cpu +github.com/shoenig/test +https://github.com/shoenig/test ---------------------------------------------------------------- Mozilla Public License, version 2.0 @@ -27078,408 +29746,819 @@ Exhibit B - "Incompatible With Secondary Licenses" Notice ================================================================ -github.com/shoenig/test -https://github.com/shoenig/test +github.com/stretchr/testify +https://github.com/stretchr/testify ---------------------------------------------------------------- -Mozilla Public License, version 2.0 +MIT License -1. Definitions +Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors. -1.1. "Contributor" +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. -1.2. "Contributor Version" +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. +================================================================ -1.3. "Contribution" +github.com/tidwall/gjson +https://github.com/tidwall/gjson +---------------------------------------------------------------- +The MIT License (MIT) - means Covered Software of a particular Contributor. +Copyright (c) 2016 Josh Baker -1.4. "Covered Software" +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. -1.5. "Incompatible With Secondary Licenses" - means +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or +================================================================ - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. +github.com/tidwall/match +https://github.com/tidwall/match +---------------------------------------------------------------- +The MIT License (MIT) -1.6. "Executable Form" +Copyright (c) 2016 Josh Baker - means any form of the work other than Source Code Form. +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: -1.7. "Larger Work" +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -1.8. "License" +================================================================ - means this document. +github.com/tidwall/pretty +https://github.com/tidwall/pretty +---------------------------------------------------------------- +The MIT License (MIT) -1.9. "Licensable" +Copyright (c) 2017 Josh Baker - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +================================================================ + +github.com/tinylib/msgp +https://github.com/tinylib/msgp +---------------------------------------------------------------- +Copyright (c) 2014 Philip Hofer +Portions Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +================================================================ + +github.com/tklauser/go-sysconf +https://github.com/tklauser/go-sysconf +---------------------------------------------------------------- +BSD 3-Clause License + +Copyright (c) 2018-2022, Tobias Klauser +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +================================================================ + +github.com/tklauser/numcpus +https://github.com/tklauser/numcpus +---------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. -1.10. "Modifications" + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. - means any of the following: + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or + END OF TERMS AND CONDITIONS - b. any new file in Source Code Form that contains any Covered Software. + APPENDIX: How to apply the Apache License to your work. -1.11. "Patent Claims" of a Contributor + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. + Copyright [yyyy] [name of copyright owner] -1.12. "Secondary License" + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. + http://www.apache.org/licenses/LICENSE-2.0 -1.13. "Source Code Form" + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. - means the form of the work preferred for making modifications. +================================================================ -1.14. "You" (or "Your") +github.com/unrolled/secure +https://github.com/unrolled/secure +---------------------------------------------------------------- +The MIT License (MIT) - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. +Copyright (c) 2014 Cory Jacobsen +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: -2. License Grants and Conditions +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. -2.1. Grants +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: +================================================================ - a. 
under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and +github.com/valyala/bytebufferpool +https://github.com/valyala/bytebufferpool +---------------------------------------------------------------- +The MIT License (MIT) - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. +Copyright (c) 2016 Aliaksandr Valialkin, VertaMedia -2.2. Effective Date +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. -2.3. Limitations on Grant Scope +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - a. for any code that a Contributor has removed from Covered Software; or +================================================================ - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or +github.com/vbauerster/mpb/v8 +https://github.com/vbauerster/mpb/v8 +---------------------------------------------------------------- +This is free and unencumbered software released into the public domain. - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. 
We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. -2.4. Subsequent Licenses +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). +For more information, please refer to -2.5. Representation +================================================================ - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. +github.com/xdg/scram +https://github.com/xdg/scram +---------------------------------------------------------------- -2.6. Fair Use + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -2.7. Conditions + 1. Definitions. - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. -3. Responsibilities + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. -3.1. Distribution of Source Form + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. -3.2. 
Distribution of Executable Form + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. - If You distribute Covered Software in Executable Form then: + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." -3.3. Distribution of a Larger Work + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. -3.4. Notices + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: -3.5. Application of Additional Terms + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and -4. Inability to Comply Due to Statute or Regulation + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. 
Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. -5. Termination + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. -6. Disclaimer of Warranty + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. -7. 
Limitation of Liability +================================================================ - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. +github.com/xdg/stringprep +https://github.com/xdg/stringprep +---------------------------------------------------------------- -8. Litigation + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION -9. Miscellaneous + 1. Definitions. - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. -10. Versions of the License + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. -10.1. New Versions + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. -10.2. 
Effect of New Versions + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). -10.3. Modified Versions + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. -Exhibit A - Source Code Form License Notice + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. 
If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: -You may add additional accurate notices of copyright ownership. + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and -Exhibit B - "Incompatible With Secondary Licenses" Notice + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
-================================================================ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. -github.com/stretchr/testify -https://github.com/stretchr/testify ----------------------------------------------------------------- -MIT License + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. -Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors. + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. ================================================================ -github.com/tidwall/gjson -https://github.com/tidwall/gjson +github.com/yusufpapurcu/wmi +https://github.com/yusufpapurcu/wmi ---------------------------------------------------------------- The MIT License (MIT) -Copyright (c) 2016 Josh Baker +Copyright (c) 2013 Stack Exchange Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in @@ -27500,38 +30579,170 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================ -github.com/tidwall/match -https://github.com/tidwall/match +github.com/zeebo/assert +https://github.com/zeebo/assert ---------------------------------------------------------------- -The MIT License (MIT) +Creative Commons Legal Code -Copyright (c) 2016 Josh Baker +CC0 1.0 Universal -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: + CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE + LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN + ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS + INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES + REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS + PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM + THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED + HEREUNDER. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +Statement of Purpose -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+The laws of most jurisdictions throughout the world automatically confer +exclusive Copyright and Related Rights (defined below) upon the creator +and subsequent owner(s) (each and all, an "owner") of an original work of +authorship and/or a database (each, a "Work"). + +Certain owners wish to permanently relinquish those rights to a Work for +the purpose of contributing to a commons of creative, cultural and +scientific works ("Commons") that the public can reliably and without fear +of later claims of infringement build upon, modify, incorporate in other +works, reuse and redistribute as freely as possible in any form whatsoever +and for any purposes, including without limitation commercial purposes. +These owners may contribute to the Commons to promote the ideal of a free +culture and the further production of creative, cultural and scientific +works, or to gain reputation or greater distribution for their Work in +part through the use and efforts of others. + +For these and/or other purposes and motivations, and without any +expectation of additional consideration or compensation, the person +associating CC0 with a Work (the "Affirmer"), to the extent that he or she +is an owner of Copyright and Related Rights in the Work, voluntarily +elects to apply CC0 to the Work and publicly distribute the Work under its +terms, with knowledge of his or her Copyright and Related Rights in the +Work and the meaning and intended legal effect of CC0 on those rights. + +1. Copyright and Related Rights. A Work made available under CC0 may be +protected by copyright and related or neighboring rights ("Copyright and +Related Rights"). Copyright and Related Rights include, but are not +limited to, the following: + + i. the right to reproduce, adapt, distribute, perform, display, + communicate, and translate a Work; + ii. moral rights retained by the original author(s) and/or performer(s); +iii. publicity and privacy rights pertaining to a person's image or + likeness depicted in a Work; + iv. rights protecting against unfair competition in regards to a Work, + subject to the limitations in paragraph 4(a), below; + v. rights protecting the extraction, dissemination, use and reuse of data + in a Work; + vi. database rights (such as those arising under Directive 96/9/EC of the + European Parliament and of the Council of 11 March 1996 on the legal + protection of databases, and under any national implementation + thereof, including any amended or successor version of such + directive); and +vii. other similar, equivalent or corresponding rights throughout the + world based on applicable law or treaty, and any national + implementations thereof. + +2. Waiver. To the greatest extent permitted by, but not in contravention +of, applicable law, Affirmer hereby overtly, fully, permanently, +irrevocably and unconditionally waives, abandons, and surrenders all of +Affirmer's Copyright and Related Rights and associated claims and causes +of action, whether now known or unknown (including existing as well as +future claims and causes of action), in the Work (i) in all territories +worldwide, (ii) for the maximum duration provided by applicable law or +treaty (including future time extensions), (iii) in any current or future +medium and for any number of copies, and (iv) for any purpose whatsoever, +including without limitation commercial, advertising or promotional +purposes (the "Waiver"). 
Affirmer makes the Waiver for the benefit of each +member of the public at large and to the detriment of Affirmer's heirs and +successors, fully intending that such Waiver shall not be subject to +revocation, rescission, cancellation, termination, or any other legal or +equitable action to disrupt the quiet enjoyment of the Work by the public +as contemplated by Affirmer's express Statement of Purpose. + +3. Public License Fallback. Should any part of the Waiver for any reason +be judged legally invalid or ineffective under applicable law, then the +Waiver shall be preserved to the maximum extent permitted taking into +account Affirmer's express Statement of Purpose. In addition, to the +extent the Waiver is so judged Affirmer hereby grants to each affected +person a royalty-free, non transferable, non sublicensable, non exclusive, +irrevocable and unconditional license to exercise Affirmer's Copyright and +Related Rights in the Work (i) in all territories worldwide, (ii) for the +maximum duration provided by applicable law or treaty (including future +time extensions), (iii) in any current or future medium and for any number +of copies, and (iv) for any purpose whatsoever, including without +limitation commercial, advertising or promotional purposes (the +"License"). The License shall be deemed effective as of the date CC0 was +applied by Affirmer to the Work. Should any part of the License for any +reason be judged legally invalid or ineffective under applicable law, such +partial invalidity or ineffectiveness shall not invalidate the remainder +of the License, and in such case Affirmer hereby affirms that he or she +will not (i) exercise any of his or her remaining Copyright and Related +Rights in the Work or (ii) assert any associated claims and causes of +action with respect to the Work, in either case contrary to Affirmer's +express Statement of Purpose. + +4. Limitations and Disclaimers. + + a. No trademark or patent rights held by Affirmer are waived, abandoned, + surrendered, licensed or otherwise affected by this document. + b. Affirmer offers the Work as-is and makes no representations or + warranties of any kind concerning the Work, express, implied, + statutory or otherwise, including without limitation warranties of + title, merchantability, fitness for a particular purpose, non + infringement, or the absence of latent or other defects, accuracy, or + the present or absence of errors, whether or not discoverable, all to + the greatest extent permissible under applicable law. + c. Affirmer disclaims responsibility for clearing rights of other persons + that may apply to the Work or any use thereof, including without + limitation any person's Copyright and Related Rights in the Work. + Further, Affirmer disclaims responsibility for obtaining any necessary + consents, permissions or other rights required for any use of the + Work. + d. Affirmer understands and acknowledges that Creative Commons is not a + party to this document and has no duty or obligation with respect to + this CC0 or use of the Work. ================================================================ -github.com/tidwall/pretty -https://github.com/tidwall/pretty +github.com/zeebo/xxh3 +https://github.com/zeebo/xxh3 +---------------------------------------------------------------- +xxHash Library +Copyright (c) 2012-2014, Yann Collet +Copyright (c) 2019, Jeff Wendling +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +go.etcd.io/bbolt +https://go.etcd.io/bbolt ---------------------------------------------------------------- The MIT License (MIT) -Copyright (c) 2017 Josh Baker +Copyright (c) 2013 Ben Johnson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in @@ -27552,56 +30763,8 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================================ -github.com/tinylib/msgp -https://github.com/tinylib/msgp ----------------------------------------------------------------- -Copyright (c) 2014 Philip Hofer -Portions Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -================================================================ - -github.com/tklauser/go-sysconf -https://github.com/tklauser/go-sysconf ----------------------------------------------------------------- -BSD 3-Clause License - -Copyright (c) 2018-2022, Tobias Klauser -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -================================================================ - -github.com/tklauser/numcpus -https://github.com/tklauser/numcpus +go.etcd.io/etcd/api/v3 +https://go.etcd.io/etcd/api/v3 ---------------------------------------------------------------- Apache License @@ -27808,92 +30971,8 @@ https://github.com/tklauser/numcpus ================================================================ -github.com/unrolled/secure -https://github.com/unrolled/secure ----------------------------------------------------------------- -The MIT License (MIT) - -Copyright (c) 2014 Cory Jacobsen - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -================================================================ - -github.com/valyala/bytebufferpool -https://github.com/valyala/bytebufferpool ----------------------------------------------------------------- -The MIT License (MIT) - -Copyright (c) 2016 Aliaksandr Valialkin, VertaMedia - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -================================================================ - -github.com/vbauerster/mpb/v8 -https://github.com/vbauerster/mpb/v8 ----------------------------------------------------------------- -This is free and unencumbered software released into the public domain. - -Anyone is free to copy, modify, publish, use, compile, sell, or -distribute this software, either in source code form or as a compiled -binary, for any purpose, commercial or non-commercial, and by any -means. - -In jurisdictions that recognize copyright laws, the author or authors -of this software dedicate any and all copyright interest in the -software to the public domain. We make this dedication for the benefit -of the public at large and to the detriment of our heirs and -successors. We intend this dedication to be an overt act of -relinquishment in perpetuity of all present and future rights to this -software under copyright law. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -For more information, please refer to - -================================================================ - -github.com/xdg/scram -https://github.com/xdg/scram +go.etcd.io/etcd/client/pkg/v3 +https://go.etcd.io/etcd/client/pkg/v3 ---------------------------------------------------------------- Apache License @@ -28071,10 +31150,37 @@ https://github.com/xdg/scram incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) 
The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ================================================================ -github.com/xdg/stringprep -https://github.com/xdg/stringprep +go.etcd.io/etcd/client/v3 +https://go.etcd.io/etcd/client/v3 ---------------------------------------------------------------- Apache License @@ -28252,222 +31358,38 @@ https://github.com/xdg/stringprep incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. -================================================================ - -github.com/yusufpapurcu/wmi -https://github.com/yusufpapurcu/wmi ----------------------------------------------------------------- -The MIT License (MIT) - -Copyright (c) 2013 Stack Exchange - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -================================================================ - -github.com/zeebo/assert -https://github.com/zeebo/assert ----------------------------------------------------------------- -Creative Commons Legal Code - -CC0 1.0 Universal - - CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE - LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN - ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS - INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES - REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS - PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM - THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED - HEREUNDER. 
- -Statement of Purpose - -The laws of most jurisdictions throughout the world automatically confer -exclusive Copyright and Related Rights (defined below) upon the creator -and subsequent owner(s) (each and all, an "owner") of an original work of -authorship and/or a database (each, a "Work"). - -Certain owners wish to permanently relinquish those rights to a Work for -the purpose of contributing to a commons of creative, cultural and -scientific works ("Commons") that the public can reliably and without fear -of later claims of infringement build upon, modify, incorporate in other -works, reuse and redistribute as freely as possible in any form whatsoever -and for any purposes, including without limitation commercial purposes. -These owners may contribute to the Commons to promote the ideal of a free -culture and the further production of creative, cultural and scientific -works, or to gain reputation or greater distribution for their Work in -part through the use and efforts of others. - -For these and/or other purposes and motivations, and without any -expectation of additional consideration or compensation, the person -associating CC0 with a Work (the "Affirmer"), to the extent that he or she -is an owner of Copyright and Related Rights in the Work, voluntarily -elects to apply CC0 to the Work and publicly distribute the Work under its -terms, with knowledge of his or her Copyright and Related Rights in the -Work and the meaning and intended legal effect of CC0 on those rights. - -1. Copyright and Related Rights. A Work made available under CC0 may be -protected by copyright and related or neighboring rights ("Copyright and -Related Rights"). Copyright and Related Rights include, but are not -limited to, the following: - - i. the right to reproduce, adapt, distribute, perform, display, - communicate, and translate a Work; - ii. moral rights retained by the original author(s) and/or performer(s); -iii. publicity and privacy rights pertaining to a person's image or - likeness depicted in a Work; - iv. rights protecting against unfair competition in regards to a Work, - subject to the limitations in paragraph 4(a), below; - v. rights protecting the extraction, dissemination, use and reuse of data - in a Work; - vi. database rights (such as those arising under Directive 96/9/EC of the - European Parliament and of the Council of 11 March 1996 on the legal - protection of databases, and under any national implementation - thereof, including any amended or successor version of such - directive); and -vii. other similar, equivalent or corresponding rights throughout the - world based on applicable law or treaty, and any national - implementations thereof. - -2. Waiver. To the greatest extent permitted by, but not in contravention -of, applicable law, Affirmer hereby overtly, fully, permanently, -irrevocably and unconditionally waives, abandons, and surrenders all of -Affirmer's Copyright and Related Rights and associated claims and causes -of action, whether now known or unknown (including existing as well as -future claims and causes of action), in the Work (i) in all territories -worldwide, (ii) for the maximum duration provided by applicable law or -treaty (including future time extensions), (iii) in any current or future -medium and for any number of copies, and (iv) for any purpose whatsoever, -including without limitation commercial, advertising or promotional -purposes (the "Waiver"). 
Affirmer makes the Waiver for the benefit of each -member of the public at large and to the detriment of Affirmer's heirs and -successors, fully intending that such Waiver shall not be subject to -revocation, rescission, cancellation, termination, or any other legal or -equitable action to disrupt the quiet enjoyment of the Work by the public -as contemplated by Affirmer's express Statement of Purpose. - -3. Public License Fallback. Should any part of the Waiver for any reason -be judged legally invalid or ineffective under applicable law, then the -Waiver shall be preserved to the maximum extent permitted taking into -account Affirmer's express Statement of Purpose. In addition, to the -extent the Waiver is so judged Affirmer hereby grants to each affected -person a royalty-free, non transferable, non sublicensable, non exclusive, -irrevocable and unconditional license to exercise Affirmer's Copyright and -Related Rights in the Work (i) in all territories worldwide, (ii) for the -maximum duration provided by applicable law or treaty (including future -time extensions), (iii) in any current or future medium and for any number -of copies, and (iv) for any purpose whatsoever, including without -limitation commercial, advertising or promotional purposes (the -"License"). The License shall be deemed effective as of the date CC0 was -applied by Affirmer to the Work. Should any part of the License for any -reason be judged legally invalid or ineffective under applicable law, such -partial invalidity or ineffectiveness shall not invalidate the remainder -of the License, and in such case Affirmer hereby affirms that he or she -will not (i) exercise any of his or her remaining Copyright and Related -Rights in the Work or (ii) assert any associated claims and causes of -action with respect to the Work, in either case contrary to Affirmer's -express Statement of Purpose. - -4. Limitations and Disclaimers. - - a. No trademark or patent rights held by Affirmer are waived, abandoned, - surrendered, licensed or otherwise affected by this document. - b. Affirmer offers the Work as-is and makes no representations or - warranties of any kind concerning the Work, express, implied, - statutory or otherwise, including without limitation warranties of - title, merchantability, fitness for a particular purpose, non - infringement, or the absence of latent or other defects, accuracy, or - the present or absence of errors, whether or not discoverable, all to - the greatest extent permissible under applicable law. - c. Affirmer disclaims responsibility for clearing rights of other persons - that may apply to the Work or any use thereof, including without - limitation any person's Copyright and Related Rights in the Work. - Further, Affirmer disclaims responsibility for obtaining any necessary - consents, permissions or other rights required for any use of the - Work. - d. Affirmer understands and acknowledges that Creative Commons is not a - party to this document and has no duty or obligation with respect to - this CC0 or use of the Work. - -================================================================ - -github.com/zeebo/xxh3 -https://github.com/zeebo/xxh3 ----------------------------------------------------------------- -xxHash Library -Copyright (c) 2012-2014, Yann Collet -Copyright (c) 2019, Jeff Wendling -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + END OF TERMS AND CONDITIONS -================================================================ + APPENDIX: How to apply the Apache License to your work. -go.etcd.io/bbolt -https://go.etcd.io/bbolt ----------------------------------------------------------------- -The MIT License (MIT) + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. -Copyright (c) 2013 Ben Johnson + Copyright [yyyy] [name of copyright owner] -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + http://www.apache.org/licenses/LICENSE-2.0 -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. ================================================================ -go.etcd.io/etcd/api/v3 -https://go.etcd.io/etcd/api/v3 +go.mongodb.org/mongo-driver +https://go.mongodb.org/mongo-driver ---------------------------------------------------------------- - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -28672,8 +31594,8 @@ https://go.etcd.io/etcd/api/v3 ================================================================ -go.etcd.io/etcd/client/pkg/v3 -https://go.etcd.io/etcd/client/pkg/v3 +go.opencensus.io +https://go.opencensus.io ---------------------------------------------------------------- Apache License @@ -28877,13 +31799,11 @@ https://go.etcd.io/etcd/client/pkg/v3 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - ================================================================ -go.etcd.io/etcd/client/v3 -https://go.etcd.io/etcd/client/v3 +go.opentelemetry.io/auto/sdk +https://go.opentelemetry.io/auto/sdk ---------------------------------------------------------------- - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -29088,8 +32008,8 @@ https://go.etcd.io/etcd/client/v3 ================================================================ -go.mongodb.org/mongo-driver -https://go.mongodb.org/mongo-driver +go.opentelemetry.io/contrib/detectors/gcp +https://go.opentelemetry.io/contrib/detectors/gcp ---------------------------------------------------------------- Apache License Version 2.0, January 2004 @@ -29295,10 +32215,9 @@ https://go.mongodb.org/mongo-driver ================================================================ -go.opencensus.io -https://go.opencensus.io +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc +https://go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc ---------------------------------------------------------------- - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -29500,10 +32419,11 @@ https://go.opencensus.io WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
+ ================================================================ -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc -https://go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +https://go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp ---------------------------------------------------------------- Apache License Version 2.0, January 2004 @@ -29709,8 +32629,8 @@ https://go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelg ================================================================ -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp -https://go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +go.opentelemetry.io/otel +https://go.opentelemetry.io/otel ---------------------------------------------------------------- Apache License Version 2.0, January 2004 @@ -29916,8 +32836,8 @@ https://go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp ================================================================ -go.opentelemetry.io/otel -https://go.opentelemetry.io/otel +go.opentelemetry.io/otel/metric +https://go.opentelemetry.io/otel/metric ---------------------------------------------------------------- Apache License Version 2.0, January 2004 @@ -30123,8 +33043,8 @@ https://go.opentelemetry.io/otel ================================================================ -go.opentelemetry.io/otel/metric -https://go.opentelemetry.io/otel/metric +go.opentelemetry.io/otel/sdk +https://go.opentelemetry.io/otel/sdk ---------------------------------------------------------------- Apache License Version 2.0, January 2004 @@ -30330,8 +33250,8 @@ https://go.opentelemetry.io/otel/metric ================================================================ -go.opentelemetry.io/otel/sdk -https://go.opentelemetry.io/otel/sdk +go.opentelemetry.io/otel/sdk/metric +https://go.opentelemetry.io/otel/sdk/metric ---------------------------------------------------------------- Apache License Version 2.0, January 2004 @@ -30875,40 +33795,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. golang.org/x/crypto https://golang.org/x/crypto ---------------------------------------------------------------- -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -================================================================ - -golang.org/x/exp -https://golang.org/x/exp ----------------------------------------------------------------- -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -30920,7 +33807,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. @@ -30941,7 +33828,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. golang.org/x/mod https://golang.org/x/mod ---------------------------------------------------------------- -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -30953,7 +33840,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. @@ -30974,7 +33861,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. golang.org/x/net https://golang.org/x/net ---------------------------------------------------------------- -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -30986,7 +33873,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. @@ -31007,7 +33894,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. golang.org/x/oauth2 https://golang.org/x/oauth2 ---------------------------------------------------------------- -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -31019,7 +33906,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. @@ -31040,7 +33927,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. golang.org/x/sync https://golang.org/x/sync ---------------------------------------------------------------- -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -31052,7 +33939,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. @@ -31073,7 +33960,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. golang.org/x/sys https://golang.org/x/sys ---------------------------------------------------------------- -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -31085,7 +33972,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. @@ -31106,7 +33993,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. golang.org/x/term https://golang.org/x/term ---------------------------------------------------------------- -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -31118,7 +34005,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. @@ -31139,7 +34026,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. golang.org/x/text https://golang.org/x/text ---------------------------------------------------------------- -Copyright (c) 2009 The Go Authors. 
All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -31151,7 +34038,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. @@ -31172,7 +34059,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. golang.org/x/time https://golang.org/x/time ---------------------------------------------------------------- -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -31184,7 +34071,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. @@ -31205,40 +34092,7 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. golang.org/x/tools https://golang.org/x/tools ---------------------------------------------------------------- -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -================================================================ - -golang.org/x/xerrors -https://golang.org/x/xerrors ----------------------------------------------------------------- -Copyright (c) 2019 The Go Authors. All rights reserved. +Copyright 2009 The Go Authors. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -31250,7 +34104,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google LLC nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. @@ -31301,214 +34155,6 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================================ -google.golang.org/appengine -https://google.golang.org/appengine ----------------------------------------------------------------- - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -================================================================ - google.golang.org/genproto https://google.golang.org/genproto ---------------------------------------------------------------- @@ -31925,216 +34571,8 @@ https://google.golang.org/genproto/googleapis/api ================================================================ -google.golang.org/genproto/googleapis/rpc -https://google.golang.org/genproto/googleapis/rpc ----------------------------------------------------------------- - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -================================================================ - -google.golang.org/grpc -https://google.golang.org/grpc +google.golang.org/genproto/googleapis/rpc +https://google.golang.org/genproto/googleapis/rpc ---------------------------------------------------------------- Apache License @@ -32341,266 +34779,8 @@ https://google.golang.org/grpc ================================================================ -gopkg.in/check.v1 -https://gopkg.in/check.v1 ----------------------------------------------------------------- -Gocheck - A rich testing framework for Go - -Copyright (c) 2010-2013 Gustavo Niemeyer - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -2. 
Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -================================================================ - -gopkg.in/h2non/filetype.v1 -https://gopkg.in/h2non/filetype.v1 ----------------------------------------------------------------- -The MIT License - -Copyright (c) Tomas Aparicio - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - -================================================================ - -gopkg.in/ini.v1 -https://gopkg.in/ini.v1 ----------------------------------------------------------------- -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. 
- -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. 
Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright 2014 Unknwon - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - -================================================================ - -gopkg.in/square/go-jose.v2 -https://gopkg.in/square/go-jose.v2 +google.golang.org/grpc +https://google.golang.org/grpc ---------------------------------------------------------------- Apache License @@ -32807,6 +34987,70 @@ https://gopkg.in/square/go-jose.v2 ================================================================ +google.golang.org/protobuf +https://google.golang.org/protobuf +---------------------------------------------------------------- +Copyright (c) 2018 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +================================================================ + +gopkg.in/check.v1 +https://gopkg.in/check.v1 +---------------------------------------------------------------- +Gocheck - A rich testing framework for Go + +Copyright (c) 2010-2013 Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +================================================================ + gopkg.in/yaml.v2 https://gopkg.in/yaml.v2 ---------------------------------------------------------------- diff --git a/Dockerfile b/Dockerfile index 3318f7435b423..84a9634559a30 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,14 @@ FROM minio/minio:latest -COPY ./minio /usr/bin/minio +ARG TARGETARCH +ARG RELEASE + +RUN chmod -R 777 /usr/bin + +COPY ./minio-${TARGETARCH}.${RELEASE} /usr/bin/minio +COPY ./minio-${TARGETARCH}.${RELEASE}.minisig /usr/bin/minio.minisig +COPY ./minio-${TARGETARCH}.${RELEASE}.sha256sum /usr/bin/minio.sha256sum + COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"] diff --git a/Dockerfile.dev b/Dockerfile.dev deleted file mode 100644 index 6b98cd4a6a713..0000000000000 --- a/Dockerfile.dev +++ /dev/null @@ -1,12 +0,0 @@ -FROM minio/minio:latest - -ENV PATH=/opt/bin:$PATH - -COPY ./minio /opt/bin/minio -COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh - -ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"] - -VOLUME ["/data"] - -CMD ["minio"] diff --git a/Dockerfile.hotfix b/Dockerfile.hotfix index 885fd2fe3b7f3..4dc428cf6395c 100644 --- a/Dockerfile.hotfix +++ b/Dockerfile.hotfix @@ -1,26 +1,33 @@ -FROM golang:1.21-alpine as build +FROM golang:1.24-alpine as build ARG TARGETARCH ARG RELEASE -ENV GOPATH /go -ENV CGO_ENABLED 0 +ENV GOPATH=/go +ENV CGO_ENABLED=0 # Install curl and minisign RUN apk add -U --no-cache ca-certificates && \ apk add -U --no-cache curl && \ go install aead.dev/minisign/cmd/minisign@v0.2.1 -# Download minio binary and signature file +# Download minio binary and signature files RUN curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \ curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \ + curl -s -q https://dl.min.io/server/minio/hotfixes/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /go/bin/minio.sha256sum && \ chmod +x /go/bin/minio -# Download mc binary and signature file +# Download mc binary and signature files RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \ curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \ + curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.sha256sum -o /go/bin/mc.sha256sum && \ chmod +x /go/bin/mc +RUN if [ "$TARGETARCH" = "amd64" ]; then \ + curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \ + chmod +x /go/bin/curl; \ + fi + # Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN" RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \ minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav @@ -46,9 +53,12 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \ MINIO_CONFIG_ENV_FILE=config.env \ MC_CONFIG_DIR=/tmp/.mc +RUN chmod -R 777 /usr/bin + COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ -COPY --from=build /go/bin/minio /usr/bin/minio -COPY --from=build /go/bin/mc /usr/bin/mc +COPY --from=build /go/bin/minio* /usr/bin/ +COPY --from=build /go/bin/mc* /usr/bin/ +COPY --from=build /go/bin/cur* /usr/bin/ COPY CREDITS /licenses/CREDITS COPY LICENSE 
/licenses/LICENSE diff --git a/Dockerfile.release b/Dockerfile.release index 0991b8bd5dd73..b2d05283850f8 100644 --- a/Dockerfile.release +++ b/Dockerfile.release @@ -1,30 +1,39 @@ -FROM golang:1.21-alpine as build +FROM golang:1.24-alpine AS build ARG TARGETARCH ARG RELEASE -ENV GOPATH /go -ENV CGO_ENABLED 0 +ENV GOPATH=/go +ENV CGO_ENABLED=0 + +WORKDIR /build # Install curl and minisign RUN apk add -U --no-cache ca-certificates && \ apk add -U --no-cache curl && \ + apk add -U --no-cache bash && \ go install aead.dev/minisign/cmd/minisign@v0.2.1 -# Download minio binary and signature file +# Download minio binary and signature files RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \ curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \ + curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /go/bin/minio.sha256sum && \ chmod +x /go/bin/minio -# Download mc binary and signature file +# Download mc binary and signature files RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \ curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \ + curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.sha256sum -o /go/bin/mc.sha256sum && \ chmod +x /go/bin/mc # Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN" RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \ minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav +COPY dockerscripts/download-static-curl.sh /build/download-static-curl +RUN chmod +x /build/download-static-curl && \ + /build/download-static-curl + FROM registry.access.redhat.com/ubi9/ubi-micro:latest ARG RELEASE @@ -46,9 +55,12 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \ MINIO_CONFIG_ENV_FILE=config.env \ MC_CONFIG_DIR=/tmp/.mc +RUN chmod -R 777 /usr/bin + COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ -COPY --from=build /go/bin/minio /usr/bin/minio -COPY --from=build /go/bin/mc /usr/bin/mc +COPY --from=build /go/bin/minio* /usr/bin/ +COPY --from=build /go/bin/mc* /usr/bin/ +COPY --from=build /go/bin/curl* /usr/bin/ COPY CREDITS /licenses/CREDITS COPY LICENSE /licenses/LICENSE diff --git a/Dockerfile.release.fips b/Dockerfile.release.fips deleted file mode 100644 index 0b66a03437950..0000000000000 --- a/Dockerfile.release.fips +++ /dev/null @@ -1,53 +0,0 @@ -FROM golang:1.21-alpine as build - -ARG TARGETARCH -ARG RELEASE - -ENV GOPATH /go -ENV CGO_ENABLED 0 - -# Install curl and minisign -RUN apk add -U --no-cache ca-certificates && \ - apk add -U --no-cache curl && \ - go install aead.dev/minisign/cmd/minisign@v0.2.1 - -# Download minio binary and signature file -RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips -o /go/bin/minio && \ - curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.minisig -o /go/bin/minio.minisig && \ - chmod +x /go/bin/minio - -# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN" -RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav - -FROM 
registry.access.redhat.com/ubi9/ubi-micro:latest - -ARG RELEASE - -LABEL name="MinIO" \ - vendor="MinIO Inc " \ - maintainer="MinIO Inc " \ - version="${RELEASE}" \ - release="${RELEASE}" \ - summary="MinIO is a High Performance Object Storage, API compatible with Amazon S3 cloud storage service." \ - description="MinIO object storage is fundamentally different. Designed for performance and the S3 API, it is 100% open-source. MinIO is ideal for large, private cloud environments with stringent security requirements and delivers mission-critical availability across a diverse range of workloads." - -ENV MINIO_ACCESS_KEY_FILE=access_key \ - MINIO_SECRET_KEY_FILE=secret_key \ - MINIO_ROOT_USER_FILE=access_key \ - MINIO_ROOT_PASSWORD_FILE=secret_key \ - MINIO_KMS_SECRET_KEY_FILE=kms_master_key \ - MINIO_UPDATE_MINISIGN_PUBKEY="RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav" \ - MINIO_CONFIG_ENV_FILE=config.env - -COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ -COPY --from=build /go/bin/minio /usr/bin/minio - -COPY CREDITS /licenses/CREDITS -COPY LICENSE /licenses/LICENSE -COPY dockerscripts/docker-entrypoint.sh /usr/bin/docker-entrypoint.sh - -EXPOSE 9000 -VOLUME ["/data"] - -ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"] -CMD ["minio"] diff --git a/Dockerfile.release.old_cpu b/Dockerfile.release.old_cpu index 2f9f1c3732156..5fc0f3e3a5983 100644 --- a/Dockerfile.release.old_cpu +++ b/Dockerfile.release.old_cpu @@ -1,26 +1,33 @@ -FROM golang:1.21-alpine as build +FROM golang:1.24-alpine AS build ARG TARGETARCH ARG RELEASE -ENV GOPATH /go -ENV CGO_ENABLED 0 +ENV GOPATH=/go +ENV CGO_ENABLED=0 # Install curl and minisign RUN apk add -U --no-cache ca-certificates && \ apk add -U --no-cache curl && \ go install aead.dev/minisign/cmd/minisign@v0.2.1 -# Download minio binary and signature file +# Download minio binary and signature files RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE} -o /go/bin/minio && \ curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.minisig -o /go/bin/minio.minisig && \ + curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.sha256sum -o /go/bin/minio.sha256sum && \ chmod +x /go/bin/minio -# Download mc binary and signature file +# Download mc binary and signature files RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go/bin/mc && \ curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \ + curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.sha256sum -o /go/bin/mc.sha256sum && \ chmod +x /go/bin/mc +RUN if [ "$TARGETARCH" = "amd64" ]; then \ + curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \ + chmod +x /go/bin/curl; \ + fi + # Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN" RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \ minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav @@ -46,9 +53,12 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \ MINIO_CONFIG_ENV_FILE=config.env \ MC_CONFIG_DIR=/tmp/.mc +RUN chmod -R 777 /usr/bin + COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ -COPY --from=build /go/bin/minio /usr/bin/minio -COPY --from=build /go/bin/mc /usr/bin/mc 
+COPY --from=build /go/bin/minio* /usr/bin/ +COPY --from=build /go/bin/mc* /usr/bin/ +COPY --from=build /go/bin/cur* /usr/bin/ COPY CREDITS /licenses/CREDITS COPY LICENSE /licenses/LICENSE diff --git a/Makefile b/Makefile index 485e1b1ebbefe..c7ea4b2757951 100644 --- a/Makefile +++ b/Makefile @@ -2,8 +2,8 @@ PWD := $(shell pwd) GOPATH := $(shell go env GOPATH) LDFLAGS := $(shell go run buildscripts/gen-ldflags.go) -GOARCH := $(shell go env GOARCH) -GOOS := $(shell go env GOOS) +GOOS ?= $(shell go env GOOS) +GOARCH ?= $(shell go env GOARCH) VERSION ?= $(shell git describe --tags) REPO ?= quay.io/minio @@ -24,8 +24,6 @@ help: ## print this help getdeps: ## fetch necessary dependencies @mkdir -p ${GOPATH}/bin @echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOLANGCI_DIR) - @echo "Installing msgp" && go install -v github.com/tinylib/msgp@6ac204f0b4d48d17ab4fa442134c7fba13127a4e - @echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest crosscompile: ## cross compile minio @(env bash $(PWD)/buildscripts/cross-compile.sh) @@ -34,31 +32,47 @@ verifiers: lint check-gen check-gen: ## check for updated autogenerated files @go generate ./... >/dev/null + @go mod tidy -compat=1.21 @(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false) + @(! git diff --name-only | grep 'go.sum') || (echo "Non-committed changes in auto-generated go.sum is detected, please commit them to proceed." && false) lint: getdeps ## runs golangci-lint suite of linters @echo "Running $@ check" @$(GOLANGCI) run --build-tags kqueue --timeout=10m --config ./.golangci.yml + @command typos && typos ./ || echo "typos binary is not found.. skipping.." lint-fix: getdeps ## runs golangci-lint suite of linters with automatic fixes @echo "Running $@ check" @$(GOLANGCI) run --build-tags kqueue --timeout=10m --config ./.golangci.yml --fix check: test -test: verifiers build build-debugging ## builds minio, runs linters, tests +test: verifiers build ## builds minio, runs linters, tests @echo "Running unit tests" - @MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -v -tags kqueue ./... + @MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -v -tags kqueue,dev ./... 
test-root-disable: install-race @echo "Running minio root lockdown tests" @env bash $(PWD)/buildscripts/disable-root.sh +test-ilm: install-race + @echo "Running ILM tests" + @env bash $(PWD)/docs/bucket/replication/setup_ilm_expiry_replication.sh + +test-ilm-transition: install-race + @echo "Running ILM tiering tests with healing" + @env bash $(PWD)/docs/bucket/lifecycle/setup_ilm_transition.sh + +test-pbac: install-race + @echo "Running bucket policies tests" + @env bash $(PWD)/docs/iam/policies/pbac-tests.sh + test-decom: install-race @echo "Running minio decom tests" @env bash $(PWD)/docs/distributed/decom.sh @env bash $(PWD)/docs/distributed/decom-encrypted.sh @env bash $(PWD)/docs/distributed/decom-encrypted-sse-s3.sh @env bash $(PWD)/docs/distributed/decom-compressed-sse-s3.sh + @env bash $(PWD)/docs/distributed/decom-encrypted-kes.sh test-versioning: install-race @echo "Running minio versioning tests" @@ -75,11 +89,23 @@ test-race: verifiers build ## builds minio, runs linters, tests (race) @echo "Running unit tests under -race" @(env bash $(PWD)/buildscripts/race.sh) -test-iam: build ## verify IAM (external IDP, etcd backends) +test-iam: install-race ## verify IAM (external IDP, etcd backends) @echo "Running tests for IAM (external IDP, etcd backends)" - @MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -tags kqueue -v -run TestIAM* ./cmd + @MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -timeout 15m -tags kqueue,dev -v -run TestIAM* ./cmd @echo "Running tests for IAM (external IDP, etcd backends) with -race" - @MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd + @MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -timeout 15m -race -tags kqueue,dev -v -run TestIAM* ./cmd + +test-iam-ldap-upgrade-import: install-race ## verify IAM (external LDAP IDP) + @echo "Running upgrade tests for IAM (LDAP backend)" + @env bash $(PWD)/buildscripts/minio-iam-ldap-upgrade-import-test.sh + +test-iam-import-with-missing-entities: install-race ## test import of external iam config with missing entities + @echo "Test IAM import configurations with missing entities" + @env bash $(PWD)/docs/distributed/iam-import-with-missing-entities.sh + +test-iam-import-with-openid: install-race + @echo "Test IAM import configurations with openid" + @env bash $(PWD)/docs/distributed/iam-import-with-openid.sh test-sio-error: @(env bash $(PWD)/docs/bucket/replication/sio-error.sh) @@ -93,7 +119,10 @@ test-replication-3site: test-delete-replication: @(env bash $(PWD)/docs/bucket/replication/delete-replication.sh) -test-replication: install-race test-replication-2site test-replication-3site test-delete-replication test-sio-error ## verify multi site replication +test-delete-marker-proxying: + @(env bash $(PWD)/docs/bucket/replication/test_del_marker_proxying.sh) + +test-replication: install-race test-replication-2site test-replication-3site test-delete-replication test-sio-error test-delete-marker-proxying ## verify multi site replication @echo "Running tests for replicating three sites" test-site-replication-ldap: install-race ## verify automatic site replication @@ -107,39 +136,49 @@ test-site-replication-oidc: install-race ## verify automatic site replication test-site-replication-minio: install-race ## verify automatic site replication @echo "Running tests for automatic site replication of IAM (with MinIO IDP)" @(env bash $(PWD)/docs/site-replication/run-multi-site-minio-idp.sh) - -verify: ## verify minio various setups + 
@echo "Running tests for automatic site replication of SSE-C objects" + @(env bash $(PWD)/docs/site-replication/run-ssec-object-replication.sh) + @echo "Running tests for automatic site replication of SSE-C objects with SSE-KMS enabled for bucket" + @(env bash $(PWD)/docs/site-replication/run-sse-kms-object-replication.sh) + @echo "Running tests for automatic site replication of SSE-C objects with compression enabled for site" + @(env bash $(PWD)/docs/site-replication/run-ssec-object-replication-with-compression.sh) + +test-multipart: install-race ## test multipart + @echo "Test multipart behavior when part files are missing" + @(env bash $(PWD)/buildscripts/multipart-quorum-test.sh) + +test-timeout: install-race ## test multipart + @echo "Test server timeout" + @(env bash $(PWD)/buildscripts/test-timeout.sh) + +verify: install-race ## verify minio various setups @echo "Verifying build with race" - @GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null @(env bash $(PWD)/buildscripts/verify-build.sh) -verify-healing: ## verify healing and replacing disks with minio binary +verify-healing: install-race ## verify healing and replacing disks with minio binary @echo "Verify healing build with race" - @GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null @(env bash $(PWD)/buildscripts/verify-healing.sh) + @(env bash $(PWD)/buildscripts/verify-healing-empty-erasure-set.sh) @(env bash $(PWD)/buildscripts/heal-inconsistent-versions.sh) -verify-healing-with-root-disks: ## verify healing root disks +verify-healing-with-root-disks: install-race ## verify healing root disks @echo "Verify healing with root drives" - @GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null @(env bash $(PWD)/buildscripts/verify-healing-with-root-disks.sh) -verify-healing-with-rewrite: ## verify healing to rewrite old xl.meta -> new xl.meta +verify-healing-with-rewrite: install-race ## verify healing to rewrite old xl.meta -> new xl.meta @echo "Verify healing with rewrite" - @GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null @(env bash $(PWD)/buildscripts/rewrite-old-new.sh) -verify-healing-inconsistent-versions: ## verify resolving inconsistent versions +verify-healing-inconsistent-versions: install-race ## verify resolving inconsistent versions @echo "Verify resolving inconsistent versions build with race" - @GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null @(env bash $(PWD)/buildscripts/resolve-right-versions.sh) build-debugging: @(env bash $(PWD)/docs/debugging/build.sh) -build: checks ## builds minio to $(PWD) +build: checks build-debugging ## builds minio to $(PWD) @echo "Building minio binary to './minio'" - @CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null + @CGO_ENABLED=0 GOOS=$(GOOS) GOARCH=$(GOARCH) go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null hotfix-vars: $(eval LDFLAGS := $(shell MINIO_RELEASE="RELEASE" MINIO_HOTFIX="hotfix.$(shell git rev-parse --short HEAD)" go run buildscripts/gen-ldflags.go $(shell git describe --tags --abbrev=0 | \ @@ -147,9 +186,9 @@ hotfix-vars: $(eval VERSION := $(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD)) 
hotfix: hotfix-vars clean install ## builds minio binary with hotfix tags - @wget -q -c https://github.com/minio/pkger/releases/download/v2.2.1/pkger_2.2.1_linux_amd64.deb - @wget -q -c https://raw.githubusercontent.com/minio/minio-service/v1.0.1/linux-systemd/distributed/minio.service - @sudo apt install ./pkger_2.2.1_linux_amd64.deb --yes + @wget -q -c https://github.com/minio/pkger/releases/download/v2.3.11/pkger_2.3.11_linux_amd64.deb + @wget -q -c https://raw.githubusercontent.com/minio/minio-service/v1.1.1/linux-systemd/distributed/minio.service + @sudo apt install ./pkger_2.3.11_linux_amd64.deb --yes @mkdir -p minio-release/$(GOOS)-$(GOARCH)/archive @cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio @cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION) @@ -159,11 +198,11 @@ hotfix: hotfix-vars clean install ## builds minio binary with hotfix tags @pkger -r $(VERSION) --ignore hotfix-push: hotfix - @scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/ - @scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive - @scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/ - @scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-amd64/archive - @echo "Published new hotfix binaries at https://dl.min.io/server/minio/hotfixes/linux-amd64/archive/minio.$(VERSION)" + @scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/ + @scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-0.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/archive + @scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/ + @scp -q -r minio-release/$(GOOS)-$(GOARCH)/* minio@dl-1.minio.io:~/releases/server/minio/hotfixes/linux-$(GOOS)/archive + @echo "Published new hotfix binaries at https://dl.min.io/server/minio/hotfixes/linux-$(GOOS)/archive/minio.$(VERSION)" docker-hotfix-push: docker-hotfix @docker push -q $(TAG) && echo "Published new container $(TAG)" @@ -176,15 +215,19 @@ docker: build ## builds minio docker container @echo "Building minio docker image '$(TAG)'" @docker build -q --no-cache -t $(TAG) . -f Dockerfile -install-race: checks ## builds minio to $(PWD) +test-resiliency: build + @echo "Running resiliency tests" + @(DOCKER_COMPOSE_FILE=$(PWD)/docs/resiliency/docker-compose.yaml env bash $(PWD)/docs/resiliency/resiliency-tests.sh) + +install-race: checks build-debugging ## builds minio to $(PWD) @echo "Building minio binary with -race to './minio'" - @GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null + @GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue,dev -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null @echo "Installing minio binary with -race to '$(GOPATH)/bin/minio'" - @mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio + @mkdir -p $(GOPATH)/bin && cp -af $(PWD)/minio $(GOPATH)/bin/minio install: build ## builds minio and installs it to $GOPATH/bin. @echo "Installing minio binary to '$(GOPATH)/bin/minio'" - @mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio + @mkdir -p $(GOPATH)/bin && cp -af $(PWD)/minio $(GOPATH)/bin/minio @echo "Installation successful. To learn more, try \"minio --help\"." 
clean: ## cleanup all generated assets diff --git a/PULL_REQUESTS_ETIQUETTE.md b/PULL_REQUESTS_ETIQUETTE.md new file mode 100644 index 0000000000000..a4a9008f4520c --- /dev/null +++ b/PULL_REQUESTS_ETIQUETTE.md @@ -0,0 +1,93 @@ +# MinIO Pull Request Guidelines + +These guidelines ensure high-quality commits in MinIO’s GitHub repositories, maintaining +a clear, valuable commit history for our open-source projects. They apply to all contributors, +fostering efficient reviews and robust code. + +## Why Pull Requests? + +Pull Requests (PRs) drive quality in MinIO’s codebase by: +- Enabling peer review without pair programming. +- Documenting changes for future reference. +- Ensuring commits tell a clear story of development. + +**A poor commit lasts forever, even if code is refactored.** + +## Crafting a Quality PR + +A strong MinIO PR: +- Delivers a complete, valuable change (feature, bug fix, or improvement). +- Has a concise title (e.g., `[S3] Fix bucket policy parsing #1234`) and a summary with context, referencing issues (e.g., `#1234`). +- Contains well-written, logical commits explaining *why* changes were made (e.g., “Add S3 bucket tagging support so that users can organize resources efficiently”). +- Is small, focused, and easy to review—ideally one commit, unless multiple commits better narrate complex work. +- Adheres to MinIO’s coding standards (e.g., Go style, error handling, testing). + +PRs must flow smoothly through review to reach production. Large PRs should be split into smaller, manageable ones. + +## Submitting PRs + +1. **Title and Summary**: + - Use a scannable title: `[Subsystem] Action Description #Issue` (e.g., `[IAM] Add role-based access control #567`). + - Include context in the summary: what changed, why, and any issue references. + - Use `[WIP]` for in-progress PRs to avoid premature merging or choose GitHub draft PRs. + +2. **Commits**: + - Write clear messages: what changed and why (e.g., “Refactor S3 API handler to reduce latency so that requests process 20% faster”). + - Rebase to tidy commits before submitting (e.g., `git rebase -i main` to squash typos or reword messages), unless multiple contributors worked on the branch. + - Keep PRs focused—one feature or fix. Split large changes into multiple PRs. + +3. **Testing**: + - Include unit tests for new functionality or bug fixes. + - Ensure existing tests pass (`make test`). + - Document testing steps in the PR summary if manual testing was performed. + +4. **Before Submitting**: + - Run `make verify` to check formatting, linting, and tests. + - Reference related issues (e.g., “Closes #1234”). + - Notify team members via GitHub `@mentions` if urgent or complex. + +## Reviewing PRs + +Reviewers ensure MinIO’s commit history remains a clear, reliable record. Responsibilities include: + +1. **Commit Quality**: + - Verify each commit explains *why* the change was made (e.g., “So that…”). + - Request rebasing if commits are unclear, redundant, or lack context (e.g., “Please squash typo fixes into the parent commit”). + +2. **Code Quality**: + - Check adherence to MinIO’s Go standards (e.g., error handling, documentation). + - Ensure tests cover new code and pass CI. + - Flag bugs or critical issues for immediate fixes; suggest non-blocking improvements as follow-up issues. + +3. **Flow**: + - Review promptly to avoid blocking progress. + - Balance quality and speed—minor issues can be addressed later via issues, not PR blocks. 
+ - If unable to complete the review, tag another reviewer (e.g., `@username please take over`). + +4. **Shared Responsibility**: + - All MinIO contributors are reviewers. The first commenter on a PR owns the review unless they delegate. + - Multiple reviewers are encouraged for complex PRs. + +5. **No Self-Edits**: + - Don’t modify the PR directly (e.g., fixing bugs). Request changes from the submitter or create a follow-up PR. + - If you edit, you’re a collaborator, not a reviewer, and cannot merge. + +6. **Testing**: + - Assume the submitter tested the code. If testing is unclear, ask for details (e.g., “How was this tested?”). + - Reject untested PRs unless testing is infeasible, then assist with test setup. + +## Tips for Success + +- **Small PRs**: Easier to review, faster to merge. Split large changes logically. +- **Clear Commits**: Use `git rebase -i` to refine history before submitting. +- **Engage Early**: Discuss complex changes in issues or Slack (https://slack.min.io) before coding. +- **Be Responsive**: Address reviewer feedback promptly to keep PRs moving. +- **Learn from Reviews**: Use feedback to improve future contributions. + +## Resources + +- [MinIO Coding Standards](https://github.com/minio/minio/blob/master/CONTRIBUTING.md) +- [Effective Commit Messages](https://mislav.net/2014/02/hidden-documentation/) +- [GitHub PR Tips](https://github.com/blog/1943-how-to-write-the-perfect-pull-request) + +By following these guidelines, we ensure MinIO’s codebase remains high-quality, maintainable, and a joy to contribute to. Happy coding! diff --git a/README.fips.md b/README.fips.md deleted file mode 100644 index a87ef8e0b7d09..0000000000000 --- a/README.fips.md +++ /dev/null @@ -1,7 +0,0 @@ -# MinIO FIPS Builds - -MinIO creates FIPS builds using a patched version of the Go compiler (that uses BoringCrypto, from BoringSSL, which is [FIPS 140-2 validated](https://csrc.nist.gov/csrc/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp2964.pdf)) published by the Golang Team [here](https://github.com/golang/go/tree/dev.boringcrypto/misc/boring). - -MinIO FIPS executables are available at - they are only published for `linux-amd64` architecture as binary files with the suffix `.fips`. We also publish corresponding container images to our official image repositories. - -We are not making any statements or representations about the suitability of this code or build in relation to the FIPS 140-2 standard. Interested users will have to evaluate for themselves whether this is useful for their own purposes. diff --git a/README.md b/README.md index fb10fc656a53c..481995aa93166 100644 --- a/README.md +++ b/README.md @@ -1,261 +1,171 @@ -# MinIO Quickstart Guide - -[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/) [![license](https://img.shields.io/badge/license-AGPL%20V3-blue)](https://github.com/minio/minio/blob/master/LICENSE) - -[![MinIO](https://raw.githubusercontent.com/minio/minio/master/.github/logo.svg?sanitize=true)](https://min.io) +# Maintenance Mode -MinIO is a High Performance Object Storage released under GNU Affero General Public License v3.0. It is API compatible with Amazon S3 cloud storage service. Use MinIO to build high performance infrastructure for machine learning, analytics and application data workloads. 
+**This project is currently under maintenance and is not accepting new changes.** -This README provides quickstart instructions on running MinIO on bare metal hardware, including container-based installations. For Kubernetes environments, use the [MinIO Kubernetes Operator](https://github.com/minio/operator/blob/master/README.md). +- The codebase is in a maintenance-only state +- No new features, enhancements, or pull requests will be accepted +- Critical security fixes may be evaluated on a case-by-case basis +- Existing issues and pull requests will not be actively reviewed +- Community support continues on a best-effort basis through [Slack](https://slack.min.io) -## Container Installation +For enterprise support and actively maintained versions, please see [MinIO AIStor](https://www.min.io/product/aistor). -Use the following commands to run a standalone MinIO server as a container. +--- -Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication -require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, -with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) -for more complete documentation. - -### Stable - -Run the following command to run the latest stable image of MinIO as a container using an ephemeral data volume: +# MinIO Quickstart Guide -```sh -podman run -p 9000:9000 -p 9001:9001 \ - quay.io/minio/minio server /data --console-address ":9001" -``` +[![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/) [![license](https://img.shields.io/badge/license-AGPL%20V3-blue)](https://github.com/minio/minio/blob/master/LICENSE) -The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded -object browser built into MinIO Server. Point a web browser running on the host machine to and log in with the -root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server. +[![MinIO](https://raw.githubusercontent.com/minio/minio/master/.github/logo.svg?sanitize=true)](https://min.io) -You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See -[Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, -see to view MinIO SDKs for supported languages. +MinIO is a high-performance, S3-compatible object storage solution released under the GNU AGPL v3.0 license. +Designed for speed and scalability, it powers AI/ML, analytics, and data-intensive workloads with industry-leading performance. -> NOTE: To deploy MinIO on with persistent storage, you must map local persistent directories from the host OS to the container using the `podman -v` option. For example, `-v /mnt/data:/data` maps the host OS drive at `/mnt/data` to `/data` on the container. +- S3 API Compatible – Seamless integration with existing S3 tools +- Built for AI & Analytics – Optimized for large-scale data pipelines +- High Performance – Ideal for demanding storage workloads. 
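As a quick illustration of the S3 API compatibility claimed in the bullets above, the sketch below drives a locally running server with the stock AWS CLI. The endpoint, bucket name, file name, and default `minioadmin` credentials are assumptions made for this example only, not part of the change itself.

```sh
# Illustrative only: assumes a local MinIO server on port 9000 with the
# default root credentials and an installed AWS CLI.
export AWS_ACCESS_KEY_ID=minioadmin
export AWS_SECRET_ACCESS_KEY=minioadmin

# Create a bucket, upload a file, and list it back through the S3 API.
aws --endpoint-url http://localhost:9000 s3 mb s3://example-bucket
aws --endpoint-url http://localhost:9000 s3 cp ./report.csv s3://example-bucket/
aws --endpoint-url http://localhost:9000 s3 ls s3://example-bucket/
```

Any S3-style tooling should work the same way once it is pointed at the server's endpoint instead of AWS.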
-## macOS +This README provides instructions for building MinIO from source and deploying onto baremetal hardware. +Use the [MinIO Documentation](https://github.com/minio/docs) project to build and host a local copy of the documentation. -Use the following commands to run a standalone MinIO server on macOS. +## MinIO is Open Source Software -Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation. +We designed MinIO as Open Source software for the Open Source software community. We encourage the community to remix, redesign, and reshare MinIO under the terms of the AGPLv3 license. -### Homebrew (recommended) +All usage of MinIO in your application stack requires validation against AGPLv3 obligations, which include but are not limited to the release of modified code to the community from which you have benefited. Any commercial/proprietary usage of the AGPLv3 software, including repackaging or reselling services/features, is done at your own risk. -Run the following command to install the latest stable MinIO package using [Homebrew](https://brew.sh/). Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data. +The AGPLv3 provides no obligation by any party to support, maintain, or warranty the original or any modified work. +All support is provided on a best-effort basis through GitHub and our [Slack](https://slack.min.io) channel, and any member of the community is welcome to contribute and assist others in their usage of the software. -```sh -brew install minio/stable/minio -minio server /data -``` +MinIO [AIStor](https://www.min.io/product/aistor) includes enterprise-grade support and licensing for workloads which require commercial or proprietary usage and production-level SLA/SLO-backed support. For more information, [reach out for a quote](https://min.io/pricing). -> NOTE: If you previously installed minio using `brew install minio` then it is recommended that you reinstall minio from `minio/stable/minio` official repo instead. +## Source-Only Distribution -```sh -brew uninstall minio -brew install minio/stable/minio -``` +**Important:** The MinIO community edition is now distributed as source code only. We will no longer provide pre-compiled binary releases for the community version. -The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server. +### Installing Latest MinIO Community Edition -You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see to view MinIO SDKs for supported languages. +To use MinIO community edition, you have two options: -### Binary Download +1.
**Install from source** using `go install github.com/minio/minio@latest` (recommended) +2. **Build a Docker image** from the provided Dockerfile -Use the following command to download and run a standalone MinIO server on macOS. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data. +See the sections below for detailed instructions on each method. -```sh -wget https://dl.min.io/server/minio/release/darwin-amd64/minio -chmod +x minio -./minio server /data -``` +### Legacy Binary Releases -The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server. +Historical pre-compiled binary releases remain available for reference but are no longer maintained: +- GitHub Releases: https://github.com/minio/minio/releases +- Direct downloads: https://dl.min.io/server/minio/release/ -You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see to view MinIO SDKs for supported languages. +**These legacy binaries will not receive updates.** We strongly recommend using source builds for access to the latest features, bug fixes, and security updates. -## GNU/Linux +## Install from Source -Use the following command to run a standalone MinIO server on Linux hosts running 64-bit Intel/AMD architectures. Replace ``/data`` with the path to the drive or directory in which you want MinIO to store data. +Use the following commands to compile and run a standalone MinIO server from source. +If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.24](https://golang.org/dl/#stable) ```sh -wget https://dl.min.io/server/minio/release/linux-amd64/minio -chmod +x minio -./minio server /data +go install github.com/minio/minio@latest ``` -The following table lists supported architectures. Replace the `wget` URL with the architecture for your Linux host. - -| Architecture | URL | -| -------- | ------ | -| 64-bit Intel/AMD | | -| 64-bit ARM | | -| 64-bit PowerPC LE (ppc64le) | | -| IBM Z-Series (S390X) | | - -The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server. - -You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see to view MinIO SDKs for supported languages. - -> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. 
For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation. - -## Microsoft Windows - -To run MinIO on 64-bit Windows hosts, download the MinIO executable from the following URL: +You can alternatively run `go build` and use the `GOOS` and `GOARCH` environment variables to control the OS and architecture target. +For example: -```sh -https://dl.min.io/server/minio/release/windows-amd64/minio.exe ``` - -Use the following command to run a standalone MinIO server on the Windows host. Replace ``D:\`` with the path to the drive or directory in which you want MinIO to store data. You must change the terminal or powershell directory to the location of the ``minio.exe`` executable, *or* add the path to that directory to the system ``$PATH``: - -```sh -minio.exe server D:\ +env GOOS=linux GOARCH=arm64 go build ``` -The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. Point a web browser running on the host machine to and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server. +Start MinIO by running `minio server PATH` where `PATH` is any empty folder on your local filesystem. -You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see to view MinIO SDKs for supported languages. +The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. +You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server. +Point a web browser running on the host machine to and log in with the root credentials. +You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server. -> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#) for more complete documentation. - -## Install from Source - -Use the following commands to compile and run a standalone MinIO server from source. Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.21](https://golang.org/dl/#stable) ```sh go install github.com/minio/minio@latest ``` -The MinIO deployment starts using default root credentials `minioadmin:minioadmin`. You can test the deployment using the MinIO Console, an embedded web-based object browser built into MinIO Server.
Point a web browser running on the host machine to and log in with the root credentials. You can use the Browser to create buckets, upload objects, and browse the contents of the MinIO server. +See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. +For application developers, see to view MinIO SDKs for supported languages. -You can also connect using any S3-compatible tool, such as the MinIO Client `mc` commandline tool. See [Test using MinIO Client `mc`](#test-using-minio-client-mc) for more information on using the `mc` commandline tool. For application developers, see to view MinIO SDKs for supported languages. +> [!NOTE] +> Production environments using compiled-from-source MinIO binaries do so at their own risk. +> The AGPLv3 license provides no warranties or liabilities for any such usage. -> NOTE: Standalone MinIO servers are best suited for early development and evaluation. Certain features such as versioning, object locking, and bucket replication require distributed deploying MinIO with Erasure Coding. For extended development and production, deploy MinIO with Erasure Coding enabled - specifically, with a *minimum* of 4 drives per MinIO server. See [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) for more complete documentation. +## Build Docker Image -MinIO strongly recommends *against* using compiled-from-source MinIO servers for production environments. +You can use the `docker build .` command to build a Docker image on your local host machine. +You must first [build MinIO](#install-from-source) and ensure the `minio` binary exists in the project root. -## Deployment Recommendations - -### Allow port access for Firewalls - -By default MinIO uses the port 9000 to listen for incoming connections. If your platform blocks the port by default, you may need to enable access to the port. - -### ufw - -For hosts with ufw enabled (Debian based distros), you can use `ufw` command to allow traffic to specific ports. Use below command to allow access to port 9000 +The following command builds the Docker image using the default `Dockerfile` in the root project directory with the repository and image tag `myminio:minio` ```sh -ufw allow 9000 +docker build -t myminio:minio . ``` -Below command enables all incoming traffic to ports ranging from 9000 to 9010. +Use `docker image ls` to confirm the image exists in your local repository. +You can run the server using standard Docker invocation: ```sh -ufw allow 9000:9010/tcp +docker run -p 9000:9000 -p 9001:9001 myminio:minio server /tmp/minio --console-address :9001 ``` -### firewall-cmd +Complete documentation for building Docker containers, managing custom images, or loading images into orchestration platforms is out of scope for this documentation. +You can modify the `Dockerfile` and `dockerscripts/docker-entrypoint.sh` as needed to reflect your specific image requirements. -For hosts with firewall-cmd enabled (CentOS), you can use `firewall-cmd` command to allow traffic to specific ports. Use below commands to allow access to port 9000 +See the [MinIO Container](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-as-a-container.html#deploy-minio-container) documentation for more guidance on running MinIO within a Container image. -```sh -firewall-cmd --get-active-zones -``` - -This command gets the active zone(s). Now, apply port rules to the relevant zones returned above.
For example if the zone is `public`, use +## Install using Helm Charts -```sh -firewall-cmd --zone=public --add-port=9000/tcp --permanent -``` +There are two paths for installing MinIO onto Kubernetes infrastructure: -Note that `permanent` makes sure the rules are persistent across firewall start, restart or reload. Finally reload the firewall for changes to take effect. - -```sh -firewall-cmd --reload -``` - -### iptables - -For hosts with iptables enabled (RHEL, CentOS, etc), you can use `iptables` command to enable all traffic coming to specific ports. Use below command to allow -access to port 9000 - -```sh -iptables -A INPUT -p tcp --dport 9000 -j ACCEPT -service iptables restart -``` +- Use the [MinIO Operator](https://github.com/minio/operator) +- Use the community-maintained [Helm charts](https://github.com/minio/minio/tree/master/helm/minio) -Below command enables all incoming traffic to ports ranging from 9000 to 9010. - -```sh -iptables -A INPUT -p tcp --dport 9000:9010 -j ACCEPT -service iptables restart -``` +See the [MinIO Documentation](https://docs.min.io/community/minio-object-store/operations/deployments/kubernetes.html) for guidance on deploying using the Operator. +The Community Helm chart has instructions in the folder-level README. ## Test MinIO Connectivity ### Test using MinIO Console -MinIO Server comes with an embedded web based object browser. Point your web browser to to ensure your server has started successfully. - -> NOTE: MinIO runs console on random port by default, if you wish to choose a specific port use `--console-address` to pick a specific interface and port. - -### Things to consider - -MinIO redirects browser access requests to the configured server port (i.e. `127.0.0.1:9000`) to the configured Console port. MinIO uses the hostname or IP address specified in the request when building the redirect URL. The URL and port *must* be accessible by the client for the redirection to work. - -For deployments behind a load balancer, proxy, or ingress rule where the MinIO host IP address or port is not public, use the `MINIO_BROWSER_REDIRECT_URL` environment variable to specify the external hostname for the redirect. The LB/Proxy must have rules for directing traffic to the Console port specifically. - -For example, consider a MinIO deployment behind a proxy `https://minio.example.net`, `https://console.minio.example.net` with rules for forwarding traffic on port :9000 and :9001 to MinIO and the MinIO Console respectively on the internal network. Set `MINIO_BROWSER_REDIRECT_URL` to `https://console.minio.example.net` to ensure the browser receives a valid reachable URL. +MinIO Server comes with an embedded web based object browser. +Point your web browser to to ensure your server has started successfully. -Similarly, if your TLS certificates do not have the IP SAN for the MinIO server host, the MinIO Console may fail to validate the connection to the server. Use the `MINIO_SERVER_URL` environment variable and specify the proxy-accessible hostname of the MinIO server to allow the Console to use the MinIO server API using the TLS certificate. +> [!NOTE] +> MinIO runs console on random port by default, if you wish to choose a specific port use `--console-address` to pick a specific interface and port. 
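For instance, a minimal invocation that pins the console to a fixed port could look like the sketch below; the `/data` path and port `9001` are illustrative choices, not requirements.

```sh
# Serve objects on the default port 9000 and pin the embedded console to 9001.
minio server /data --console-address ":9001"
```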
-For example: `export MINIO_SERVER_URL="https://minio.example.net"` +### Test using MinIO Client `mc` -| Dashboard | Creating a bucket | -| ------------- | ------------- | -| ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic1.png?raw=true) | ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic2.png?raw=true) | +`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. -## Test using MinIO Client `mc` - -`mc` provides a modern alternative to UNIX commands like ls, cat, cp, mirror, diff etc. It supports filesystems and Amazon S3 compatible cloud storage services. Follow the MinIO Client [Quickstart Guide](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart) for further instructions. -## Upgrading MinIO -Upgrades require zero downtime in MinIO, all upgrades are non-disruptive, all transactions on MinIO are atomic. So upgrading all the servers simultaneously is the recommended way to upgrade MinIO. -> NOTE: requires internet access to update directly from , optionally you can host any mirrors at - -- For deployments that installed the MinIO server binary by hand, use [`mc admin update`](https://min.io/docs/minio/linux/reference/minio-mc-admin/mc-admin-update.html) +The following commands set a local alias, validate the server information, create a bucket, copy data to that bucket, and list the contents of the bucket. ```sh -mc admin update +mc alias set local http://localhost:9000 minioadmin minioadmin +mc admin info local +mc mb local/data +mc cp ~/Downloads/mydata local/data/ +mc ls local/data/ ``` -- For deployments without external internet access (e.g. airgapped environments), download the binary from and replace the existing MinIO binary let's say for example `/opt/bin/minio`, apply executable permissions `chmod +x /opt/bin/minio` and proceed to perform `mc admin service restart alias/`. - -- For installations using Systemd MinIO service, upgrade via RPM/DEB packages **parallelly** on all servers or replace the binary lets say `/opt/bin/minio` on all nodes, apply executable permissions `chmod +x /opt/bin/minio` and process to perform `mc admin service restart alias/`. - -### Upgrade Checklist - -- Test all upgrades in a lower environment (DEV, QA, UAT) before applying to production. Performing blind upgrades in production environments carries significant risk. -- Read the release notes for MinIO *before* performing any upgrade, there is no forced requirement to upgrade to latest release upon every release. Some release may not be relevant to your setup, avoid upgrading production environments unnecessarily. -- If you plan to use `mc admin update`, MinIO process must have write access to the parent directory where the binary is present on the host system. -- `mc admin update` is not supported and should be avoided in kubernetes/container environments, please upgrade containers by upgrading relevant container images. -- **We do not recommend upgrading one MinIO server at a time, the product is designed to support parallel upgrades please follow our recommended guidelines.** +Follow the MinIO Client [Quickstart Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart) for further instructions.
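Beyond the one-shot commands above, `mc mirror` covers the synchronization use case hinted at in the command list. A brief sketch, assuming the `local` alias and `local/data` bucket from the previous example and a hypothetical `~/backup` directory:

```sh
# Keep the bucket in sync with a local directory; --watch keeps mirroring
# new and changed files until interrupted.
mc mirror --watch ~/backup local/data
```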
## Explore Further -- [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) -- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html) -- [Use `minio-go` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/go/minio-go.html) -- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html) +- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html) +- [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html) +- [Use `mc` with MinIO Server](https://docs.min.io/community/minio-object-store/reference/minio-mc.html) +- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/enterprise/aistor-object-store/developers/sdk/go/) ## Contribute to MinIO Project -Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md) +Please follow MinIO [Contributor's Guide](https://github.com/minio/minio/blob/master/CONTRIBUTING.md) for guidance on making new contributions to the repository. ## License diff --git a/buildscripts/checkdeps.sh b/buildscripts/checkdeps.sh old mode 100644 new mode 100755 index 11ecc4db0d34a..ed4f666ea0fad --- a/buildscripts/checkdeps.sh +++ b/buildscripts/checkdeps.sh @@ -74,11 +74,11 @@ check_minimum_version() { assert_is_supported_arch() { case "${ARCH}" in - x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64) + x86_64 | amd64 | aarch64 | ppc64le | arm* | s390x | loong64 | loongarch64 | riscv64) return ;; *) - echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64]" + echo "Arch '${ARCH}' is not supported. Supported Arch: [x86_64, amd64, aarch64, ppc64le, arm*, s390x, loong64, loongarch64, riscv64]" exit 1 ;; esac diff --git a/buildscripts/cross-compile.sh b/buildscripts/cross-compile.sh index 691891bae6d42..0590aebb9f14e 100755 --- a/buildscripts/cross-compile.sh +++ b/buildscripts/cross-compile.sh @@ -9,7 +9,7 @@ function _init() { export CGO_ENABLED=0 ## List of architectures and OS to test coss compilation. - SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64" + SUPPORTED_OSARCH="linux/ppc64le linux/mips64 linux/amd64 linux/arm64 linux/s390x darwin/arm64 darwin/amd64 freebsd/amd64 windows/amd64 linux/arm linux/386 netbsd/amd64 linux/mips openbsd/amd64 linux/riscv64" } function _build() { diff --git a/buildscripts/disable-root.sh b/buildscripts/disable-root.sh index fdef120686138..c35c769f09be6 100755 --- a/buildscripts/disable-root.sh +++ b/buildscripts/disable-root.sh @@ -32,6 +32,7 @@ fi set +e export MC_HOST_minioadm=http://minioadmin:minioadmin@localhost:9100/ +./mc ready minioadm ./mc ls minioadm/ @@ -56,7 +57,7 @@ done set +e -sleep 10 +./mc ready minioadm/ ./mc ls minioadm/ if [ $? 
-ne 0 ]; then @@ -81,11 +82,12 @@ minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \ "http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 & -sleep 20s - export MC_HOST_sitea=http://minioadmin:minioadmin@127.0.0.1:9001 export MC_HOST_siteb=http://minioadmin:minioadmin@127.0.0.1:9004 +./mc ready sitea +./mc ready siteb + ./mc admin replicate add sitea siteb ./mc admin user add sitea foobar foo12345 @@ -109,11 +111,12 @@ minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \ "http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 & -sleep 20s - export MC_HOST_sitea=http://foobar:foo12345@127.0.0.1:9001 export MC_HOST_siteb=http://foobar:foo12345@127.0.0.1:9004 +./mc ready sitea +./mc ready siteb + ./mc admin user add sitea foobar-admin foo12345 sleep 2s diff --git a/buildscripts/minio-iam-ldap-upgrade-import-test.sh b/buildscripts/minio-iam-ldap-upgrade-import-test.sh new file mode 100755 index 0000000000000..e7da2b101cc98 --- /dev/null +++ b/buildscripts/minio-iam-ldap-upgrade-import-test.sh @@ -0,0 +1,127 @@ +#!/bin/bash + +# This script is used to test the migration of IAM content from old minio +# instance to new minio instance. +# +# To run it locally, start the LDAP server in github.com/minio/minio-iam-testing +# repo (e.g. make podman-run), and then run this script. +# +# This script assumes that LDAP server is at: +# +# `localhost:389` +# +# if this is not the case, set the environment variable +# `_MINIO_LDAP_TEST_SERVER`. + +OLD_VERSION=RELEASE.2024-03-26T22-10-45Z +OLD_BINARY_LINK=https://dl.min.io/server/minio/release/linux-amd64/archive/minio.${OLD_VERSION} + +__init__() { + if which curl &>/dev/null; then + echo "curl is already installed" + else + echo "Installing curl:" + sudo apt install curl -y + fi + + export GOPATH=/tmp/gopath + export PATH="${PATH}":"${GOPATH}"/bin + + if which mc &>/dev/null; then + echo "mc is already installed" + else + echo "Installing mc:" + go install github.com/minio/mc@latest + fi + + if [ ! -x ./minio.${OLD_VERSION} ]; then + echo "Downloading minio.${OLD_VERSION} binary" + curl -o minio.${OLD_VERSION} ${OLD_BINARY_LINK} + chmod +x minio.${OLD_VERSION} + fi + + if [ -z "$_MINIO_LDAP_TEST_SERVER" ]; then + export _MINIO_LDAP_TEST_SERVER=localhost:389 + echo "Using default LDAP endpoint: $_MINIO_LDAP_TEST_SERVER" + fi + + rm -rf /tmp/data +} + +create_iam_content_in_old_minio() { + echo "Creating IAM content in old minio instance." 
+ + MINIO_CI_CD=1 ./minio.${OLD_VERSION} server /tmp/data/{1...4} & + sleep 5 + + set -x + mc alias set old-minio http://localhost:9000 minioadmin minioadmin + mc ready old-minio + mc idp ldap add old-minio \ + server_addr=localhost:389 \ + server_insecure=on \ + lookup_bind_dn=cn=admin,dc=min,dc=io \ + lookup_bind_password=admin \ + user_dn_search_base_dn=dc=min,dc=io \ + user_dn_search_filter="(uid=%s)" \ + group_search_base_dn=ou=swengg,dc=min,dc=io \ + group_search_filter="(&(objectclass=groupOfNames)(member=%d))" + mc admin service restart old-minio + + mc idp ldap policy attach old-minio readwrite --user=UID=dillon,ou=people,ou=swengg,dc=min,dc=io + mc idp ldap policy attach old-minio readwrite --group=CN=project.c,ou=groups,ou=swengg,dc=min,dc=io + + mc idp ldap policy entities old-minio + + mc admin cluster iam export old-minio + set +x + + mc admin service stop old-minio +} + +import_iam_content_in_new_minio() { + echo "Importing IAM content in new minio instance." + # Assume current minio binary exists. + MINIO_CI_CD=1 ./minio server /tmp/data/{1...4} & + sleep 5 + + set -x + mc alias set new-minio http://localhost:9000 minioadmin minioadmin + echo "BEFORE IMPORT mappings:" + mc ready new-minio + mc idp ldap policy entities new-minio + mc admin cluster iam import new-minio ./old-minio-iam-info.zip + echo "AFTER IMPORT mappings:" + mc idp ldap policy entities new-minio + set +x + + # mc admin service stop new-minio +} + +verify_iam_content_in_new_minio() { + output=$(mc idp ldap policy entities new-minio --json) + + groups=$(echo "$output" | jq -r '.result.policyMappings[] | select(.policy == "readwrite") | .groups[]') + if [ "$groups" != "cn=project.c,ou=groups,ou=swengg,dc=min,dc=io" ]; then + echo "Failed to verify groups: $groups" + exit 1 + fi + + users=$(echo "$output" | jq -r '.result.policyMappings[] | select(.policy == "readwrite") | .users[]') + if [ "$users" != "uid=dillon,ou=people,ou=swengg,dc=min,dc=io" ]; then + echo "Failed to verify users: $users" + exit 1 + fi + + mc admin service stop new-minio +} + +main() { + create_iam_content_in_old_minio + + import_iam_content_in_new_minio + + verify_iam_content_in_new_minio +} + +(__init__ "$@" && main "$@") diff --git a/buildscripts/minio-upgrade.sh b/buildscripts/minio-upgrade.sh old mode 100644 new mode 100755 index 5721a8c6431ba..deaaf4606186c --- a/buildscripts/minio-upgrade.sh +++ b/buildscripts/minio-upgrade.sh @@ -4,10 +4,22 @@ trap 'cleanup $LINENO' ERR # shellcheck disable=SC2120 cleanup() { - MINIO_VERSION=dev docker-compose \ + MINIO_VERSION=dev /tmp/gopath/bin/docker-compose \ -f "buildscripts/upgrade-tests/compose.yml" \ - rm -s -f + down || true + + MINIO_VERSION=dev /tmp/gopath/bin/docker-compose \ + -f "buildscripts/upgrade-tests/compose.yml" \ + rm || true + + for volume in $(docker volume ls -q | grep upgrade); do + docker volume rm ${volume} || true + done + docker volume prune -f + docker system prune -f || true + docker volume prune -f || true + docker volume rm $(docker volume ls -q -f dangling=true) || true } verify_checksum_after_heal() { @@ -55,6 +67,15 @@ __init__() { go install github.com/minio/mc@latest + ## this is needed because github actions don't have + ## docker-compose on all runners + COMPOSE_VERSION=v2.35.1 + mkdir -p /tmp/gopath/bin/ + wget -O /tmp/gopath/bin/docker-compose https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-linux-x86_64 + chmod +x /tmp/gopath/bin/docker-compose + + cleanup + TAG=minio/minio:dev make docker 
MINIO_VERSION=RELEASE.2019-12-19T22-52-26Z docker-compose \ @@ -72,11 +93,11 @@ __init__() { curl -s http://127.0.0.1:9000/minio-test/to-read/hosts | sha256sum - MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop + MINIO_VERSION=dev /tmp/gopath/bin/docker-compose -f "buildscripts/upgrade-tests/compose.yml" stop } main() { - MINIO_VERSION=dev docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build + MINIO_VERSION=dev /tmp/gopath/bin/docker-compose -f "buildscripts/upgrade-tests/compose.yml" up -d --build add_alias diff --git a/buildscripts/multipart-quorum-test.sh b/buildscripts/multipart-quorum-test.sh new file mode 100644 index 0000000000000..f226b0e5a8285 --- /dev/null +++ b/buildscripts/multipart-quorum-test.sh @@ -0,0 +1,126 @@ +#!/bin/bash + +if [ -n "$TEST_DEBUG" ]; then + set -x +fi + +WORK_DIR="$PWD/.verify-$RANDOM" +MINIO_CONFIG_DIR="$WORK_DIR/.minio" +MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server) + +if [ ! -x "$PWD/minio" ]; then + echo "minio executable binary not found in current directory" + exit 1 +fi + +if [ ! -x "$PWD/minio" ]; then + echo "minio executable binary not found in current directory" + exit 1 +fi + +trap 'catch $LINENO' ERR + +function purge() { + rm -rf "$1" +} + +# shellcheck disable=SC2120 +catch() { + if [ $# -ne 0 ]; then + echo "error on line $1" + fi + + echo "Cleaning up instances of MinIO" + pkill minio || true + pkill -9 minio || true + purge "$WORK_DIR" + if [ $# -ne 0 ]; then + exit $# + fi +} + +catch + +function start_minio_10drive() { + start_port=$1 + + export MINIO_ROOT_USER=minio + export MINIO_ROOT_PASSWORD=minio123 + export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/" + unset MINIO_KMS_AUTO_ENCRYPTION # do not auto-encrypt objects + export MINIO_CI_CD=1 + + mkdir ${WORK_DIR} + C_PWD=${PWD} + if [ ! -x "$PWD/mc" ]; then + MC_BUILD_DIR="mc-$RANDOM" + if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then + echo "failed to download https://github.com/minio/mc" + purge "${MC_BUILD_DIR}" + exit 1 + fi + + (cd "${MC_BUILD_DIR}" && go build -o "$C_PWD/mc") + + # remove mc source. + purge "${MC_BUILD_DIR}" + fi + + "${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/disk{1...10}" >"${WORK_DIR}/server1.log" 2>&1 & + pid=$! + disown $pid + sleep 5 + + if ! ps -p ${pid} 1>&2 >/dev/null; then + echo "server1 log:" + cat "${WORK_DIR}/server1.log" + echo "FAILED" + purge "$WORK_DIR" + exit 1 + fi + + "${PWD}/mc" mb --with-versioning minio/bucket + + export AWS_ACCESS_KEY_ID=minio + export AWS_SECRET_ACCESS_KEY=minio123 + aws --endpoint-url http://localhost:"$start_port" s3api create-multipart-upload --bucket bucket --key obj-1 >upload-id.json + uploadId=$(jq -r '.UploadId' upload-id.json) + + truncate -s 5MiB file-5mib + for i in {1..2}; do + aws --endpoint-url http://localhost:"$start_port" s3api upload-part \ + --upload-id "$uploadId" --bucket bucket --key obj-1 \ + --part-number "$i" --body ./file-5mib + done + for i in {1..6}; do + find ${WORK_DIR}/disk${i}/.minio.sys/multipart/ -type f -name "part.1" -delete + done + cat <parts.json +{ + "Parts": [ + { + "PartNumber": 1, + "ETag": "5f363e0e58a95f06cbe9bbc662c5dfb6" + }, + { + "PartNumber": 2, + "ETag": "5f363e0e58a95f06cbe9bbc662c5dfb6" + } + ] +} +EOF + err=$(aws --endpoint-url http://localhost:"$start_port" s3api complete-multipart-upload --upload-id "$uploadId" --bucket bucket --key obj-1 --multipart-upload file://./parts.json 2>&1) + rv=$? 
+ if [ $rv -eq 0 ]; then + echo "Failed to receive an error" + exit 1 + fi + echo "Received an error during complete-multipart as expected: $err" +} + +function main() { + start_port=$(shuf -i 10000-65000 -n 1) + start_minio_10drive ${start_port} +} + +main "$@" diff --git a/buildscripts/rewrite-old-new.sh b/buildscripts/rewrite-old-new.sh index 0dfef4670e392..3527f3a6814d2 100755 --- a/buildscripts/rewrite-old-new.sh +++ b/buildscripts/rewrite-old-new.sh @@ -45,7 +45,8 @@ function verify_rewrite() { "${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 & pid=$! disown $pid - sleep 10 + + "${WORK_DIR}/mc" ready minio/ if ! ps -p ${pid} 1>&2 >/dev/null; then echo "server1 log:" @@ -77,7 +78,8 @@ function verify_rewrite() { "${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 & pid=$! disown $pid - sleep 10 + + "${WORK_DIR}/mc" ready minio/ if ! ps -p ${pid} 1>&2 >/dev/null; then echo "server1 log:" @@ -87,17 +89,12 @@ function verify_rewrite() { exit 1 fi - ( - cd ./docs/debugging/s3-check-md5 - go install -v - ) - - if ! s3-check-md5 \ + if ! ./s3-check-md5 \ -debug \ -versions \ -access-key minio \ -secret-key minio123 \ - -endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then + -endpoint "http://127.0.0.1:${start_port}/" 2>&1 | grep INTACT; then echo "server1 log:" cat "${WORK_DIR}/server1.log" echo "FAILED" @@ -117,7 +114,7 @@ function verify_rewrite() { go run ./buildscripts/heal-manual.go "127.0.0.1:${start_port}" "minio" "minio123" sleep 1 - if ! s3-check-md5 \ + if ! ./s3-check-md5 \ -debug \ -versions \ -access-key minio \ diff --git a/buildscripts/test-timeout.sh b/buildscripts/test-timeout.sh new file mode 100644 index 0000000000000..77a248a6432fc --- /dev/null +++ b/buildscripts/test-timeout.sh @@ -0,0 +1,137 @@ +#!/bin/bash + +if [ -n "$TEST_DEBUG" ]; then + set -x +fi + +WORK_DIR="$PWD/.verify-$RANDOM" +MINIO_CONFIG_DIR="$WORK_DIR/.minio" +MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server) + +if [ ! -x "$PWD/minio" ]; then + echo "minio executable binary not found in current directory" + exit 1 +fi + +if [ ! -x "$PWD/minio" ]; then + echo "minio executable binary not found in current directory" + exit 1 +fi + +trap 'catch $LINENO' ERR + +function purge() { + rm -rf "$1" +} + +# shellcheck disable=SC2120 +catch() { + if [ $# -ne 0 ]; then + echo "error on line $1" + fi + + echo "Cleaning up instances of MinIO" + pkill minio || true + pkill -9 minio || true + purge "$WORK_DIR" + if [ $# -ne 0 ]; then + exit $# + fi +} + +catch + +function gen_put_request() { + hdr_sleep=$1 + body_sleep=$2 + + echo "PUT /testbucket/testobject HTTP/1.1" + sleep $hdr_sleep + echo "Host: foo-header" + echo "User-Agent: curl/8.2.1" + echo "Accept: */*" + echo "Content-Length: 30" + echo "" + + sleep $body_sleep + echo "random line 0" + echo "random line 1" + echo "" + echo "" +} + +function send_put_object_request() { + hdr_timeout=$1 + body_timeout=$2 + + start=$(date +%s) + timeout 5m bash -c "gen_put_request $hdr_timeout $body_timeout | netcat 127.0.0.1 $start_port | read" || return -1 + [ $(($(date +%s) - start)) -gt $((srv_hdr_timeout + srv_idle_timeout + 1)) ] && return -1 + return 0 +} + +function test_minio_with_timeout() { + start_port=$1 + + export MINIO_ROOT_USER=minio + export MINIO_ROOT_PASSWORD=minio123 + export MC_HOST_minio="http://minio:minio123@127.0.0.1:${start_port}/" + export MINIO_CI_CD=1 + + mkdir ${WORK_DIR} + C_PWD=${PWD} + if [ ! 
-x "$PWD/mc" ]; then + MC_BUILD_DIR="mc-$RANDOM" + if ! git clone --quiet https://github.com/minio/mc "$MC_BUILD_DIR"; then + echo "failed to download https://github.com/minio/mc" + purge "${MC_BUILD_DIR}" + exit 1 + fi + + (cd "${MC_BUILD_DIR}" && go build -o "$C_PWD/mc") + + # remove mc source. + purge "${MC_BUILD_DIR}" + fi + + "${MINIO[@]}" --address ":$start_port" --read-header-timeout ${srv_hdr_timeout}s --idle-timeout ${srv_idle_timeout}s "${WORK_DIR}/disk/" >"${WORK_DIR}/server1.log" 2>&1 & + pid=$! + disown $pid + sleep 1 + + if ! ps -p ${pid} 1>&2 >/dev/null; then + echo "server1 log:" + cat "${WORK_DIR}/server1.log" + echo "FAILED" + purge "$WORK_DIR" + exit 1 + fi + + set -e + + "${PWD}/mc" mb minio/testbucket + "${PWD}/mc" anonymous set public minio/testbucket + + # slow header writing + send_put_object_request 20 0 && exit -1 + "${PWD}/mc" stat minio/testbucket/testobject && exit -1 + + # quick header write and slow bodywrite + send_put_object_request 0 40 && exit -1 + "${PWD}/mc" stat minio/testbucket/testobject && exit -1 + + # quick header and body write + send_put_object_request 1 1 || exit -1 + "${PWD}/mc" stat minio/testbucket/testobject || exit -1 +} + +function main() { + export start_port=$(shuf -i 10000-65000 -n 1) + export srv_hdr_timeout=5 + export srv_idle_timeout=5 + export -f gen_put_request + + test_minio_with_timeout ${start_port} +} + +main "$@" diff --git a/buildscripts/upgrade-tests/compose.yml b/buildscripts/upgrade-tests/compose.yml index d70d443d06f2c..e820a6c536919 100644 --- a/buildscripts/upgrade-tests/compose.yml +++ b/buildscripts/upgrade-tests/compose.yml @@ -1,5 +1,3 @@ -version: '3.7' - # Settings and configurations that are common for all containers x-minio-common: &minio-common image: minio/minio:${MINIO_VERSION} diff --git a/buildscripts/verify-build.sh b/buildscripts/verify-build.sh index c3e4f3f837e19..e15d647584557 100755 --- a/buildscripts/verify-build.sh +++ b/buildscripts/verify-build.sh @@ -15,13 +15,14 @@ WORK_DIR="$PWD/.verify-$RANDOM" export MINT_MODE=core export MINT_DATA_DIR="$WORK_DIR/data" export SERVER_ENDPOINT="127.0.0.1:9000" +export MC_HOST_verify="http://minio:minio123@${SERVER_ENDPOINT}/" +export MC_HOST_verify_ipv6="http://minio:minio123@[::1]:9000/" export ACCESS_KEY="minio" export SECRET_KEY="minio123" export ENABLE_HTTPS=0 export GO111MODULE=on export GOGC=25 export ENABLE_ADMIN=1 - export MINIO_CI_CD=1 MINIO_CONFIG_DIR="$WORK_DIR/.minio" @@ -36,18 +37,21 @@ function start_minio_fs() { export MINIO_ROOT_USER=$ACCESS_KEY export MINIO_ROOT_PASSWORD=$SECRET_KEY "${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 & - sleep 10 + + "${WORK_DIR}/mc" ready verify } function start_minio_erasure() { "${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 & - sleep 15 + + "${WORK_DIR}/mc" ready verify } function start_minio_erasure_sets() { export MINIO_ENDPOINTS="${WORK_DIR}/erasure-disk-sets{1...32}" "${MINIO[@]}" server >"$WORK_DIR/erasure-minio-sets.log" 2>&1 & - sleep 15 + + "${WORK_DIR}/mc" ready verify } function start_minio_pool_erasure_sets() { @@ -57,7 +61,7 @@ function start_minio_pool_erasure_sets() { "${MINIO[@]}" server --address ":9000" >"$WORK_DIR/pool-minio-9000.log" 2>&1 & "${MINIO[@]}" server --address ":9001" >"$WORK_DIR/pool-minio-9001.log" 2>&1 & - sleep 40 + "${WORK_DIR}/mc" ready verify } function start_minio_pool_erasure_sets_ipv6() { @@ -67,7 +71,7 @@ function 
start_minio_pool_erasure_sets_ipv6() { "${MINIO[@]}" server --address="[::1]:9000" >"$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 & "${MINIO[@]}" server --address="[::1]:9001" >"$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 & - sleep 40 + "${WORK_DIR}/mc" ready verify_ipv6 } function start_minio_dist_erasure() { @@ -78,7 +82,7 @@ function start_minio_dist_erasure() { "${MINIO[@]}" server --address ":900${i}" >"$WORK_DIR/dist-minio-900${i}.log" 2>&1 & done - sleep 40 + "${WORK_DIR}/mc" ready verify } function run_test_fs() { @@ -222,7 +226,7 @@ function __init__() { exit 1 fi - (cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc") + (cd "${MC_BUILD_DIR}" && go build -o "${WORK_DIR}/mc") # remove mc source. purge "${MC_BUILD_DIR}" diff --git a/buildscripts/verify-healing-empty-erasure-set.sh b/buildscripts/verify-healing-empty-erasure-set.sh new file mode 100755 index 0000000000000..ddbbc1c0684dd --- /dev/null +++ b/buildscripts/verify-healing-empty-erasure-set.sh @@ -0,0 +1,151 @@ +#!/bin/bash -e +# + +set -E +set -o pipefail + +if [ ! -x "$PWD/minio" ]; then + echo "minio executable binary not found in current directory" + exit 1 +fi + +WORK_DIR="$PWD/.verify-$RANDOM" +MINIO_CONFIG_DIR="$WORK_DIR/.minio" +MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server) + +function start_minio_3_node() { + export MINIO_ROOT_USER=minio + export MINIO_ROOT_PASSWORD=minio123 + export MINIO_ERASURE_SET_DRIVE_COUNT=6 + export MINIO_CI_CD=1 + + start_port=$1 + args="" + for i in $(seq 1 3); do + args="$args http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/1/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/2/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/3/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/4/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/5/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/6/" + done + + "${MINIO[@]}" --address ":$((start_port + 1))" $args >"${WORK_DIR}/dist-minio-server1.log" 2>&1 & + pid1=$! + disown ${pid1} + + "${MINIO[@]}" --address ":$((start_port + 2))" $args >"${WORK_DIR}/dist-minio-server2.log" 2>&1 & + pid2=$! + disown $pid2 + + "${MINIO[@]}" --address ":$((start_port + 3))" $args >"${WORK_DIR}/dist-minio-server3.log" 2>&1 & + pid3=$! + disown $pid3 + + export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))" + + timeout 15m /tmp/mc ready myminio || fail + + # Wait for all drives to be online and formatted + while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].state | select(. != "ok")' | wc -l) -gt 0 ]; do sleep 1; done + # Wait for all drives to be healed + while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].healing | select(. != null) | select(. == true)' | wc -l) -gt 0 ]; do sleep 1; done + + # Wait for Status: in MinIO output + while true; do + rv=$(check_online) + if [ "$rv" != "1" ]; then + # success + break + fi + + # Check if we should retry + retry=$((retry + 1)) + if [ $retry -le 20 ]; then + sleep 5 + continue + fi + + # Failure + fail + done + + if ! ps -p $pid1 1>&2 >/dev/null; then + echo "minio-server-1 is not running." && fail + fi + + if ! ps -p $pid2 1>&2 >/dev/null; then + echo "minio-server-2 is not running." && fail + fi + + if ! ps -p $pid3 1>&2 >/dev/null; then + echo "minio-server-3 is not running." && fail + fi + + if ! pkill minio; then + fail + fi + + sleep 1 + if pgrep minio; then + # forcibly killing, to proceed further properly. + if ! pkill -9 minio; then + echo "no minio process running anymore, proceed." 
+ fi + fi +} + +function fail() { + for i in $(seq 1 3); do + echo "server$i log:" + cat "${WORK_DIR}/dist-minio-server$i.log" + done + echo "FAILED" + purge "$WORK_DIR" + exit 1 +} + +function check_online() { + if ! grep -q 'API:' ${WORK_DIR}/dist-minio-*.log; then + echo "1" + fi +} + +function purge() { + echo rm -rf "$1" +} + +function __init__() { + echo "Initializing environment" + mkdir -p "$WORK_DIR" + mkdir -p "$MINIO_CONFIG_DIR" + + ## version is purposefully set to '3' for minio to migrate configuration file + echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json" + + if [ ! -f /tmp/mc ]; then + wget --quiet -O /tmp/mc https://dl.minio.io/client/mc/release/linux-amd64/mc && + chmod +x /tmp/mc + fi +} + +function perform_test() { + start_minio_3_node $2 + + echo "Testing Distributed Erasure setup healing of drives" + echo "Remove the contents of the disks belonging to '${1}' erasure set" + + rm -rf ${WORK_DIR}/${1}/*/ + + set -x + start_minio_3_node $2 +} + +function main() { + # use same ports for all tests + start_port=$(shuf -i 10000-65000 -n 1) + + perform_test "2" ${start_port} + perform_test "1" ${start_port} + perform_test "3" ${start_port} +} + +(__init__ "$@" && main "$@") +rv=$? +purge "$WORK_DIR" +exit "$rv" diff --git a/buildscripts/verify-healing.sh b/buildscripts/verify-healing.sh index 4a7be9e48c254..66778c179c9ae 100755 --- a/buildscripts/verify-healing.sh +++ b/buildscripts/verify-healing.sh @@ -12,17 +12,26 @@ fi WORK_DIR="$PWD/.verify-$RANDOM" MINIO_CONFIG_DIR="$WORK_DIR/.minio" MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server) +GOPATH=/tmp/gopath function start_minio_3_node() { + for i in $(seq 1 3); do + rm "${WORK_DIR}/dist-minio-server$i.log" + done + export MINIO_ROOT_USER=minio export MINIO_ROOT_PASSWORD=minio123 export MINIO_ERASURE_SET_DRIVE_COUNT=6 export MINIO_CI_CD=1 - start_port=$2 + first_time=$(find ${WORK_DIR}/ | grep format.json | wc -l) + + start_port=$1 args="" - for i in $(seq 1 3); do - args="$args http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/1/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/2/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/3/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/4/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/5/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/6/" + for d in $(seq 1 3 5); do + args="$args http://127.0.0.1:$((start_port + 1))${WORK_DIR}/1/${d}/ http://127.0.0.1:$((start_port + 2))${WORK_DIR}/2/${d}/ http://127.0.0.1:$((start_port + 3))${WORK_DIR}/3/${d}/ " + d=$((d + 1)) + args="$args http://127.0.0.1:$((start_port + 1))${WORK_DIR}/1/${d}/ http://127.0.0.1:$((start_port + 2))${WORK_DIR}/2/${d}/ http://127.0.0.1:$((start_port + 3))${WORK_DIR}/3/${d}/ " done "${MINIO[@]}" --address ":$((start_port + 1))" $args >"${WORK_DIR}/dist-minio-server1.log" 2>&1 & @@ -37,40 +46,26 @@ function start_minio_3_node() { pid3=$! disown $pid3 - sleep "$1" + export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))" + timeout 15m /tmp/mc ready myminio || fail + + [ ${first_time} -eq 0 ] && upload_objects + [ ${first_time} -ne 0 ] && sleep 120 if ! ps -p $pid1 1>&2 >/dev/null; then - echo "server1 log:" - cat "${WORK_DIR}/dist-minio-server1.log" - echo "FAILED" - purge "$WORK_DIR" - exit 1 + echo "minio server 1 is not running" && fail fi if ! 
ps -p $pid2 1>&2 >/dev/null; then - echo "server2 log:" - cat "${WORK_DIR}/dist-minio-server2.log" - echo "FAILED" - purge "$WORK_DIR" - exit 1 + echo "minio server 2 is not running" && fail fi if ! ps -p $pid3 1>&2 >/dev/null; then - echo "server3 log:" - cat "${WORK_DIR}/dist-minio-server3.log" - echo "FAILED" - purge "$WORK_DIR" - exit 1 + echo "minio server 3 is not running" && fail fi if ! pkill minio; then - for i in $(seq 1 3); do - echo "server$i log:" - cat "${WORK_DIR}/dist-minio-server$i.log" - done - echo "FAILED" - purge "$WORK_DIR" - exit 1 + fail fi sleep 1 @@ -82,16 +77,40 @@ function start_minio_3_node() { fi } -function check_online() { - if ! grep -q 'Status:' ${WORK_DIR}/dist-minio-*.log; then - echo "1" +function check_heal() { + if ! grep -q 'API:' ${WORK_DIR}/dist-minio-*.log; then + return 1 fi + + for ((i = 0; i < 20; i++)); do + test -f ${WORK_DIR}/$1/1/.minio.sys/format.json + v1=$? + nextInES=$(($1 + 1)) && [ $nextInES -gt 3 ] && nextInES=1 + foundFiles1=$(find ${WORK_DIR}/$1/1/ | grep -v .minio.sys | grep xl.meta | wc -l) + foundFiles2=$(find ${WORK_DIR}/$nextInES/1/ | grep -v .minio.sys | grep xl.meta | wc -l) + test $foundFiles1 -eq $foundFiles2 + v2=$? + [ $v1 == 0 -a $v2 == 0 ] && return 0 + sleep 10 + done + return 1 } function purge() { rm -rf "$1" } +function fail() { + for i in $(seq 1 3); do + echo "server$i log:" + cat "${WORK_DIR}/dist-minio-server$i.log" + done + pkill -9 minio + echo "FAILED" + purge "$WORK_DIR" + exit 1 +} + function __init__() { echo "Initializing environment" mkdir -p "$WORK_DIR" @@ -99,29 +118,37 @@ function __init__() { ## version is purposefully set to '3' for minio to migrate configuration file echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json" + + if [ ! -f /tmp/mc ]; then + wget --quiet -O /tmp/mc https://dl.minio.io/client/mc/release/linux-amd64/mc && + chmod +x /tmp/mc + fi +} + +function upload_objects() { + /tmp/mc mb myminio/testbucket/ + for ((i = 0; i < 20; i++)); do + echo "my content" | /tmp/mc pipe myminio/testbucket/file-$i + done } function perform_test() { - start_minio_3_node 120 $2 + start_port=$2 + + start_minio_3_node $start_port echo "Testing Distributed Erasure setup healing of drives" - echo "Remove the contents of the disks belonging to '${1}' erasure set" + echo "Remove the contents of the disks belonging to '${1}' node" rm -rf ${WORK_DIR}/${1}/*/ set -x - start_minio_3_node 120 $2 + start_minio_3_node $start_port - rv=$(check_online) + check_heal ${1} + rv=$? 
if [ "$rv" == "1" ]; then - for i in $(seq 1 3); do - echo "server$i log:" - cat "${WORK_DIR}/dist-minio-server$i.log" - done - pkill -9 minio - echo "FAILED" - purge "$WORK_DIR" - exit 1 + fail fi } diff --git a/cmd/acl-handlers.go b/cmd/acl-handlers.go index 63573df717ae6..eb1f3c1eaaf7d 100644 --- a/cmd/acl-handlers.go +++ b/cmd/acl-handlers.go @@ -25,7 +25,7 @@ import ( xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) // Data types used for returning dummy access control diff --git a/cmd/admin-bucket-handlers.go b/cmd/admin-bucket-handlers.go index 5f21d8d0db262..4ea93878f2073 100644 --- a/cmd/admin-bucket-handlers.go +++ b/cmd/admin-bucket-handlers.go @@ -38,10 +38,10 @@ import ( objectlock "github.com/minio/minio/internal/bucket/object/lock" "github.com/minio/minio/internal/bucket/versioning" "github.com/minio/minio/internal/event" + xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/kms" - "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) const ( @@ -99,7 +99,7 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r * } // Call site replication hook. - logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta)) + replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta)) // Write success response. writeSuccessResponseHeadersOnly(w) @@ -428,10 +428,25 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r * cfgPath := pathJoin(bi.Name, cfgFile) bucket := bi.Name switch cfgFile { + case bucketPolicyConfig: + config, _, err := globalBucketMetadataSys.GetBucketPolicy(bucket) + if err != nil { + if errors.Is(err, BucketPolicyNotFound{Bucket: bucket}) { + continue + } + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) + return + } + configData, err := json.Marshal(config) + if err != nil { + writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) + return + } + rawDataFn(bytes.NewReader(configData), cfgPath, len(configData)) case bucketNotificationConfig: config, err := globalBucketMetadataSys.GetNotificationConfig(bucket) if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) return } @@ -447,7 +462,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r * if errors.Is(err, BucketLifecycleNotFound{Bucket: bucket}) { continue } - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL) return } @@ -736,7 +751,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r * rpt.SetStatus(bucket, fileName, fmt.Errorf("An Object Lock configuration is present on this bucket, so the versioning state cannot be suspended.")) continue } - if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() { + if rcfg, _ := getReplicationConfig(ctx, bucket); rcfg != nil && v.Suspended() { rpt.SetStatus(bucket, fileName, fmt.Errorf("A replication configuration is present on this bucket, so the versioning state cannot be suspended.")) continue } @@ -784,7 +799,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r * } switch fileName { case bucketNotificationConfig: - config, err := 
event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region, globalEventNotifier.targetList) + config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region(), globalEventNotifier.targetList) if err != nil { rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err)) continue @@ -797,11 +812,12 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r * } bucketMap[bucket].NotificationConfigXML = configData + bucketMap[bucket].NotificationConfigUpdatedAt = updatedAt rpt.SetStatus(bucket, fileName, nil) case bucketPolicyConfig: // Error out if Content-Length is beyond allowed size. if sz > maxBucketPolicySize { - rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrPolicyTooLarge.String())) + rpt.SetStatus(bucket, fileName, errors.New(ErrPolicyTooLarge.String())) continue } @@ -819,7 +835,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r * // Version in policy must not be empty if bucketPolicy.Version == "" { - rpt.SetStatus(bucket, fileName, fmt.Errorf(ErrPolicyInvalidVersion.String())) + rpt.SetStatus(bucket, fileName, errors.New(ErrPolicyInvalidVersion.String())) continue } @@ -838,9 +854,13 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r * rpt.SetStatus(bucket, fileName, err) continue } - + rcfg, err := globalBucketObjectLockSys.Get(bucket) + if err != nil { + rpt.SetStatus(bucket, fileName, err) + continue + } // Validate the received bucket policy document - if err = bucketLifecycle.Validate(); err != nil { + if err = bucketLifecycle.Validate(rcfg); err != nil { rpt.SetStatus(bucket, fileName, err) continue } @@ -875,8 +895,10 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r * } kmsKey := encConfig.KeyID() if kmsKey != "" { - kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation - _, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext) + _, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{ + Name: kmsKey, + AssociatedData: kms.Context{"MinIO admin API": "ServerInfoHandler"}, // Context for a test key operation + }) if err != nil { if errors.Is(err, kes.ErrKeyNotFound) { rpt.SetStatus(bucket, fileName, errKMSKeyNotFound) @@ -959,7 +981,6 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r * rpt.SetStatus(bucket, "", err) continue } - } rptData, err := json.Marshal(rpt.BucketMetaImportErrs) @@ -1018,7 +1039,7 @@ func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http. } if len(diffCh) == 0 { // Flush if nothing is queued - w.(http.Flusher).Flush() + xhttp.Flush(w) } case <-keepAliveTicker.C: if len(diffCh) > 0 { @@ -1027,7 +1048,7 @@ func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http. 
if _, err := w.Write([]byte(" ")); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) case <-ctx.Done(): return } @@ -1077,7 +1098,7 @@ func (a adminAPIHandlers) ReplicationMRFHandler(w http.ResponseWriter, r *http.R } if len(mrfCh) == 0 { // Flush if nothing is queued - w.(http.Flusher).Flush() + xhttp.Flush(w) } case <-keepAliveTicker.C: if len(mrfCh) > 0 { @@ -1086,7 +1107,7 @@ func (a adminAPIHandlers) ReplicationMRFHandler(w http.ResponseWriter, r *http.R if _, err := w.Write([]byte(" ")); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) case <-ctx.Done(): return } diff --git a/cmd/admin-handler-utils.go b/cmd/admin-handler-utils.go index 3466c2973634c..cdfb798739b8c 100644 --- a/cmd/admin-handler-utils.go +++ b/cmd/admin-handler-utils.go @@ -27,7 +27,7 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/auth" "github.com/minio/minio/internal/config" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) // validateAdminReq will validate request against and return whether it is allowed. @@ -216,6 +216,12 @@ func toAdminAPIErr(ctx context.Context, err error) APIError { Description: err.Error(), HTTPStatusCode: http.StatusBadRequest, } + case errors.Is(err, errTierInvalidConfig): + apiErr = APIError{ + Code: "XMinioAdminTierInvalidConfig", + Description: err.Error(), + HTTPStatusCode: http.StatusBadRequest, + } default: apiErr = errorCodes.ToAPIErrWithErr(toAdminAPIErrCode(ctx, err), err) } diff --git a/cmd/admin-handlers-config-kv.go b/cmd/admin-handlers-config-kv.go index dd500d6a2c62f..2169103db6309 100644 --- a/cmd/admin-handlers-config-kv.go +++ b/cmd/admin-handlers-config-kv.go @@ -37,7 +37,7 @@ import ( "github.com/minio/minio/internal/config/subnet" "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) // DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv @@ -58,7 +58,7 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ password := cred.SecretKey kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength)) if err != nil { - logger.LogIf(ctx, err, logger.ErrorKind) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL) return } @@ -162,7 +162,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ password := cred.SecretKey kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength)) if err != nil { - logger.LogIf(ctx, err, logger.ErrorKind) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL) return } @@ -193,27 +193,27 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ func setConfigKV(ctx context.Context, objectAPI ObjectLayer, kvBytes []byte) (result setConfigResult, err error) { result.Cfg, err = readServerConfig(ctx, objectAPI, nil) if err != nil { - return + return result, err } result.Dynamic, err = result.Cfg.ReadConfig(bytes.NewReader(kvBytes)) if err != nil { - return + return result, err } result.SubSys, _, _, err = config.GetSubSys(string(kvBytes)) if err != nil { - return + return result, err } tgts, err := config.ParseConfigTargetID(bytes.NewReader(kvBytes)) if err != nil { - return + return result, err } ctx = context.WithValue(ctx, config.ContextKeyForTargetFromConfig, tgts) if verr := validateConfig(ctx, result.Cfg, result.SubSys); verr != nil { err = 
badConfigErr{Err: verr} - return + return result, err } // Check if subnet proxy being set and if so set the same value to proxy of subnet @@ -222,12 +222,12 @@ func setConfigKV(ctx context.Context, objectAPI ObjectLayer, kvBytes []byte) (re // Update the actual server config on disk. if err = saveServerConfig(ctx, objectAPI, result.Cfg); err != nil { - return + return result, err } // Write the config input KV to history. err = saveServerConfigHistory(ctx, objectAPI, kvBytes) - return + return result, err } // GetConfigKVHandler - GET /minio/admin/v3/get-config-kv?key={key} @@ -443,7 +443,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques password := cred.SecretKey kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength)) if err != nil { - logger.LogIf(ctx, err, logger.ErrorKind) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL) return } diff --git a/cmd/admin-handlers-idp-config.go b/cmd/admin-handlers-idp-config.go index c054b0d347f75..7b5792f03a589 100644 --- a/cmd/admin-handlers-idp-config.go +++ b/cmd/admin-handlers-idp-config.go @@ -31,10 +31,9 @@ import ( "github.com/minio/minio/internal/config" cfgldap "github.com/minio/minio/internal/config/identity/ldap" "github.com/minio/minio/internal/config/identity/openid" - "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/ldap" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/ldap" + "github.com/minio/pkg/v3/policy" ) func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, isUpdate bool) { @@ -60,7 +59,7 @@ func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.R password := cred.SecretKey reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength)) if err != nil { - logger.LogIf(ctx, err, logger.ErrorKind) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL) return } @@ -126,7 +125,6 @@ func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.R } if err = validateConfig(ctx, cfg, subSys); err != nil { - var validationErr ldap.Validation if errors.As(err, &validationErr) { // If we got an LDAP validation error, we need to send appropriate @@ -417,7 +415,6 @@ func (a adminAPIHandlers) DeleteIdentityProviderCfg(w http.ResponseWriter, r *ht return } if err = validateConfig(ctx, cfg, subSys); err != nil { - var validationErr ldap.Validation if errors.As(err, &validationErr) { // If we got an LDAP validation error, we need to send appropriate diff --git a/cmd/admin-handlers-idp-ldap.go b/cmd/admin-handlers-idp-ldap.go index 29578638d7ce3..6807de18c19ec 100644 --- a/cmd/admin-handlers-idp-ldap.go +++ b/cmd/admin-handlers-idp-ldap.go @@ -27,9 +27,9 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/auth" - "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + xldap "github.com/minio/pkg/v3/ldap" + "github.com/minio/pkg/v3/policy" ) // ListLDAPPolicyMappingEntities lists users/groups mapped to given/all policies. @@ -105,6 +105,12 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http. 
return } + // fail if ldap is not enabled + if !globalIAMSys.LDAPConfig.Enabled() { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminLDAPNotEnabled), r.URL) + return + } + if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 { // More than maxConfigSize bytes were available writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL) @@ -132,7 +138,7 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http. password := cred.SecretKey reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength)) if err != nil { - logger.LogIf(ctx, err, logger.ErrorKind) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL) return } @@ -184,7 +190,7 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http. // // PUT /minio/admin/v3/idp/ldap/add-service-account func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.Request) { - ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r) + ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r, true) if APIError.Code != "" { writeErrorResponseJSON(ctx, w, APIError, r.URL) return @@ -192,7 +198,8 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R // fail if ldap is not enabled if !globalIAMSys.LDAPConfig.Enabled() { - writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errors.New("LDAP not enabled")), r.URL) + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminLDAPNotEnabled), r.URL) + return } // Find the user for the request sender (as it may be sent via a service @@ -207,19 +214,15 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R } // Check if we are creating svc account for request sender. - isSvcAccForRequestor := false - if targetUser == requestorUser || targetUser == requestorParentUser { - isSvcAccForRequestor = true - } + isSvcAccForRequestor := targetUser == requestorUser || targetUser == requestorParentUser var ( targetGroups []string err error ) - // If we are creating svc account for request sender, ensure - // that targetUser is a real user (i.e. not derived - // credentials). + // If we are creating svc account for request sender, ensure that targetUser + // is a real user (i.e. not derived credentials). if isSvcAccForRequestor { if requestorIsDerivedCredential { if requestorParentUser == "" { @@ -232,12 +235,12 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R targetGroups = requestorGroups // Deny if the target user is not LDAP - isLDAP, err := globalIAMSys.LDAPConfig.DoesUsernameExist(targetUser) + foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(targetUser) if err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } - if isLDAP == "" { + if foundResult == nil { err := errors.New("Specified user does not exist on LDAP server") APIErr := errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err) writeErrorResponseJSON(ctx, w, APIErr, r.URL) @@ -253,20 +256,48 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R opts.claims[k] = v } } else { - isDN := globalIAMSys.LDAPConfig.IsLDAPUserDN(targetUser) + // We still need to ensure that the target user is a valid LDAP user. + // + // The target user may be supplied as a (short) username or a DN. + // However, for now, we only support using the short username. 
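+ // ParsesAsDN only checks the shape of the input; LookupUserDN below validates the user against the directory and yields the normalized DN and groups.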
+ isDN := globalIAMSys.LDAPConfig.ParsesAsDN(targetUser) opts.claims[ldapUserN] = targetUser // simple username - targetUser, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser) + var lookupResult *xldap.DNSearchResult + lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser) if err != nil { // if not found, check if DN - if strings.Contains(err.Error(), "not found") && isDN { - // warn user that DNs are not allowed - err = fmt.Errorf("Must use short username to add service account. %w", err) + if strings.Contains(err.Error(), "User DN not found for:") { + if isDN { + // warn user that DNs are not allowed + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminLDAPExpectedLoginName, err), r.URL) + } else { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err), r.URL) + } } writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } + targetUser = lookupResult.NormDN opts.claims[ldapUser] = targetUser // DN + opts.claims[ldapActualUser] = lookupResult.ActualDN + + // Check if this user or their groups have a policy applied. + ldapPolicies, err := globalIAMSys.PolicyDBGet(targetUser, targetGroups...) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + if len(ldapPolicies) == 0 { + err = fmt.Errorf("No policy set for user `%s` or any of their groups: `%s`", opts.claims[ldapActualUser], strings.Join(targetGroups, "`,`")) + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err), r.URL) + return + } + + // Add LDAP attributes that were looked up into the claims. + for attribKey, attribValue := range lookupResult.Attributes { + opts.claims[ldapAttribPrefix+attribKey] = attribValue + } } newCred, updatedAt, err := globalIAMSys.NewServiceAccount(ctx, targetUser, targetGroups, opts) @@ -300,7 +331,7 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R // Call hook for cluster-replication if the service account is not for a // root user. 
if newCred.ParentUser != globalActiveCred.AccessKey { - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemSvcAcc, SvcAccChange: &madmin.SRSvcAccChange{ Create: &madmin.SRSvcAccCreate{ @@ -311,7 +342,7 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R Name: newCred.Name, Description: newCred.Description, Claims: opts.claims, - SessionPolicy: createReq.Policy, + SessionPolicy: madmin.SRSessionPolicy(createReq.Policy), Status: auth.AccountOn, Expiration: createReq.Expiration, }, @@ -373,14 +404,16 @@ func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Requ } } - targetAccount, err := globalIAMSys.LDAPConfig.DoesUsernameExist(userDN) + dnResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN) if err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return - } else if userDN == "" { + } + if dnResult == nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errNoSuchUser), r.URL) return } + targetAccount := dnResult.NormDN listType := r.Form.Get("listType") if listType != "sts-only" && listType != "svcacc-only" && listType != "" { @@ -412,8 +445,10 @@ func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Requ for _, svc := range serviceAccounts { expiryTime := svc.Expiration serviceAccountList = append(serviceAccountList, madmin.ServiceAccountInfo{ - AccessKey: svc.AccessKey, - Expiration: &expiryTime, + AccessKey: svc.AccessKey, + Expiration: &expiryTime, + Name: svc.Name, + Description: svc.Description, }) } for _, sts := range stsKeys { @@ -443,3 +478,180 @@ func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Requ writeSuccessResponseJSON(w, encryptedData) } + +// ListAccessKeysLDAPBulk - GET /minio/admin/v3/idp/ldap/list-access-keys-bulk +func (a adminAPIHandlers) ListAccessKeysLDAPBulk(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + // Get current object layer instance. 
+ objectAPI := newObjectLayerFn() + if objectAPI == nil || globalNotificationSys == nil { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) + return + } + + cred, owner, s3Err := validateAdminSignature(ctx, r, "") + if s3Err != ErrNone { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL) + return + } + + dnList := r.Form["userDNs"] + isAll := r.Form.Get("all") == "true" + selfOnly := !isAll && len(dnList) == 0 + + if isAll && len(dnList) > 0 { + // This should be checked on client side, so return generic error + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) + return + } + + // Empty DN list and not self, list access keys for all users + if isAll { + if !globalIAMSys.IsAllowed(policy.Args{ + AccountName: cred.AccessKey, + Groups: cred.Groups, + Action: policy.ListUsersAdminAction, + ConditionValues: getConditionValues(r, "", cred), + IsOwner: owner, + Claims: cred.Claims, + }) { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL) + return + } + } else if len(dnList) == 1 { + var dn string + foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(dnList[0]) + if err == nil { + dn = foundResult.NormDN + } + if dn == cred.ParentUser || dnList[0] == cred.ParentUser { + selfOnly = true + } + } + + if !globalIAMSys.IsAllowed(policy.Args{ + AccountName: cred.AccessKey, + Groups: cred.Groups, + Action: policy.ListServiceAccountsAdminAction, + ConditionValues: getConditionValues(r, "", cred), + IsOwner: owner, + Claims: cred.Claims, + DenyOnly: selfOnly, + }) { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL) + return + } + + if selfOnly && len(dnList) == 0 { + selfDN := cred.AccessKey + if cred.ParentUser != "" { + selfDN = cred.ParentUser + } + dnList = append(dnList, selfDN) + } + + var ldapUserList []string + if isAll { + ldapUsers, err := globalIAMSys.ListLDAPUsers(ctx) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + for user := range ldapUsers { + ldapUserList = append(ldapUserList, user) + } + } else { + for _, userDN := range dnList { + // Validate the userDN + foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + if foundResult == nil { + continue + } + ldapUserList = append(ldapUserList, foundResult.NormDN) + } + } + + listType := r.Form.Get("listType") + var listSTSKeys, listServiceAccounts bool + switch listType { + case madmin.AccessKeyListUsersOnly: + listSTSKeys = false + listServiceAccounts = false + case madmin.AccessKeyListSTSOnly: + listSTSKeys = true + listServiceAccounts = false + case madmin.AccessKeyListSvcaccOnly: + listSTSKeys = false + listServiceAccounts = true + case madmin.AccessKeyListAll: + listSTSKeys = true + listServiceAccounts = true + default: + err := errors.New("invalid list type") + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL) + return + } + + accessKeyMap := make(map[string]madmin.ListAccessKeysLDAPResp) + for _, internalDN := range ldapUserList { + externalDN := globalIAMSys.LDAPConfig.DecodeDN(internalDN) + accessKeys := madmin.ListAccessKeysLDAPResp{} + if listSTSKeys { + stsKeys, err := globalIAMSys.ListSTSAccounts(ctx, internalDN) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + for _, sts := range stsKeys { + accessKeys.STSKeys = 
append(accessKeys.STSKeys, madmin.ServiceAccountInfo{ + AccessKey: sts.AccessKey, + Expiration: &sts.Expiration, + }) + } + // if only STS keys, skip if user has no STS keys + if !listServiceAccounts && len(stsKeys) == 0 { + continue + } + } + + if listServiceAccounts { + serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, internalDN) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + for _, svc := range serviceAccounts { + accessKeys.ServiceAccounts = append(accessKeys.ServiceAccounts, madmin.ServiceAccountInfo{ + AccessKey: svc.AccessKey, + Expiration: &svc.Expiration, + Name: svc.Name, + Description: svc.Description, + }) + } + // if only service accounts, skip if user has no service accounts + if !listSTSKeys && len(serviceAccounts) == 0 { + continue + } + } + accessKeyMap[externalDN] = accessKeys + } + + data, err := json.Marshal(accessKeyMap) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + + encryptedData, err := madmin.EncryptData(cred.SecretKey, data) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + + writeSuccessResponseJSON(w, encryptedData) +} diff --git a/cmd/admin-handlers-idp-openid.go b/cmd/admin-handlers-idp-openid.go new file mode 100644 index 0000000000000..7e2387832e302 --- /dev/null +++ b/cmd/admin-handlers-idp-openid.go @@ -0,0 +1,248 @@ +// Copyright (c) 2015-2025 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "encoding/json" + "errors" + "net/http" + "sort" + + "github.com/minio/madmin-go/v3" + "github.com/minio/minio-go/v7/pkg/set" + "github.com/minio/pkg/v3/policy" +) + +const dummyRoleARN = "dummy-internal" + +// ListAccessKeysOpenIDBulk - GET /minio/admin/v3/idp/openid/list-access-keys-bulk +func (a adminAPIHandlers) ListAccessKeysOpenIDBulk(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + // Get current object layer instance. 
+ objectAPI := newObjectLayerFn() + if objectAPI == nil || globalNotificationSys == nil { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) + return + } + + cred, owner, s3Err := validateAdminSignature(ctx, r, "") + if s3Err != ErrNone { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL) + return + } + + if !globalIAMSys.OpenIDConfig.Enabled { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminOpenIDNotEnabled), r.URL) + return + } + + userList := r.Form["users"] + isAll := r.Form.Get("all") == "true" + selfOnly := !isAll && len(userList) == 0 + cfgName := r.Form.Get("configName") + allConfigs := r.Form.Get("allConfigs") == "true" + if cfgName == "" && !allConfigs { + cfgName = madmin.Default + } + + if isAll && len(userList) > 0 { + // This should be checked on client side, so return generic error + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) + return + } + + // Empty DN list and not self, list access keys for all users + if isAll { + if !globalIAMSys.IsAllowed(policy.Args{ + AccountName: cred.AccessKey, + Groups: cred.Groups, + Action: policy.ListUsersAdminAction, + ConditionValues: getConditionValues(r, "", cred), + IsOwner: owner, + Claims: cred.Claims, + }) { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL) + return + } + } else if len(userList) == 1 && userList[0] == cred.ParentUser { + selfOnly = true + } + + if !globalIAMSys.IsAllowed(policy.Args{ + AccountName: cred.AccessKey, + Groups: cred.Groups, + Action: policy.ListServiceAccountsAdminAction, + ConditionValues: getConditionValues(r, "", cred), + IsOwner: owner, + Claims: cred.Claims, + DenyOnly: selfOnly, + }) { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL) + return + } + + if selfOnly && len(userList) == 0 { + selfDN := cred.AccessKey + if cred.ParentUser != "" { + selfDN = cred.ParentUser + } + userList = append(userList, selfDN) + } + + listType := r.Form.Get("listType") + var listSTSKeys, listServiceAccounts bool + switch listType { + case madmin.AccessKeyListUsersOnly: + listSTSKeys = false + listServiceAccounts = false + case madmin.AccessKeyListSTSOnly: + listSTSKeys = true + listServiceAccounts = false + case madmin.AccessKeyListSvcaccOnly: + listSTSKeys = false + listServiceAccounts = true + case madmin.AccessKeyListAll: + listSTSKeys = true + listServiceAccounts = true + default: + err := errors.New("invalid list type") + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL) + return + } + + s := globalServerConfig.Clone() + roleArnMap := make(map[string]string) + // Map of configs to a map of users to their access keys + cfgToUsersMap := make(map[string]map[string]madmin.OpenIDUserAccessKeys) + configs, err := globalIAMSys.OpenIDConfig.GetConfigList(s) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + for _, config := range configs { + if !allConfigs && cfgName != config.Name { + continue + } + arn := dummyRoleARN + if config.RoleARN != "" { + arn = config.RoleARN + } + roleArnMap[arn] = config.Name + newResp := make(map[string]madmin.OpenIDUserAccessKeys) + cfgToUsersMap[config.Name] = newResp + } + if len(roleArnMap) == 0 { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchConfigTarget), r.URL) + return + } + + userSet := set.CreateStringSet(userList...) 
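+ // List every access key once, then filter below by the sub/roleArn claims, the requested OpenID config(s) and the optional user set.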
+ accessKeys, err := globalIAMSys.ListAllAccessKeys(ctx) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + + for _, accessKey := range accessKeys { + // Filter out any disqualifying access keys + _, ok := accessKey.Claims[subClaim] + if !ok { + continue // OpenID access keys must have a sub claim + } + if (!listSTSKeys && !accessKey.IsServiceAccount()) || (!listServiceAccounts && accessKey.IsServiceAccount()) { + continue // skip if not the type we want + } + arn, ok := accessKey.Claims[roleArnClaim].(string) + if !ok { + if _, ok := accessKey.Claims[iamPolicyClaimNameOpenID()]; !ok { + continue // skip if no roleArn and no policy claim + } + // claim-based provider is in the roleArnMap under dummy ARN + arn = dummyRoleARN + } + matchingCfgName, ok := roleArnMap[arn] + if !ok { + continue // skip if not part of the target config + } + var id string + if idClaim := globalIAMSys.OpenIDConfig.GetUserIDClaim(matchingCfgName); idClaim != "" { + id, _ = accessKey.Claims[idClaim].(string) + } + if !userSet.IsEmpty() && !userSet.Contains(accessKey.ParentUser) && !userSet.Contains(id) { + continue // skip if not in the user list + } + openIDUserAccessKeys, ok := cfgToUsersMap[matchingCfgName][accessKey.ParentUser] + + // Add new user to map if not already present + if !ok { + var readableClaim string + if rc := globalIAMSys.OpenIDConfig.GetUserReadableClaim(matchingCfgName); rc != "" { + readableClaim, _ = accessKey.Claims[rc].(string) + } + openIDUserAccessKeys = madmin.OpenIDUserAccessKeys{ + MinioAccessKey: accessKey.ParentUser, + ID: id, + ReadableName: readableClaim, + } + } + svcAccInfo := madmin.ServiceAccountInfo{ + AccessKey: accessKey.AccessKey, + Expiration: &accessKey.Expiration, + } + if accessKey.IsServiceAccount() { + openIDUserAccessKeys.ServiceAccounts = append(openIDUserAccessKeys.ServiceAccounts, svcAccInfo) + } else { + openIDUserAccessKeys.STSKeys = append(openIDUserAccessKeys.STSKeys, svcAccInfo) + } + cfgToUsersMap[matchingCfgName][accessKey.ParentUser] = openIDUserAccessKeys + } + + // Convert map to slice and sort + resp := make([]madmin.ListAccessKeysOpenIDResp, 0, len(cfgToUsersMap)) + for cfgName, usersMap := range cfgToUsersMap { + users := make([]madmin.OpenIDUserAccessKeys, 0, len(usersMap)) + for _, user := range usersMap { + users = append(users, user) + } + sort.Slice(users, func(i, j int) bool { + return users[i].MinioAccessKey < users[j].MinioAccessKey + }) + resp = append(resp, madmin.ListAccessKeysOpenIDResp{ + ConfigName: cfgName, + Users: users, + }) + } + sort.Slice(resp, func(i, j int) bool { + return resp[i].ConfigName < resp[j].ConfigName + }) + + data, err := json.Marshal(resp) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + + encryptedData, err := madmin.EncryptData(cred.SecretKey, data) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + + writeSuccessResponseJSON(w, encryptedData) +} diff --git a/cmd/admin-handlers-pools.go b/cmd/admin-handlers-pools.go index e862292ad131a..c4f98c45485a6 100644 --- a/cmd/admin-handlers-pools.go +++ b/cmd/admin-handlers-pools.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2021 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. 
// // This file is part of MinIO Object Storage stack // @@ -18,6 +18,7 @@ package cmd import ( + "context" "encoding/json" "errors" "fmt" @@ -25,9 +26,9 @@ import ( "strconv" "strings" - "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/env" + "github.com/minio/pkg/v3/policy" ) var ( @@ -60,7 +61,7 @@ func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Reque return } - if z.IsRebalanceStarted() { + if z.IsRebalanceStarted(ctx) { writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceAlreadyStarted), r.URL) return } @@ -106,21 +107,12 @@ func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Reque poolIndices = append(poolIndices, idx) } - if len(poolIndices) > 0 && !globalEndpoints[poolIndices[0]].Endpoints[0].IsLocal { - ep := globalEndpoints[poolIndices[0]].Endpoints[0] - for nodeIdx, proxyEp := range globalProxyEndpoints { - if proxyEp.Endpoint.Host == ep.Host { - if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) { - return - } - } + if len(poolIndices) == 0 || !proxyDecommissionRequest(ctx, globalEndpoints[poolIndices[0]].Endpoints[0], w, r) { + if err := z.Decommission(r.Context(), poolIndices...); err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return } } - - if err := z.Decommission(r.Context(), poolIndices...); err != nil { - writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) - return - } } func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Request) { @@ -162,20 +154,12 @@ func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Requ return } - if ep := globalEndpoints[idx].Endpoints[0]; !ep.IsLocal { - for nodeIdx, proxyEp := range globalProxyEndpoints { - if proxyEp.Endpoint.Host == ep.Host { - if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) { - return - } - } + if !proxyDecommissionRequest(ctx, globalEndpoints[idx].Endpoints[0], w, r) { + if err := pools.DecommissionCancel(ctx, idx); err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return } } - - if err := pools.DecommissionCancel(ctx, idx); err != nil { - writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) - return - } } func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) { @@ -225,7 +209,7 @@ func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) { return } - logger.LogIf(r.Context(), json.NewEncoder(w).Encode(&status)) + adminLogIf(r.Context(), json.NewEncoder(w).Encode(&status)) } func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) { @@ -258,7 +242,7 @@ func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) { poolsStatus[idx] = status } - logger.LogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus)) + adminLogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus)) } func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request) { @@ -274,8 +258,8 @@ func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request) // concurrent rebalance-start commands. 
if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal { for nodeIdx, proxyEp := range globalProxyEndpoints { - if proxyEp.Endpoint.Host == ep.Host { - if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) { + if proxyEp.Host == ep.Host { + if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success { return } } @@ -293,7 +277,7 @@ func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request) return } - if pools.IsRebalanceStarted() { + if pools.IsRebalanceStarted(ctx) { writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceAlreadyStarted), r.URL) return } @@ -345,8 +329,8 @@ func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request // pools may temporarily have out of date info on the others. if ep := globalEndpoints[0].Endpoints[0]; !ep.IsLocal { for nodeIdx, proxyEp := range globalProxyEndpoints { - if proxyEp.Endpoint.Host == ep.Host { - if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) { + if proxyEp.Host == ep.Host { + if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success { return } } @@ -365,11 +349,11 @@ func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceNotStarted), r.URL) return } - logger.LogIf(ctx, fmt.Errorf("failed to fetch rebalance status: %w", err)) + adminLogIf(ctx, fmt.Errorf("failed to fetch rebalance status: %w", err)) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } - logger.LogIf(r.Context(), json.NewEncoder(w).Encode(rs)) + adminLogIf(r.Context(), json.NewEncoder(w).Encode(rs)) } func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request) { @@ -389,5 +373,21 @@ func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request) // Cancel any ongoing rebalance operation globalNotificationSys.StopRebalance(r.Context()) writeSuccessResponseHeadersOnly(w) - logger.LogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt)) + adminLogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt)) + globalNotificationSys.LoadRebalanceMeta(ctx, false) +} + +func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w http.ResponseWriter, r *http.Request) (proxy bool) { + host := env.Get("_MINIO_DECOM_ENDPOINT_HOST", defaultEndPoint.Host) + if host == "" { + return proxy + } + for nodeIdx, proxyEp := range globalProxyEndpoints { + if proxyEp.Host == host && !proxyEp.IsLocal { + if proxied, success := proxyRequestByNodeIndex(ctx, w, r, nodeIdx, false); proxied && success { + return true + } + } + } + return proxy } diff --git a/cmd/admin-handlers-site-replication.go b/cmd/admin-handlers-site-replication.go index 03dc0351f965d..bda0939554858 100644 --- a/cmd/admin-handlers-site-replication.go +++ b/cmd/admin-handlers-site-replication.go @@ -32,9 +32,8 @@ import ( "github.com/dustin/go-humanize" "github.com/minio/madmin-go/v3" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) // SiteReplicationAdd - PUT /minio/admin/v3/site-replication/add @@ -55,7 +54,7 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ opts := getSRAddOptions(r) status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites, opts) if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, 
toAdminAPIErr(ctx, err), r.URL) return } @@ -71,7 +70,7 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ func getSRAddOptions(r *http.Request) (opts madmin.SRAddOptions) { opts.ReplicateILMExpiry = r.Form.Get("replicateILMExpiry") == "true" - return + return opts } // SRPeerJoin - PUT /minio/admin/v3/site-replication/join @@ -93,7 +92,7 @@ func (a adminAPIHandlers) SRPeerJoin(w http.ResponseWriter, r *http.Request) { } if err := globalSiteReplicationSys.PeerJoinReq(ctx, joinArg); err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -140,7 +139,7 @@ func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request globalSiteReplicationSys.purgeDeletedBucket(ctx, objectAPI, bucket) } if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -192,7 +191,7 @@ func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http. err = globalSiteReplicationSys.PeerGroupInfoChangeHandler(ctx, item.GroupInfo, item.UpdatedAt) } if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -263,7 +262,7 @@ func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *ht err = globalSiteReplicationSys.PeerBucketLCConfigHandler(ctx, item.Bucket, item.ExpiryLCConfig, item.UpdatedAt) } if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -305,7 +304,7 @@ func (a adminAPIHandlers) SRPeerGetIDPSettings(w http.ResponseWriter, r *http.Re } } -func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) error { +func parseJSONBody(ctx context.Context, body io.Reader, v any, encryptionKey string) error { data, err := io.ReadAll(body) if err != nil { return SRError{ @@ -316,7 +315,6 @@ func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptio if encryptionKey != "" { data, err = madmin.DecryptData(encryptionKey, bytes.NewReader(data)) if err != nil { - logger.LogIf(ctx, err) return SRError{ Cause: err, Code: ErrSiteReplicationInvalidRequest, @@ -349,6 +347,18 @@ func (a adminAPIHandlers) SiteReplicationStatus(w http.ResponseWriter, r *http.R writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } + // Report the ILMExpiryStats only if at least one site has replication of ILM expiry enabled + var replicateILMExpiry bool + for _, site := range info.Sites { + if site.ReplicateILMExpiry { + replicateILMExpiry = true + break + } + } + if !replicateILMExpiry { + // explicitly send nil for ILMExpiryStats + info.ILMExpiryStats = nil + } if err = json.NewEncoder(w).Encode(info); err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) @@ -396,7 +406,7 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req opts := getSREditOptions(r) status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site, opts) if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -412,7 +422,7 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req func getSREditOptions(r *http.Request) (opts madmin.SREditOptions) { opts.DisableILMExpiryReplication = r.Form.Get("disableILMExpiryReplication") == "true" 
opts.EnableILMExpiryReplication = r.Form.Get("enableILMExpiryReplication") == "true" - return + return opts } // SRPeerEdit - PUT /minio/admin/v3/site-replication/peer/edit @@ -433,7 +443,7 @@ func (a adminAPIHandlers) SRPeerEdit(w http.ResponseWriter, r *http.Request) { } if err := globalSiteReplicationSys.PeerEditReq(ctx, pi); err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -456,7 +466,7 @@ func (a adminAPIHandlers) SRStateEdit(w http.ResponseWriter, r *http.Request) { return } if err := globalSiteReplicationSys.PeerStateEditReq(ctx, state); err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -474,7 +484,7 @@ func getSRStatusOptions(r *http.Request) (opts madmin.SRStatusOptions) { opts.EntityValue = q.Get("entityvalue") opts.ShowDeleted = q.Get("showDeleted") == "true" opts.Metrics = q.Get("metrics") == "true" - return + return opts } // SiteReplicationRemove - PUT /minio/admin/v3/site-replication/remove @@ -493,7 +503,7 @@ func (a adminAPIHandlers) SiteReplicationRemove(w http.ResponseWriter, r *http.R } status, err := globalSiteReplicationSys.RemovePeerCluster(ctx, objectAPI, rreq) if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -524,7 +534,7 @@ func (a adminAPIHandlers) SRPeerRemove(w http.ResponseWriter, r *http.Request) { } if err := globalSiteReplicationSys.InternalRemoveReq(ctx, objectAPI, req); err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -586,7 +596,7 @@ func (a adminAPIHandlers) SiteReplicationDevNull(w http.ResponseWriter, r *http. // If there is a disconnection before globalNetPerfMinDuration (we give a margin of error of 1 sec) // would mean the network is not stable. Logging here will help in debugging network issues. if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) } } if err != nil { @@ -609,5 +619,5 @@ func (a adminAPIHandlers) SiteReplicationNetPerf(w http.ResponseWriter, r *http. 
duration = globalNetPerfMinDuration } result := siteNetperf(r.Context(), duration) - logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result)) + adminLogIf(r.Context(), gob.NewEncoder(w).Encode(result)) } diff --git a/cmd/admin-handlers-users-race_test.go b/cmd/admin-handlers-users-race_test.go index 5d86d5d451e28..79474a01fcece 100644 --- a/cmd/admin-handlers-users-race_test.go +++ b/cmd/admin-handlers-users-race_test.go @@ -32,7 +32,7 @@ import ( "github.com/minio/madmin-go/v3" minio "github.com/minio/minio-go/v7" - "github.com/minio/pkg/v2/sync/errgroup" + "github.com/minio/pkg/v3/sync/errgroup" ) func runAllIAMConcurrencyTests(suite *TestSuiteIAM, c *check) { @@ -89,7 +89,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) { // Create a policy policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -104,7 +104,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -113,16 +113,19 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) { userCount := 50 accessKeys := make([]string, userCount) secretKeys := make([]string, userCount) - for i := 0; i < userCount; i++ { + for i := range userCount { accessKey, secretKey := mustGenerateCredentials(c) err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled) if err != nil { c.Fatalf("Unable to set user: %v", err) } - err = s.adm.SetPolicy(ctx, policy, accessKey, false) - if err != nil { - c.Fatalf("Unable to set policy: %v", err) + userReq := madmin.PolicyAssociationReq{ + Policies: []string{policy}, + User: accessKey, + } + if _, err := s.adm.AttachPolicy(ctx, userReq); err != nil { + c.Fatalf("Unable to attach policy: %v", err) } accessKeys[i] = accessKey @@ -130,7 +133,7 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) { } g := errgroup.Group{} - for i := 0; i < userCount; i++ { + for i := range userCount { g.Go(func(i int) func() error { return func() error { uClient := s.getUserClient(c, accessKeys[i], secretKeys[i], "") diff --git a/cmd/admin-handlers-users.go b/cmd/admin-handlers-users.go index 7b8047eb705ed..8530046b98e83 100644 --- a/cmd/admin-handlers-users.go +++ b/cmd/admin-handlers-users.go @@ -24,20 +24,25 @@ import ( "errors" "fmt" "io" + "maps" "net/http" "os" + "slices" "sort" "strconv" + "strings" "time" + "unicode/utf8" "github.com/klauspost/compress/zip" "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/auth" - "github.com/minio/minio/internal/cachevalue" "github.com/minio/minio/internal/config/dns" "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + xldap "github.com/minio/pkg/v3/ldap" + "github.com/minio/pkg/v3/policy" + "github.com/puzpuzpuz/xsync/v3" ) // RemoveUser - DELETE /minio/admin/v3/remove-user?accessKey= @@ -62,6 +67,17 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) { return } + // This API only supports removal of internal users not service accounts. + ok, _, err = globalIAMSys.IsServiceAccount(accessKey) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + if ok { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errIAMActionNotAllowed), r.URL) + return + } + // When the user is root credential you are not allowed to // remove the root user. Also you cannot delete yourself. 
if accessKey == globalActiveCred.AccessKey || accessKey == cred.AccessKey { @@ -74,7 +90,7 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) { return } - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemIAMUser, IAMUser: &madmin.SRIAMUser{ AccessKey: accessKey, @@ -142,9 +158,7 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } - for k, v := range ldapUsers { - allCredentials[k] = v - } + maps.Copy(allCredentials, ldapUsers) // Marshal the response data, err := json.Marshal(allCredentials) @@ -182,12 +196,7 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) { return } - checkDenyOnly := false - if name == cred.AccessKey { - // Check that there is no explicit deny - otherwise it's allowed - // to view one's own info. - checkDenyOnly = true - } + checkDenyOnly := name == cred.AccessKey if !globalIAMSys.IsAllowed(policy.Args{ AccountName: cred.AccessKey, @@ -271,14 +280,21 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ return } } - updatedAt, err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members) + + if globalIAMSys.LDAPConfig.Enabled() { + // We don't allow internal group manipulation in this API when LDAP + // is enabled for now. + err = errIAMActionNotAllowed + } else { + updatedAt, err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members) + } } if err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemGroupInfo, GroupInfo: &madmin.SRGroupInfo{ UpdateReq: updReq, @@ -368,7 +384,7 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) return } - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemGroupInfo, GroupInfo: &madmin.SRGroupInfo{ UpdateReq: madmin.GroupAddRemove{ @@ -406,7 +422,7 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) return } - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemIAMUser, IAMUser: &madmin.SRIAMUser{ AccessKey: accessKey, @@ -466,13 +482,13 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) { return } - checkDenyOnly := false - if accessKey == cred.AccessKey { - // Check that there is no explicit deny - otherwise it's allowed - // to change one's own password. 
- checkDenyOnly = true + if !utf8.ValidString(accessKey) { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserValidUTF), r.URL) + return } + checkDenyOnly := accessKey == cred.AccessKey + if !globalIAMSys.IsAllowed(policy.Args{ AccountName: cred.AccessKey, Groups: cred.Groups, @@ -495,25 +511,31 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) { password := cred.SecretKey configBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength)) if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL) return } var ureq madmin.AddOrUpdateUserReq if err = json.Unmarshal(configBytes, &ureq); err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL) return } + // We don't allow internal user creation with LDAP enabled for now. + if globalIAMSys.LDAPConfig.Enabled() { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errIAMActionNotAllowed), r.URL) + return + } + updatedAt, err := globalIAMSys.CreateUser(ctx, accessKey, ureq) if err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemIAMUser, IAMUser: &madmin.SRIAMUser{ AccessKey: accessKey, @@ -616,7 +638,7 @@ func (a adminAPIHandlers) TemporaryAccountInfo(w http.ResponseWriter, r *http.Re // AddServiceAccount - PUT /minio/admin/v3/add-service-account func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) { - ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r) + ctx, cred, opts, createReq, targetUser, APIError := commonAddServiceAccount(r, false) if APIError.Code != "" { writeErrorResponseJSON(ctx, w, APIError, r.URL) return @@ -656,10 +678,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque } // Check if we are creating svc account for request sender. - isSvcAccForRequestor := false - if targetUser == requestorUser || targetUser == requestorParentUser { - isSvcAccForRequestor = true - } + isSvcAccForRequestor := targetUser == requestorUser || targetUser == requestorParentUser // If we are creating svc account for request sender, ensure // that targetUser is a real user (i.e. not derived @@ -687,12 +706,20 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque // In case of LDAP we need to resolve the targetUser to a DN and // query their groups: opts.claims[ldapUserN] = targetUser // simple username - targetUser, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser) + var lookupResult *xldap.DNSearchResult + lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser) if err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } + targetUser = lookupResult.NormDN opts.claims[ldapUser] = targetUser // username DN + opts.claims[ldapActualUser] = lookupResult.ActualDN + + // Add LDAP attributes that were looked up into the claims. 
+ for attribKey, attribValue := range lookupResult.Attributes { + opts.claims[ldapAttribPrefix+attribKey] = attribValue + } // NOTE: if not using LDAP, then internal IDP or open ID is // being used - in the former, group info is enforced when @@ -731,7 +758,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque // Call hook for cluster-replication if the service account is not for a // root user. if newCred.ParentUser != globalActiveCred.AccessKey { - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemSvcAcc, SvcAccChange: &madmin.SRSvcAccChange{ Create: &madmin.SRSvcAccCreate{ @@ -742,7 +769,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque Name: newCred.Name, Description: newCred.Description, Claims: opts.claims, - SessionPolicy: createReq.Policy, + SessionPolicy: madmin.SRSessionPolicy(createReq.Policy), Status: auth.AccountOn, Expiration: createReq.Expiration, }, @@ -802,7 +829,11 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re } condValues := getConditionValues(r, "", cred) - addExpirationToCondValues(updateReq.NewExpiration, condValues) + err = addExpirationToCondValues(updateReq.NewExpiration, condValues) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } // Permission checks: // @@ -853,7 +884,7 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re // Call site replication hook - non-root user accounts are replicated. if svcAccount.ParentUser != globalActiveCred.AccessKey { - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemSvcAcc, SvcAccChange: &madmin.SRSvcAccChange{ Update: &madmin.SRSvcAccUpdate{ @@ -862,7 +893,7 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re Status: opts.status, Name: opts.name, Description: opts.description, - SessionPolicy: updateReq.NewPolicy, + SessionPolicy: madmin.SRSessionPolicy(updateReq.NewPolicy), Expiration: updateReq.NewExpiration, }, }, @@ -1026,8 +1057,13 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req for _, svc := range serviceAccounts { expiryTime := svc.Expiration serviceAccountList = append(serviceAccountList, madmin.ServiceAccountInfo{ - AccessKey: svc.AccessKey, - Expiration: &expiryTime, + Description: svc.Description, + ParentUser: svc.ParentUser, + Name: svc.Name, + AccountStatus: svc.Status, + AccessKey: svc.AccessKey, + ImpliedPolicy: svc.IsImpliedPolicy(), + Expiration: &expiryTime, }) } @@ -1115,7 +1151,7 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re // Call site replication hook - non-root user accounts are replicated. 
if svcAccount.ParentUser != "" && svcAccount.ParentUser != globalActiveCred.AccessKey { - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemSvcAcc, SvcAccChange: &madmin.SRSvcAccChange{ Delete: &madmin.SRSvcAccDelete{ @@ -1129,6 +1165,172 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re writeSuccessNoContent(w) } +// ListAccessKeysBulk - GET /minio/admin/v3/list-access-keys-bulk +func (a adminAPIHandlers) ListAccessKeysBulk(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + // Get current object layer instance. + objectAPI := newObjectLayerFn() + if objectAPI == nil || globalNotificationSys == nil { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) + return + } + + cred, owner, s3Err := validateAdminSignature(ctx, r, "") + if s3Err != ErrNone { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL) + return + } + + users := r.Form["users"] + isAll := r.Form.Get("all") == "true" + selfOnly := !isAll && len(users) == 0 + + if isAll && len(users) > 0 { + // This should be checked on client side, so return generic error + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) + return + } + + // Empty user list and not self, list access keys for all users + if isAll { + if !globalIAMSys.IsAllowed(policy.Args{ + AccountName: cred.AccessKey, + Groups: cred.Groups, + Action: policy.ListUsersAdminAction, + ConditionValues: getConditionValues(r, "", cred), + IsOwner: owner, + Claims: cred.Claims, + }) { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL) + return + } + } else if len(users) == 1 { + if users[0] == cred.AccessKey || users[0] == cred.ParentUser { + selfOnly = true + } + } + + if !globalIAMSys.IsAllowed(policy.Args{ + AccountName: cred.AccessKey, + Groups: cred.Groups, + Action: policy.ListServiceAccountsAdminAction, + ConditionValues: getConditionValues(r, "", cred), + IsOwner: owner, + Claims: cred.Claims, + DenyOnly: selfOnly, + }) { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL) + return + } + + if selfOnly && len(users) == 0 { + selfUser := cred.AccessKey + if cred.ParentUser != "" { + selfUser = cred.ParentUser + } + users = append(users, selfUser) + } + + var checkedUserList []string + if isAll { + users, err := globalIAMSys.ListUsers(ctx) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + for user := range users { + checkedUserList = append(checkedUserList, user) + } + checkedUserList = append(checkedUserList, globalActiveCred.AccessKey) + } else { + for _, user := range users { + // Validate the user + _, ok := globalIAMSys.GetUser(ctx, user) + if !ok { + continue + } + checkedUserList = append(checkedUserList, user) + } + } + + listType := r.Form.Get("listType") + var listSTSKeys, listServiceAccounts bool + switch listType { + case madmin.AccessKeyListUsersOnly: + listSTSKeys = false + listServiceAccounts = false + case madmin.AccessKeyListSTSOnly: + listSTSKeys = true + listServiceAccounts = false + case madmin.AccessKeyListSvcaccOnly: + listSTSKeys = false + listServiceAccounts = true + case madmin.AccessKeyListAll: + listSTSKeys = true + listServiceAccounts = true + default: + err := errors.New("invalid list type") + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL) + return + } + + 
accessKeyMap := make(map[string]madmin.ListAccessKeysResp) + for _, user := range checkedUserList { + accessKeys := madmin.ListAccessKeysResp{} + if listSTSKeys { + stsKeys, err := globalIAMSys.ListSTSAccounts(ctx, user) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + for _, sts := range stsKeys { + accessKeys.STSKeys = append(accessKeys.STSKeys, madmin.ServiceAccountInfo{ + AccessKey: sts.AccessKey, + Expiration: &sts.Expiration, + }) + } + // if only STS keys, skip if user has no STS keys + if !listServiceAccounts && len(stsKeys) == 0 { + continue + } + } + + if listServiceAccounts { + serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, user) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + for _, svc := range serviceAccounts { + accessKeys.ServiceAccounts = append(accessKeys.ServiceAccounts, madmin.ServiceAccountInfo{ + AccessKey: svc.AccessKey, + Expiration: &svc.Expiration, + }) + } + // if only service accounts, skip if user has no service accounts + if !listSTSKeys && len(serviceAccounts) == 0 { + continue + } + } + accessKeyMap[user] = accessKeys + } + + data, err := json.Marshal(accessKeyMap) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + + encryptedData, err := madmin.EncryptData(cred.SecretKey, data) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + + writeSuccessResponseJSON(w, encryptedData) +} + // AccountInfoHandler returns usage, permissions and other bucket metadata for incoming us func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -1198,18 +1400,6 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ return rd, wr } - bucketStorageCache.InitOnce(10*time.Second, - cachevalue.Opts{ReturnLastGood: true, NoWait: true}, - func() (DataUsageInfo, error) { - ctx, done := context.WithTimeout(context.Background(), 2*time.Second) - defer done() - - return loadDataUsageFromBackend(ctx, objectAPI) - }, - ) - - dataUsageInfo, _ := bucketStorageCache.Get() - // If etcd, dns federation configured list buckets from etcd. var err error var buckets []BucketInfo @@ -1250,7 +1440,12 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ var buf []byte switch { - case accountName == globalActiveCred.AccessKey: + case accountName == globalActiveCred.AccessKey || newGlobalAuthZPluginFn() != nil: + // For owner account and when plugin authZ is configured always set + // effective policy as `consoleAdmin`. + // + // In the latter case, we let the UI render everything, but individual + // actions would fail if not permitted by the external authZ service. for _, policy := range policy.DefaultPolicies { if policy.Name == "consoleAdmin" { effectivePolicy = policy.Definition @@ -1273,13 +1468,13 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ default: policies, err := globalIAMSys.PolicyDBGet(accountName, cred.Groups...) if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } effectivePolicy = globalIAMSys.GetCombinedPolicy(policies...) 
- } + buf, err = json.MarshalIndent(effectivePolicy, "", " ") if err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) @@ -1296,15 +1491,12 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ rd, wr := isAllowedAccess(bucket.Name) if rd || wr { // Fetch the data usage of the current bucket - var size uint64 - var objectsCount uint64 - var objectsHist, versionsHist map[string]uint64 - if !dataUsageInfo.LastUpdate.IsZero() { - size = dataUsageInfo.BucketsUsage[bucket.Name].Size - objectsCount = dataUsageInfo.BucketsUsage[bucket.Name].ObjectsCount - objectsHist = dataUsageInfo.BucketsUsage[bucket.Name].ObjectSizesHistogram - versionsHist = dataUsageInfo.BucketsUsage[bucket.Name].ObjectVersionsHistogram - } + bui := globalBucketQuotaSys.GetBucketUsageInfo(ctx, bucket.Name) + size := bui.Size + objectsCount := bui.ObjectsCount + objectsHist := bui.ObjectSizesHistogram + versionsHist := bui.ObjectVersionsHistogram + // Fetch the prefix usage of the current bucket var prefixUsage map[string]uint64 if enablePrefixUsage { @@ -1374,6 +1566,7 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errTooManyPolicies), r.URL) return } + setReqInfoPolicyName(ctx, name) policyDoc, err := globalIAMSys.InfoPolicy(name) if err != nil { @@ -1425,7 +1618,7 @@ func (a adminAPIHandlers) ListBucketPolicies(w http.ResponseWriter, r *http.Requ for name, p := range policies { _, err = json.Marshal(p) if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) continue } newPolicies[name] = p @@ -1455,7 +1648,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ for name, p := range policies { _, err = json.Marshal(p) if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) continue } newPolicies[name] = p @@ -1477,6 +1670,7 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ vars := mux.Vars(r) policyName := vars["name"] + setReqInfoPolicyName(ctx, policyName) if err := globalIAMSys.DeletePolicy(ctx, policyName, true); err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) @@ -1485,7 +1679,7 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ // Call cluster-replication policy creation hook to replicate policy deletion to // other minio clusters. - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemPolicy, Name: policyName, UpdatedAt: UTCNow(), @@ -1509,6 +1703,13 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument), r.URL) return } + setReqInfoPolicyName(ctx, policyName) + + // Reject policy names with commas. + if strings.Contains(policyName, ",") { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrPolicyInvalidName), r.URL) + return + } // Error out if Content-Length is missing. if r.ContentLength <= 0 { @@ -1548,7 +1749,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request // Call cluster-replication policy creation hook to replicate policy to // other minio clusters. 
- logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemPolicy, Name: policyName, Policy: iamPolicyBytes, @@ -1556,7 +1757,12 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request })) } -// SetPolicyForUserOrGroup - PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group] +// SetPolicyForUserOrGroup - sets a policy on a user or a group. +// +// PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group] +// +// Deprecated: This API is replaced by attach/detach policy APIs for specific +// type of users (builtin or LDAP). func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -1569,6 +1775,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http policyName := vars["policyName"] entityName := vars["userOrGroup"] isGroup := vars["isGroup"] == "true" + setReqInfoPolicyName(ctx, policyName) if !isGroup { ok, _, err := globalIAMSys.IsTempUser(entityName) @@ -1608,6 +1815,34 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http userType := regUser if globalIAMSys.GetUsersSysType() == LDAPUsersSysType { userType = stsUser + + // Validate that the user or group exists in LDAP and use the normalized + // form of the entityName (which will be an LDAP DN). + var err error + if isGroup { + var foundGroupDN *xldap.DNSearchResult + var underBaseDN bool + if foundGroupDN, underBaseDN, err = globalIAMSys.LDAPConfig.GetValidatedGroupDN(nil, entityName); err != nil { + iamLogIf(ctx, err) + } else if foundGroupDN == nil || !underBaseDN { + err = errNoSuchGroup + } else { + entityName = foundGroupDN.NormDN + } + } else { + var foundUserDN *xldap.DNSearchResult + if foundUserDN, err = globalIAMSys.LDAPConfig.GetValidatedDNForUsername(entityName); err != nil { + iamLogIf(ctx, err) + } else if foundUserDN == nil { + err = errNoSuchUser + } else { + entityName = foundUserDN.NormDN + } + } + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } } updatedAt, err := globalIAMSys.PolicyDBSet(ctx, entityName, policyName, userType, isGroup) @@ -1616,7 +1851,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http return } - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemPolicyMapping, PolicyMapping: &madmin.SRPolicyMapping{ UserOrGroup: entityName, @@ -1628,7 +1863,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http })) } -// ListPolicyMappingEntities - GET /minio/admin/v3/idp/builtin/polciy-entities?policy=xxx&user=xxx&group=xxx +// ListPolicyMappingEntities - GET /minio/admin/v3/idp/builtin/policy-entities?policy=xxx&user=xxx&group=xxx func (a adminAPIHandlers) ListPolicyMappingEntities(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -1730,6 +1965,7 @@ func (a adminAPIHandlers) AttachDetachPolicyBuiltin(w http.ResponseWriter, r *ht writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } + setReqInfoPolicyName(ctx, strings.Join(addedOrRemoved, ",")) respBody := madmin.PolicyAssociationResp{ UpdatedAt: updatedAt, @@ -1755,18 +1991,249 @@ func (a adminAPIHandlers) AttachDetachPolicyBuiltin(w http.ResponseWriter, r *ht writeSuccessResponseJSON(w, encryptedData) } +// 
RevokeTokens - POST /minio/admin/v3/revoke-tokens/{userProvider} +func (a adminAPIHandlers) RevokeTokens(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + // Get current object layer instance. + objectAPI := newObjectLayerFn() + if objectAPI == nil || globalNotificationSys == nil { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) + return + } + + cred, owner, s3Err := validateAdminSignature(ctx, r, "") + if s3Err != ErrNone { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL) + return + } + + userProvider := mux.Vars(r)["userProvider"] + + user := r.Form.Get("user") + tokenRevokeType := r.Form.Get("tokenRevokeType") + fullRevoke := r.Form.Get("fullRevoke") == "true" + isTokenSelfRevoke := user == "" + if !isTokenSelfRevoke { + var err error + user, err = getUserWithProvider(ctx, userProvider, user, false) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + } + + if (user != "" && tokenRevokeType == "" && !fullRevoke) || (tokenRevokeType != "" && fullRevoke) { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) + return + } + + adminPrivilege := globalIAMSys.IsAllowed(policy.Args{ + AccountName: cred.AccessKey, + Groups: cred.Groups, + Action: policy.RemoveServiceAccountAdminAction, + ConditionValues: getConditionValues(r, "", cred), + IsOwner: owner, + Claims: cred.Claims, + }) + + if !adminPrivilege || isTokenSelfRevoke { + parentUser := cred.AccessKey + if cred.ParentUser != "" { + parentUser = cred.ParentUser + } + if !isTokenSelfRevoke && user != parentUser { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL) + return + } + user = parentUser + } + + // Infer token revoke type from the request if requestor is STS. + if isTokenSelfRevoke && tokenRevokeType == "" && !fullRevoke { + if cred.IsTemp() { + tokenRevokeType, _ = cred.Claims[tokenRevokeTypeClaim].(string) + } + if tokenRevokeType == "" { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNoTokenRevokeType), r.URL) + return + } + } + + err := globalIAMSys.RevokeTokens(ctx, user, tokenRevokeType) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + + writeSuccessNoContent(w) +} + +// InfoAccessKey - GET /minio/admin/v3/info-access-key?access-key= +func (a adminAPIHandlers) InfoAccessKey(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + // Get current object layer instance. + objectAPI := newObjectLayerFn() + if objectAPI == nil || globalNotificationSys == nil { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) + return + } + + cred, owner, s3Err := validateAdminSignature(ctx, r, "") + if s3Err != ErrNone { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL) + return + } + + accessKey := mux.Vars(r)["accessKey"] + if accessKey == "" { + accessKey = cred.AccessKey + } + + u, ok := globalIAMSys.GetUser(ctx, accessKey) + targetCred := u.Credentials + + if !globalIAMSys.IsAllowed(policy.Args{ + AccountName: cred.AccessKey, + Groups: cred.Groups, + Action: policy.ListServiceAccountsAdminAction, + ConditionValues: getConditionValues(r, "", cred), + IsOwner: owner, + Claims: cred.Claims, + }) { + // If requested user does not exist and requestor is not allowed to list service accounts, return access denied. 
+ if !ok { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL) + return + } + + requestUser := cred.AccessKey + if cred.ParentUser != "" { + requestUser = cred.ParentUser + } + + if requestUser != targetCred.ParentUser { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL) + return + } + } + + if !ok { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminNoSuchAccessKey), r.URL) + return + } + + var ( + sessionPolicy *policy.Policy + err error + userType string + ) + switch { + case targetCred.IsTemp(): + userType = "STS" + _, sessionPolicy, err = globalIAMSys.GetTemporaryAccount(ctx, accessKey) + if err == errNoSuchTempAccount { + err = errNoSuchAccessKey + } + case targetCred.IsServiceAccount(): + userType = "Service Account" + _, sessionPolicy, err = globalIAMSys.GetServiceAccount(ctx, accessKey) + if err == errNoSuchServiceAccount { + err = errNoSuchAccessKey + } + default: + err = errNoSuchAccessKey + } + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + + // if session policy is nil or empty, then it is implied policy + impliedPolicy := sessionPolicy == nil || (sessionPolicy.Version == "" && len(sessionPolicy.Statements) == 0) + + var svcAccountPolicy policy.Policy + + if !impliedPolicy { + svcAccountPolicy = *sessionPolicy + } else { + policiesNames, err := globalIAMSys.PolicyDBGet(targetCred.ParentUser, targetCred.Groups...) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + svcAccountPolicy = globalIAMSys.GetCombinedPolicy(policiesNames...) + } + + policyJSON, err := json.MarshalIndent(svcAccountPolicy, "", " ") + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + + var expiration *time.Time + if !targetCred.Expiration.IsZero() && !targetCred.Expiration.Equal(timeSentinel) { + expiration = &targetCred.Expiration + } + + userProvider := guessUserProvider(targetCred) + + infoResp := madmin.InfoAccessKeyResp{ + AccessKey: accessKey, + InfoServiceAccountResp: madmin.InfoServiceAccountResp{ + ParentUser: targetCred.ParentUser, + Name: targetCred.Name, + Description: targetCred.Description, + AccountStatus: targetCred.Status, + ImpliedPolicy: impliedPolicy, + Policy: string(policyJSON), + Expiration: expiration, + }, + + UserType: userType, + UserProvider: userProvider, + } + + populateProviderInfoFromClaims(targetCred.Claims, userProvider, &infoResp) + + data, err := json.Marshal(infoResp) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + + encryptedData, err := madmin.EncryptData(cred.SecretKey, data) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + + writeSuccessResponseJSON(w, encryptedData) +} + const ( - allPoliciesFile = "policies.json" - allUsersFile = "users.json" - allGroupsFile = "groups.json" - allSvcAcctsFile = "svcaccts.json" - userPolicyMappingsFile = "user_mappings.json" - groupPolicyMappingsFile = "group_mappings.json" - stsUserPolicyMappingsFile = "stsuser_mappings.json" - stsGroupPolicyMappingsFile = "stsgroup_mappings.json" - iamAssetsDir = "iam-assets" + allPoliciesFile = "policies.json" + allUsersFile = "users.json" + allGroupsFile = "groups.json" + allSvcAcctsFile = "svcaccts.json" + userPolicyMappingsFile = "user_mappings.json" + groupPolicyMappingsFile = "group_mappings.json" + stsUserPolicyMappingsFile = "stsuser_mappings.json" + + iamAssetsDir = "iam-assets" ) +var 
iamExportFiles = []string{ + allPoliciesFile, + allUsersFile, + allGroupsFile, + allSvcAcctsFile, + userPolicyMappingsFile, + groupPolicyMappingsFile, + stsUserPolicyMappingsFile, +} + // ExportIAMHandler - exports all iam info as a zipped file func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -1774,6 +2241,7 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) { // Get current object layer instance. objectAPI, _ := validateAdminReq(ctx, w, r, policy.ExportIAMAction) if objectAPI == nil { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) return } // Initialize a zip writer which will provide a zipped content @@ -1790,38 +2258,28 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) { sys: nil, }) if zerr != nil { - logger.LogIf(ctx, zerr) + adminLogIf(ctx, zerr) return nil } header.Method = zip.Deflate zwriter, zerr := zipWriter.CreateHeader(header) if zerr != nil { - logger.LogIf(ctx, zerr) + adminLogIf(ctx, zerr) return nil } if _, err := io.Copy(zwriter, r); err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) } return nil } - iamFiles := []string{ - allPoliciesFile, - allUsersFile, - allGroupsFile, - allSvcAcctsFile, - userPolicyMappingsFile, - groupPolicyMappingsFile, - stsUserPolicyMappingsFile, - stsGroupPolicyMappingsFile, - } - for _, f := range iamFiles { + for _, f := range iamExportFiles { iamFile := pathJoin(iamAssetsDir, f) switch f { case allPoliciesFile: allPolicies, err := globalIAMSys.ListPolicies(ctx, "") if err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL) return } @@ -1900,7 +2358,7 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) { writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL) return } - _, policy, err := globalIAMSys.GetServiceAccount(ctx, acc.Credentials.AccessKey) + sa, policy, err := globalIAMSys.GetServiceAccount(ctx, acc.Credentials.AccessKey) if err != nil { writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL) return @@ -1920,8 +2378,11 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) { SecretKey: acc.Credentials.SecretKey, Groups: acc.Credentials.Groups, Claims: claims, - SessionPolicy: json.RawMessage(policyJSON), + SessionPolicy: policyJSON, Status: acc.Credentials.Status, + Name: sa.Name, + Description: sa.Description, + Expiration: &sa.Expiration, } } @@ -1936,13 +2397,13 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) { return } case userPolicyMappingsFile: - userPolicyMap := make(map[string]MappedPolicy) + userPolicyMap := xsync.NewMapOf[string, MappedPolicy]() err := globalIAMSys.store.loadMappedPolicies(ctx, regUser, false, userPolicyMap) if err != nil { writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL) return } - userPolData, err := json.Marshal(userPolicyMap) + userPolData, err := json.Marshal(mappedPoliciesToMap(userPolicyMap)) if err != nil { writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL) return @@ -1953,13 +2414,13 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) { return } case groupPolicyMappingsFile: - groupPolicyMap := make(map[string]MappedPolicy) + groupPolicyMap := xsync.NewMapOf[string, MappedPolicy]() err := globalIAMSys.store.loadMappedPolicies(ctx, regUser, true, groupPolicyMap) if err != nil { 
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL) return } - grpPolData, err := json.Marshal(groupPolicyMap) + grpPolData, err := json.Marshal(mappedPoliciesToMap(groupPolicyMap)) if err != nil { writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL) return @@ -1970,13 +2431,13 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) { return } case stsUserPolicyMappingsFile: - userPolicyMap := make(map[string]MappedPolicy) + userPolicyMap := xsync.NewMapOf[string, MappedPolicy]() err := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, false, userPolicyMap) if err != nil { writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL) return } - userPolData, err := json.Marshal(userPolicyMap) + userPolData, err := json.Marshal(mappedPoliciesToMap(userPolicyMap)) if err != nil { writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL) return @@ -1985,41 +2446,31 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) { writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL) return } - case stsGroupPolicyMappingsFile: - groupPolicyMap := make(map[string]MappedPolicy) - err := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, true, groupPolicyMap) - if err != nil { - writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL) - return - } - grpPolData, err := json.Marshal(groupPolicyMap) - if err != nil { - writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL) - return - } - if err = rawDataFn(bytes.NewReader(grpPolData), iamFile, len(grpPolData)); err != nil { - writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL) - return - } } } } // ImportIAM - imports all IAM info into MinIO func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) { + a.importIAM(w, r, "") +} + +// ImportIAMV2 - imports all IAM info into MinIO +func (a adminAPIHandlers) ImportIAMV2(w http.ResponseWriter, r *http.Request) { + a.importIAM(w, r, "v2") +} + +// ImportIAM - imports all IAM info into MinIO +func (a adminAPIHandlers) importIAM(w http.ResponseWriter, r *http.Request, apiVer string) { ctx := r.Context() - // Get current object layer instance. - objectAPI := newObjectLayerFn() + // Validate signature, permissions and get current object layer instance. 
+ objectAPI, _ := validateAdminReq(ctx, w, r, policy.ImportIAMAction) if objectAPI == nil || globalNotificationSys == nil { writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) return } - cred, owner, s3Err := validateAdminSignature(ctx, r, "") - if s3Err != ErrNone { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL) - return - } + data, err := io.ReadAll(r.Body) if err != nil { writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) @@ -2031,9 +2482,12 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) { writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) return } + + var skipped, removed, added madmin.IAMEntities + var failed madmin.IAMErrEntities + // import policies first { - f, err := zr.Open(pathJoin(iamAssetsDir, allPoliciesFile)) switch { case errors.Is(err, os.ErrNotExist): @@ -2056,8 +2510,10 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) { for policyName, policy := range allPolicies { if policy.IsEmpty() { err = globalIAMSys.DeletePolicy(ctx, policyName, true) + removed.Policies = append(removed.Policies, policyName) } else { _, err = globalIAMSys.SetPolicy(ctx, policyName, policy) + added.Policies = append(added.Policies, policyName) } if err != nil { writeErrorResponseJSON(ctx, w, importError(ctx, err, allPoliciesFile, policyName), r.URL) @@ -2103,43 +2559,17 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) { return } - if (cred.IsTemp() || cred.IsServiceAccount()) && cred.ParentUser == accessKey { - // Incoming access key matches parent user then we should - // reject password change requests. - writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAddUserInvalidArgument, err, allUsersFile, accessKey), r.URL) - return - } - // Check if accessKey has beginning and end space characters, this only applies to new users. if !exists && hasSpaceBE(accessKey) { writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminResourceInvalidArgument, err, allUsersFile, accessKey), r.URL) return } - checkDenyOnly := false - if accessKey == cred.AccessKey { - // Check that there is no explicit deny - otherwise it's allowed - // to change one's own password. - checkDenyOnly = true - } - - if !globalIAMSys.IsAllowed(policy.Args{ - AccountName: cred.AccessKey, - Groups: cred.Groups, - Action: policy.CreateUserAdminAction, - ConditionValues: getConditionValues(r, "", cred), - IsOwner: owner, - Claims: cred.Claims, - DenyOnly: checkDenyOnly, - }) { - writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAccessDenied, err, allUsersFile, accessKey), r.URL) - return - } if _, err = globalIAMSys.CreateUser(ctx, accessKey, ureq); err != nil { - writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, toAdminAPIErrCode(ctx, err), err, allUsersFile, accessKey), r.URL) - return + failed.Users = append(failed.Users, madmin.IAMErrEntity{Name: accessKey, Error: err}) + } else { + added.Users = append(added.Users, accessKey) } - } } } @@ -2170,13 +2600,14 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) { // If group does not exist, then check if the group has beginning and end space characters // we will reject such group names. 
if errors.Is(gerr, errNoSuchGroup) && hasSpaceBE(group) { - writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminResourceInvalidArgument, err, allGroupsFile, group), r.URL) + writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminResourceInvalidArgument, gerr, allGroupsFile, group), r.URL) return } } if _, gerr := globalIAMSys.AddUsersToGroup(ctx, group, grpInfo.Members); gerr != nil { - writeErrorResponseJSON(ctx, w, importError(ctx, err, allGroupsFile, group), r.URL) - return + failed.Groups = append(failed.Groups, madmin.IAMErrEntity{Name: group, Error: err}) + } else { + added.Groups = append(added.Groups, group) } } } @@ -2202,7 +2633,21 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) { writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, allSvcAcctsFile, ""), r.URL) return } + + // Validations for LDAP enabled deployments. + if globalIAMSys.LDAPConfig.Enabled() { + skippedAccessKeys, err := globalIAMSys.NormalizeLDAPAccessKeypairs(ctx, serviceAcctReqs) + skipped.ServiceAccounts = append(skipped.ServiceAccounts, skippedAccessKeys...) + if err != nil { + writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, ""), r.URL) + return + } + } + for user, svcAcctReq := range serviceAcctReqs { + if slices.Contains(skipped.ServiceAccounts, user) { + continue + } var sp *policy.Policy var err error if len(svcAcctReq.SessionPolicy) > 0 { @@ -2212,22 +2657,12 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) { return } } - // service account access key cannot have space characters beginning and end of the string. + // service account access key cannot have space characters + // beginning and end of the string. if hasSpaceBE(svcAcctReq.AccessKey) { writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument), r.URL) return } - if !globalIAMSys.IsAllowed(policy.Args{ - AccountName: cred.AccessKey, - Groups: cred.Groups, - Action: policy.CreateServiceAccountAdminAction, - ConditionValues: getConditionValues(r, "", cred), - IsOwner: owner, - Claims: cred.Claims, - }) { - writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAccessDenied, err, allSvcAcctsFile, user), r.URL) - return - } updateReq := true _, _, err = globalIAMSys.GetServiceAccount(ctx, svcAcctReq.AccessKey) if err != nil { @@ -2238,20 +2673,14 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) { updateReq = false } if updateReq { - opts := updateServiceAccountOpts{ - secretKey: svcAcctReq.SecretKey, - status: svcAcctReq.Status, - name: svcAcctReq.Name, - description: svcAcctReq.Description, - expiration: svcAcctReq.Expiration, - sessionPolicy: sp, - } - _, err = globalIAMSys.UpdateServiceAccount(ctx, svcAcctReq.AccessKey, opts) + // If the service account exists, we remove it to ensure a + // clean import. 
+ err := globalIAMSys.DeleteServiceAccount(ctx, svcAcctReq.AccessKey, true) if err != nil { - writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL) + delErr := fmt.Errorf("failed to delete existing service account (%s) before importing it: %w", svcAcctReq.AccessKey, err) + writeErrorResponseJSON(ctx, w, importError(ctx, delErr, allSvcAcctsFile, user), r.URL) return } - continue } opts := newServiceAccountOpts{ accessKey: user, @@ -2264,23 +2693,11 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) { allowSiteReplicatorAccount: false, } - // In case of LDAP we need to resolve the targetUser to a DN and - // query their groups: - if globalIAMSys.LDAPConfig.Enabled() { - opts.claims[ldapUserN] = svcAcctReq.AccessKey // simple username - targetUser, _, err := globalIAMSys.LDAPConfig.LookupUserDN(svcAcctReq.AccessKey) - if err != nil { - writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL) - return - } - opts.claims[ldapUser] = targetUser // username DN - } - if _, _, err = globalIAMSys.NewServiceAccount(ctx, svcAcctReq.Parent, svcAcctReq.Groups, opts); err != nil { - writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL) - return + failed.ServiceAccounts = append(failed.ServiceAccounts, madmin.IAMErrEntity{Name: user, Error: err}) + } else { + added.ServiceAccounts = append(added.ServiceAccounts, user) } - } } } @@ -2317,8 +2734,15 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) { return } if _, err := globalIAMSys.PolicyDBSet(ctx, u, pm.Policies, regUser, false); err != nil { - writeErrorResponseJSON(ctx, w, importError(ctx, err, userPolicyMappingsFile, u), r.URL) - return + failed.UserPolicies = append( + failed.UserPolicies, + madmin.IAMErrPolicyEntity{ + Name: u, + Policies: strings.Split(pm.Policies, ","), + Error: err, + }) + } else { + added.UserPolicies = append(added.UserPolicies, map[string][]string{u: strings.Split(pm.Policies, ",")}) } } } @@ -2344,10 +2768,32 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) { writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, groupPolicyMappingsFile, ""), r.URL) return } + + // Validations for LDAP enabled deployments. + if globalIAMSys.LDAPConfig.Enabled() { + isGroup := true + skippedDN, err := globalIAMSys.NormalizeLDAPMappingImport(ctx, isGroup, grpPolicyMap) + skipped.Groups = append(skipped.Groups, skippedDN...) + if err != nil { + writeErrorResponseJSON(ctx, w, importError(ctx, err, groupPolicyMappingsFile, ""), r.URL) + return + } + } + for g, pm := range grpPolicyMap { + if slices.Contains(skipped.Groups, g) { + continue + } if _, err := globalIAMSys.PolicyDBSet(ctx, g, pm.Policies, unknownIAMUserType, true); err != nil { - writeErrorResponseJSON(ctx, w, importError(ctx, err, groupPolicyMappingsFile, g), r.URL) - return + failed.GroupPolicies = append( + failed.GroupPolicies, + madmin.IAMErrPolicyEntity{ + Name: g, + Policies: strings.Split(pm.Policies, ","), + Error: err, + }) + } else { + added.GroupPolicies = append(added.GroupPolicies, map[string][]string{g: strings.Split(pm.Policies, ",")}) } } } @@ -2373,7 +2819,21 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) { writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, stsUserPolicyMappingsFile, ""), r.URL) return } + + // Validations for LDAP enabled deployments. 
+ if globalIAMSys.LDAPConfig.Enabled() { + isGroup := true + skippedDN, err := globalIAMSys.NormalizeLDAPMappingImport(ctx, !isGroup, userPolicyMap) + skipped.Users = append(skipped.Users, skippedDN...) + if err != nil { + writeErrorResponseJSON(ctx, w, importError(ctx, err, stsUserPolicyMappingsFile, ""), r.URL) + return + } + } for u, pm := range userPolicyMap { + if slices.Contains(skipped.Users, u) { + continue + } // disallow setting policy mapping if user is a temporary user ok, _, err := globalIAMSys.IsTempUser(u) if err != nil && err != errNoSuchUser { @@ -2384,52 +2844,53 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) { writeErrorResponseJSON(ctx, w, importError(ctx, errIAMActionNotAllowed, stsUserPolicyMappingsFile, u), r.URL) return } + if _, err := globalIAMSys.PolicyDBSet(ctx, u, pm.Policies, stsUser, false); err != nil { - writeErrorResponseJSON(ctx, w, importError(ctx, err, stsUserPolicyMappingsFile, u), r.URL) - return + failed.STSPolicies = append( + failed.STSPolicies, + madmin.IAMErrPolicyEntity{ + Name: u, + Policies: strings.Split(pm.Policies, ","), + Error: err, + }) + } else { + added.STSPolicies = append(added.STSPolicies, map[string][]string{u: strings.Split(pm.Policies, ",")}) } } } } - // import sts group policy mappings - { - f, err := zr.Open(pathJoin(iamAssetsDir, stsGroupPolicyMappingsFile)) - switch { - case errors.Is(err, os.ErrNotExist): - case err != nil: - writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, stsGroupPolicyMappingsFile, ""), r.URL) + if apiVer == "v2" { + iamr := madmin.ImportIAMResult{ + Skipped: skipped, + Removed: removed, + Added: added, + Failed: failed, + } + + b, err := json.Marshal(iamr) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return - default: - defer f.Close() - var grpPolicyMap map[string]MappedPolicy - data, err := io.ReadAll(f) - if err != nil { - writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, stsGroupPolicyMappingsFile, ""), r.URL) - return - } - if err = json.Unmarshal(data, &grpPolicyMap); err != nil { - writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, stsGroupPolicyMappingsFile, ""), r.URL) - return - } - for g, pm := range grpPolicyMap { - if _, err := globalIAMSys.PolicyDBSet(ctx, g, pm.Policies, unknownIAMUserType, true); err != nil { - writeErrorResponseJSON(ctx, w, importError(ctx, err, stsGroupPolicyMappingsFile, g), r.URL) - return - } - } } + + writeSuccessResponseJSON(w, b) } } -func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) { - if exp == nil { - return +func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) error { + if exp == nil || exp.IsZero() || exp.Equal(timeSentinel) { + return nil + } + dur := time.Until(*exp) + if dur <= 0 { + return errors.New("unsupported expiration time") } - condValues["DurationSeconds"] = []string{strconv.FormatInt(int64(exp.Sub(time.Now()).Seconds()), 10)} + condValues["DurationSeconds"] = []string{strconv.FormatInt(int64(dur.Seconds()), 10)} + return nil } -func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials, newServiceAccountOpts, madmin.AddServiceAccountReq, string, APIError) { +func commonAddServiceAccount(r *http.Request, ldap bool) (context.Context, auth.Credentials, newServiceAccountOpts, madmin.AddServiceAccountReq, string, APIError) { ctx := r.Context() // Get current object layer instance. 
@@ -2454,6 +2915,12 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err) } + if createReq.Expiration != nil && !createReq.Expiration.IsZero() { + // truncate expiration at the second. + truncateTime := createReq.Expiration.Truncate(time.Second) + createReq.Expiration = &truncateTime + } + // service account access key cannot have space characters beginning and end of the string. if hasSpaceBE(createReq.AccessKey) { return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument) @@ -2481,11 +2948,22 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials name: createReq.Name, description: description, expiration: createReq.Expiration, - claims: make(map[string]interface{}), + claims: make(map[string]any), } condValues := getConditionValues(r, "", cred) - addExpirationToCondValues(createReq.Expiration, condValues) + err = addExpirationToCondValues(createReq.Expiration, condValues) + if err != nil { + return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", toAdminAPIErr(ctx, err) + } + + denyOnly := (targetUser == cred.AccessKey || targetUser == cred.ParentUser) + if ldap && !denyOnly { + res, _ := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(targetUser) + if res != nil && res.NormDN == cred.ParentUser { + denyOnly = true + } + } // Check if action is allowed if creating access key for another user // Check if action is explicitly denied if for self @@ -2496,7 +2974,7 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials ConditionValues: condValues, IsOwner: owner, Claims: cred.Claims, - DenyOnly: (targetUser == cred.AccessKey || targetUser == cred.ParentUser), + DenyOnly: denyOnly, }) { return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErr(ErrAccessDenied) } @@ -2513,3 +2991,10 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials return ctx, cred, opts, createReq, targetUser, APIError{} } + +// setReqInfoPolicyName will set the given policyName as a tag on the context's request info, +// so that it appears in audit logs. 
+func setReqInfoPolicyName(ctx context.Context, policyName string) { + reqInfo := logger.GetReqInfo(ctx) + reqInfo.SetTags("policyName", policyName) +} diff --git a/cmd/admin-handlers-users_test.go b/cmd/admin-handlers-users_test.go index f91f63fca2648..828264583323a 100644 --- a/cmd/admin-handlers-users_test.go +++ b/cmd/admin-handlers-users_test.go @@ -28,6 +28,7 @@ import ( "net/http" "net/url" "runtime" + "slices" "strings" "testing" "time" @@ -39,7 +40,7 @@ import ( "github.com/minio/minio-go/v7/pkg/set" "github.com/minio/minio-go/v7/pkg/signer" "github.com/minio/minio/internal/auth" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) const ( @@ -159,7 +160,7 @@ func (s *TestSuiteIAM) SetUpSuite(c *check) { } func (s *TestSuiteIAM) RestartIAMSuite(c *check) { - s.TestSuiteCommon.RestartTestServer(c) + s.RestartTestServer(c) s.iamSetup(c) } @@ -207,6 +208,8 @@ func TestIAMInternalIDPServerSuite(t *testing.T) { suite.TestGroupAddRemove(c) suite.TestServiceAccountOpsByAdmin(c) suite.TestServiceAccountPrivilegeEscalationBug(c) + suite.TestServiceAccountPrivilegeEscalationBug2_2025_10_15(c, true) + suite.TestServiceAccountPrivilegeEscalationBug2_2025_10_15(c, false) suite.TestServiceAccountOpsByUser(c) suite.TestServiceAccountDurationSecondsCondition(c) suite.TestAddServiceAccountPerms(c) @@ -239,9 +242,12 @@ func (s *TestSuiteIAM) TestUserCreate(c *check) { c.Assert(v.Status, madmin.AccountEnabled) // 3. Associate policy and check that user can access - err = s.adm.SetPolicy(ctx, "readwrite", accessKey, false) + _, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{ + Policies: []string{"readwrite"}, + User: accessKey, + }) if err != nil { - c.Fatalf("unable to set policy: %v", err) + c.Fatalf("unable to attach policy: %v", err) } client := s.getUserClient(c, accessKey, secretKey, "") @@ -328,29 +334,40 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) { // 2.2 create and associate policy to user policy := "mypolicy-test-user-update" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ - "s3:PutObject", - "s3:GetObject", "s3:ListBucket" ], + "Resource": [ + "arn:aws:s3:::%s" + ] + }, + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject" + ], "Resource": [ "arn:aws:s3:::%s/*" ] } ] -}`, bucket)) +}`, bucket, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) } - err = s.adm.SetPolicy(ctx, policy, accessKey, false) + _, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{ + Policies: []string{policy}, + User: accessKey, + }) if err != nil { - c.Fatalf("Unable to set policy: %v", err) + c.Fatalf("unable to attach policy: %v", err) } // 2.3 check user has access to bucket c.mustListObjects(ctx, uClient, bucket) @@ -436,7 +453,7 @@ func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) { "s3:ListBucket" ], "Resource": [ - "arn:aws:s3:::testbucket/*" + "arn:aws:s3:::testbucket" ] } ] @@ -470,9 +487,12 @@ func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) { c.mustNotListObjects(ctx, uClient, "testbucket") // 3.2 associate policy to user - err = s.adm.SetPolicy(ctx, policy1, accessKey, false) + _, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{ + Policies: []string{policy1}, + User: accessKey, + }) if err != nil { - c.Fatalf("Unable to set policy: %v", err) + c.Fatalf("unable to attach policy: %v", err) } admClnt := s.getAdminClient(c, accessKey, 
secretKey, "") @@ -490,10 +510,22 @@ func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) { c.Fatalf("policy was missing!") } + // Detach policy1 to set up for policy2 + _, err = s.adm.DetachPolicy(ctx, madmin.PolicyAssociationReq{ + Policies: []string{policy1}, + User: accessKey, + }) + if err != nil { + c.Fatalf("unable to detach policy: %v", err) + } + // 3.2 associate policy to user - err = s.adm.SetPolicy(ctx, policy2, accessKey, false) + _, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{ + Policies: []string{policy2}, + User: accessKey, + }) if err != nil { - c.Fatalf("Unable to set policy: %v", err) + c.Fatalf("unable to attach policy: %v", err) } // 3.3 check user can create service account implicitly. @@ -532,22 +564,30 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) { // 1. Create a policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ - "s3:PutObject", - "s3:GetObject", "s3:ListBucket" ], + "Resource": [ + "arn:aws:s3:::%s" + ] + }, + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject" + ], "Resource": [ "arn:aws:s3:::%s/*" ] } ] -}`, bucket)) +}`, bucket, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -571,9 +611,12 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) { c.mustNotListObjects(ctx, uClient, bucket) // 3.2 associate policy to user - err = s.adm.SetPolicy(ctx, policy, accessKey, false) + _, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{ + Policies: []string{policy}, + User: accessKey, + }) if err != nil { - c.Fatalf("Unable to set policy: %v", err) + c.Fatalf("unable to attach policy: %v", err) } // 3.3 check user has access to bucket c.mustListObjects(ctx, uClient, bucket) @@ -639,22 +682,30 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) { c.Fatalf("bucket creat error: %v", err) } - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ - "s3:PutObject", - "s3:GetObject", "s3:ListBucket" ], + "Resource": [ + "arn:aws:s3:::%s" + ] + }, + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject" + ], "Resource": [ "arn:aws:s3:::%s/*" ] } ] -}`, bucket)) +}`, bucket, bucket) // Check that default policies can be overwritten. err = s.adm.AddCannedPolicy(ctx, "readwrite", policyBytes) @@ -667,6 +718,12 @@ func (s *TestSuiteIAM) TestCannedPolicies(c *check) { c.Fatalf("policy info err: %v", err) } + // Check that policy with comma is rejected. 
+ err = s.adm.AddCannedPolicy(ctx, "invalid,policy", policyBytes) + if err == nil { + c.Fatalf("invalid policy created successfully") + } + infoStr := string(info) if !strings.Contains(infoStr, `"s3:PutObject"`) || !strings.Contains(infoStr, ":"+bucket+"/") { c.Fatalf("policy contains unexpected content!") @@ -684,22 +741,30 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) { } policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ - "s3:PutObject", - "s3:GetObject", "s3:ListBucket" ], + "Resource": [ + "arn:aws:s3:::%s" + ] + }, + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject" + ], "Resource": [ "arn:aws:s3:::%s/*" ] } ] -}`, bucket)) +}`, bucket, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -726,9 +791,12 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) { c.mustNotListObjects(ctx, uClient, bucket) // 3. Associate policy to group and check user got access. - err = s.adm.SetPolicy(ctx, policy, group, true) + _, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{ + Policies: []string{policy}, + Group: group, + }) if err != nil { - c.Fatalf("Unable to set policy: %v", err) + c.Fatalf("unable to attach policy: %v", err) } // 3.1 check user has access to bucket c.mustListObjects(ctx, uClient, bucket) @@ -743,8 +811,9 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) { if err != nil { c.Fatalf("group list err: %v", err) } - if !set.CreateStringSet(groups...).Contains(group) { - c.Fatalf("created group not present!") + expected := []string{group} + if !slices.Equal(groups, expected) { + c.Fatalf("expected group listing: %v, got: %v", expected, groups) } groupInfo, err := s.adm.GetGroupDescription(ctx, group) if err != nil { @@ -844,22 +913,30 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) { // Create policy, user and associate policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ - "s3:PutObject", - "s3:GetObject", "s3:ListBucket" ], + "Resource": [ + "arn:aws:s3:::%s" + ] + }, + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject" + ], "Resource": [ "arn:aws:s3:::%s/*" ] } ] -}`, bucket)) +}`, bucket, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -871,9 +948,12 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) { c.Fatalf("Unable to set user: %v", err) } - err = s.adm.SetPolicy(ctx, policy, accessKey, false) + _, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{ + Policies: []string{policy}, + User: accessKey, + }) if err != nil { - c.Fatalf("Unable to set policy: %v", err) + c.Fatalf("unable to attach policy: %v", err) } // Create an madmin client with user creds @@ -917,7 +997,7 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) { // Create policy, user and associate policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -931,16 +1011,24 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) { { "Effect": "Allow", "Action": [ - "s3:PutObject", - "s3:GetObject", "s3:ListBucket" ], + "Resource": [ + "arn:aws:s3:::%s" + ] + }, + { + "Effect": "Allow", + "Action": [ + 
"s3:PutObject", + "s3:GetObject" + ], "Resource": [ "arn:aws:s3:::%s/*" ] } ] -}`, bucket)) +}`, bucket, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -952,9 +1040,12 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) { c.Fatalf("Unable to set user: %v", err) } - err = s.adm.SetPolicy(ctx, policy, accessKey, false) + _, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{ + Policies: []string{policy}, + User: accessKey, + }) if err != nil { - c.Fatalf("Unable to set policy: %v", err) + c.Fatalf("unable to attach policy: %v", err) } // Create an madmin client with user creds @@ -1004,22 +1095,30 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) { // Create policy, user and associate policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ - "s3:PutObject", - "s3:GetObject", "s3:ListBucket" ], + "Resource": [ + "arn:aws:s3:::%s" + ] + }, + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject" + ], "Resource": [ "arn:aws:s3:::%s/*" ] } ] -}`, bucket)) +}`, bucket, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -1031,9 +1130,12 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) { c.Fatalf("Unable to set user: %v", err) } - err = s.adm.SetPolicy(ctx, policy, accessKey, false) + _, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{ + Policies: []string{policy}, + User: accessKey, + }) if err != nil { - c.Fatalf("Unable to set policy: %v", err) + c.Fatalf("unable to attach policy: %v", err) } // 1. Create a service account for the user @@ -1149,6 +1251,108 @@ func (s *TestSuiteIAM) TestServiceAccountPrivilegeEscalationBug(c *check) { } } +func (s *TestSuiteIAM) TestServiceAccountPrivilegeEscalationBug2_2025_10_15(c *check, forRoot bool) { + ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout) + defer cancel() + + for i := range 3 { + err := s.client.MakeBucket(ctx, fmt.Sprintf("bucket%d", i+1), minio.MakeBucketOptions{}) + if err != nil { + c.Fatalf("bucket create error: %v", err) + } + defer func(i int) { + _ = s.client.RemoveBucket(ctx, fmt.Sprintf("bucket%d", i+1)) + }(i) + } + + allow2BucketsPolicyBytes := []byte(`{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ListBucket1AndBucket2", + "Effect": "Allow", + "Action": ["s3:ListBucket"], + "Resource": ["arn:aws:s3:::bucket1", "arn:aws:s3:::bucket2"] + }, + { + "Sid": "ReadWriteBucket1AndBucket2Objects", + "Effect": "Allow", + "Action": [ + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:GetObject", + "s3:GetObjectVersion", + "s3:PutObject" + ], + "Resource": ["arn:aws:s3:::bucket1/*", "arn:aws:s3:::bucket2/*"] + } + ] +}`) + + if forRoot { + // Create a service account for the root user. 
+ _, err := s.adm.AddServiceAccount(ctx, madmin.AddServiceAccountReq{ + Policy: allow2BucketsPolicyBytes, + AccessKey: "restricted", + SecretKey: "restricted123", + }) + if err != nil { + c.Fatalf("could not create service account") + } + defer func() { + _ = s.adm.DeleteServiceAccount(ctx, "restricted") + }() + } else { + // Create a regular user and attach consoleAdmin policy + err := s.adm.AddUser(ctx, "foobar", "foobar123") + if err != nil { + c.Fatalf("could not create user") + } + + _, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{ + Policies: []string{"consoleAdmin"}, + User: "foobar", + }) + if err != nil { + c.Fatalf("could not attach policy") + } + + // Create a service account for the regular user. + _, err = s.adm.AddServiceAccount(ctx, madmin.AddServiceAccountReq{ + Policy: allow2BucketsPolicyBytes, + TargetUser: "foobar", + AccessKey: "restricted", + SecretKey: "restricted123", + }) + if err != nil { + c.Fatalf("could not create service account: %v", err) + } + defer func() { + _ = s.adm.DeleteServiceAccount(ctx, "restricted") + _ = s.adm.RemoveUser(ctx, "foobar") + }() + } + restrictedClient := s.getUserClient(c, "restricted", "restricted123", "") + + buckets, err := restrictedClient.ListBuckets(ctx) + if err != nil { + c.Fatalf("err fetching buckets %s", err) + } + if len(buckets) != 2 || buckets[0].Name != "bucket1" || buckets[1].Name != "bucket2" { + c.Fatalf("restricted service account should only have access to bucket1 and bucket2") + } + + // Try to escalate privileges + restrictedAdmClient := s.getAdminClient(c, "restricted", "restricted123", "") + _, err = restrictedAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{ + AccessKey: "newroot", + SecretKey: "newroot123", + }) + if err == nil { + c.Fatalf("restricted service account was able to create service account bypassing sub-policy!") + } +} + func (s *TestSuiteIAM) SetUpAccMgmtPlugin(c *check) { ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout) defer cancel() @@ -1267,7 +1471,7 @@ func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) { svcAK, svcSK := mustGenerateCredentials(c) // This policy does not allow listing objects. - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -1281,7 +1485,7 @@ func (s *TestSuiteIAM) TestAccMgmtPlugin(c *check) { ] } ] -}`, bucket)) +}`, bucket) cr, err := userAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{ Policy: policyBytes, TargetUser: accessKey, @@ -1458,7 +1662,7 @@ func (c *check) mustDownload(ctx context.Context, client *minio.Client, bucket s func (c *check) mustUploadReturnVersions(ctx context.Context, client *minio.Client, bucket string) []string { c.Helper() versions := []string{} - for i := 0; i < 5; i++ { + for range 5 { ui, err := client.PutObject(ctx, bucket, "some-object", bytes.NewBuffer([]byte("stuff")), 5, minio.PutObjectOptions{}) if err != nil { c.Fatalf("upload did not succeed got %#v", err) @@ -1527,7 +1731,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit svcAK, svcSK := mustGenerateCredentials(c) // This policy does not allow listing objects. 
- policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -1541,7 +1745,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit ] } ] -}`, bucket)) +}`, bucket) cr, err := madmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{ Policy: policyBytes, TargetUser: accessKey, @@ -1555,7 +1759,7 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit c.mustNotListObjects(ctx, svcClient, bucket) // This policy allows listing objects. - newPolicyBytes := []byte(fmt.Sprintf(`{ + newPolicyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -1564,11 +1768,11 @@ func (c *check) assertSvcAccSessionPolicyUpdate(ctx context.Context, s *TestSuit "s3:ListBucket" ], "Resource": [ - "arn:aws:s3:::%s/*" + "arn:aws:s3:::%s" ] } ] -}`, bucket)) +}`, bucket) err = madmClient.UpdateServiceAccount(ctx, svcAK, madmin.UpdateServiceAccountReq{ NewPolicy: newPolicyBytes, }) diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index bded0edcd5114..0dcad4cc7624f 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -36,6 +36,7 @@ import ( "net/url" "os" "path" + "path/filepath" "regexp" "runtime" "sort" @@ -48,7 +49,9 @@ import ( "github.com/klauspost/compress/zip" "github.com/minio/madmin-go/v3" "github.com/minio/madmin-go/v3/estream" + "github.com/minio/madmin-go/v3/logger/log" "github.com/minio/minio-go/v7/pkg/set" + "github.com/minio/minio/internal/auth" "github.com/minio/minio/internal/dsync" "github.com/minio/minio/internal/grid" "github.com/minio/minio/internal/handlers" @@ -57,9 +60,8 @@ import ( "github.com/minio/minio/internal/kms" "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/logger/message/log" - xnet "github.com/minio/pkg/v2/net" - "github.com/minio/pkg/v2/policy" + xnet "github.com/minio/pkg/v3/net" + "github.com/minio/pkg/v3/policy" "github.com/secure-io/sio-go" "github.com/zeebo/xxh3" ) @@ -91,11 +93,18 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R return } - if globalInplaceUpdateDisabled || currentReleaseTime.IsZero() { + if globalInplaceUpdateDisabled { writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL) return } + if currentReleaseTime.IsZero() || currentReleaseTime.Equal(timeSentinel) { + apiErr := errorCodes.ToAPIErr(ErrMethodNotAllowed) + apiErr.Description = fmt.Sprintf("unable to perform in-place update, release time is unrecognized: %s", currentReleaseTime) + writeErrorResponseJSON(ctx, w, apiErr, r.URL) + return + } + vars := mux.Vars(r) updateURL := vars["updateURL"] dryRun := r.Form.Get("dry-run") == "true" @@ -108,6 +117,11 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R } } + local := globalLocalNodeName + if local == "" { + local = "127.0.0.1" + } + u, err := url.Parse(updateURL) if err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) @@ -126,25 +140,48 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R return } + updateStatus := madmin.ServerUpdateStatusV2{ + DryRun: dryRun, + Results: make([]madmin.ServerPeerUpdateStatus, 0, len(globalNotificationSys.allPeerClients)), + } + peerResults := make(map[string]madmin.ServerPeerUpdateStatus, len(globalNotificationSys.allPeerClients)) + failedClients := make(map[int]bool, len(globalNotificationSys.allPeerClients)) + + if lrTime.Sub(currentReleaseTime) <= 0 { + updateStatus.Results 
= append(updateStatus.Results, madmin.ServerPeerUpdateStatus{ + Host: local, + Err: fmt.Sprintf("server is running the latest version: %s", Version), + CurrentVersion: Version, + }) + + for _, client := range globalNotificationSys.peerClients { + updateStatus.Results = append(updateStatus.Results, madmin.ServerPeerUpdateStatus{ + Host: client.String(), + Err: fmt.Sprintf("server is running the latest version: %s", Version), + CurrentVersion: Version, + }) + } + + // Marshal API response + jsonBytes, err := json.Marshal(updateStatus) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + + writeSuccessResponseJSON(w, jsonBytes) + return + } + u.Path = path.Dir(u.Path) + SlashSeparator + releaseInfo // Download Binary Once binC, bin, err := downloadBinary(u, mode) if err != nil { - logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err)) + adminLogIf(ctx, fmt.Errorf("server update failed with %w", err)) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } - updateStatus := madmin.ServerUpdateStatusV2{DryRun: dryRun} - peerResults := make(map[string]madmin.ServerPeerUpdateStatus) - - local := globalLocalNodeName - if local == "" { - local = "127.0.0.1" - } - - failedClients := make(map[int]struct{}) - if globalIsDistErasure { // Push binary to other servers for idx, nerr := range globalNotificationSys.VerifyBinary(ctx, u, sha256Sum, releaseInfo, binC) { @@ -154,7 +191,7 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R Err: nerr.Err.Error(), CurrentVersion: Version, } - failedClients[idx] = struct{}{} + failedClients[idx] = true } else { peerResults[nerr.Host.String()] = madmin.ServerPeerUpdateStatus{ Host: nerr.Host.String(), @@ -165,25 +202,17 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R } } - if lrTime.Sub(currentReleaseTime) > 0 { - if err = verifyBinary(u, sha256Sum, releaseInfo, mode, bytes.NewReader(bin)); err != nil { - peerResults[local] = madmin.ServerPeerUpdateStatus{ - Host: local, - Err: err.Error(), - CurrentVersion: Version, - } - } else { - peerResults[local] = madmin.ServerPeerUpdateStatus{ - Host: local, - CurrentVersion: Version, - UpdatedVersion: lrTime.Format(MinioReleaseTagTimeLayout), - } + if err = verifyBinary(u, sha256Sum, releaseInfo, mode, bytes.NewReader(bin)); err != nil { + peerResults[local] = madmin.ServerPeerUpdateStatus{ + Host: local, + Err: err.Error(), + CurrentVersion: Version, } } else { peerResults[local] = madmin.ServerPeerUpdateStatus{ Host: local, - Err: fmt.Sprintf("server is already running the latest version: %s", Version), CurrentVersion: Version, + UpdatedVersion: lrTime.Format(MinioReleaseTagTimeLayout), } } @@ -191,8 +220,7 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R if globalIsDistErasure { ng := WithNPeers(len(globalNotificationSys.peerClients)) for idx, client := range globalNotificationSys.peerClients { - _, ok := failedClients[idx] - if ok { + if failedClients[idx] { continue } client := client @@ -235,17 +263,18 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R if globalIsDistErasure { // Notify all other MinIO peers signal service. 
+ startTime := time.Now().Add(restartUpdateDelay) ng := WithNPeers(len(globalNotificationSys.peerClients)) for idx, client := range globalNotificationSys.peerClients { - _, ok := failedClients[idx] - if ok { + if failedClients[idx] { continue } client := client ng.Go(ctx, func() error { prs, ok := peerResults[client.String()] - if ok && prs.CurrentVersion != prs.UpdatedVersion && prs.UpdatedVersion != "" { - return client.SignalService(serviceRestart, "", dryRun) + // We restart only on success, not for any failures. + if ok && prs.Err == "" { + return client.SignalService(serviceRestart, "", dryRun, &startTime) } return nil }, idx, *client.host) @@ -281,7 +310,9 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R writeSuccessResponseJSON(w, jsonBytes) if !dryRun { - if lrTime.Sub(currentReleaseTime) > 0 { + prs, ok := peerResults[local] + // We restart only on success, not for any failures. + if ok && prs.Err == "" { globalServiceSignalCh <- serviceRestart } } @@ -354,7 +385,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req // Download Binary Once binC, bin, err := downloadBinary(u, mode) if err != nil { - logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err)) + adminLogIf(ctx, fmt.Errorf("server update failed with %w", err)) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -368,7 +399,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req StatusCode: http.StatusInternalServerError, } logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err)) + adminLogIf(ctx, fmt.Errorf("server update failed with %w", err)) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -376,7 +407,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req err = verifyBinary(u, sha256Sum, releaseInfo, mode, bytes.NewReader(bin)) if err != nil { - logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err)) + adminLogIf(ctx, fmt.Errorf("server update failed with %w", err)) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -389,7 +420,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req StatusCode: http.StatusInternalServerError, } logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err)) + adminLogIf(ctx, fmt.Errorf("server update failed with %w", err)) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -397,7 +428,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req err = commitBinary() if err != nil { - logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err)) + adminLogIf(ctx, fmt.Errorf("server update failed with %w", err)) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -420,7 +451,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req for _, nerr := range globalNotificationSys.SignalService(serviceRestart) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + adminLogIf(ctx, nerr.Err) } } @@ -451,7 +482,7 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request) case madmin.ServiceActionUnfreeze: serviceSig = serviceUnFreeze default: - logger.LogIf(ctx, fmt.Errorf("Unrecognized service action %s 
requested", action), logger.ErrorKind) + adminLogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL) return } @@ -473,7 +504,7 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request) for _, nerr := range globalNotificationSys.SignalService(serviceSig) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + adminLogIf(ctx, nerr.Err) } } @@ -534,15 +565,18 @@ func (a adminAPIHandlers) ServiceV2Handler(w http.ResponseWriter, r *http.Reques case madmin.ServiceActionUnfreeze: serviceSig = serviceUnFreeze default: - logger.LogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind) + adminLogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind) writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL) return } var objectAPI ObjectLayer + var execAt *time.Time switch serviceSig { case serviceRestart: objectAPI, _ = validateAdminReq(ctx, w, r, policy.ServiceRestartAdminAction) + t := time.Now().Add(restartUpdateDelay) + execAt = &t case serviceStop: objectAPI, _ = validateAdminReq(ctx, w, r, policy.ServiceStopAdminAction) case serviceFreeze, serviceUnFreeze: @@ -569,7 +603,7 @@ func (a adminAPIHandlers) ServiceV2Handler(w http.ResponseWriter, r *http.Reques } if globalIsDistErasure { - for _, nerr := range globalNotificationSys.SignalServiceV2(serviceSig, dryRun) { + for _, nerr := range globalNotificationSys.SignalServiceV2(serviceSig, dryRun, execAt) { if nerr.Err != nil && process { waitingDrives := map[string]madmin.DiskMetrics{} jerr := json.Unmarshal([]byte(nerr.Err.Error()), &waitingDrives) @@ -795,7 +829,7 @@ func (a adminAPIHandlers) MetricsHandler(w http.ResponseWriter, r *http.Request) } // Flush before waiting for next... - w.(http.Flusher).Flush() + xhttp.Flush(w) select { case <-ticker.C: @@ -842,9 +876,10 @@ func (a adminAPIHandlers) DataUsageInfoHandler(w http.ResponseWriter, r *http.Re } func lriToLockEntry(l lockRequesterInfo, now time.Time, resource, server string) *madmin.LockEntry { + t := time.Unix(0, l.Timestamp) entry := &madmin.LockEntry{ - Timestamp: l.Timestamp, - Elapsed: now.Sub(l.Timestamp), + Timestamp: t, + Elapsed: now.Sub(t), Resource: resource, ServerList: []string{server}, Source: l.Source, @@ -919,7 +954,7 @@ func (a adminAPIHandlers) ForceUnlockHandler(w http.ResponseWriter, r *http.Requ var args dsync.LockArgs var lockers []dsync.NetLocker - for _, path := range strings.Split(vars["paths"], ",") { + for path := range strings.SplitSeq(vars["paths"], ",") { if path == "" { continue } @@ -1029,7 +1064,7 @@ func (a adminAPIHandlers) StartProfilingHandler(w http.ResponseWriter, r *http.R // Start profiling on remote servers. var hostErrs []NotificationPeerErr for _, profiler := range profiles { - hostErrs = append(hostErrs, globalNotificationSys.StartProfiling(profiler)...) + hostErrs = append(hostErrs, globalNotificationSys.StartProfiling(ctx, profiler)...) // Start profiling locally as well. prof, err := startProfiler(profiler) @@ -1110,7 +1145,11 @@ func (a adminAPIHandlers) ProfileHandler(w http.ResponseWriter, r *http.Request) // Start profiling on remote servers. for _, profiler := range profiles { - globalNotificationSys.StartProfiling(profiler) + // Limit start time to max 10s. 
+ ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + globalNotificationSys.StartProfiling(ctx, profiler) + // StartProfiling blocks, so we can cancel now. + cancel() // Start profiling locally as well. prof, err := startProfiler(profiler) @@ -1125,6 +1164,10 @@ func (a adminAPIHandlers) ProfileHandler(w http.ResponseWriter, r *http.Request) for { select { case <-ctx.Done(): + // Stop remote profiles + go globalNotificationSys.DownloadProfilingData(GlobalContext, io.Discard) + + // Stop local globalProfilerMu.Lock() defer globalProfilerMu.Unlock() for k, v := range globalProfiler { @@ -1150,7 +1193,7 @@ type dummyFileInfo struct { mode os.FileMode modTime time.Time isDir bool - sys interface{} + sys any } func (f dummyFileInfo) Name() string { return f.name } @@ -1158,7 +1201,7 @@ func (f dummyFileInfo) Size() int64 { return f.size } func (f dummyFileInfo) Mode() os.FileMode { return f.mode } func (f dummyFileInfo) ModTime() time.Time { return f.modTime } func (f dummyFileInfo) IsDir() bool { return f.isDir } -func (f dummyFileInfo) Sys() interface{} { return f.sys } +func (f dummyFileInfo) Sys() any { return f.sys } // DownloadProfilingHandler - POST /minio/admin/v3/profiling/download // ---------- @@ -1200,17 +1243,17 @@ func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Read if hip.objPrefix != "" { // Bucket is required if object-prefix is given err = ErrHealMissingBucket - return + return hip, err } } else if isReservedOrInvalidBucket(hip.bucket, false) { err = ErrInvalidBucketName - return + return hip, err } // empty prefix is valid. if !IsValidObjectPrefix(hip.objPrefix) { err = ErrInvalidObjectName - return + return hip, err } if len(qParams[mgmtClientToken]) > 0 { @@ -1232,21 +1275,21 @@ func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Read if (hip.forceStart && hip.forceStop) || (hip.clientToken != "" && (hip.forceStart || hip.forceStop)) { err = ErrInvalidRequest - return + return hip, err } // ignore body if clientToken is provided if hip.clientToken == "" { jerr := json.NewDecoder(r).Decode(&hip.hs) if jerr != nil { - logger.LogIf(GlobalContext, jerr, logger.ErrorKind) + adminLogIf(GlobalContext, jerr, logger.ErrorKind) err = ErrRequestBodyParse - return + return hip, err } } err = ErrNone - return + return hip, err } // HealHandler - POST /minio/admin/v3/heal/ @@ -1277,7 +1320,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) { } // Analyze the heal token and route the request accordingly - token, success := proxyRequestByToken(ctx, w, r, hip.clientToken) + token, _, success := proxyRequestByToken(ctx, w, r, hip.clientToken, false) if success { return } @@ -1316,7 +1359,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) { if _, err := w.Write([]byte(" ")); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) case hr := <-respCh: switch hr.apiErr { case noError: @@ -1324,7 +1367,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) { if _, err := w.Write(hr.respBytes); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) } else { writeSuccessResponseJSON(w, hr.respBytes) } @@ -1351,7 +1394,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) { if _, err := w.Write(errorRespJSON); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) } break forLoop } @@ -1364,7 +1407,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) { if exists 
&& !nh.hasEnded() && len(nh.currentStatus.Items) > 0 { clientToken := nh.clientToken if globalIsDistErasure { - clientToken = fmt.Sprintf("%s:%d", nh.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints)) + clientToken = fmt.Sprintf("%s%s%d", nh.clientToken, getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints)) } b, err := json.Marshal(madmin.HealStartSuccess{ ClientToken: clientToken, @@ -1429,11 +1472,11 @@ func getAggregatedBackgroundHealState(ctx context.Context, o ObjectLayer) (madmi if globalIsDistErasure { // Get heal status from other peers - peersHealStates, nerrs := globalNotificationSys.BackgroundHealStatus() + peersHealStates, nerrs := globalNotificationSys.BackgroundHealStatus(ctx) var errCount int for _, nerr := range nerrs { if nerr.Err != nil { - logger.LogIf(ctx, nerr.Err) + adminLogIf(ctx, nerr.Err) errCount++ } } @@ -1561,21 +1604,21 @@ func (a adminAPIHandlers) ClientDevNull(w http.ResponseWriter, r *http.Request) if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { // would mean the network is not stable. Logging here will help in debugging network issues. if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) } } totalRx += n if err != nil || ctx.Err() != nil || totalRx > 100*humanize.GiByte { break } - } w.WriteHeader(http.StatusOK) } // NetperfHandler - perform mesh style network throughput test func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() + ctx, cancel := context.WithCancel(r.Context()) + defer cancel() objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction) if objectAPI == nil { @@ -1596,6 +1639,15 @@ func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request) ctx = lkctx.Context() defer nsLock.Unlock(lkctx) + // Freeze all incoming S3 API calls before running speedtest. + globalNotificationSys.ServiceFreeze(ctx, true) + + // Unfreeze as soon as request context is canceled or when the function returns. + go func() { + <-ctx.Done() + globalNotificationSys.ServiceFreeze(ctx, false) + }() + durationStr := r.Form.Get(peerRESTDuration) duration, err := time.ParseDuration(durationStr) if err != nil { @@ -1616,18 +1668,73 @@ func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request) } } +func isAllowedRWAccess(r *http.Request, cred auth.Credentials, bucketName string) (rd, wr bool) { + owner := cred.AccessKey == globalActiveCred.AccessKey + + // Set prefix value for "s3:prefix" policy conditionals. + r.Header.Set("prefix", "") + + // Set delimiter value for "s3:delimiter" policy conditionals. 
+ r.Header.Set("delimiter", SlashSeparator) + + isAllowedAccess := func(bucketName string) (rd, wr bool) { + if globalIAMSys.IsAllowed(policy.Args{ + AccountName: cred.AccessKey, + Groups: cred.Groups, + Action: policy.GetObjectAction, + BucketName: bucketName, + ConditionValues: getConditionValues(r, "", cred), + IsOwner: owner, + ObjectName: "", + Claims: cred.Claims, + }) { + rd = true + } + + if globalIAMSys.IsAllowed(policy.Args{ + AccountName: cred.AccessKey, + Groups: cred.Groups, + Action: policy.PutObjectAction, + BucketName: bucketName, + ConditionValues: getConditionValues(r, "", cred), + IsOwner: owner, + ObjectName: "", + Claims: cred.Claims, + }) { + wr = true + } + + return rd, wr + } + return isAllowedAccess(bucketName) +} + // ObjectSpeedTestHandler - reports maximum speed of a cluster by performing PUT and // GET operations on the server, supports auto tuning by default by automatically // increasing concurrency and stopping when we have reached the limits on the // system. func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() + ctx, cancel := context.WithCancel(r.Context()) + defer cancel() - objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction) + objectAPI, creds := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction) if objectAPI == nil { return } + if !globalAPIConfig.permitRootAccess() { + rd, wr := isAllowedRWAccess(r, creds, globalObjectPerfBucket) + if !rd || !wr { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, AdminError{ + Code: "XMinioSpeedtestInsufficientPermissions", + Message: fmt.Sprintf("%s does not have read and write access to '%s' bucket", creds.AccessKey, + globalObjectPerfBucket), + StatusCode: http.StatusForbidden, + }), r.URL) + return + } + } + sizeStr := r.Form.Get(peerRESTSize) durationStr := r.Form.Get(peerRESTDuration) concurrentStr := r.Form.Get(peerRESTConcurrent) @@ -1636,6 +1743,7 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http. autotune := r.Form.Get("autotune") == "true" noClear := r.Form.Get("noclear") == "true" enableSha256 := r.Form.Get("enableSha256") == "true" + enableMultipart := r.Form.Get("enableMultipart") == "true" size, err := strconv.Atoi(sizeStr) if err != nil { @@ -1691,8 +1799,11 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http. // Freeze all incoming S3 API calls before running speedtest. globalNotificationSys.ServiceFreeze(ctx, true) - // unfreeze all incoming S3 API calls after speedtest. - defer globalNotificationSys.ServiceFreeze(ctx, false) + // Unfreeze as soon as request context is canceled or when the function returns. + go func() { + <-ctx.Done() + globalNotificationSys.ServiceFreeze(ctx, false) + }() keepAliveTicker := time.NewTicker(500 * time.Millisecond) defer keepAliveTicker.Stop() @@ -1706,6 +1817,8 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http. storageClass: storageClass, bucketName: customBucket, enableSha256: enableSha256, + enableMultipart: enableMultipart, + creds: creds, }) var prevResult madmin.SpeedTestResult for { @@ -1726,7 +1839,7 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http. return } } - w.(http.Flusher).Flush() + xhttp.Flush(w) case result, ok := <-ch: if !ok { return @@ -1735,7 +1848,7 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http. 
return } prevResult = result - w.(http.Flusher).Flush() + xhttp.Flush(w) } } } @@ -1792,7 +1905,8 @@ func validateObjPerfOptions(ctx context.Context, storageInfo madmin.StorageInfo, // DriveSpeedtestHandler - reports throughput of drives available in the cluster func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.Request) { - ctx := r.Context() + ctx, cancel := context.WithCancel(r.Context()) + defer cancel() objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction) if objectAPI == nil { @@ -1802,8 +1916,11 @@ func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.R // Freeze all incoming S3 API calls before running speedtest. globalNotificationSys.ServiceFreeze(ctx, true) - // unfreeze all incoming S3 API calls after speedtest. - defer globalNotificationSys.ServiceFreeze(ctx, false) + // Unfreeze as soon as request context is canceled or when the function returns. + go func() { + <-ctx.Done() + globalNotificationSys.ServiceFreeze(ctx, false) + }() serial := r.Form.Get("serial") == "true" blockSizeStr := r.Form.Get("blocksize") @@ -1840,7 +1957,7 @@ func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.R if err := enc.Encode(madmin.DriveSpeedTestResult{}); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) case result, ok := <-ch: if !ok { return @@ -1848,7 +1965,7 @@ func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.R if err := enc.Encode(result); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) } } } @@ -1905,7 +2022,7 @@ func extractTraceOptions(r *http.Request) (opts madmin.ServiceTraceOpts, err err opts.OS = true // Older mc - cannot deal with more types... } - return + return opts, err } // TraceHandler - POST /minio/admin/v3/trace @@ -1965,7 +2082,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) { grid.PutByteBuffer(entry) if len(traceCh) == 0 { // Flush if nothing is queued - w.(http.Flusher).Flush() + xhttp.Flush(w) } case <-keepAliveTicker.C: if len(traceCh) > 0 { @@ -1974,7 +2091,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) { if _, err := w.Write([]byte(" ")); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) case <-ctx.Done(): return } @@ -2066,7 +2183,7 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque grid.PutByteBuffer(log) if len(logCh) == 0 { // Flush if nothing is queued - w.(http.Flusher).Flush() + xhttp.Flush(w) } case <-keepAliveTicker.C: if len(logCh) > 0 { @@ -2075,7 +2192,7 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque if _, err := w.Write([]byte(" ")); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) case <-ctx.Done(): return } @@ -2096,14 +2213,16 @@ func (a adminAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Req return } - if err := GlobalKMS.CreateKey(ctx, r.Form.Get("key-id")); err != nil { + if err := GlobalKMS.CreateKey(ctx, &kms.CreateKeyRequest{ + Name: r.Form.Get("key-id"), + }); err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } writeSuccessResponseHeadersOnly(w) } -// KMSKeyStatusHandler - GET /minio/admin/v3/kms/status +// KMSStatusHandler - GET /minio/admin/v3/kms/status func (a adminAPIHandlers) KMSStatusHandler(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -2117,22 +2236,12 @@ func (a adminAPIHandlers) KMSStatusHandler(w http.ResponseWriter, r *http.Reques 
return } - stat, err := GlobalKMS.Stat(ctx) + stat, err := GlobalKMS.Status(ctx) if err != nil { writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL) return } - - status := madmin.KMSStatus{ - Name: stat.Name, - DefaultKeyID: stat.DefaultKey, - Endpoints: make(map[string]madmin.ItemState, len(stat.Endpoints)), - } - for _, endpoint := range stat.Endpoints { - status.Endpoints[endpoint] = madmin.ItemOnline // TODO(aead): Implement an online check for mTLS - } - - resp, err := json.Marshal(status) + resp, err := json.Marshal(stat) if err != nil { writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL) return @@ -2154,15 +2263,9 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req return } - stat, err := GlobalKMS.Stat(ctx) - if err != nil { - writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL) - return - } - keyID := r.Form.Get("key-id") if keyID == "" { - keyID = stat.DefaultKey + keyID = GlobalKMS.DefaultKey } response := madmin.KMSKeyStatus{ KeyID: keyID, @@ -2170,7 +2273,10 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req kmsContext := kms.Context{"MinIO admin API": "KMSKeyStatusHandler"} // Context for a test key operation // 1. Generate a new key using the KMS. - key, err := GlobalKMS.GenerateKey(ctx, keyID, kmsContext) + key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{ + Name: keyID, + AssociatedData: kmsContext, + }) if err != nil { response.EncryptionErr = err.Error() resp, err := json.Marshal(response) @@ -2183,7 +2289,11 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req } // 2. Verify that we can indeed decrypt the (encrypted) key - decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext) + decryptedKey, err := GlobalKMS.Decrypt(ctx, &kms.DecryptRequest{ + Name: key.KeyID, + Ciphertext: key.Ciphertext, + AssociatedData: kmsContext, + }) if err != nil { response.DecryptionErr = err.Error() resp, err := json.Marshal(response) @@ -2256,6 +2366,7 @@ func getPoolsInfo(ctx context.Context, allDisks []madmin.Disk) (map[int]map[int] } func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) madmin.InfoMessage { + const operationTimeout = 10 * time.Second ldap := madmin.LDAP{} if globalIAMSys.LDAPConfig.Enabled() { ldapConn, err := globalIAMSys.LDAPConfig.LDAP.Connect() @@ -2277,7 +2388,7 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma notifyTarget := fetchLambdaInfo() local := getLocalServerProperty(globalEndpoints, r, metrics) - servers := globalNotificationSys.ServerInfo(metrics) + servers := globalNotificationSys.ServerInfo(ctx, metrics) servers = append(servers, local) var poolsInfo map[int]map[int]madmin.ErasureSetInfo @@ -2296,7 +2407,9 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma mode = madmin.ItemOnline // Load data usage - dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI) + ctx2, cancel := context.WithTimeout(ctx, operationTimeout) + dataUsageInfo, err := loadDataUsageFromBackend(ctx2, objectAPI) + cancel() if err == nil { buckets = madmin.Buckets{Count: dataUsageInfo.BucketsCount} objects = madmin.Objects{Count: dataUsageInfo.ObjectsTotalCount} @@ -2330,25 +2443,30 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma } if pools { - poolsInfo, _ = getPoolsInfo(ctx, allDisks) + 
ctx2, cancel := context.WithTimeout(ctx, operationTimeout) + poolsInfo, _ = getPoolsInfo(ctx2, allDisks) + cancel() } } domain := globalDomainNames services := madmin.Services{ - KMS: fetchKMSStatus(), - KMSStatus: fetchKMSStatusV2(ctx), LDAP: ldap, Logger: log, Audit: audit, Notifications: notifyTarget, } + { + ctx2, cancel := context.WithTimeout(ctx, operationTimeout) + services.KMSStatus = fetchKMSStatus(ctx2) + cancel() + } return madmin.InfoMessage{ Mode: string(mode), Domain: domain, - Region: globalSite.Region, - SQSARN: globalEventNotifier.GetARNList(false), + Region: globalSite.Region(), + SQSARN: globalEventNotifier.GetARNList(), DeploymentID: globalDeploymentID(), Buckets: buckets, Objects: objects, @@ -2375,7 +2493,7 @@ func getKubernetesInfo(dctx context.Context) madmin.KubernetesInfo { } client := &http.Client{ - Transport: globalHealthChkTransport, + Transport: globalRemoteTargetTransport, Timeout: 10 * time.Second, } @@ -2558,7 +2676,7 @@ func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *ur // disk metrics are already included under drive info of each server getRealtimeMetrics := func() *madmin.RealtimeMetrics { var m madmin.RealtimeMetrics - var types madmin.MetricType = madmin.MetricsAll &^ madmin.MetricsDisk + types := madmin.MetricsAll &^ madmin.MetricsDisk mLocal := collectLocalMetrics(types, collectMetricsOpts{}) m.Merge(&mLocal) cctx, cancel := context.WithTimeout(healthCtx, time.Second/2) @@ -2602,7 +2720,7 @@ func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *ur poolsArgs := re.ReplaceAllString(cmdLine, `$3`) var anonPools []string - if !(strings.Contains(poolsArgs, "{") && strings.Contains(poolsArgs, "}")) { + if !strings.Contains(poolsArgs, "{") || !strings.Contains(poolsArgs, "}") { // No ellipses pattern. Anonymize host name from every pool arg pools := strings.Fields(poolsArgs) anonPools = make([]string, len(pools)) @@ -2800,7 +2918,7 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque w.Header().Get(xhttp.AmzRequestID), w.Header().Get(xhttp.AmzRequestHostID)) encodedErrorResponse := encodeResponse(errorResponse) healthInfo.Error = string(encodedErrorResponse) - logger.LogIf(ctx, enc.Encode(healthInfo)) + adminLogIf(ctx, enc.Encode(healthInfo)) } deadline := 10 * time.Second // Default deadline is 10secs for health diagnostics. @@ -2844,13 +2962,13 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque } if len(healthInfoCh) == 0 { // Flush if nothing is queued - w.(http.Flusher).Flush() + xhttp.Flush(w) } case <-ticker.C: if _, err := w.Write([]byte(" ")); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) case <-healthCtx.Done(): return } @@ -2947,66 +3065,25 @@ func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus { return notify } -// fetchKMSStatus fetches KMS-related status information. 
-func fetchKMSStatus() madmin.KMS { - kmsStat := madmin.KMS{} +// fetchKMSStatus fetches KMS-related status information for all instances +func fetchKMSStatus(ctx context.Context) []madmin.KMS { if GlobalKMS == nil { - kmsStat.Status = "disabled" - return kmsStat - } - - stat, err := GlobalKMS.Stat(context.Background()) - if err != nil { - kmsStat.Status = string(madmin.ItemOffline) - return kmsStat - } - if len(stat.Endpoints) == 0 { - kmsStat.Status = stat.Name - return kmsStat + return []madmin.KMS{} } - kmsStat.Status = string(madmin.ItemOnline) - kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation - // 1. Generate a new key using the KMS. - key, err := GlobalKMS.GenerateKey(context.Background(), "", kmsContext) + stat, err := GlobalKMS.Status(ctx) if err != nil { - kmsStat.Encrypt = fmt.Sprintf("Encryption failed: %v", err) - } else { - kmsStat.Encrypt = "success" - } - - // 2. Verify that we can indeed decrypt the (encrypted) key - decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext) - switch { - case err != nil: - kmsStat.Decrypt = fmt.Sprintf("Decryption failed: %v", err) - case subtle.ConstantTimeCompare(key.Plaintext, decryptedKey) != 1: - kmsStat.Decrypt = "Decryption failed: decrypted key does not match generated key" - default: - kmsStat.Decrypt = "success" - } - return kmsStat -} - -// fetchKMSStatusV2 fetches KMS-related status information for all instances -func fetchKMSStatusV2(ctx context.Context) []madmin.KMS { - if GlobalKMS == nil { + kmsLogIf(ctx, err, "failed to fetch KMS status information") return []madmin.KMS{} } - results := GlobalKMS.Verify(ctx) - - stats := []madmin.KMS{} - for _, result := range results { + stats := make([]madmin.KMS, 0, len(stat.Endpoints)) + for endpoint, state := range stat.Endpoints { stats = append(stats, madmin.KMS{ - Status: result.Status, - Endpoint: result.Endpoint, - Encrypt: result.Encrypt, - Decrypt: result.Decrypt, - Version: result.Version, + Status: string(state), + Endpoint: endpoint, }) } - return stats } @@ -3017,7 +3094,7 @@ func targetStatus(ctx context.Context, h logger.Target) madmin.Status { return madmin.Status{Status: string(madmin.ItemOffline)} } -// fetchLoggerDetails return log info +// fetchLoggerInfo return log info func fetchLoggerInfo(ctx context.Context) ([]madmin.Logger, []madmin.Audit) { var loggerInfo []madmin.Logger var auditloggerInfo []madmin.Audit @@ -3113,7 +3190,7 @@ func getClusterMetaInfo(ctx context.Context) []byte { case ci := <-resultCh: out, err := json.MarshalIndent(ci, "", " ") if err != nil { - logger.LogIf(ctx, err) + bugLogIf(ctx, err) return nil } return out @@ -3172,11 +3249,11 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) return } - file = strings.ReplaceAll(file, string(os.PathSeparator), "/") + file = filepath.ToSlash(file) // Reject attempts to traverse parent or absolute paths. - if strings.Contains(file, "..") || strings.Contains(volume, "..") { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL) + if hasBadPathComponent(volume) || hasBadPathComponent(file) { + writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL) return } @@ -3195,6 +3272,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ return } } + addErr := func(msg string) {} // Write a version for making *incompatible* changes. 
// The AdminClient will reject any version it does not know. @@ -3206,18 +3284,18 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ clusterKey, err := bytesToPublicKey(getSubnetAdminPublicKey()) if err != nil { - logger.LogIf(ctx, stream.AddError(err.Error())) + bugLogIf(ctx, stream.AddError(err.Error())) return } err = stream.AddKeyEncrypted(clusterKey) if err != nil { - logger.LogIf(ctx, stream.AddError(err.Error())) + bugLogIf(ctx, stream.AddError(err.Error())) return } if b := getClusterMetaInfo(ctx); len(b) > 0 { w, err := stream.AddEncryptedStream("cluster.info", nil) if err != nil { - logger.LogIf(ctx, err) + bugLogIf(ctx, err) return } w.Write(b) @@ -3226,14 +3304,19 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ // Add new key for inspect data. if err := stream.AddKeyEncrypted(publicKey); err != nil { - logger.LogIf(ctx, stream.AddError(err.Error())) + bugLogIf(ctx, stream.AddError(err.Error())) return } encStream, err := stream.AddEncryptedStream("inspect.zip", nil) if err != nil { - logger.LogIf(ctx, stream.AddError(err.Error())) + bugLogIf(ctx, stream.AddError(err.Error())) return } + addErr = func(msg string) { + inspectZipW.Close() + encStream.Close() + stream.AddError(msg) + } defer encStream.Close() inspectZipW = zip.NewWriter(encStream) @@ -3244,7 +3327,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ // MUST use crypto/rand n, err := crand.Read(key[:]) if err != nil || n != len(key) { - logger.LogIf(ctx, err) + bugLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -3258,7 +3341,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ stream, err := sio.AES_256_GCM.Stream(key[:]) if err != nil { - logger.LogIf(ctx, err) + bugLogIf(ctx, err) return } // Zero nonce, we only use each key once, and 32 bytes is plenty. 
@@ -3272,7 +3355,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ defer inspectZipW.Close() if b := getClusterMetaInfo(ctx); len(b) > 0 { - logger.LogIf(ctx, embedFileInZip(inspectZipW, "cluster.info", b, 0o600)) + adminLogIf(ctx, embedFileInZip(inspectZipW, "cluster.info", b, 0o600)) } } @@ -3300,32 +3383,20 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ sys: nil, }) if zerr != nil { - logger.LogIf(ctx, zerr) + bugLogIf(ctx, zerr) return nil } header.Method = zip.Deflate zwriter, zerr := inspectZipW.CreateHeader(header) if zerr != nil { - logger.LogIf(ctx, zerr) + bugLogIf(ctx, zerr) return nil } if _, err := io.Copy(zwriter, r); err != nil { - logger.LogIf(ctx, err) + adminLogIf(ctx, err) } return nil } - err := o.GetRawData(ctx, volume, file, rawDataFn) - if !errors.Is(err, errFileNotFound) { - logger.LogIf(ctx, err) - } - - // save the format.json as part of inspect by default - if !(volume == minioMetaBucket && file == formatConfigFile) { - err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn) - } - if !errors.Is(err, errFileNotFound) { - logger.LogIf(ctx, err) - } // save args passed to inspect command var sb bytes.Buffer @@ -3336,7 +3407,25 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ sb.WriteString(pool.CmdLine) } sb.WriteString("\n") - logger.LogIf(ctx, embedFileInZip(inspectZipW, "inspect-input.txt", sb.Bytes(), 0o600)) + adminLogIf(ctx, embedFileInZip(inspectZipW, "inspect-input.txt", sb.Bytes(), 0o600)) + + err := o.GetRawData(ctx, volume, file, rawDataFn) + if err != nil { + if errors.Is(err, errFileNotFound) { + addErr("GetRawData: No files matched the given pattern") + return + } + embedFileInZip(inspectZipW, "GetRawData-err.txt", []byte(err.Error()), 0o600) + adminLogIf(ctx, err) + } + + // save the format.json as part of inspect by default + if volume != minioMetaBucket || file != formatConfigFile { + err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn) + } + if !errors.Is(err, errFileNotFound) { + adminLogIf(ctx, err) + } scheme := "https" if !globalIsTLS { @@ -3370,7 +3459,7 @@ function main() { } main "$@"`, scheme) - logger.LogIf(ctx, embedFileInZip(inspectZipW, "start-minio.sh", scrb.Bytes(), 0o755)) + adminLogIf(ctx, embedFileInZip(inspectZipW, "start-minio.sh", scrb.Bytes(), 0o755)) } func getSubnetAdminPublicKey() []byte { diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go index 8adec1df4e3c1..3f8b3482b5dfb 100644 --- a/cmd/admin-handlers_test.go +++ b/cmd/admin-handlers_test.go @@ -263,7 +263,7 @@ func buildAdminRequest(queryVal url.Values, method, path string, } func TestAdminServerInfo(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() adminTestBed, err := prepareAdminErasureTestBed(ctx) @@ -402,7 +402,7 @@ func (b byResourceUID) Less(i, j int) bool { func TestTopLockEntries(t *testing.T) { locksHeld := make(map[string][]lockRequesterInfo) var owners []string - for i := 0; i < 4; i++ { + for i := range 4 { owners = append(owners, fmt.Sprintf("node-%d", i)) } @@ -410,7 +410,7 @@ func TestTopLockEntries(t *testing.T) { // request UID, but 10 different resource names associated with it. 
var lris []lockRequesterInfo uuid := mustGetUUID() - for i := 0; i < 10; i++ { + for i := range 10 { resource := fmt.Sprintf("bucket/delete-object-%d", i) lri := lockRequesterInfo{ Name: resource, @@ -425,7 +425,7 @@ func TestTopLockEntries(t *testing.T) { } // Add a few concurrent read locks to the mix - for i := 0; i < 50; i++ { + for i := range 50 { resource := fmt.Sprintf("bucket/get-object-%d", i) lri := lockRequesterInfo{ Name: resource, @@ -463,6 +463,7 @@ func TestTopLockEntries(t *testing.T) { Owner: lri.Owner, ID: lri.UID, Quorum: lri.Quorum, + Timestamp: time.Unix(0, lri.Timestamp), }) } diff --git a/cmd/admin-heal-ops.go b/cmd/admin-heal-ops.go index b1d48b3d12854..0b2976349761f 100644 --- a/cmd/admin-heal-ops.go +++ b/cmd/admin-heal-ops.go @@ -22,6 +22,7 @@ import ( "encoding/json" "errors" "fmt" + "maps" "net/http" "sort" "sync" @@ -63,8 +64,8 @@ const ( ) var ( - errHealIdleTimeout = fmt.Errorf("healing results were not consumed for too long") - errHealStopSignalled = fmt.Errorf("heal stop signaled") + errHealIdleTimeout = errors.New("healing results were not consumed for too long") + errHealStopSignalled = errors.New("heal stop signaled") errFnHealFromAPIErr = func(ctx context.Context, err error) error { apiErr := toAdminAPIErr(ctx, err) @@ -260,7 +261,7 @@ func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) { } else { clientToken := he.clientToken if globalIsDistErasure { - clientToken = fmt.Sprintf("%s:%d", he.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints)) + clientToken = fmt.Sprintf("%s%s%d", he.clientToken, getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints)) } hsp = madmin.HealStopSuccess{ @@ -329,12 +330,16 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay // Add heal state and start sequence ahs.healSeqMap[hpath] = h - // Launch top-level background heal go-routine - go h.healSequenceStart(objAPI) - clientToken := h.clientToken if globalIsDistErasure { - clientToken = fmt.Sprintf("%s:%d", h.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints)) + clientToken = fmt.Sprintf("%s%s%d", h.clientToken, getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints)) + } + + if h.clientToken == bgHealingUUID { + // For background heal do nothing, do not spawn an unnecessary goroutine. 
+ } else { + // Launch top-level background heal go-routine + go h.healSequenceStart(objAPI) } b, err := json.Marshal(madmin.HealStartSuccess{ @@ -343,7 +348,7 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay StartTime: h.startTime, }) if err != nil { - logger.LogIf(h.ctx, err) + bugLogIf(h.ctx, err) return nil, toAdminAPIErr(h.ctx, err), "" } return b, noError, "" @@ -390,7 +395,7 @@ func (ahs *allHealState) PopHealStatusJSON(hpath string, if err != nil { h.currentStatus.Items = nil - logger.LogIf(h.ctx, err) + bugLogIf(h.ctx, err) return nil, ErrInternalError } @@ -451,8 +456,8 @@ type healSequence struct { // Number of total items healed against item type healedItemsMap map[madmin.HealItemType]int64 - // Number of total items where healing failed against endpoint and drive state - healFailedItemsMap map[string]int64 + // Number of total items where healing failed against item type + healFailedItemsMap map[madmin.HealItemType]int64 // The time of the last scan/heal activity lastHealActivity time.Time @@ -493,7 +498,7 @@ func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string, ctx: ctx, scannedItemsMap: make(map[madmin.HealItemType]int64), healedItemsMap: make(map[madmin.HealItemType]int64), - healFailedItemsMap: make(map[string]int64), + healFailedItemsMap: make(map[madmin.HealItemType]int64), } } @@ -516,9 +521,7 @@ func (h *healSequence) getScannedItemsMap() map[madmin.HealItemType]int64 { // Make a copy before returning the value retMap := make(map[madmin.HealItemType]int64, len(h.scannedItemsMap)) - for k, v := range h.scannedItemsMap { - retMap[k] = v - } + maps.Copy(retMap, h.scannedItemsMap) return retMap } @@ -530,28 +533,48 @@ func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 { // Make a copy before returning the value retMap := make(map[madmin.HealItemType]int64, len(h.healedItemsMap)) - for k, v := range h.healedItemsMap { - retMap[k] = v - } + maps.Copy(retMap, h.healedItemsMap) return retMap } -// gethealFailedItemsMap - returns map of all items where heal failed against +// getHealFailedItemsMap - returns map of all items where heal failed against // drive endpoint and status -func (h *healSequence) gethealFailedItemsMap() map[string]int64 { +func (h *healSequence) getHealFailedItemsMap() map[madmin.HealItemType]int64 { h.mutex.RLock() defer h.mutex.RUnlock() // Make a copy before returning the value - retMap := make(map[string]int64, len(h.healFailedItemsMap)) - for k, v := range h.healFailedItemsMap { - retMap[k] = v - } + retMap := make(map[madmin.HealItemType]int64, len(h.healFailedItemsMap)) + maps.Copy(retMap, h.healFailedItemsMap) return retMap } +func (h *healSequence) countFailed(healType madmin.HealItemType) { + h.mutex.Lock() + defer h.mutex.Unlock() + + h.healFailedItemsMap[healType]++ + h.lastHealActivity = UTCNow() +} + +func (h *healSequence) countScanned(healType madmin.HealItemType) { + h.mutex.Lock() + defer h.mutex.Unlock() + + h.scannedItemsMap[healType]++ + h.lastHealActivity = UTCNow() +} + +func (h *healSequence) countHealed(healType madmin.HealItemType) { + h.mutex.Lock() + defer h.mutex.Unlock() + + h.healedItemsMap[healType]++ + h.lastHealActivity = UTCNow() +} + // isQuitting - determines if the heal sequence is quitting (due to an // external signal) func (h *healSequence) isQuitting() bool { @@ -704,10 +727,7 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem task.opts.ScanMode = madmin.HealNormalScan } - h.mutex.Lock() - 
h.scannedItemsMap[healType]++ - h.lastHealActivity = UTCNow() - h.mutex.Unlock() + h.countScanned(healType) if source.noWait { select { @@ -736,42 +756,40 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem return nil } + countOKDrives := func(drives []madmin.HealDriveInfo) (count int) { + for _, drive := range drives { + if drive.State == madmin.DriveStateOk { + count++ + } + } + return count + } + // task queued, now wait for the response. select { case res := <-task.respCh: + if res.err == nil { + h.countHealed(healType) + } else { + h.countFailed(healType) + } if !h.reportProgress { if errors.Is(res.err, errSkipFile) { // this is only sent usually by nopHeal return nil } - h.mutex.Lock() - defer h.mutex.Unlock() - - // Progress is not reported in case of background heal processing. - // Instead we increment relevant counter based on the heal result - // for prometheus reporting. - if res.err != nil { - for _, d := range res.result.After.Drives { - // For failed items we report the endpoint and drive state - // This will help users take corrective actions for drives - h.healFailedItemsMap[d.Endpoint+","+d.State]++ - } - } else { - // Only object type reported for successful healing - h.healedItemsMap[res.result.Type]++ - } - // Report caller of any failure return res.err } res.result.Type = healType if res.err != nil { - // Only report object error - if healType != madmin.HealItemObject { - return res.err - } res.result.Detail = res.err.Error() } + if res.result.ParityBlocks > 0 && res.result.DataBlocks > 0 && res.result.DataBlocks > res.result.ParityBlocks { + if got := countOKDrives(res.result.After.Drives); got < res.result.ParityBlocks { + res.result.Detail = fmt.Sprintf("quorum loss - expected %d minimum, got drive states in OK %d", res.result.ParityBlocks, got) + } + } return h.pushHealResultItem(res.result) case <-h.ctx.Done(): return nil @@ -783,18 +801,20 @@ func (h *healSequence) healDiskMeta(objAPI ObjectLayer) error { return h.healMinioSysMeta(objAPI, minioConfigPrefix)() } -func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error { +func (h *healSequence) healItems(objAPI ObjectLayer) error { if h.clientToken == bgHealingUUID { // For background heal do nothing. return nil } - if err := h.healDiskMeta(objAPI); err != nil { - return err + if h.bucket == "" { // heal internal meta only during a site-wide heal + if err := h.healDiskMeta(objAPI); err != nil { + return err + } } // Heal buckets and objects - return h.healBuckets(objAPI, bucketsOnly) + return h.healBuckets(objAPI) } // traverseAndHeal - traverses on-disk data and performs healing @@ -805,8 +825,7 @@ func (h *healSequence) healItems(objAPI ObjectLayer, bucketsOnly bool) error { // has to wait until a safe point is reached, such as between scanning // two objects. func (h *healSequence) traverseAndHeal(objAPI ObjectLayer) { - bucketsOnly := false // Heals buckets and objects also. - h.traverseAndHealDoneCh <- h.healItems(objAPI, bucketsOnly) + h.traverseAndHealDoneCh <- h.healItems(objAPI) xioutil.SafeClose(h.traverseAndHealDoneCh) } @@ -817,6 +836,7 @@ func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) f // NOTE: Healing on meta is run regardless // of any bucket being selected, this is to ensure that // meta are always upto date and correct. 
+ h.settings.Recursive = true return objAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket, object, versionID string, scanMode madmin.HealScanMode) error { if h.isQuitting() { return errHealStopSignalled @@ -833,14 +853,14 @@ func (h *healSequence) healMinioSysMeta(objAPI ObjectLayer, metaPrefix string) f } // healBuckets - check for all buckets heal or just particular bucket. -func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error { +func (h *healSequence) healBuckets(objAPI ObjectLayer) error { if h.isQuitting() { return errHealStopSignalled } // 1. If a bucket was specified, heal only the bucket. if h.bucket != "" { - return h.healBucket(objAPI, h.bucket, bucketsOnly) + return h.healBucket(objAPI, h.bucket, false) } buckets, err := objAPI.ListBuckets(h.ctx, BucketOptions{}) @@ -854,7 +874,7 @@ func (h *healSequence) healBuckets(objAPI ObjectLayer, bucketsOnly bool) error { }) for _, bucket := range buckets { - if err = h.healBucket(objAPI, bucket.Name, bucketsOnly); err != nil { + if err = h.healBucket(objAPI, bucket.Name, false); err != nil { return err } } @@ -872,16 +892,6 @@ func (h *healSequence) healBucket(objAPI ObjectLayer, bucket string, bucketsOnly return nil } - if !h.settings.Recursive { - if h.object != "" { - if err := h.healObject(bucket, h.object, "", h.settings.ScanMode); err != nil { - return err - } - } - - return nil - } - if err := objAPI.HealObjects(h.ctx, bucket, h.object, h.settings, h.healObject); err != nil { return errFnHealFromAPIErr(h.ctx, err) } diff --git a/cmd/admin-router.go b/cmd/admin-router.go index b6e0fb2e175f0..d218e475e0bcd 100644 --- a/cmd/admin-router.go +++ b/cmd/admin-router.go @@ -60,7 +60,7 @@ const ( noObjLayerFlag ) -// Has checks if the the given flag is enabled in `h`. +// Has checks if the given flag is enabled in `h`. func (h hFlag) Has(flag hFlag) bool { // Use bitwise-AND and check if the result is non-zero. 
return h&flag != 0 @@ -159,14 +159,14 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) { // Info operations adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(adminMiddleware(adminAPI.ServerInfoHandler, traceAllFlag, noObjLayerFlag)) - adminRouter.Methods(http.MethodGet, http.MethodPost).Path(adminVersion + "/inspect-data").HandlerFunc(adminMiddleware(adminAPI.InspectDataHandler, noGZFlag, traceAllFlag)) + adminRouter.Methods(http.MethodGet, http.MethodPost).Path(adminVersion + "/inspect-data").HandlerFunc(adminMiddleware(adminAPI.InspectDataHandler, noGZFlag, traceHdrsS3HFlag)) // StorageInfo operations adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(adminMiddleware(adminAPI.StorageInfoHandler, traceAllFlag)) // DataUsageInfo operations adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(adminMiddleware(adminAPI.DataUsageInfoHandler, traceAllFlag)) // Metrics operation - adminRouter.Methods(http.MethodGet).Path(adminVersion + "/metrics").HandlerFunc(adminMiddleware(adminAPI.MetricsHandler, traceAllFlag)) + adminRouter.Methods(http.MethodGet).Path(adminVersion + "/metrics").HandlerFunc(adminMiddleware(adminAPI.MetricsHandler, traceHdrsS3HFlag)) if globalIsDistErasure || globalIsErasure { // Heal operations @@ -193,9 +193,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) { // Profiling operations - deprecated API adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(adminMiddleware(adminAPI.StartProfilingHandler, traceAllFlag, noObjLayerFlag)). Queries("profilerType", "{profilerType:.*}") - adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(adminMiddleware(adminAPI.DownloadProfilingHandler, traceAllFlag, noObjLayerFlag)) + adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(adminMiddleware(adminAPI.DownloadProfilingHandler, traceHdrsS3HFlag, noObjLayerFlag)) // Profiling operations - adminRouter.Methods(http.MethodPost).Path(adminVersion + "/profile").HandlerFunc(adminMiddleware(adminAPI.ProfileHandler, traceAllFlag, noObjLayerFlag)) + adminRouter.Methods(http.MethodPost).Path(adminVersion + "/profile").HandlerFunc(adminMiddleware(adminAPI.ProfileHandler, traceHdrsS3HFlag, noObjLayerFlag)) // Config KV operations. 
if enableConfigOps { @@ -244,6 +244,10 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) { // STS accounts ops adminRouter.Methods(http.MethodGet).Path(adminVersion+"/temporary-account-info").HandlerFunc(adminMiddleware(adminAPI.TemporaryAccountInfo)).Queries("accessKey", "{accessKey:.*}") + // Access key (service account/STS) operations + adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-access-keys-bulk").HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysBulk)).Queries("listType", "{listType:.*}") + adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-access-key").HandlerFunc(adminMiddleware(adminAPI.InfoAccessKey)).Queries("accessKey", "{accessKey:.*}") + // Info policy IAM latest adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(adminMiddleware(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}") // List policies latest @@ -290,8 +294,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) { // Import IAM info adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-iam").HandlerFunc(adminMiddleware(adminAPI.ImportIAM, noGZFlag)) + adminRouter.Methods(http.MethodPut).Path(adminVersion + "/import-iam-v2").HandlerFunc(adminMiddleware(adminAPI.ImportIAMV2, noGZFlag)) - // IDentity Provider configuration APIs + // Identity Provider configuration APIs adminRouter.Methods(http.MethodPut).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.AddIdentityProviderCfg)) adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp-config/{type}/{name}").HandlerFunc(adminMiddleware(adminAPI.UpdateIdentityProviderCfg)) adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp-config/{type}").HandlerFunc(adminMiddleware(adminAPI.ListIdentityProviderCfg)) @@ -301,12 +306,18 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) { // LDAP specific service accounts ops adminRouter.Methods(http.MethodPut).Path(adminVersion + "/idp/ldap/add-service-account").HandlerFunc(adminMiddleware(adminAPI.AddServiceAccountLDAP)) adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/ldap/list-access-keys"). - HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAP)). - Queries("userDN", "{userDN:.*}", "listType", "{listType:.*}") + HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAP)).Queries("userDN", "{userDN:.*}", "listType", "{listType:.*}") + adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/ldap/list-access-keys-bulk"). + HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAPBulk)).Queries("listType", "{listType:.*}") // LDAP IAM operations adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp/ldap/policy-entities").HandlerFunc(adminMiddleware(adminAPI.ListLDAPPolicyMappingEntities)) adminRouter.Methods(http.MethodPost).Path(adminVersion + "/idp/ldap/policy/{operation}").HandlerFunc(adminMiddleware(adminAPI.AttachDetachPolicyLDAP)) + + // OpenID specific service accounts ops + adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/openid/list-access-keys-bulk"). 
+ HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysOpenIDBulk)).Queries("listType", "{listType:.*}") + // -- END IAM APIs -- // GetBucketQuotaConfig @@ -340,6 +351,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) { adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-jobs").HandlerFunc( adminMiddleware(adminAPI.ListBatchJobs)) + adminRouter.Methods(http.MethodGet).Path(adminVersion + "/status-job").HandlerFunc( + adminMiddleware(adminAPI.BatchJobStatus)) + adminRouter.Methods(http.MethodGet).Path(adminVersion + "/describe-job").HandlerFunc( adminMiddleware(adminAPI.DescribeBatchJob)) adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/cancel-job").HandlerFunc( @@ -416,6 +430,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) { // -- Health API -- adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo"). HandlerFunc(adminMiddleware(adminAPI.HealthInfoHandler)) + + // STS Revocation + adminRouter.Methods(http.MethodPost).Path(adminVersion + "/revoke-tokens/{userProvider}").HandlerFunc(adminMiddleware(adminAPI.RevokeTokens)) } // If none of the routes match add default error handler routes diff --git a/cmd/admin-server-info.go b/cmd/admin-server-info.go index 7e57cb652cc22..4a98f9ba6663e 100644 --- a/cmd/admin-server-info.go +++ b/cmd/admin-server-info.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2021 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. // // This file is part of MinIO Object Storage stack // @@ -18,7 +18,6 @@ package cmd import ( - "context" "math" "net/http" "os" @@ -31,7 +30,7 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/config" "github.com/minio/minio/internal/kms" - "github.com/minio/minio/internal/logger" + xnet "github.com/minio/pkg/v3/net" ) // getLocalServerProperty - returns madmin.ServerProperties for only the @@ -65,9 +64,11 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req if err := isServerResolvable(endpoint, 5*time.Second); err == nil { network[nodeName] = string(madmin.ItemOnline) } else { - network[nodeName] = string(madmin.ItemOffline) - // log once the error - logger.LogOnceIf(context.Background(), err, nodeName) + if xnet.IsNetworkOrHostDown(err, false) { + network[nodeName] = string(madmin.ItemOffline) + } else if xnet.IsNetworkOrHostDown(err, true) { + network[nodeName] = "connection attempt timedout" + } } } } diff --git a/cmd/api-datatypes.go b/cmd/api-datatypes.go index b86b15a204693..cc3bcb1c04420 100644 --- a/cmd/api-datatypes.go +++ b/cmd/api-datatypes.go @@ -32,6 +32,8 @@ type DeletedObject struct { DeleteMarkerMTime DeleteMarkerMTime `xml:"-"` // MinIO extensions to support delete marker replication ReplicationState ReplicationState `xml:"-"` + + found bool // the object was found during deletion } // DeleteMarkerMTime is an embedded type containing time.Time for XML marshal @@ -42,10 +44,10 @@ type DeleteMarkerMTime struct { // MarshalXML encodes expiration date if it is non-zero and encodes // empty string otherwise func (t DeleteMarkerMTime) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { - if t.Time.IsZero() { + if t.IsZero() { return nil } - return e.EncodeElement(t.Time.Format(time.RFC3339), startElement) + return e.EncodeElement(t.Format(time.RFC3339), startElement) } // ObjectV object version key/versionId @@ -67,7 +69,7 @@ type ObjectToDelete struct { ReplicateDecisionStr string `xml:"-"` } -// createBucketConfiguration container for bucket configuration request from client. 
+// createBucketLocationConfiguration container for bucket configuration request from client. // Used for parsing the location from the request body for Makebucket. type createBucketLocationConfiguration struct { XMLName xml.Name `xml:"CreateBucketConfiguration" json:"-"` diff --git a/cmd/api-errors.go b/cmd/api-errors.go index 6d8b1e8177352..6ccd5fab2c27a 100644 --- a/cmd/api-errors.go +++ b/cmd/api-errors.go @@ -28,7 +28,7 @@ import ( "strconv" "strings" - "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/minio/minio/internal/ioutil" "google.golang.org/api/googleapi" @@ -48,7 +48,7 @@ import ( levent "github.com/minio/minio/internal/config/lambda/event" "github.com/minio/minio/internal/event" "github.com/minio/minio/internal/hash" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) // APIError structure @@ -56,19 +56,23 @@ type APIError struct { Code string Description string HTTPStatusCode int + ObjectSize string + RangeRequested string } // APIErrorResponse - error response format type APIErrorResponse struct { - XMLName xml.Name `xml:"Error" json:"-"` - Code string - Message string - Key string `xml:"Key,omitempty" json:"Key,omitempty"` - BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"` - Resource string - Region string `xml:"Region,omitempty" json:"Region,omitempty"` - RequestID string `xml:"RequestId" json:"RequestId"` - HostID string `xml:"HostId" json:"HostId"` + XMLName xml.Name `xml:"Error" json:"-"` + Code string + Message string + Key string `xml:"Key,omitempty" json:"Key,omitempty"` + BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"` + Resource string + Region string `xml:"Region,omitempty" json:"Region,omitempty"` + RequestID string `xml:"RequestId" json:"RequestId"` + HostID string `xml:"HostId" json:"HostId"` + ActualObjectSize string `xml:"ActualObjectSize,omitempty" json:"ActualObjectSize,omitempty"` + RangeRequested string `xml:"RangeRequested,omitempty" json:"RangeRequested,omitempty"` } // APIErrorCode type of error status. @@ -209,6 +213,10 @@ const ( ErrPolicyAlreadyAttached ErrPolicyNotAttached ErrExcessData + ErrPolicyInvalidName + ErrNoTokenRevokeType + ErrAdminOpenIDNotEnabled + ErrAdminNoSuchAccessKey // Add new error codes here. 
// SSE-S3/SSE-KMS related API errors @@ -263,6 +271,7 @@ const ( ErrInvalidResourceName ErrInvalidLifecycleQueryParameter ErrServerNotInitialized + ErrBucketMetadataNotInitialized ErrRequestTimedout ErrClientDisconnected ErrTooManyRequests @@ -278,9 +287,11 @@ const ( ErrMalformedJSON ErrAdminNoSuchUser ErrAdminNoSuchUserLDAPWarn + ErrAdminLDAPExpectedLoginName ErrAdminNoSuchGroup ErrAdminGroupNotEmpty ErrAdminGroupDisabled + ErrAdminInvalidGroupName ErrAdminNoSuchJob ErrAdminNoSuchPolicy ErrAdminPolicyChangeAlreadyApplied @@ -300,6 +311,7 @@ const ( ErrAdminConfigIDPCfgNameDoesNotExist ErrInsecureClientRequest ErrObjectTampered + ErrAdminLDAPNotEnabled // Site-Replication errors ErrSiteReplicationInvalidRequest @@ -418,6 +430,7 @@ const ( ErrAdminProfilerNotEnabled ErrInvalidDecompressedSize ErrAddUserInvalidArgument + ErrAddUserValidUTF ErrAdminResourceInvalidArgument ErrAdminAccountNotEligible ErrAccountNotEligible @@ -436,6 +449,8 @@ const ( ErrAdminNoAccessKey ErrAdminNoSecretKey + ErrIAMNotInitialized + apiErrCodeEnd // This is used only for the testing code ) @@ -449,9 +464,9 @@ func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError if err != nil { apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err) } - if globalSite.Region != "" { + if region := globalSite.Region(); region != "" { if errCode == ErrAuthorizationHeaderMalformed { - apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region) + apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", region) return apiErr } } @@ -550,6 +565,16 @@ var errorCodes = errorCodeMap{ Description: "More data provided than indicated content length", HTTPStatusCode: http.StatusBadRequest, }, + ErrPolicyInvalidName: { + Code: "PolicyInvalidName", + Description: "Policy name may not contain comma", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrAdminOpenIDNotEnabled: { + Code: "OpenIDNotEnabled", + Description: "No enabled OpenID Connect identity providers", + HTTPStatusCode: http.StatusBadRequest, + }, ErrPolicyTooLarge: { Code: "PolicyTooLarge", Description: "Policy exceeds the maximum allowed document size.", @@ -612,7 +637,7 @@ var errorCodes = errorCodeMap{ }, ErrMissingContentMD5: { Code: "MissingContentMD5", - Description: "Missing required header for this request: Content-Md5.", + Description: "Missing or invalid required header for this request: Content-Md5 or Amz-Content-Checksum", HTTPStatusCode: http.StatusBadRequest, }, ErrMissingSecurityHeader: { @@ -958,7 +983,7 @@ var errorCodes = errorCodeMap{ ErrReplicationRemoteConnectionError: { Code: "XMinioAdminReplicationRemoteConnectionError", Description: "Remote service connection error", - HTTPStatusCode: http.StatusNotFound, + HTTPStatusCode: http.StatusServiceUnavailable, }, ErrReplicationBandwidthLimitError: { Code: "XMinioAdminReplicationBandwidthLimitError", @@ -967,7 +992,7 @@ var errorCodes = errorCodeMap{ }, ErrReplicationNoExistingObjects: { Code: "XMinioReplicationNoExistingObjects", - Description: "No matching ExistingsObjects rule enabled", + Description: "No matching ExistingObjects rule enabled", HTTPStatusCode: http.StatusBadRequest, }, ErrRemoteTargetDenyAddError: { @@ -1247,6 +1272,16 @@ var errorCodes = errorCodeMap{ Description: "The security token included in the request is invalid", HTTPStatusCode: http.StatusForbidden, }, + ErrNoTokenRevokeType: { + Code: "InvalidArgument", + Description: "No 
token revoke type specified and one could not be inferred from the request", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrAdminNoSuchAccessKey: { + Code: "XMinioAdminNoSuchAccessKey", + Description: "The specified access key does not exist.", + HTTPStatusCode: http.StatusNotFound, + }, // S3 extensions. ErrContentSHA256Mismatch: { @@ -1293,7 +1328,17 @@ var errorCodes = errorCodeMap{ }, ErrServerNotInitialized: { Code: "XMinioServerNotInitialized", - Description: "Server not initialized, please try again.", + Description: "Server not initialized yet, please try again.", + HTTPStatusCode: http.StatusServiceUnavailable, + }, + ErrIAMNotInitialized: { + Code: "XMinioIAMNotInitialized", + Description: "IAM sub-system not initialized yet, please try again.", + HTTPStatusCode: http.StatusServiceUnavailable, + }, + ErrBucketMetadataNotInitialized: { + Code: "XMinioBucketMetadataNotInitialized", + Description: "Bucket metadata not initialized yet, please try again.", HTTPStatusCode: http.StatusServiceUnavailable, }, ErrMalformedJSON: { @@ -1465,8 +1510,8 @@ var errorCodes = errorCodeMap{ }, ErrTooManyRequests: { Code: "TooManyRequests", - Description: "Deadline exceeded while waiting in incoming queue, please reduce your request rate", - HTTPStatusCode: http.StatusServiceUnavailable, + Description: "Please reduce your request rate", + HTTPStatusCode: http.StatusTooManyRequests, }, ErrUnsupportedMetadata: { Code: "InvalidArgument", @@ -2079,7 +2124,26 @@ var errorCodes = errorCodeMap{ Description: "Invalid attribute name specified.", HTTPStatusCode: http.StatusBadRequest, }, - // Add your error structure here. + ErrAdminLDAPNotEnabled: { + Code: "XMinioLDAPNotEnabled", + Description: "LDAP is not enabled. LDAP must be enabled to make LDAP requests.", + HTTPStatusCode: http.StatusNotImplemented, + }, + ErrAdminLDAPExpectedLoginName: { + Code: "XMinioLDAPExpectedLoginName", + Description: "Expected LDAP short username but was given full DN.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrAdminInvalidGroupName: { + Code: "XMinioInvalidGroupName", + Description: "The group name is invalid.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrAddUserValidUTF: { + Code: "XMinioInvalidUTF", + Description: "Invalid UTF-8 character detected.", + HTTPStatusCode: http.StatusBadRequest, + }, } // toAPIErrorCode - Converts embedded errors. 
Convenience @@ -2115,10 +2179,14 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) { apiErr = ErrAdminNoSuchUserLDAPWarn case errNoSuchServiceAccount: apiErr = ErrAdminServiceAccountNotFound + case errNoSuchAccessKey: + apiErr = ErrAdminNoSuchAccessKey case errNoSuchGroup: apiErr = ErrAdminNoSuchGroup case errGroupNotEmpty: apiErr = ErrAdminGroupNotEmpty + case errGroupNameContainsReservedChars: + apiErr = ErrAdminInvalidGroupName case errNoSuchJob: apiErr = ErrAdminNoSuchJob case errNoPolicyToAttachOrDetach: @@ -2133,6 +2201,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) { apiErr = ErrEntityTooSmall case errAuthentication: apiErr = ErrAccessDenied + case auth.ErrContainsReservedChars: + apiErr = ErrAdminInvalidAccessKey case auth.ErrInvalidAccessKeyLength: apiErr = ErrAdminInvalidAccessKey case auth.ErrInvalidSecretKeyLength: @@ -2200,6 +2270,12 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) { apiErr = ErrInvalidMaxParts case ioutil.ErrOverread: apiErr = ErrExcessData + case errServerNotInitialized: + apiErr = ErrServerNotInitialized + case errBucketMetadataNotInitialized: + apiErr = ErrBucketMetadataNotInitialized + case hash.ErrInvalidChecksum: + apiErr = ErrInvalidChecksum } // Compression errors @@ -2391,10 +2467,9 @@ func toAPIError(ctx context.Context, err error) APIError { apiErr := errorCodes.ToAPIErr(toAPIErrorCode(ctx, err)) switch apiErr.Code { case "NotImplemented": - desc := fmt.Sprintf("%s (%v)", apiErr.Description, err) apiErr = APIError{ Code: apiErr.Code, - Description: desc, + Description: fmt.Sprintf("%s (%v)", apiErr.Description, err), HTTPStatusCode: apiErr.HTTPStatusCode, } case "XMinioBackendDown": @@ -2406,12 +2481,24 @@ func toAPIError(ctx context.Context, err error) APIError { switch e := err.(type) { case kms.Error: apiErr = APIError{ - Description: e.Err.Error(), Code: e.APICode, - HTTPStatusCode: e.HTTPStatusCode, + Description: e.Err, + HTTPStatusCode: e.Code, } case batchReplicationJobError: - apiErr = APIError(e) + apiErr = APIError{ + Description: e.Description, + Code: e.Code, + HTTPStatusCode: e.HTTPStatusCode, + } + case InvalidRange: + apiErr = APIError{ + Code: "InvalidRange", + Description: e.Error(), + HTTPStatusCode: errorCodes[ErrInvalidRange].HTTPStatusCode, + ObjectSize: strconv.FormatInt(e.ResourceSize, 10), + RangeRequested: fmt.Sprintf("%d-%d", e.OffsetBegin, e.OffsetEnd), + } case InvalidArgument: apiErr = APIError{ Code: "InvalidArgument", @@ -2490,11 +2577,11 @@ func toAPIError(ctx context.Context, err error) APIError { if len(e.Errors) >= 1 { apiErr.Code = e.Errors[0].Reason } - case azblob.StorageError: + case *azcore.ResponseError: apiErr = APIError{ - Code: string(e.ServiceCode()), + Code: e.ErrorCode, Description: e.Error(), - HTTPStatusCode: e.Response().StatusCode, + HTTPStatusCode: e.StatusCode, } // Add more other SDK related errors here if any in future. default: @@ -2519,7 +2606,7 @@ func toAPIError(ctx context.Context, err error) APIError { // Make sure to log the errors which we cannot translate // to a meaningful S3 API errors. This is added to aid in // debugging unexpected/unhandled errors. 
- logger.LogIf(ctx, err) + internalLogIf(ctx, err) } return apiErr @@ -2533,18 +2620,20 @@ func getAPIError(code APIErrorCode) APIError { return errorCodes.ToAPIErr(ErrInternalError) } -// getErrorResponse gets in standard error and resource value and +// getAPIErrorResponse gets in standard error and resource value and // provides a encodable populated response values func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID, hostID string) APIErrorResponse { reqInfo := logger.GetReqInfo(ctx) return APIErrorResponse{ - Code: err.Code, - Message: err.Description, - BucketName: reqInfo.BucketName, - Key: reqInfo.ObjectName, - Resource: resource, - Region: globalSite.Region, - RequestID: requestID, - HostID: hostID, + Code: err.Code, + Message: err.Description, + BucketName: reqInfo.BucketName, + Key: reqInfo.ObjectName, + Resource: resource, + Region: globalSite.Region(), + RequestID: requestID, + HostID: hostID, + ActualObjectSize: err.ObjectSize, + RangeRequested: err.RangeRequested, } } diff --git a/cmd/api-errors_test.go b/cmd/api-errors_test.go index d57817397cd40..fda913b64d6f0 100644 --- a/cmd/api-errors_test.go +++ b/cmd/api-errors_test.go @@ -18,7 +18,6 @@ package cmd import ( - "context" "errors" "testing" @@ -64,7 +63,7 @@ var toAPIErrorTests = []struct { } func TestAPIErrCode(t *testing.T) { - ctx := context.Background() + ctx := t.Context() for i, testCase := range toAPIErrorTests { errCode := toAPIErrorCode(ctx, testCase.err) if errCode != testCase.errCode { diff --git a/cmd/api-headers.go b/cmd/api-headers.go index 3936227a33e14..c2ca23fbf8937 100644 --- a/cmd/api-headers.go +++ b/cmd/api-headers.go @@ -19,9 +19,11 @@ package cmd import ( "bytes" + "context" "encoding/json" "encoding/xml" "fmt" + "mime" "net/http" "strconv" "strings" @@ -30,7 +32,6 @@ import ( "github.com/minio/minio-go/v7/pkg/tags" "github.com/minio/minio/internal/crypto" xhttp "github.com/minio/minio/internal/http" - "github.com/minio/minio/internal/logger" xxml "github.com/minio/xxml" ) @@ -54,7 +55,7 @@ func setCommonHeaders(w http.ResponseWriter) { // Set `x-amz-bucket-region` only if region is set on the server // by default minio uses an empty region. - if region := globalSite.Region; region != "" { + if region := globalSite.Region(); region != "" { w.Header().Set(xhttp.AmzBucketRegion, region) } w.Header().Set(xhttp.AcceptRanges, "bytes") @@ -64,11 +65,11 @@ func setCommonHeaders(w http.ResponseWriter) { } // Encodes the response headers into XML format. -func encodeResponse(response interface{}) []byte { +func encodeResponse(response any) []byte { var buf bytes.Buffer buf.WriteString(xml.Header) if err := xml.NewEncoder(&buf).Encode(response); err != nil { - logger.LogIf(GlobalContext, err) + bugLogIf(GlobalContext, err) return nil } return buf.Bytes() @@ -82,18 +83,18 @@ func encodeResponse(response interface{}) []byte { // Do not use this function for anything other than ListObjects() // variants, please open a github discussion if you wish to use // this in other places. -func encodeResponseList(response interface{}) []byte { +func encodeResponseList(response any) []byte { var buf bytes.Buffer buf.WriteString(xxml.Header) if err := xxml.NewEncoder(&buf).Encode(response); err != nil { - logger.LogIf(GlobalContext, err) + bugLogIf(GlobalContext, err) return nil } return buf.Bytes() } // Encodes the response headers into JSON format. 
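With the InvalidRange mapping and the getAPIErrorResponse change above, range errors now carry ActualObjectSize and RangeRequested through to the XML error body. The sketch below is illustrative only: it marshals a trimmed, stand-in copy of the extended response fields with made-up values, just to show the shape of the resulting document; the real type is APIErrorResponse in cmd/api-errors.go.

package main

import (
	"encoding/xml"
	"fmt"
)

// errorResponse is a trimmed, illustrative stand-in for the extended
// APIErrorResponse fields shown in the diff above.
type errorResponse struct {
	XMLName          xml.Name `xml:"Error"`
	Code             string
	Message          string
	Resource         string
	RequestID        string `xml:"RequestId"`
	ActualObjectSize string `xml:"ActualObjectSize,omitempty"`
	RangeRequested   string `xml:"RangeRequested,omitempty"`
}

func main() {
	out, _ := xml.MarshalIndent(errorResponse{
		Code:             "InvalidRange",
		Message:          "The requested range is not satisfiable",
		Resource:         "/testbucket/object", // hypothetical resource
		RequestID:        "17E3999D521B8E4E",    // hypothetical request ID
		ActualObjectSize: "1024",
		RangeRequested:   "2048-4096",
	}, "", "  ")
	fmt.Println(string(out))
}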
-func encodeResponseJSON(response interface{}) []byte { +func encodeResponseJSON(response any) []byte { var bytesBuffer bytes.Buffer e := json.NewEncoder(&bytesBuffer) e.Encode(response) @@ -108,7 +109,7 @@ func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) { } // Write object header -func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) { +func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) { // set common headers setCommonHeaders(w) @@ -136,7 +137,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp // Set tag count if object has tags if len(objInfo.UserTags) > 0 { tags, _ := tags.ParseObjectTags(objInfo.UserTags) - if tags.Count() > 0 { + if tags != nil && tags.Count() > 0 { w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(tags.Count())} if opts.Tagging { // This is MinIO only extension to return back tags along with the count. @@ -168,6 +169,32 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp if !stringsHasPrefixFold(k, userMetadataPrefix) { continue } + // check the doc https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html + // For metadata values like "ö", "ÄMÄZÕÑ S3", and "öha, das sollte eigentlich + // funktionieren", tested against a real AWS S3 bucket, S3 may encode incorrectly. For + // example, "ö" was encoded as =?UTF-8?B?w4PCtg==?=, producing invalid UTF-8 instead + // of =?UTF-8?B?w7Y=?=. This mirrors errors like the ä½ in another string. + // + // S3 uses B-encoding (Base64) for non-ASCII-heavy metadata and Q-encoding + // (quoted-printable) for mostly ASCII strings. Long strings are split at word + // boundaries to fit RFC 2047’s 75-character limit, ensuring HTTP parser + // compatibility. + // + // However, this splitting increases header size and can introduce errors, unlike Go’s + // mime package in MinIO, which correctly encodes strings with fixed B/Q encodings, + // avoiding S3’s heuristic-driven issues. + // + // For MinIO developers, decode S3 metadata with mime.WordDecoder, validate outputs, + // report encoding bugs to AWS, and use ASCII-only metadata to ensure reliable S3 API + // compatibility. + if needsMimeEncoding(v) { + // see https://github.com/golang/go/blob/release-branch.go1.24/src/net/mail/message.go#L325 + if strings.ContainsAny(v, "\"#$%&'(),.:;<>@[]^`{|}~") { + v = mime.BEncoding.Encode("UTF-8", v) + } else { + v = mime.QEncoding.Encode("UTF-8", v) + } + } w.Header()[strings.ToLower(k)] = []string{v} isSet = true break @@ -213,7 +240,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp if objInfo.IsRemote() { // Check if object is being restored. For more information on x-amz-restore header see // https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html#API_HeadObject_ResponseSyntax - w.Header()[xhttp.AmzStorageClass] = []string{objInfo.TransitionedObject.Tier} + w.Header()[xhttp.AmzStorageClass] = []string{filterStorageClass(ctx, objInfo.TransitionedObject.Tier)} } if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil { @@ -229,3 +256,14 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp return nil } + +// needsEncoding reports whether s contains any bytes that need to be encoded. 
+// see mime.needsEncoding +func needsMimeEncoding(s string) bool { + for _, b := range s { + if (b < ' ' || b > '~') && b != '\t' { + return true + } + } + return false +} diff --git a/cmd/api-headers_test.go b/cmd/api-headers_test.go index 3c3030f09bbb3..9db65460e9bc6 100644 --- a/cmd/api-headers_test.go +++ b/cmd/api-headers_test.go @@ -34,7 +34,8 @@ func TestNewRequestID(t *testing.T) { e = char // Ensure that it is alphanumeric, in this case, between 0-9 and A-Z. - if !(('0' <= e && e <= '9') || ('A' <= e && e <= 'Z')) { + isAlnum := ('0' <= e && e <= '9') || ('A' <= e && e <= 'Z') + if !isAlnum { t.Fail() } } diff --git a/cmd/api-resources.go b/cmd/api-resources.go index b77d1a09c872e..11cf53ad674e5 100644 --- a/cmd/api-resources.go +++ b/cmd/api-resources.go @@ -31,7 +31,7 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, var err error if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil { errCode = ErrInvalidMaxKeys - return + return prefix, marker, delimiter, maxkeys, encodingType, errCode } } else { maxkeys = maxObjectList @@ -41,7 +41,7 @@ func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, marker = values.Get("marker") delimiter = values.Get("delimiter") encodingType = values.Get("encoding-type") - return + return prefix, marker, delimiter, maxkeys, encodingType, errCode } func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType, versionIDMarker string, errCode APIErrorCode) { @@ -51,7 +51,7 @@ func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimit var err error if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil { errCode = ErrInvalidMaxKeys - return + return prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode } } else { maxkeys = maxObjectList @@ -62,7 +62,7 @@ func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimit delimiter = values.Get("delimiter") encodingType = values.Get("encoding-type") versionIDMarker = values.Get("version-id-marker") - return + return prefix, marker, delimiter, maxkeys, encodingType, versionIDMarker, errCode } // Parse bucket url queries for ListObjects V2. 
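The metadata-encoding change in cmd/api-headers.go above leans on Go's standard mime package to emit RFC 2047 encoded-words for non-ASCII user metadata, choosing B-encoding when the value contains RFC 2047 "special" characters and Q-encoding otherwise. A minimal, self-contained sketch of that selection logic and the round-trip through mime.WordDecoder is shown below; the sample values are illustrative only.

package main

import (
	"fmt"
	"mime"
	"strings"
)

// encodeMetadataValue mirrors the selection logic used above: B-encoding when
// the value contains characters that are special in RFC 2047 Q-encoding,
// Q-encoding otherwise.
func encodeMetadataValue(v string) string {
	if strings.ContainsAny(v, "\"#$%&'(),.:;<>@[]^`{|}~") {
		return mime.BEncoding.Encode("UTF-8", v)
	}
	return mime.QEncoding.Encode("UTF-8", v)
}

func main() {
	for _, v := range []string{"ö", "öha, das sollte eigentlich funktionieren"} {
		enc := encodeMetadataValue(v)
		dec, err := new(mime.WordDecoder).DecodeHeader(enc)
		fmt.Printf("raw=%q encoded=%q decoded=%q err=%v\n", v, enc, dec, err)
	}
}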
@@ -73,7 +73,7 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit if val, ok := values["continuation-token"]; ok { if len(val[0]) == 0 { errCode = ErrIncorrectContinuationToken - return + return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode } } @@ -81,7 +81,7 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit var err error if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil { errCode = ErrInvalidMaxKeys - return + return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode } } else { maxkeys = maxObjectList @@ -97,11 +97,11 @@ func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimit decodedToken, err := base64.StdEncoding.DecodeString(token) if err != nil { errCode = ErrIncorrectContinuationToken - return + return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode } token = string(decodedToken) } - return + return prefix, token, startAfter, delimiter, fetchOwner, maxkeys, encodingType, errCode } // Parse bucket url queries for ?uploads @@ -112,7 +112,7 @@ func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadID var err error if maxUploads, err = strconv.Atoi(values.Get("max-uploads")); err != nil { errCode = ErrInvalidMaxUploads - return + return prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType, errCode } } else { maxUploads = maxUploadsList @@ -123,7 +123,7 @@ func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadID uploadIDMarker = values.Get("upload-id-marker") delimiter = values.Get("delimiter") encodingType = values.Get("encoding-type") - return + return prefix, keyMarker, uploadIDMarker, delimiter, maxUploads, encodingType, errCode } // Parse object url queries @@ -134,7 +134,7 @@ func getObjectResources(values url.Values) (uploadID string, partNumberMarker, m if values.Get("max-parts") != "" { if maxParts, err = strconv.Atoi(values.Get("max-parts")); err != nil { errCode = ErrInvalidMaxParts - return + return uploadID, partNumberMarker, maxParts, encodingType, errCode } } else { maxParts = maxPartsList @@ -143,11 +143,11 @@ func getObjectResources(values url.Values) (uploadID string, partNumberMarker, m if values.Get("part-number-marker") != "" { if partNumberMarker, err = strconv.Atoi(values.Get("part-number-marker")); err != nil { errCode = ErrInvalidPartNumberMarker - return + return uploadID, partNumberMarker, maxParts, encodingType, errCode } } uploadID = values.Get("uploadId") encodingType = values.Get("encoding-type") - return + return uploadID, partNumberMarker, maxParts, encodingType, errCode } diff --git a/cmd/api-response.go b/cmd/api-response.go index fb2ce4f91f34e..8d58c6e4254ec 100644 --- a/cmd/api-response.go +++ b/cmd/api-response.go @@ -35,7 +35,7 @@ import ( "github.com/minio/minio/internal/hash" xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" xxml "github.com/minio/xxml" ) @@ -166,10 +166,11 @@ type Part struct { Size int64 // Checksum values - ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"` - ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"` - ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"` - ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"` + ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"` + ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"` + ChecksumSHA1 string 
`xml:"ChecksumSHA1,omitempty"` + ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"` + ChecksumCRC64NVME string `xml:",omitempty"` } // ListPartsResponse - format for list parts response. @@ -192,6 +193,8 @@ type ListPartsResponse struct { IsTruncated bool ChecksumAlgorithm string + ChecksumType string + // List of parts. Parts []Part `xml:"Part"` } @@ -413,10 +416,11 @@ type CompleteMultipartUploadResponse struct { Key string ETag string - ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"` - ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"` - ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"` - ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"` + ChecksumCRC32 string `xml:"ChecksumCRC32,omitempty"` + ChecksumCRC32C string `xml:"ChecksumCRC32C,omitempty"` + ChecksumSHA1 string `xml:"ChecksumSHA1,omitempty"` + ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"` + ChecksumCRC64NVME string `xml:",omitempty"` } // DeleteError structure. @@ -516,7 +520,6 @@ func cleanReservedKeys(metadata map[string]string) map[string]string { } case crypto.SSEC: m[xhttp.AmzServerSideEncryptionCustomerAlgorithm] = xhttp.AmzEncryptionAES - } var toRemove []string @@ -544,7 +547,7 @@ func cleanReservedKeys(metadata map[string]string) map[string]string { } // generates an ListBucketVersions response for the said bucket with other enumerated options. -func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo, metadata metaCheckFn) ListVersionsResponse { +func generateListVersionsResponse(ctx context.Context, bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo, metadata metaCheckFn) ListVersionsResponse { versions := make([]ObjectVersion, 0, len(resp.Objects)) owner := &Owner{ @@ -573,7 +576,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim } content.Size = object.Size if object.StorageClass != "" { - content.StorageClass = object.StorageClass + content.StorageClass = filterStorageClass(ctx, object.StorageClass) } else { content.StorageClass = globalMinioDefaultStorageClass } @@ -593,8 +596,6 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim for k, v := range cleanReservedKeys(object.UserDefined) { content.UserMetadata.Set(k, v) } - - content.UserMetadata.Set("expires", object.Expires.Format(http.TimeFormat)) content.Internal = &ObjectInternalInfo{ K: object.DataBlocks, M: object.ParityBlocks, @@ -634,7 +635,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim } // generates an ListObjectsV1 response for the said bucket with other enumerated options. 
-func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse { +func generateListObjectsV1Response(ctx context.Context, bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse { contents := make([]Object, 0, len(resp.Objects)) owner := &Owner{ ID: globalMinioDefaultOwnerID, @@ -654,7 +655,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy } content.Size = object.Size if object.StorageClass != "" { - content.StorageClass = object.StorageClass + content.StorageClass = filterStorageClass(ctx, object.StorageClass) } else { content.StorageClass = globalMinioDefaultStorageClass } @@ -683,7 +684,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy } // generates an ListObjectsV2 response for the said bucket with other enumerated options. -func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata metaCheckFn) ListObjectsV2Response { +func generateListObjectsV2Response(ctx context.Context, bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata metaCheckFn) ListObjectsV2Response { contents := make([]Object, 0, len(objects)) var owner *Owner if fetchOwner { @@ -707,7 +708,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, } content.Size = object.Size if object.StorageClass != "" { - content.StorageClass = object.StorageClass + content.StorageClass = filterStorageClass(ctx, object.StorageClass) } else { content.StorageClass = globalMinioDefaultStorageClass } @@ -729,7 +730,6 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, for k, v := range cleanReservedKeys(object.UserDefined) { content.UserMetadata.Set(k, v) } - content.UserMetadata.Set("expires", object.Expires.Format(http.TimeFormat)) content.Internal = &ObjectInternalInfo{ K: object.DataBlocks, M: object.ParityBlocks, @@ -789,18 +789,19 @@ func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) Initi } // generates CompleteMultipartUploadResponse for given bucket, key, location and ETag. -func generateCompleteMultpartUploadResponse(bucket, key, location string, oi ObjectInfo) CompleteMultipartUploadResponse { - cs := oi.decryptChecksums(0) +func generateCompleteMultipartUploadResponse(bucket, key, location string, oi ObjectInfo, h http.Header) CompleteMultipartUploadResponse { + cs, _ := oi.decryptChecksums(0, h) c := CompleteMultipartUploadResponse{ Location: location, Bucket: bucket, Key: key, // AWS S3 quotes the ETag in XML, make sure we are compatible here. 
- ETag: "\"" + oi.ETag + "\"", - ChecksumSHA1: cs[hash.ChecksumSHA1.String()], - ChecksumSHA256: cs[hash.ChecksumSHA256.String()], - ChecksumCRC32: cs[hash.ChecksumCRC32.String()], - ChecksumCRC32C: cs[hash.ChecksumCRC32C.String()], + ETag: "\"" + oi.ETag + "\"", + ChecksumSHA1: cs[hash.ChecksumSHA1.String()], + ChecksumSHA256: cs[hash.ChecksumSHA256.String()], + ChecksumCRC32: cs[hash.ChecksumCRC32.String()], + ChecksumCRC32C: cs[hash.ChecksumCRC32C.String()], + ChecksumCRC64NVME: cs[hash.ChecksumCRC64NVME.String()], } return c } @@ -828,6 +829,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis listPartsResponse.IsTruncated = partsInfo.IsTruncated listPartsResponse.NextPartNumberMarker = partsInfo.NextPartNumberMarker listPartsResponse.ChecksumAlgorithm = partsInfo.ChecksumAlgorithm + listPartsResponse.ChecksumType = partsInfo.ChecksumType listPartsResponse.Parts = make([]Part, len(partsInfo.Parts)) for index, part := range partsInfo.Parts { @@ -840,6 +842,7 @@ func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) Lis newPart.ChecksumCRC32C = part.ChecksumCRC32C newPart.ChecksumSHA1 = part.ChecksumSHA1 newPart.ChecksumSHA256 = part.ChecksumSHA256 + newPart.ChecksumCRC64NVME = part.ChecksumCRC64NVME listPartsResponse.Parts[index] = newPart } return listPartsResponse @@ -886,12 +889,18 @@ func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, err } func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) { + // Don't write a response if one has already been written. + // Fixes https://github.com/minio/minio/issues/21633 + if headersAlreadyWritten(w) { + return + } + if statusCode == 0 { statusCode = 200 } // Similar check to http.checkWriteHeaderCode if statusCode < 100 || statusCode > 999 { - logger.LogIf(context.Background(), fmt.Errorf("invalid WriteHeader code %v", statusCode)) + bugLogIf(context.Background(), fmt.Errorf("invalid WriteHeader code %v", statusCode)) statusCode = http.StatusInternalServerError } setCommonHeaders(w) @@ -946,22 +955,23 @@ func writeSuccessResponseHeadersOnly(w http.ResponseWriter) { // writeErrorResponse writes error headers func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) { - if err.HTTPStatusCode == http.StatusServiceUnavailable { - // Set retry-after header to indicate user-agents to retry request after 120secs. + switch err.HTTPStatusCode { + case http.StatusServiceUnavailable, http.StatusTooManyRequests: + // Set retry-after header to indicate user-agents to retry request after 60 seconds. 
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After - w.Header().Set(xhttp.RetryAfter, "120") + w.Header().Set(xhttp.RetryAfter, "60") } switch err.Code { case "InvalidRegion": - err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region) + err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region()) case "AuthorizationHeaderMalformed": - err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region) + err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region()) } // Similar check to http.checkWriteHeaderCode if err.HTTPStatusCode < 100 || err.HTTPStatusCode > 999 { - logger.LogIf(ctx, fmt.Errorf("invalid WriteHeader code %v from %v", err.HTTPStatusCode, err.Code)) + bugLogIf(ctx, fmt.Errorf("invalid WriteHeader code %v from %v", err.HTTPStatusCode, err.Code)) err.HTTPStatusCode = http.StatusInternalServerError } @@ -1011,3 +1021,45 @@ func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, er encodedErrorResponse := encodeResponseJSON(errorResponse) writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON) } + +type unwrapper interface { + Unwrap() http.ResponseWriter +} + +// headersAlreadyWritten returns true if the headers have already been written +// to this response writer. It will unwrap the ResponseWriter if possible to try +// and find a trackingResponseWriter. +func headersAlreadyWritten(w http.ResponseWriter) bool { + for { + if trw, ok := w.(*trackingResponseWriter); ok { + return trw.headerWritten + } else if uw, ok := w.(unwrapper); ok { + w = uw.Unwrap() + } else { + return false + } + } +} + +// trackingResponseWriter wraps a ResponseWriter and notes when WriterHeader has +// been called. This allows high level request handlers to check if something +// has already sent the header. +type trackingResponseWriter struct { + http.ResponseWriter + headerWritten bool +} + +func (w *trackingResponseWriter) WriteHeader(statusCode int) { + if !w.headerWritten { + w.headerWritten = true + w.ResponseWriter.WriteHeader(statusCode) + } +} + +func (w *trackingResponseWriter) Write(b []byte) (int, error) { + return w.ResponseWriter.Write(b) +} + +func (w *trackingResponseWriter) Unwrap() http.ResponseWriter { + return w.ResponseWriter +} diff --git a/cmd/api-response_test.go b/cmd/api-response_test.go index 6736e5259bbc2..4f7891947887c 100644 --- a/cmd/api-response_test.go +++ b/cmd/api-response_test.go @@ -18,8 +18,12 @@ package cmd import ( + "io" "net/http" + "net/http/httptest" "testing" + + "github.com/klauspost/compress/gzhttp" ) // Tests object location. 
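Since ErrTooManyRequests now returns HTTP 429 and both 429 and 503 carry a Retry-After of 60 seconds (see writeErrorResponse above), clients can back off using that header. The helper below is a hypothetical client-side sketch of honoring Retry-After; the URL, retry policy, and function name are assumptions, not part of MinIO.

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// getWithRetry retries a GET when the server answers 429 or 503, sleeping for
// the Retry-After duration (falling back to the 60s default set above).
func getWithRetry(url string, attempts int) (*http.Response, error) {
	for i := 0; i < attempts; i++ {
		resp, err := http.Get(url)
		if err != nil {
			return nil, err
		}
		if resp.StatusCode != http.StatusTooManyRequests && resp.StatusCode != http.StatusServiceUnavailable {
			return resp, nil
		}
		delay := 60 * time.Second
		if s := resp.Header.Get("Retry-After"); s != "" {
			if secs, err := strconv.Atoi(s); err == nil {
				delay = time.Duration(secs) * time.Second
			}
		}
		resp.Body.Close()
		time.Sleep(delay)
	}
	return nil, fmt.Errorf("giving up on %s after %d attempts", url, attempts)
}

func main() {
	resp, err := getWithRetry("http://localhost:9000/testbucket/object", 3) // hypothetical endpoint
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}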
@@ -100,7 +104,6 @@ func TestObjectLocation(t *testing.T) { }, } for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object) if testCase.expectedLocation != gotLocation { @@ -123,3 +126,89 @@ func TestGetURLScheme(t *testing.T) { t.Errorf("Expected %s, got %s", httpsScheme, gotScheme) } } + +func TestTrackingResponseWriter(t *testing.T) { + rw := httptest.NewRecorder() + trw := &trackingResponseWriter{ResponseWriter: rw} + trw.WriteHeader(123) + if !trw.headerWritten { + t.Fatal("headerWritten was not set by WriteHeader call") + } + + _, err := trw.Write([]byte("hello")) + if err != nil { + t.Fatalf("Write unexpectedly failed: %v", err) + } + + // Check that WriteHeader and Write were called on the underlying response writer + resp := rw.Result() + if resp.StatusCode != 123 { + t.Fatalf("unexpected status: %v", resp.StatusCode) + } + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("reading response body failed: %v", err) + } + if string(body) != "hello" { + t.Fatalf("response body incorrect: %v", string(body)) + } + + // Check that Unwrap works + if trw.Unwrap() != rw { + t.Fatalf("Unwrap returned wrong result: %v", trw.Unwrap()) + } +} + +func TestHeadersAlreadyWritten(t *testing.T) { + rw := httptest.NewRecorder() + trw := &trackingResponseWriter{ResponseWriter: rw} + + if headersAlreadyWritten(trw) { + t.Fatal("headers have not been written yet") + } + + trw.WriteHeader(123) + if !headersAlreadyWritten(trw) { + t.Fatal("headers were written") + } +} + +func TestHeadersAlreadyWrittenWrapped(t *testing.T) { + rw := httptest.NewRecorder() + trw := &trackingResponseWriter{ResponseWriter: rw} + wrap1 := &gzhttp.NoGzipResponseWriter{ResponseWriter: trw} + wrap2 := &gzhttp.NoGzipResponseWriter{ResponseWriter: wrap1} + + if headersAlreadyWritten(wrap2) { + t.Fatal("headers have not been written yet") + } + + wrap2.WriteHeader(123) + if !headersAlreadyWritten(wrap2) { + t.Fatal("headers were written") + } +} + +func TestWriteResponseHeadersNotWritten(t *testing.T) { + rw := httptest.NewRecorder() + trw := &trackingResponseWriter{ResponseWriter: rw} + + writeResponse(trw, 299, []byte("hello"), "application/foo") + + resp := rw.Result() + if resp.StatusCode != 299 { + t.Fatal("response wasn't written") + } +} + +func TestWriteResponseHeadersWritten(t *testing.T) { + rw := httptest.NewRecorder() + rw.Code = -1 + trw := &trackingResponseWriter{ResponseWriter: rw, headerWritten: true} + + writeResponse(trw, 200, []byte("hello"), "application/foo") + + if rw.Code != -1 { + t.Fatalf("response was written when it shouldn't have been (Code=%v)", rw.Code) + } +} diff --git a/cmd/api-router.go b/cmd/api-router.go index 6943ed423cc1a..188dd854fdf3d 100644 --- a/cmd/api-router.go +++ b/cmd/api-router.go @@ -24,7 +24,7 @@ import ( consoleapi "github.com/minio/console/api" xhttp "github.com/minio/minio/internal/http" "github.com/minio/mux" - "github.com/minio/pkg/v2/wildcard" + "github.com/minio/pkg/v3/wildcard" "github.com/rs/cors" ) @@ -64,7 +64,7 @@ func setObjectLayer(o ObjectLayer) { globalObjLayerMutex.Unlock() } -// objectAPIHandler implements and provides http handlers for S3 API. +// objectAPIHandlers implements and provides http handlers for S3 API. 
type objectAPIHandlers struct { ObjectAPI func() ObjectLayer } @@ -218,6 +218,8 @@ func s3APIMiddleware(f http.HandlerFunc, flags ...s3HFlag) http.HandlerFunc { handlerName := getHandlerName(f, "objectAPIHandlers") var handler http.HandlerFunc = func(w http.ResponseWriter, r *http.Request) { + w = &trackingResponseWriter{ResponseWriter: w} + // Wrap the actual handler with the appropriate tracing middleware. var tracedHandler http.HandlerFunc if handlerFlags.has(traceHdrsS3HFlag) { @@ -227,13 +229,13 @@ func s3APIMiddleware(f http.HandlerFunc, flags ...s3HFlag) http.HandlerFunc { } // Skip wrapping with the gzip middleware if specified. - var gzippedHandler http.HandlerFunc = tracedHandler + gzippedHandler := tracedHandler if !handlerFlags.has(noGZS3HFlag) { gzippedHandler = gzipHandler(gzippedHandler) } // Skip wrapping with throttling middleware if specified. - var throttledHandler http.HandlerFunc = gzippedHandler + throttledHandler := gzippedHandler if !handlerFlags.has(noThrottleS3HFlag) { throttledHandler = maxClients(throttledHandler) } @@ -387,6 +389,11 @@ func registerAPIRouter(router *mux.Router) { HeadersRegexp(xhttp.AmzSnowballExtract, "true"). HandlerFunc(s3APIMiddleware(api.PutObjectExtractHandler, traceHdrsS3HFlag)) + // AppendObject to be rejected + router.Methods(http.MethodPut).Path("/{object:.+}"). + HeadersRegexp(xhttp.AmzWriteOffsetBytes, ""). + HandlerFunc(s3APIMiddleware(errorResponseHandler)) + // PutObject router.Methods(http.MethodPut).Path("/{object:.+}"). HandlerFunc(s3APIMiddleware(api.PutObjectHandler, traceHdrsS3HFlag)) @@ -436,7 +443,7 @@ func registerAPIRouter(router *mux.Router) { Queries("notification", "") // ListenNotification router.Methods(http.MethodGet). - HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag)). + HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag, traceHdrsS3HFlag)). Queries("events", "{events:.*}") // ResetBucketReplicationStatus - MinIO extension API router.Methods(http.MethodGet). @@ -456,6 +463,14 @@ func registerAPIRouter(router *mux.Router) { router.Methods(http.MethodGet). HandlerFunc(s3APIMiddleware(api.GetBucketCorsHandler)). Queries("cors", "") + // PutBucketCors - this is a dummy call. + router.Methods(http.MethodPut). + HandlerFunc(s3APIMiddleware(api.PutBucketCorsHandler)). + Queries("cors", "") + // DeleteBucketCors - this is a dummy call. + router.Methods(http.MethodDelete). + HandlerFunc(s3APIMiddleware(api.DeleteBucketCorsHandler)). + Queries("cors", "") // GetBucketWebsiteHandler - this is a dummy call. router.Methods(http.MethodGet). HandlerFunc(s3APIMiddleware(api.GetBucketWebsiteHandler)). @@ -472,6 +487,7 @@ func registerAPIRouter(router *mux.Router) { router.Methods(http.MethodGet). HandlerFunc(s3APIMiddleware(api.GetBucketLoggingHandler)). Queries("logging", "") + // GetBucketTaggingHandler router.Methods(http.MethodGet). HandlerFunc(s3APIMiddleware(api.GetBucketTaggingHandler)). @@ -615,7 +631,7 @@ func registerAPIRouter(router *mux.Router) { // ListenNotification apiRouter.Methods(http.MethodGet).Path(SlashSeparator). - HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag)). + HandlerFunc(s3APIMiddleware(api.ListenNotificationHandler, noThrottleS3HFlag, traceHdrsS3HFlag)). 
Queries("events", "{events:.*}") // ListBuckets diff --git a/cmd/api-utils.go b/cmd/api-utils.go index ab191f067b770..ee8fa5335201f 100644 --- a/cmd/api-utils.go +++ b/cmd/api-utils.go @@ -43,7 +43,7 @@ func shouldEscape(c byte) bool { // - Force encoding of '~' func s3URLEncode(s string) string { spaceCount, hexCount := 0, 0 - for i := 0; i < len(s); i++ { + for i := range len(s) { c := s[i] if shouldEscape(c) { if c == ' ' { @@ -70,7 +70,7 @@ func s3URLEncode(s string) string { if hexCount == 0 { copy(t, s) - for i := 0; i < len(s); i++ { + for i := range len(s) { if s[i] == ' ' { t[i] = '+' } @@ -79,7 +79,7 @@ func s3URLEncode(s string) string { } j := 0 - for i := 0; i < len(s); i++ { + for i := range len(s) { switch c := s[i]; { case c == ' ': t[j] = '+' diff --git a/cmd/apierrorcode_string.go b/cmd/apierrorcode_string.go index 34f184e71a947..d9973d8871254 100644 --- a/cmd/apierrorcode_string.go +++ b/cmd/apierrorcode_string.go @@ -139,202 +139,212 @@ func _() { _ = x[ErrPolicyAlreadyAttached-128] _ = x[ErrPolicyNotAttached-129] _ = x[ErrExcessData-130] - _ = x[ErrInvalidEncryptionMethod-131] - _ = x[ErrInvalidEncryptionKeyID-132] - _ = x[ErrInsecureSSECustomerRequest-133] - _ = x[ErrSSEMultipartEncrypted-134] - _ = x[ErrSSEEncryptedObject-135] - _ = x[ErrInvalidEncryptionParameters-136] - _ = x[ErrInvalidEncryptionParametersSSEC-137] - _ = x[ErrInvalidSSECustomerAlgorithm-138] - _ = x[ErrInvalidSSECustomerKey-139] - _ = x[ErrMissingSSECustomerKey-140] - _ = x[ErrMissingSSECustomerKeyMD5-141] - _ = x[ErrSSECustomerKeyMD5Mismatch-142] - _ = x[ErrInvalidSSECustomerParameters-143] - _ = x[ErrIncompatibleEncryptionMethod-144] - _ = x[ErrKMSNotConfigured-145] - _ = x[ErrKMSKeyNotFoundException-146] - _ = x[ErrKMSDefaultKeyAlreadyConfigured-147] - _ = x[ErrNoAccessKey-148] - _ = x[ErrInvalidToken-149] - _ = x[ErrEventNotification-150] - _ = x[ErrARNNotification-151] - _ = x[ErrRegionNotification-152] - _ = x[ErrOverlappingFilterNotification-153] - _ = x[ErrFilterNameInvalid-154] - _ = x[ErrFilterNamePrefix-155] - _ = x[ErrFilterNameSuffix-156] - _ = x[ErrFilterValueInvalid-157] - _ = x[ErrOverlappingConfigs-158] - _ = x[ErrUnsupportedNotification-159] - _ = x[ErrContentSHA256Mismatch-160] - _ = x[ErrContentChecksumMismatch-161] - _ = x[ErrStorageFull-162] - _ = x[ErrRequestBodyParse-163] - _ = x[ErrObjectExistsAsDirectory-164] - _ = x[ErrInvalidObjectName-165] - _ = x[ErrInvalidObjectNamePrefixSlash-166] - _ = x[ErrInvalidResourceName-167] - _ = x[ErrInvalidLifecycleQueryParameter-168] - _ = x[ErrServerNotInitialized-169] - _ = x[ErrRequestTimedout-170] - _ = x[ErrClientDisconnected-171] - _ = x[ErrTooManyRequests-172] - _ = x[ErrInvalidRequest-173] - _ = x[ErrTransitionStorageClassNotFoundError-174] - _ = x[ErrInvalidStorageClass-175] - _ = x[ErrBackendDown-176] - _ = x[ErrMalformedJSON-177] - _ = x[ErrAdminNoSuchUser-178] - _ = x[ErrAdminNoSuchUserLDAPWarn-179] - _ = x[ErrAdminNoSuchGroup-180] - _ = x[ErrAdminGroupNotEmpty-181] - _ = x[ErrAdminGroupDisabled-182] - _ = x[ErrAdminNoSuchJob-183] - _ = x[ErrAdminNoSuchPolicy-184] - _ = x[ErrAdminPolicyChangeAlreadyApplied-185] - _ = x[ErrAdminInvalidArgument-186] - _ = x[ErrAdminInvalidAccessKey-187] - _ = x[ErrAdminInvalidSecretKey-188] - _ = x[ErrAdminConfigNoQuorum-189] - _ = x[ErrAdminConfigTooLarge-190] - _ = x[ErrAdminConfigBadJSON-191] - _ = x[ErrAdminNoSuchConfigTarget-192] - _ = x[ErrAdminConfigEnvOverridden-193] - _ = x[ErrAdminConfigDuplicateKeys-194] - _ = x[ErrAdminConfigInvalidIDPType-195] - _ = 
x[ErrAdminConfigLDAPNonDefaultConfigName-196] - _ = x[ErrAdminConfigLDAPValidation-197] - _ = x[ErrAdminConfigIDPCfgNameAlreadyExists-198] - _ = x[ErrAdminConfigIDPCfgNameDoesNotExist-199] - _ = x[ErrInsecureClientRequest-200] - _ = x[ErrObjectTampered-201] - _ = x[ErrSiteReplicationInvalidRequest-202] - _ = x[ErrSiteReplicationPeerResp-203] - _ = x[ErrSiteReplicationBackendIssue-204] - _ = x[ErrSiteReplicationServiceAccountError-205] - _ = x[ErrSiteReplicationBucketConfigError-206] - _ = x[ErrSiteReplicationBucketMetaError-207] - _ = x[ErrSiteReplicationIAMError-208] - _ = x[ErrSiteReplicationConfigMissing-209] - _ = x[ErrSiteReplicationIAMConfigMismatch-210] - _ = x[ErrAdminRebalanceAlreadyStarted-211] - _ = x[ErrAdminRebalanceNotStarted-212] - _ = x[ErrAdminBucketQuotaExceeded-213] - _ = x[ErrAdminNoSuchQuotaConfiguration-214] - _ = x[ErrHealNotImplemented-215] - _ = x[ErrHealNoSuchProcess-216] - _ = x[ErrHealInvalidClientToken-217] - _ = x[ErrHealMissingBucket-218] - _ = x[ErrHealAlreadyRunning-219] - _ = x[ErrHealOverlappingPaths-220] - _ = x[ErrIncorrectContinuationToken-221] - _ = x[ErrEmptyRequestBody-222] - _ = x[ErrUnsupportedFunction-223] - _ = x[ErrInvalidExpressionType-224] - _ = x[ErrBusy-225] - _ = x[ErrUnauthorizedAccess-226] - _ = x[ErrExpressionTooLong-227] - _ = x[ErrIllegalSQLFunctionArgument-228] - _ = x[ErrInvalidKeyPath-229] - _ = x[ErrInvalidCompressionFormat-230] - _ = x[ErrInvalidFileHeaderInfo-231] - _ = x[ErrInvalidJSONType-232] - _ = x[ErrInvalidQuoteFields-233] - _ = x[ErrInvalidRequestParameter-234] - _ = x[ErrInvalidDataType-235] - _ = x[ErrInvalidTextEncoding-236] - _ = x[ErrInvalidDataSource-237] - _ = x[ErrInvalidTableAlias-238] - _ = x[ErrMissingRequiredParameter-239] - _ = x[ErrObjectSerializationConflict-240] - _ = x[ErrUnsupportedSQLOperation-241] - _ = x[ErrUnsupportedSQLStructure-242] - _ = x[ErrUnsupportedSyntax-243] - _ = x[ErrUnsupportedRangeHeader-244] - _ = x[ErrLexerInvalidChar-245] - _ = x[ErrLexerInvalidOperator-246] - _ = x[ErrLexerInvalidLiteral-247] - _ = x[ErrLexerInvalidIONLiteral-248] - _ = x[ErrParseExpectedDatePart-249] - _ = x[ErrParseExpectedKeyword-250] - _ = x[ErrParseExpectedTokenType-251] - _ = x[ErrParseExpected2TokenTypes-252] - _ = x[ErrParseExpectedNumber-253] - _ = x[ErrParseExpectedRightParenBuiltinFunctionCall-254] - _ = x[ErrParseExpectedTypeName-255] - _ = x[ErrParseExpectedWhenClause-256] - _ = x[ErrParseUnsupportedToken-257] - _ = x[ErrParseUnsupportedLiteralsGroupBy-258] - _ = x[ErrParseExpectedMember-259] - _ = x[ErrParseUnsupportedSelect-260] - _ = x[ErrParseUnsupportedCase-261] - _ = x[ErrParseUnsupportedCaseClause-262] - _ = x[ErrParseUnsupportedAlias-263] - _ = x[ErrParseUnsupportedSyntax-264] - _ = x[ErrParseUnknownOperator-265] - _ = x[ErrParseMissingIdentAfterAt-266] - _ = x[ErrParseUnexpectedOperator-267] - _ = x[ErrParseUnexpectedTerm-268] - _ = x[ErrParseUnexpectedToken-269] - _ = x[ErrParseUnexpectedKeyword-270] - _ = x[ErrParseExpectedExpression-271] - _ = x[ErrParseExpectedLeftParenAfterCast-272] - _ = x[ErrParseExpectedLeftParenValueConstructor-273] - _ = x[ErrParseExpectedLeftParenBuiltinFunctionCall-274] - _ = x[ErrParseExpectedArgumentDelimiter-275] - _ = x[ErrParseCastArity-276] - _ = x[ErrParseInvalidTypeParam-277] - _ = x[ErrParseEmptySelect-278] - _ = x[ErrParseSelectMissingFrom-279] - _ = x[ErrParseExpectedIdentForGroupName-280] - _ = x[ErrParseExpectedIdentForAlias-281] - _ = x[ErrParseUnsupportedCallWithStar-282] - _ = x[ErrParseNonUnaryAggregateFunctionCall-283] - _ = 
x[ErrParseMalformedJoin-284] - _ = x[ErrParseExpectedIdentForAt-285] - _ = x[ErrParseAsteriskIsNotAloneInSelectList-286] - _ = x[ErrParseCannotMixSqbAndWildcardInSelectList-287] - _ = x[ErrParseInvalidContextForWildcardInSelectList-288] - _ = x[ErrIncorrectSQLFunctionArgumentType-289] - _ = x[ErrValueParseFailure-290] - _ = x[ErrEvaluatorInvalidArguments-291] - _ = x[ErrIntegerOverflow-292] - _ = x[ErrLikeInvalidInputs-293] - _ = x[ErrCastFailed-294] - _ = x[ErrInvalidCast-295] - _ = x[ErrEvaluatorInvalidTimestampFormatPattern-296] - _ = x[ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing-297] - _ = x[ErrEvaluatorTimestampFormatPatternDuplicateFields-298] - _ = x[ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch-299] - _ = x[ErrEvaluatorUnterminatedTimestampFormatPatternToken-300] - _ = x[ErrEvaluatorInvalidTimestampFormatPatternToken-301] - _ = x[ErrEvaluatorInvalidTimestampFormatPatternSymbol-302] - _ = x[ErrEvaluatorBindingDoesNotExist-303] - _ = x[ErrMissingHeaders-304] - _ = x[ErrInvalidColumnIndex-305] - _ = x[ErrAdminConfigNotificationTargetsFailed-306] - _ = x[ErrAdminProfilerNotEnabled-307] - _ = x[ErrInvalidDecompressedSize-308] - _ = x[ErrAddUserInvalidArgument-309] - _ = x[ErrAdminResourceInvalidArgument-310] - _ = x[ErrAdminAccountNotEligible-311] - _ = x[ErrAccountNotEligible-312] - _ = x[ErrAdminServiceAccountNotFound-313] - _ = x[ErrPostPolicyConditionInvalidFormat-314] - _ = x[ErrInvalidChecksum-315] - _ = x[ErrLambdaARNInvalid-316] - _ = x[ErrLambdaARNNotFound-317] - _ = x[ErrInvalidAttributeName-318] - _ = x[ErrAdminNoAccessKey-319] - _ = x[ErrAdminNoSecretKey-320] - _ = x[apiErrCodeEnd-321] + _ = x[ErrPolicyInvalidName-131] + _ = x[ErrNoTokenRevokeType-132] + _ = x[ErrAdminOpenIDNotEnabled-133] + _ = x[ErrAdminNoSuchAccessKey-134] + _ = x[ErrInvalidEncryptionMethod-135] + _ = x[ErrInvalidEncryptionKeyID-136] + _ = x[ErrInsecureSSECustomerRequest-137] + _ = x[ErrSSEMultipartEncrypted-138] + _ = x[ErrSSEEncryptedObject-139] + _ = x[ErrInvalidEncryptionParameters-140] + _ = x[ErrInvalidEncryptionParametersSSEC-141] + _ = x[ErrInvalidSSECustomerAlgorithm-142] + _ = x[ErrInvalidSSECustomerKey-143] + _ = x[ErrMissingSSECustomerKey-144] + _ = x[ErrMissingSSECustomerKeyMD5-145] + _ = x[ErrSSECustomerKeyMD5Mismatch-146] + _ = x[ErrInvalidSSECustomerParameters-147] + _ = x[ErrIncompatibleEncryptionMethod-148] + _ = x[ErrKMSNotConfigured-149] + _ = x[ErrKMSKeyNotFoundException-150] + _ = x[ErrKMSDefaultKeyAlreadyConfigured-151] + _ = x[ErrNoAccessKey-152] + _ = x[ErrInvalidToken-153] + _ = x[ErrEventNotification-154] + _ = x[ErrARNNotification-155] + _ = x[ErrRegionNotification-156] + _ = x[ErrOverlappingFilterNotification-157] + _ = x[ErrFilterNameInvalid-158] + _ = x[ErrFilterNamePrefix-159] + _ = x[ErrFilterNameSuffix-160] + _ = x[ErrFilterValueInvalid-161] + _ = x[ErrOverlappingConfigs-162] + _ = x[ErrUnsupportedNotification-163] + _ = x[ErrContentSHA256Mismatch-164] + _ = x[ErrContentChecksumMismatch-165] + _ = x[ErrStorageFull-166] + _ = x[ErrRequestBodyParse-167] + _ = x[ErrObjectExistsAsDirectory-168] + _ = x[ErrInvalidObjectName-169] + _ = x[ErrInvalidObjectNamePrefixSlash-170] + _ = x[ErrInvalidResourceName-171] + _ = x[ErrInvalidLifecycleQueryParameter-172] + _ = x[ErrServerNotInitialized-173] + _ = x[ErrBucketMetadataNotInitialized-174] + _ = x[ErrRequestTimedout-175] + _ = x[ErrClientDisconnected-176] + _ = x[ErrTooManyRequests-177] + _ = x[ErrInvalidRequest-178] + _ = x[ErrTransitionStorageClassNotFoundError-179] + _ = x[ErrInvalidStorageClass-180] + _ 
= x[ErrBackendDown-181] + _ = x[ErrMalformedJSON-182] + _ = x[ErrAdminNoSuchUser-183] + _ = x[ErrAdminNoSuchUserLDAPWarn-184] + _ = x[ErrAdminLDAPExpectedLoginName-185] + _ = x[ErrAdminNoSuchGroup-186] + _ = x[ErrAdminGroupNotEmpty-187] + _ = x[ErrAdminGroupDisabled-188] + _ = x[ErrAdminInvalidGroupName-189] + _ = x[ErrAdminNoSuchJob-190] + _ = x[ErrAdminNoSuchPolicy-191] + _ = x[ErrAdminPolicyChangeAlreadyApplied-192] + _ = x[ErrAdminInvalidArgument-193] + _ = x[ErrAdminInvalidAccessKey-194] + _ = x[ErrAdminInvalidSecretKey-195] + _ = x[ErrAdminConfigNoQuorum-196] + _ = x[ErrAdminConfigTooLarge-197] + _ = x[ErrAdminConfigBadJSON-198] + _ = x[ErrAdminNoSuchConfigTarget-199] + _ = x[ErrAdminConfigEnvOverridden-200] + _ = x[ErrAdminConfigDuplicateKeys-201] + _ = x[ErrAdminConfigInvalidIDPType-202] + _ = x[ErrAdminConfigLDAPNonDefaultConfigName-203] + _ = x[ErrAdminConfigLDAPValidation-204] + _ = x[ErrAdminConfigIDPCfgNameAlreadyExists-205] + _ = x[ErrAdminConfigIDPCfgNameDoesNotExist-206] + _ = x[ErrInsecureClientRequest-207] + _ = x[ErrObjectTampered-208] + _ = x[ErrAdminLDAPNotEnabled-209] + _ = x[ErrSiteReplicationInvalidRequest-210] + _ = x[ErrSiteReplicationPeerResp-211] + _ = x[ErrSiteReplicationBackendIssue-212] + _ = x[ErrSiteReplicationServiceAccountError-213] + _ = x[ErrSiteReplicationBucketConfigError-214] + _ = x[ErrSiteReplicationBucketMetaError-215] + _ = x[ErrSiteReplicationIAMError-216] + _ = x[ErrSiteReplicationConfigMissing-217] + _ = x[ErrSiteReplicationIAMConfigMismatch-218] + _ = x[ErrAdminRebalanceAlreadyStarted-219] + _ = x[ErrAdminRebalanceNotStarted-220] + _ = x[ErrAdminBucketQuotaExceeded-221] + _ = x[ErrAdminNoSuchQuotaConfiguration-222] + _ = x[ErrHealNotImplemented-223] + _ = x[ErrHealNoSuchProcess-224] + _ = x[ErrHealInvalidClientToken-225] + _ = x[ErrHealMissingBucket-226] + _ = x[ErrHealAlreadyRunning-227] + _ = x[ErrHealOverlappingPaths-228] + _ = x[ErrIncorrectContinuationToken-229] + _ = x[ErrEmptyRequestBody-230] + _ = x[ErrUnsupportedFunction-231] + _ = x[ErrInvalidExpressionType-232] + _ = x[ErrBusy-233] + _ = x[ErrUnauthorizedAccess-234] + _ = x[ErrExpressionTooLong-235] + _ = x[ErrIllegalSQLFunctionArgument-236] + _ = x[ErrInvalidKeyPath-237] + _ = x[ErrInvalidCompressionFormat-238] + _ = x[ErrInvalidFileHeaderInfo-239] + _ = x[ErrInvalidJSONType-240] + _ = x[ErrInvalidQuoteFields-241] + _ = x[ErrInvalidRequestParameter-242] + _ = x[ErrInvalidDataType-243] + _ = x[ErrInvalidTextEncoding-244] + _ = x[ErrInvalidDataSource-245] + _ = x[ErrInvalidTableAlias-246] + _ = x[ErrMissingRequiredParameter-247] + _ = x[ErrObjectSerializationConflict-248] + _ = x[ErrUnsupportedSQLOperation-249] + _ = x[ErrUnsupportedSQLStructure-250] + _ = x[ErrUnsupportedSyntax-251] + _ = x[ErrUnsupportedRangeHeader-252] + _ = x[ErrLexerInvalidChar-253] + _ = x[ErrLexerInvalidOperator-254] + _ = x[ErrLexerInvalidLiteral-255] + _ = x[ErrLexerInvalidIONLiteral-256] + _ = x[ErrParseExpectedDatePart-257] + _ = x[ErrParseExpectedKeyword-258] + _ = x[ErrParseExpectedTokenType-259] + _ = x[ErrParseExpected2TokenTypes-260] + _ = x[ErrParseExpectedNumber-261] + _ = x[ErrParseExpectedRightParenBuiltinFunctionCall-262] + _ = x[ErrParseExpectedTypeName-263] + _ = x[ErrParseExpectedWhenClause-264] + _ = x[ErrParseUnsupportedToken-265] + _ = x[ErrParseUnsupportedLiteralsGroupBy-266] + _ = x[ErrParseExpectedMember-267] + _ = x[ErrParseUnsupportedSelect-268] + _ = x[ErrParseUnsupportedCase-269] + _ = x[ErrParseUnsupportedCaseClause-270] + _ = x[ErrParseUnsupportedAlias-271] + _ = 
x[ErrParseUnsupportedSyntax-272] + _ = x[ErrParseUnknownOperator-273] + _ = x[ErrParseMissingIdentAfterAt-274] + _ = x[ErrParseUnexpectedOperator-275] + _ = x[ErrParseUnexpectedTerm-276] + _ = x[ErrParseUnexpectedToken-277] + _ = x[ErrParseUnexpectedKeyword-278] + _ = x[ErrParseExpectedExpression-279] + _ = x[ErrParseExpectedLeftParenAfterCast-280] + _ = x[ErrParseExpectedLeftParenValueConstructor-281] + _ = x[ErrParseExpectedLeftParenBuiltinFunctionCall-282] + _ = x[ErrParseExpectedArgumentDelimiter-283] + _ = x[ErrParseCastArity-284] + _ = x[ErrParseInvalidTypeParam-285] + _ = x[ErrParseEmptySelect-286] + _ = x[ErrParseSelectMissingFrom-287] + _ = x[ErrParseExpectedIdentForGroupName-288] + _ = x[ErrParseExpectedIdentForAlias-289] + _ = x[ErrParseUnsupportedCallWithStar-290] + _ = x[ErrParseNonUnaryAggregateFunctionCall-291] + _ = x[ErrParseMalformedJoin-292] + _ = x[ErrParseExpectedIdentForAt-293] + _ = x[ErrParseAsteriskIsNotAloneInSelectList-294] + _ = x[ErrParseCannotMixSqbAndWildcardInSelectList-295] + _ = x[ErrParseInvalidContextForWildcardInSelectList-296] + _ = x[ErrIncorrectSQLFunctionArgumentType-297] + _ = x[ErrValueParseFailure-298] + _ = x[ErrEvaluatorInvalidArguments-299] + _ = x[ErrIntegerOverflow-300] + _ = x[ErrLikeInvalidInputs-301] + _ = x[ErrCastFailed-302] + _ = x[ErrInvalidCast-303] + _ = x[ErrEvaluatorInvalidTimestampFormatPattern-304] + _ = x[ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing-305] + _ = x[ErrEvaluatorTimestampFormatPatternDuplicateFields-306] + _ = x[ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch-307] + _ = x[ErrEvaluatorUnterminatedTimestampFormatPatternToken-308] + _ = x[ErrEvaluatorInvalidTimestampFormatPatternToken-309] + _ = x[ErrEvaluatorInvalidTimestampFormatPatternSymbol-310] + _ = x[ErrEvaluatorBindingDoesNotExist-311] + _ = x[ErrMissingHeaders-312] + _ = x[ErrInvalidColumnIndex-313] + _ = x[ErrAdminConfigNotificationTargetsFailed-314] + _ = x[ErrAdminProfilerNotEnabled-315] + _ = x[ErrInvalidDecompressedSize-316] + _ = x[ErrAddUserInvalidArgument-317] + _ = x[ErrAddUserValidUTF-318] + _ = x[ErrAdminResourceInvalidArgument-319] + _ = x[ErrAdminAccountNotEligible-320] + _ = x[ErrAccountNotEligible-321] + _ = x[ErrAdminServiceAccountNotFound-322] + _ = x[ErrPostPolicyConditionInvalidFormat-323] + _ = x[ErrInvalidChecksum-324] + _ = x[ErrLambdaARNInvalid-325] + _ = x[ErrLambdaARNNotFound-326] + _ = x[ErrInvalidAttributeName-327] + _ = x[ErrAdminNoAccessKey-328] + _ = x[ErrAdminNoSecretKey-329] + _ = x[ErrIAMNotInitialized-330] + _ = x[apiErrCodeEnd-331] } -const _APIErrorCode_name = 
"NoneAccessDeniedBadDigestEntityTooSmallEntityTooLargePolicyTooLargeIncompleteBodyInternalErrorInvalidAccessKeyIDAccessKeyDisabledInvalidArgumentInvalidBucketNameInvalidDigestInvalidRangeInvalidRangePartNumberInvalidCopyPartRangeInvalidCopyPartRangeSourceInvalidMaxKeysInvalidEncodingMethodInvalidMaxUploadsInvalidMaxPartsInvalidPartNumberMarkerInvalidPartNumberInvalidRequestBodyInvalidCopySourceInvalidMetadataDirectiveInvalidCopyDestInvalidPolicyDocumentInvalidObjectStateMalformedXMLMissingContentLengthMissingContentMD5MissingRequestBodyErrorMissingSecurityHeaderNoSuchBucketNoSuchBucketPolicyNoSuchBucketLifecycleNoSuchLifecycleConfigurationInvalidLifecycleWithObjectLockNoSuchBucketSSEConfigNoSuchCORSConfigurationNoSuchWebsiteConfigurationReplicationConfigurationNotFoundErrorRemoteDestinationNotFoundErrorReplicationDestinationMissingLockRemoteTargetNotFoundErrorReplicationRemoteConnectionErrorReplicationBandwidthLimitErrorBucketRemoteIdenticalToSourceBucketRemoteAlreadyExistsBucketRemoteLabelInUseBucketRemoteArnTypeInvalidBucketRemoteArnInvalidBucketRemoteRemoveDisallowedRemoteTargetNotVersionedErrorReplicationSourceNotVersionedErrorReplicationNeedsVersioningErrorReplicationBucketNeedsVersioningErrorReplicationDenyEditErrorRemoteTargetDenyAddErrorReplicationNoExistingObjectsReplicationValidationErrorReplicationPermissionCheckErrorObjectRestoreAlreadyInProgressNoSuchKeyNoSuchUploadInvalidVersionIDNoSuchVersionNotImplementedPreconditionFailedRequestTimeTooSkewedSignatureDoesNotMatchMethodNotAllowedInvalidPartInvalidPartOrderMissingPartAuthorizationHeaderMalformedMalformedPOSTRequestPOSTFileRequiredSignatureVersionNotSupportedBucketNotEmptyAllAccessDisabledPolicyInvalidVersionMissingFieldsMissingCredTagCredMalformedInvalidRegionInvalidServiceS3InvalidServiceSTSInvalidRequestVersionMissingSignTagMissingSignHeadersTagMalformedDateMalformedPresignedDateMalformedCredentialDateMalformedExpiresNegativeExpiresAuthHeaderEmptyExpiredPresignRequestRequestNotReadyYetUnsignedHeadersMissingDateHeaderInvalidQuerySignatureAlgoInvalidQueryParamsBucketAlreadyOwnedByYouInvalidDurationBucketAlreadyExistsMetadataTooLargeUnsupportedMetadataUnsupportedHostHeaderMaximumExpiresSlowDownReadSlowDownWriteMaxVersionsExceededInvalidPrefixMarkerBadRequestKeyTooLongErrorInvalidBucketObjectLockConfigurationObjectLockConfigurationNotFoundObjectLockConfigurationNotAllowedNoSuchObjectLockConfigurationObjectLockedInvalidRetentionDatePastObjectLockRetainDateUnknownWORMModeDirectiveBucketTaggingNotFoundObjectLockInvalidHeadersInvalidTagDirectivePolicyAlreadyAttachedPolicyNotAttachedExcessDataInvalidEncryptionMethodInvalidEncryptionKeyIDInsecureSSECustomerRequestSSEMultipartEncryptedSSEEncryptedObjectInvalidEncryptionParametersInvalidEncryptionParametersSSECInvalidSSECustomerAlgorithmInvalidSSECustomerKeyMissingSSECustomerKeyMissingSSECustomerKeyMD5SSECustomerKeyMD5MismatchInvalidSSECustomerParametersIncompatibleEncryptionMethodKMSNotConfiguredKMSKeyNotFoundExceptionKMSDefaultKeyAlreadyConfiguredNoAccessKeyInvalidTokenEventNotificationARNNotificationRegionNotificationOverlappingFilterNotificationFilterNameInvalidFilterNamePrefixFilterNameSuffixFilterValueInvalidOverlappingConfigsUnsupportedNotificationContentSHA256MismatchContentChecksumMismatchStorageFullRequestBodyParseObjectExistsAsDirectoryInvalidObjectNameInvalidObjectNamePrefixSlashInvalidResourceNameInvalidLifecycleQueryParameterServerNotInitializedRequestTimedoutClientDisconnectedTooManyRequestsInvalidRequestTransitionStorageClassNotFoundErrorInvalidStorageClassBackendDownMal
formedJSONAdminNoSuchUserAdminNoSuchUserLDAPWarnAdminNoSuchGroupAdminGroupNotEmptyAdminGroupDisabledAdminNoSuchJobAdminNoSuchPolicyAdminPolicyChangeAlreadyAppliedAdminInvalidArgumentAdminInvalidAccessKeyAdminInvalidSecretKeyAdminConfigNoQuorumAdminConfigTooLargeAdminConfigBadJSONAdminNoSuchConfigTargetAdminConfigEnvOverriddenAdminConfigDuplicateKeysAdminConfigInvalidIDPTypeAdminConfigLDAPNonDefaultConfigNameAdminConfigLDAPValidationAdminConfigIDPCfgNameAlreadyExistsAdminConfigIDPCfgNameDoesNotExistInsecureClientRequestObjectTamperedSiteReplicationInvalidRequestSiteReplicationPeerRespSiteReplicationBackendIssueSiteReplicationServiceAccountErrorSiteReplicationBucketConfigErrorSiteReplicationBucketMetaErrorSiteReplicationIAMErrorSiteReplicationConfigMissingSiteReplicationIAMConfigMismatchAdminRebalanceAlreadyStartedAdminRebalanceNotStartedAdminBucketQuotaExceededAdminNoSuchQuotaConfigurationHealNotImplementedHealNoSuchProcessHealInvalidClientTokenHealMissingBucketHealAlreadyRunningHealOverlappingPathsIncorrectContinuationTokenEmptyRequestBodyUnsupportedFunctionInvalidExpressionTypeBusyUnauthorizedAccessExpressionTooLongIllegalSQLFunctionArgumentInvalidKeyPathInvalidCompressionFormatInvalidFileHeaderInfoInvalidJSONTypeInvalidQuoteFieldsInvalidRequestParameterInvalidDataTypeInvalidTextEncodingInvalidDataSourceInvalidTableAliasMissingRequiredParameterObjectSerializationConflictUnsupportedSQLOperationUnsupportedSQLStructureUnsupportedSyntaxUnsupportedRangeHeaderLexerInvalidCharLexerInvalidOperatorLexerInvalidLiteralLexerInvalidIONLiteralParseExpectedDatePartParseExpectedKeywordParseExpectedTokenTypeParseExpected2TokenTypesParseExpectedNumberParseExpectedRightParenBuiltinFunctionCallParseExpectedTypeNameParseExpectedWhenClauseParseUnsupportedTokenParseUnsupportedLiteralsGroupByParseExpectedMemberParseUnsupportedSelectParseUnsupportedCaseParseUnsupportedCaseClauseParseUnsupportedAliasParseUnsupportedSyntaxParseUnknownOperatorParseMissingIdentAfterAtParseUnexpectedOperatorParseUnexpectedTermParseUnexpectedTokenParseUnexpectedKeywordParseExpectedExpressionParseExpectedLeftParenAfterCastParseExpectedLeftParenValueConstructorParseExpectedLeftParenBuiltinFunctionCallParseExpectedArgumentDelimiterParseCastArityParseInvalidTypeParamParseEmptySelectParseSelectMissingFromParseExpectedIdentForGroupNameParseExpectedIdentForAliasParseUnsupportedCallWithStarParseNonUnaryAggregateFunctionCallParseMalformedJoinParseExpectedIdentForAtParseAsteriskIsNotAloneInSelectListParseCannotMixSqbAndWildcardInSelectListParseInvalidContextForWildcardInSelectListIncorrectSQLFunctionArgumentTypeValueParseFailureEvaluatorInvalidArgumentsIntegerOverflowLikeInvalidInputsCastFailedInvalidCastEvaluatorInvalidTimestampFormatPatternEvaluatorInvalidTimestampFormatPatternSymbolForParsingEvaluatorTimestampFormatPatternDuplicateFieldsEvaluatorTimestampFormatPatternHourClockAmPmMismatchEvaluatorUnterminatedTimestampFormatPatternTokenEvaluatorInvalidTimestampFormatPatternTokenEvaluatorInvalidTimestampFormatPatternSymbolEvaluatorBindingDoesNotExistMissingHeadersInvalidColumnIndexAdminConfigNotificationTargetsFailedAdminProfilerNotEnabledInvalidDecompressedSizeAddUserInvalidArgumentAdminResourceInvalidArgumentAdminAccountNotEligibleAccountNotEligibleAdminServiceAccountNotFoundPostPolicyConditionInvalidFormatInvalidChecksumLambdaARNInvalidLambdaARNNotFoundInvalidAttributeNameAdminNoAccessKeyAdminNoSecretKeyapiErrCodeEnd" +const _APIErrorCode_name = 
"NoneAccessDeniedBadDigestEntityTooSmallEntityTooLargePolicyTooLargeIncompleteBodyInternalErrorInvalidAccessKeyIDAccessKeyDisabledInvalidArgumentInvalidBucketNameInvalidDigestInvalidRangeInvalidRangePartNumberInvalidCopyPartRangeInvalidCopyPartRangeSourceInvalidMaxKeysInvalidEncodingMethodInvalidMaxUploadsInvalidMaxPartsInvalidPartNumberMarkerInvalidPartNumberInvalidRequestBodyInvalidCopySourceInvalidMetadataDirectiveInvalidCopyDestInvalidPolicyDocumentInvalidObjectStateMalformedXMLMissingContentLengthMissingContentMD5MissingRequestBodyErrorMissingSecurityHeaderNoSuchBucketNoSuchBucketPolicyNoSuchBucketLifecycleNoSuchLifecycleConfigurationInvalidLifecycleWithObjectLockNoSuchBucketSSEConfigNoSuchCORSConfigurationNoSuchWebsiteConfigurationReplicationConfigurationNotFoundErrorRemoteDestinationNotFoundErrorReplicationDestinationMissingLockRemoteTargetNotFoundErrorReplicationRemoteConnectionErrorReplicationBandwidthLimitErrorBucketRemoteIdenticalToSourceBucketRemoteAlreadyExistsBucketRemoteLabelInUseBucketRemoteArnTypeInvalidBucketRemoteArnInvalidBucketRemoteRemoveDisallowedRemoteTargetNotVersionedErrorReplicationSourceNotVersionedErrorReplicationNeedsVersioningErrorReplicationBucketNeedsVersioningErrorReplicationDenyEditErrorRemoteTargetDenyAddErrorReplicationNoExistingObjectsReplicationValidationErrorReplicationPermissionCheckErrorObjectRestoreAlreadyInProgressNoSuchKeyNoSuchUploadInvalidVersionIDNoSuchVersionNotImplementedPreconditionFailedRequestTimeTooSkewedSignatureDoesNotMatchMethodNotAllowedInvalidPartInvalidPartOrderMissingPartAuthorizationHeaderMalformedMalformedPOSTRequestPOSTFileRequiredSignatureVersionNotSupportedBucketNotEmptyAllAccessDisabledPolicyInvalidVersionMissingFieldsMissingCredTagCredMalformedInvalidRegionInvalidServiceS3InvalidServiceSTSInvalidRequestVersionMissingSignTagMissingSignHeadersTagMalformedDateMalformedPresignedDateMalformedCredentialDateMalformedExpiresNegativeExpiresAuthHeaderEmptyExpiredPresignRequestRequestNotReadyYetUnsignedHeadersMissingDateHeaderInvalidQuerySignatureAlgoInvalidQueryParamsBucketAlreadyOwnedByYouInvalidDurationBucketAlreadyExistsMetadataTooLargeUnsupportedMetadataUnsupportedHostHeaderMaximumExpiresSlowDownReadSlowDownWriteMaxVersionsExceededInvalidPrefixMarkerBadRequestKeyTooLongErrorInvalidBucketObjectLockConfigurationObjectLockConfigurationNotFoundObjectLockConfigurationNotAllowedNoSuchObjectLockConfigurationObjectLockedInvalidRetentionDatePastObjectLockRetainDateUnknownWORMModeDirectiveBucketTaggingNotFoundObjectLockInvalidHeadersInvalidTagDirectivePolicyAlreadyAttachedPolicyNotAttachedExcessDataPolicyInvalidNameNoTokenRevokeTypeAdminOpenIDNotEnabledAdminNoSuchAccessKeyInvalidEncryptionMethodInvalidEncryptionKeyIDInsecureSSECustomerRequestSSEMultipartEncryptedSSEEncryptedObjectInvalidEncryptionParametersInvalidEncryptionParametersSSECInvalidSSECustomerAlgorithmInvalidSSECustomerKeyMissingSSECustomerKeyMissingSSECustomerKeyMD5SSECustomerKeyMD5MismatchInvalidSSECustomerParametersIncompatibleEncryptionMethodKMSNotConfiguredKMSKeyNotFoundExceptionKMSDefaultKeyAlreadyConfiguredNoAccessKeyInvalidTokenEventNotificationARNNotificationRegionNotificationOverlappingFilterNotificationFilterNameInvalidFilterNamePrefixFilterNameSuffixFilterValueInvalidOverlappingConfigsUnsupportedNotificationContentSHA256MismatchContentChecksumMismatchStorageFullRequestBodyParseObjectExistsAsDirectoryInvalidObjectNameInvalidObjectNamePrefixSlashInvalidResourceNameInvalidLifecycleQueryParameterServerNotInitializedBucketMetadataNotInitializedRequestTimedoutClientDiscon
nectedTooManyRequestsInvalidRequestTransitionStorageClassNotFoundErrorInvalidStorageClassBackendDownMalformedJSONAdminNoSuchUserAdminNoSuchUserLDAPWarnAdminLDAPExpectedLoginNameAdminNoSuchGroupAdminGroupNotEmptyAdminGroupDisabledAdminInvalidGroupNameAdminNoSuchJobAdminNoSuchPolicyAdminPolicyChangeAlreadyAppliedAdminInvalidArgumentAdminInvalidAccessKeyAdminInvalidSecretKeyAdminConfigNoQuorumAdminConfigTooLargeAdminConfigBadJSONAdminNoSuchConfigTargetAdminConfigEnvOverriddenAdminConfigDuplicateKeysAdminConfigInvalidIDPTypeAdminConfigLDAPNonDefaultConfigNameAdminConfigLDAPValidationAdminConfigIDPCfgNameAlreadyExistsAdminConfigIDPCfgNameDoesNotExistInsecureClientRequestObjectTamperedAdminLDAPNotEnabledSiteReplicationInvalidRequestSiteReplicationPeerRespSiteReplicationBackendIssueSiteReplicationServiceAccountErrorSiteReplicationBucketConfigErrorSiteReplicationBucketMetaErrorSiteReplicationIAMErrorSiteReplicationConfigMissingSiteReplicationIAMConfigMismatchAdminRebalanceAlreadyStartedAdminRebalanceNotStartedAdminBucketQuotaExceededAdminNoSuchQuotaConfigurationHealNotImplementedHealNoSuchProcessHealInvalidClientTokenHealMissingBucketHealAlreadyRunningHealOverlappingPathsIncorrectContinuationTokenEmptyRequestBodyUnsupportedFunctionInvalidExpressionTypeBusyUnauthorizedAccessExpressionTooLongIllegalSQLFunctionArgumentInvalidKeyPathInvalidCompressionFormatInvalidFileHeaderInfoInvalidJSONTypeInvalidQuoteFieldsInvalidRequestParameterInvalidDataTypeInvalidTextEncodingInvalidDataSourceInvalidTableAliasMissingRequiredParameterObjectSerializationConflictUnsupportedSQLOperationUnsupportedSQLStructureUnsupportedSyntaxUnsupportedRangeHeaderLexerInvalidCharLexerInvalidOperatorLexerInvalidLiteralLexerInvalidIONLiteralParseExpectedDatePartParseExpectedKeywordParseExpectedTokenTypeParseExpected2TokenTypesParseExpectedNumberParseExpectedRightParenBuiltinFunctionCallParseExpectedTypeNameParseExpectedWhenClauseParseUnsupportedTokenParseUnsupportedLiteralsGroupByParseExpectedMemberParseUnsupportedSelectParseUnsupportedCaseParseUnsupportedCaseClauseParseUnsupportedAliasParseUnsupportedSyntaxParseUnknownOperatorParseMissingIdentAfterAtParseUnexpectedOperatorParseUnexpectedTermParseUnexpectedTokenParseUnexpectedKeywordParseExpectedExpressionParseExpectedLeftParenAfterCastParseExpectedLeftParenValueConstructorParseExpectedLeftParenBuiltinFunctionCallParseExpectedArgumentDelimiterParseCastArityParseInvalidTypeParamParseEmptySelectParseSelectMissingFromParseExpectedIdentForGroupNameParseExpectedIdentForAliasParseUnsupportedCallWithStarParseNonUnaryAggregateFunctionCallParseMalformedJoinParseExpectedIdentForAtParseAsteriskIsNotAloneInSelectListParseCannotMixSqbAndWildcardInSelectListParseInvalidContextForWildcardInSelectListIncorrectSQLFunctionArgumentTypeValueParseFailureEvaluatorInvalidArgumentsIntegerOverflowLikeInvalidInputsCastFailedInvalidCastEvaluatorInvalidTimestampFormatPatternEvaluatorInvalidTimestampFormatPatternSymbolForParsingEvaluatorTimestampFormatPatternDuplicateFieldsEvaluatorTimestampFormatPatternHourClockAmPmMismatchEvaluatorUnterminatedTimestampFormatPatternTokenEvaluatorInvalidTimestampFormatPatternTokenEvaluatorInvalidTimestampFormatPatternSymbolEvaluatorBindingDoesNotExistMissingHeadersInvalidColumnIndexAdminConfigNotificationTargetsFailedAdminProfilerNotEnabledInvalidDecompressedSizeAddUserInvalidArgumentAddUserValidUTFAdminResourceInvalidArgumentAdminAccountNotEligibleAccountNotEligibleAdminServiceAccountNotFoundPostPolicyConditionInvalidFormatInvalidChecksumLambdaARNInvalidLambdaARNNotFoundInvalidAt
tributeNameAdminNoAccessKeyAdminNoSecretKeyIAMNotInitializedapiErrCodeEnd" -var _APIErrorCode_index = [...]uint16{0, 4, 16, 25, 39, 53, 67, 81, 94, 112, 129, 144, 161, 174, 186, 208, 228, 254, 268, 289, 306, 321, 344, 361, 379, 396, 420, 435, 456, 474, 486, 506, 523, 546, 567, 579, 597, 618, 646, 676, 697, 720, 746, 783, 813, 846, 871, 903, 933, 962, 987, 1009, 1035, 1057, 1085, 1114, 1148, 1179, 1216, 1240, 1264, 1292, 1318, 1349, 1379, 1388, 1400, 1416, 1429, 1443, 1461, 1481, 1502, 1518, 1529, 1545, 1556, 1584, 1604, 1620, 1648, 1662, 1679, 1699, 1712, 1726, 1739, 1752, 1768, 1785, 1806, 1820, 1841, 1854, 1876, 1899, 1915, 1930, 1945, 1966, 1984, 1999, 2016, 2041, 2059, 2082, 2097, 2116, 2132, 2151, 2172, 2186, 2198, 2211, 2230, 2249, 2259, 2274, 2310, 2341, 2374, 2403, 2415, 2435, 2459, 2483, 2504, 2528, 2547, 2568, 2585, 2595, 2618, 2640, 2666, 2687, 2705, 2732, 2763, 2790, 2811, 2832, 2856, 2881, 2909, 2937, 2953, 2976, 3006, 3017, 3029, 3046, 3061, 3079, 3108, 3125, 3141, 3157, 3175, 3193, 3216, 3237, 3260, 3271, 3287, 3310, 3327, 3355, 3374, 3404, 3424, 3439, 3457, 3472, 3486, 3521, 3540, 3551, 3564, 3579, 3602, 3618, 3636, 3654, 3668, 3685, 3716, 3736, 3757, 3778, 3797, 3816, 3834, 3857, 3881, 3905, 3930, 3965, 3990, 4024, 4057, 4078, 4092, 4121, 4144, 4171, 4205, 4237, 4267, 4290, 4318, 4350, 4378, 4402, 4426, 4455, 4473, 4490, 4512, 4529, 4547, 4567, 4593, 4609, 4628, 4649, 4653, 4671, 4688, 4714, 4728, 4752, 4773, 4788, 4806, 4829, 4844, 4863, 4880, 4897, 4921, 4948, 4971, 4994, 5011, 5033, 5049, 5069, 5088, 5110, 5131, 5151, 5173, 5197, 5216, 5258, 5279, 5302, 5323, 5354, 5373, 5395, 5415, 5441, 5462, 5484, 5504, 5528, 5551, 5570, 5590, 5612, 5635, 5666, 5704, 5745, 5775, 5789, 5810, 5826, 5848, 5878, 5904, 5932, 5966, 5984, 6007, 6042, 6082, 6124, 6156, 6173, 6198, 6213, 6230, 6240, 6251, 6289, 6343, 6389, 6441, 6489, 6532, 6576, 6604, 6618, 6636, 6672, 6695, 6718, 6740, 6768, 6791, 6809, 6836, 6868, 6883, 6899, 6916, 6936, 6952, 6968, 6981} +var _APIErrorCode_index = [...]uint16{0, 4, 16, 25, 39, 53, 67, 81, 94, 112, 129, 144, 161, 174, 186, 208, 228, 254, 268, 289, 306, 321, 344, 361, 379, 396, 420, 435, 456, 474, 486, 506, 523, 546, 567, 579, 597, 618, 646, 676, 697, 720, 746, 783, 813, 846, 871, 903, 933, 962, 987, 1009, 1035, 1057, 1085, 1114, 1148, 1179, 1216, 1240, 1264, 1292, 1318, 1349, 1379, 1388, 1400, 1416, 1429, 1443, 1461, 1481, 1502, 1518, 1529, 1545, 1556, 1584, 1604, 1620, 1648, 1662, 1679, 1699, 1712, 1726, 1739, 1752, 1768, 1785, 1806, 1820, 1841, 1854, 1876, 1899, 1915, 1930, 1945, 1966, 1984, 1999, 2016, 2041, 2059, 2082, 2097, 2116, 2132, 2151, 2172, 2186, 2198, 2211, 2230, 2249, 2259, 2274, 2310, 2341, 2374, 2403, 2415, 2435, 2459, 2483, 2504, 2528, 2547, 2568, 2585, 2595, 2612, 2629, 2650, 2670, 2693, 2715, 2741, 2762, 2780, 2807, 2838, 2865, 2886, 2907, 2931, 2956, 2984, 3012, 3028, 3051, 3081, 3092, 3104, 3121, 3136, 3154, 3183, 3200, 3216, 3232, 3250, 3268, 3291, 3312, 3335, 3346, 3362, 3385, 3402, 3430, 3449, 3479, 3499, 3527, 3542, 3560, 3575, 3589, 3624, 3643, 3654, 3667, 3682, 3705, 3731, 3747, 3765, 3783, 3804, 3818, 3835, 3866, 3886, 3907, 3928, 3947, 3966, 3984, 4007, 4031, 4055, 4080, 4115, 4140, 4174, 4207, 4228, 4242, 4261, 4290, 4313, 4340, 4374, 4406, 4436, 4459, 4487, 4519, 4547, 4571, 4595, 4624, 4642, 4659, 4681, 4698, 4716, 4736, 4762, 4778, 4797, 4818, 4822, 4840, 4857, 4883, 4897, 4921, 4942, 4957, 4975, 4998, 5013, 5032, 5049, 5066, 5090, 5117, 5140, 5163, 5180, 5202, 5218, 5238, 5257, 5279, 5300, 5320, 5342, 5366, 5385, 5427, 
5448, 5471, 5492, 5523, 5542, 5564, 5584, 5610, 5631, 5653, 5673, 5697, 5720, 5739, 5759, 5781, 5804, 5835, 5873, 5914, 5944, 5958, 5979, 5995, 6017, 6047, 6073, 6101, 6135, 6153, 6176, 6211, 6251, 6293, 6325, 6342, 6367, 6382, 6399, 6409, 6420, 6458, 6512, 6558, 6610, 6658, 6701, 6745, 6773, 6787, 6805, 6841, 6864, 6887, 6909, 6924, 6952, 6975, 6993, 7020, 7052, 7067, 7083, 7100, 7120, 7136, 7152, 7169, 7182} func (i APIErrorCode) String() string { if i < 0 || i >= APIErrorCode(len(_APIErrorCode_index)-1) { diff --git a/cmd/auth-handler.go b/cmd/auth-handler.go index 752fdd530f78b..6e824e3126d46 100644 --- a/cmd/auth-handler.go +++ b/cmd/auth-handler.go @@ -41,7 +41,7 @@ import ( xjwt "github.com/minio/minio/internal/jwt" "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/mcontext" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) // Verify if request has JWT. @@ -96,7 +96,7 @@ func isRequestSignStreamingTrailerV4(r *http.Request) bool { // Verify if the request has AWS Streaming Signature Version '4', with unsigned content and trailer. func isRequestUnsignedTrailerV4(r *http.Request) bool { return r.Header.Get(xhttp.AmzContentSha256) == unsignedPayloadTrailer && - r.Method == http.MethodPut && strings.Contains(r.Header.Get(xhttp.ContentEncoding), streamingContentEncoding) + r.Method == http.MethodPut } // Authorization type. @@ -126,7 +126,7 @@ func getRequestAuthType(r *http.Request) (at authType) { var err error r.Form, err = url.ParseQuery(r.URL.RawQuery) if err != nil { - logger.LogIf(r.Context(), err) + authNLogIf(r.Context(), err) return authTypeUnknown } } @@ -162,7 +162,6 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string) s3Err := ErrAccessDenied if _, ok := r.Header[xhttp.AmzContentSha256]; ok && getRequestAuthType(r) == authTypeSigned { - // Get credential information from the request. cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3) if s3Err != ErrNone { @@ -178,7 +177,7 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string) logger.GetReqInfo(ctx).Cred = cred logger.GetReqInfo(ctx).Owner = owner - logger.GetReqInfo(ctx).Region = globalSite.Region + logger.GetReqInfo(ctx).Region = globalSite.Region() return cred, owner, ErrNone } @@ -217,12 +216,12 @@ func getSessionToken(r *http.Request) (token string) { // Fetch claims in the security token returned by the client, doesn't return // errors - upon errors the returned claims map will be empty. -func mustGetClaimsFromToken(r *http.Request) map[string]interface{} { +func mustGetClaimsFromToken(r *http.Request) map[string]any { claims, _ := getClaimsFromToken(getSessionToken(r)) return claims } -func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{}, error) { +func getClaimsFromTokenWithSecret(token, secret string) (*xjwt.MapClaims, error) { // JWT token for x-amz-security-token is signed with admin // secret key, temporary credentials become invalid if // server admin credentials change. This is done to ensure @@ -244,7 +243,7 @@ func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{}, // If AuthZPlugin is set, return without any further checks. if newGlobalAuthZPluginFn() != nil { - return claims.Map(), nil + return claims, nil } // Check if a session policy is set. If so, decode it here. 
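The regenerated `_APIErrorCode_name` constant and `_APIErrorCode_index` table above are the output of Go's `stringer` tool: `String()` slices one long concatenated string using the offset table. A minimal hand-written analogue of the same lookup layout, using an illustrative `Color` type that is not part of this diff:

package main

import "fmt"

type Color int

const (
	Red Color = iota
	Green
	Blue
)

// One concatenated string plus an offset table, the same layout stringer
// emits for APIErrorCode above.
const _Color_name = "RedGreenBlue"

var _Color_index = [...]uint8{0, 3, 8, 12}

func (c Color) String() string {
	if c < 0 || int(c) >= len(_Color_index)-1 {
		return fmt.Sprintf("Color(%d)", int(c))
	}
	return _Color_name[_Color_index[c]:_Color_index[c+1]]
}

func main() {
	fmt.Println(Green) // prints "Green"
}

Adding error codes only changes the generated string and the offsets, which is why the diff above touches those two declarations in bulk.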
@@ -257,22 +256,26 @@ func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{}, if err != nil { // Base64 decoding fails, we should log to indicate // something is malforming the request sent by client. - logger.LogIf(GlobalContext, err, logger.ErrorKind) + authNLogIf(GlobalContext, err, logger.ErrorKind) return nil, errAuthentication } claims.MapClaims[sessionPolicyNameExtracted] = string(spBytes) } - return claims.Map(), nil + return claims, nil } // Fetch claims in the security token returned by the client. -func getClaimsFromToken(token string) (map[string]interface{}, error) { - return getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey) +func getClaimsFromToken(token string) (map[string]any, error) { + jwtClaims, err := getClaimsFromTokenWithSecret(token, globalActiveCred.SecretKey) + if err != nil { + return nil, err + } + return jwtClaims.Map(), nil } // Fetch claims in the security token returned by the client and validate the token. -func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) { +func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]any, APIErrorCode) { token := getSessionToken(r) if token != "" && cred.AccessKey == "" { // x-amz-security-token is not allowed for anonymous access. @@ -319,7 +322,7 @@ func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]in if err != nil { return nil, toAPIErrorCode(r.Context(), err) } - return claims, ErrNone + return claims.Map(), ErrNone } claims := xjwt.NewMapClaims() @@ -353,14 +356,14 @@ func checkRequestAuthTypeWithVID(ctx context.Context, r *http.Request, action po func authenticateRequest(ctx context.Context, r *http.Request, action policy.Action) (s3Err APIErrorCode) { if logger.GetReqInfo(ctx) == nil { - logger.LogIf(ctx, errors.New("unexpected context.Context does not have a logger.ReqInfo"), logger.ErrorKind) + bugLogIf(ctx, errors.New("unexpected context.Context does not have a logger.ReqInfo"), logger.ErrorKind) return ErrAccessDenied } var cred auth.Credentials var owner bool switch getRequestAuthType(r) { - case authTypeUnknown, authTypeStreamingSigned: + case authTypeUnknown, authTypeStreamingSigned, authTypeStreamingSignedTrailer, authTypeStreamingUnsignedTrailer: return ErrSignatureVersionNotSupported case authTypePresignedV2, authTypeSignedV2: if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone { @@ -368,7 +371,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act } cred, owner, s3Err = getReqAccessKeyV2(r) case authTypeSigned, authTypePresigned: - region := globalSite.Region + region := globalSite.Region() switch action { case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction: region = "" @@ -384,7 +387,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act logger.GetReqInfo(ctx).Cred = cred logger.GetReqInfo(ctx).Owner = owner - logger.GetReqInfo(ctx).Region = globalSite.Region + logger.GetReqInfo(ctx).Region = globalSite.Region() // region is valid only for CreateBucketAction. var region string @@ -392,7 +395,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act // To extract region from XML in request body, get copy of request body. 
payload, err := io.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize)) if err != nil { - logger.LogIf(ctx, err, logger.ErrorKind) + authZLogIf(ctx, err, logger.ErrorKind) return ErrMalformedXML } @@ -671,32 +674,6 @@ func setAuthMiddleware(h http.Handler) http.Handler { }) } -func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, APIErrorCode) { - var cred auth.Credentials - var owner bool - var s3Err APIErrorCode - switch atype { - case authTypeUnknown, authTypeStreamingSigned: - return cred, owner, ErrSignatureVersionNotSupported - case authTypeSignedV2, authTypePresignedV2: - if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone { - return cred, owner, s3Err - } - cred, owner, s3Err = getReqAccessKeyV2(r) - case authTypePresigned, authTypeSigned: - region := globalSite.Region - if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone { - return cred, owner, s3Err - } - cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3) - } - if s3Err != ErrNone { - return cred, owner, s3Err - } - - return cred, owner, ErrNone -} - func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate time.Time, retMode objectlock.RetMode, byPassSet bool, r *http.Request, cred auth.Credentials, owner bool) (s3Err APIErrorCode) { var retSet bool if cred.AccessKey == "" { @@ -745,14 +722,20 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectName string, r *http.Request, action policy.Action) (s3Err APIErrorCode) { var cred auth.Credentials var owner bool - region := globalSite.Region + region := globalSite.Region() switch atype { case authTypeUnknown: return ErrSignatureVersionNotSupported case authTypeSignedV2, authTypePresignedV2: cred, owner, s3Err = getReqAccessKeyV2(r) - case authTypeStreamingSigned, authTypePresigned, authTypeSigned, authTypeStreamingSignedTrailer, authTypeStreamingUnsignedTrailer: + case authTypeStreamingSigned, authTypePresigned, authTypeSigned, authTypeStreamingSignedTrailer: + cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3) + case authTypeStreamingUnsignedTrailer: cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3) + if s3Err == ErrMissingFields { + // Could be anonymous. cred + owner is zero value. + s3Err = ErrNone + } } if s3Err != ErrNone { return s3Err diff --git a/cmd/auth-handler_test.go b/cmd/auth-handler_test.go index 3965be7ac7f1a..47f12add68325 100644 --- a/cmd/auth-handler_test.go +++ b/cmd/auth-handler_test.go @@ -28,7 +28,7 @@ import ( "time" "github.com/minio/minio/internal/auth" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) type nullReader struct{} @@ -403,7 +403,7 @@ func TestIsReqAuthenticated(t *testing.T) { // Validates all testcases. 
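Several hunks above replace reads of the `globalSite.Region` field with calls to a `globalSite.Region()` accessor. A plausible reason for such a refactor is to guard the value behind a lock so it can be refreshed at runtime; the sketch below shows that field-to-accessor pattern with an illustrative `Site` type, not MinIO's actual definition:

package config

import "sync"

type Site struct {
	mu     sync.RWMutex
	region string
}

// Update replaces the configured region under the write lock.
func (s *Site) Update(region string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.region = region
}

// Region returns the configured region; going through a method instead of a
// raw field keeps call sites race-free if the value can change at runtime.
func (s *Site) Region() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.region
}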
for i, testCase := range testCases { - s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region, serviceS3) + s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region(), serviceS3) if s3Error != testCase.s3Error { if _, err := io.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error { t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code) @@ -413,7 +413,7 @@ func TestIsReqAuthenticated(t *testing.T) { } func TestCheckAdminRequestAuthType(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() objLayer, fsDir, err := prepareFS(ctx) @@ -443,14 +443,14 @@ func TestCheckAdminRequestAuthType(t *testing.T) { {Request: mustNewPresignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied}, } for i, testCase := range testCases { - if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region); s3Error != testCase.ErrCode { + if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region()); s3Error != testCase.ErrCode { t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error) } } } func TestValidateAdminSignature(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() objLayer, fsDir, err := prepareFS(ctx) diff --git a/cmd/background-heal-ops.go b/cmd/background-heal-ops.go index f4affaed987cc..3eeff5098be77 100644 --- a/cmd/background-heal-ops.go +++ b/cmd/background-heal-ops.go @@ -25,8 +25,7 @@ import ( "time" "github.com/minio/madmin-go/v3" - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) // healTask represents what to heal along with options @@ -101,16 +100,17 @@ func waitForLowHTTPReq() { } func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) { + bgSeq := newBgHealSequence() // Run the background healer - for i := 0; i < globalBackgroundHealRoutine.workers; i++ { - go globalBackgroundHealRoutine.AddWorker(ctx, objAPI) + for range globalBackgroundHealRoutine.workers { + go globalBackgroundHealRoutine.AddWorker(ctx, objAPI, bgSeq) } - globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence(), objAPI) + globalBackgroundHealState.LaunchNewHealSequence(bgSeq, objAPI) } // Wait for heal requests and process them -func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer) { +func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer, bgSeq *healSequence) { for { select { case task, ok := <-h.tasks: @@ -135,8 +135,18 @@ func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer) { if task.respCh != nil { task.respCh <- healResult{result: res, err: err} + continue } + // when respCh is not set caller is not waiting but we + // update the relevant metrics for them + if bgSeq != nil { + if err == nil { + bgSeq.countHealed(res.Type) + } else { + bgSeq.countFailed(res.Type) + } + } case <-ctx.Done(): return } @@ -148,7 +158,7 @@ func newHealRoutine() *healRoutine { if envHealWorkers := env.Get("_MINIO_HEAL_WORKERS", ""); envHealWorkers != "" { if numHealers, err := strconv.Atoi(envHealWorkers); err != nil { - logger.LogIf(context.Background(), fmt.Errorf("invalid _MINIO_HEAL_WORKERS value: %w", err)) + bugLogIf(context.Background(), 
fmt.Errorf("invalid _MINIO_HEAL_WORKERS value: %w", err)) } else { workers = numHealers } diff --git a/cmd/background-newdisks-heal-ops.go b/cmd/background-newdisks-heal-ops.go index 4c96bc7f13763..330bace419773 100644 --- a/cmd/background-newdisks-heal-ops.go +++ b/cmd/background-newdisks-heal-ops.go @@ -24,6 +24,7 @@ import ( "fmt" "io" "os" + "slices" "sort" "strings" "sync" @@ -33,8 +34,7 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7/pkg/set" "github.com/minio/minio/internal/config" - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) const ( @@ -73,10 +73,12 @@ type healingTracker struct { // Numbers when current bucket started healing, // for resuming with correct numbers. - ResumeItemsHealed uint64 `json:"-"` - ResumeItemsFailed uint64 `json:"-"` - ResumeBytesDone uint64 `json:"-"` - ResumeBytesFailed uint64 `json:"-"` + ResumeItemsHealed uint64 `json:"-"` + ResumeItemsFailed uint64 `json:"-"` + ResumeItemsSkipped uint64 `json:"-"` + ResumeBytesDone uint64 `json:"-"` + ResumeBytesFailed uint64 `json:"-"` + ResumeBytesSkipped uint64 `json:"-"` // Filled on startup/restarts. QueuedBuckets []string @@ -89,6 +91,11 @@ type healingTracker struct { ItemsSkipped uint64 BytesSkipped uint64 + + RetryAttempts uint64 + + Finished bool // finished healing, whether with errors or not + // Add future tracking capabilities // Be sure that they are included in toHealingDisk } @@ -142,14 +149,34 @@ func initHealingTracker(disk StorageAPI, healID string) *healingTracker { return h } -func (h healingTracker) getLastUpdate() time.Time { +func (h *healingTracker) resetHealing() { + h.mu.Lock() + defer h.mu.Unlock() + + h.ItemsHealed = 0 + h.ItemsFailed = 0 + h.BytesDone = 0 + h.BytesFailed = 0 + h.ResumeItemsHealed = 0 + h.ResumeItemsFailed = 0 + h.ResumeBytesDone = 0 + h.ResumeBytesFailed = 0 + h.ItemsSkipped = 0 + h.BytesSkipped = 0 + + h.HealedBuckets = nil + h.Object = "" + h.Bucket = "" +} + +func (h *healingTracker) getLastUpdate() time.Time { h.mu.RLock() defer h.mu.RUnlock() return h.LastUpdate } -func (h healingTracker) getBucket() string { +func (h *healingTracker) getBucket() string { h.mu.RLock() defer h.mu.RUnlock() @@ -163,7 +190,7 @@ func (h *healingTracker) setBucket(bucket string) { h.Bucket = bucket } -func (h healingTracker) getObject() string { +func (h *healingTracker) getObject() string { h.mu.RLock() defer h.mu.RUnlock() @@ -197,9 +224,6 @@ func (h *healingTracker) updateProgress(success, skipped bool, bytes uint64) { // update will update the tracker on the disk. // If the tracker has been deleted an error is returned. func (h *healingTracker) update(ctx context.Context) error { - if h.disk.Healing() == nil { - return fmt.Errorf("healingTracker: drive %q is not marked as healing", h.ID) - } h.mu.Lock() if h.ID == "" || h.PoolIndex < 0 || h.SetIndex < 0 || h.DiskIndex < 0 { h.ID, _ = h.disk.GetDiskID() @@ -246,12 +270,7 @@ func (h *healingTracker) delete(ctx context.Context) error { func (h *healingTracker) isHealed(bucket string) bool { h.mu.RLock() defer h.mu.RUnlock() - for _, v := range h.HealedBuckets { - if v == bucket { - return true - } - } - return false + return slices.Contains(h.HealedBuckets, bucket) } // resume will reset progress to the numbers at the start of the bucket. 
@@ -261,8 +280,10 @@ func (h *healingTracker) resume() { h.ItemsHealed = h.ResumeItemsHealed h.ItemsFailed = h.ResumeItemsFailed + h.ItemsSkipped = h.ResumeItemsSkipped h.BytesDone = h.ResumeBytesDone h.BytesFailed = h.ResumeBytesFailed + h.BytesSkipped = h.ResumeBytesSkipped } // bucketDone should be called when a bucket is done healing. @@ -273,8 +294,10 @@ func (h *healingTracker) bucketDone(bucket string) { h.ResumeItemsHealed = h.ItemsHealed h.ResumeItemsFailed = h.ItemsFailed + h.ResumeItemsSkipped = h.ItemsSkipped h.ResumeBytesDone = h.BytesDone h.ResumeBytesFailed = h.BytesFailed + h.ResumeBytesSkipped = h.BytesSkipped h.HealedBuckets = append(h.HealedBuckets, bucket) for i, b := range h.QueuedBuckets { if b == bucket { @@ -323,6 +346,7 @@ func (h *healingTracker) toHealingDisk() madmin.HealingDisk { PoolIndex: h.PoolIndex, SetIndex: h.SetIndex, DiskIndex: h.DiskIndex, + Finished: h.Finished, Path: h.Path, Started: h.Started.UTC(), LastUpdate: h.LastUpdate.UTC(), @@ -338,6 +362,7 @@ func (h *healingTracker) toHealingDisk() madmin.HealingDisk { Object: h.Object, QueuedBuckets: h.QueuedBuckets, HealedBuckets: h.HealedBuckets, + RetryAttempts: h.RetryAttempts, ObjectsHealed: h.ItemsHealed, // Deprecated July 2021 ObjectsFailed: h.ItemsFailed, // Deprecated July 2021 @@ -352,24 +377,26 @@ func initAutoHeal(ctx context.Context, objAPI ObjectLayer) { } initBackgroundHealing(ctx, objAPI) // start quick background healing - - if env.Get("_MINIO_AUTO_DRIVE_HEALING", config.EnableOn) == config.EnableOn || env.Get("_MINIO_AUTO_DISK_HEALING", config.EnableOn) == config.EnableOn { + if env.Get("_MINIO_AUTO_DRIVE_HEALING", config.EnableOn) == config.EnableOn { globalBackgroundHealState.pushHealLocalDisks(getLocalDisksToHeal()...) go monitorLocalDisksAndHeal(ctx, z) } + + go globalMRFState.startMRFPersistence() + go globalMRFState.healRoutine(z) } func getLocalDisksToHeal() (disksToHeal Endpoints) { globalLocalDrivesMu.RLock() - localDrives := cloneDrives(globalLocalDrives) + localDrives := cloneDrives(globalLocalDrivesMap) globalLocalDrivesMu.RUnlock() for _, disk := range localDrives { - _, err := disk.GetDiskID() + _, err := disk.DiskInfo(context.Background(), DiskInfoOptions{}) if errors.Is(err, errUnformattedDisk) { disksToHeal = append(disksToHeal, disk.Endpoint()) continue } - if disk.Healing() != nil { + if h := disk.Healing(); h != nil && !h.Finished { disksToHeal = append(disksToHeal, disk.Endpoint()) } } @@ -383,6 +410,8 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) { var newDiskHealingTimeout = newDynamicTimeout(30*time.Second, 10*time.Second) +var errRetryHealing = errors.New("some items failed to heal, we will retry healing this drive again") + func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint) error { poolIdx, setIdx := endpoint.PoolIdx, endpoint.SetIdx disk := getStorageViaEndpoint(endpoint) @@ -390,6 +419,17 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint return fmt.Errorf("Unexpected error disk must be initialized by now after formatting: %s", endpoint) } + _, err := disk.DiskInfo(ctx, DiskInfoOptions{}) + if err != nil { + if errors.Is(err, errDriveIsRoot) { + // This is a root drive, ignore and move on + return nil + } + if !errors.Is(err, errUnformattedDisk) { + return err + } + } + // Prevent parallel erasure set healing locker := z.NewNSLock(minioMetaBucket, fmt.Sprintf("new-drive-healing/%d/%d", poolIdx, setIdx)) lkctx, err := locker.GetLock(ctx, newDiskHealingTimeout) @@ -409,11 +449,11 @@ func 
healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint if errors.Is(err, errFileNotFound) { return nil } - logger.LogIf(ctx, fmt.Errorf("Unable to load healing tracker on '%s': %w, re-initializing..", disk, err)) + healingLogIf(ctx, fmt.Errorf("Unable to load healing tracker on '%s': %w, re-initializing..", disk, err)) tracker = initHealingTracker(disk, mustGetUUID()) } - logger.Event(ctx, "Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", endpoint) + healingLogEvent(ctx, "Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", endpoint) buckets, _ := z.ListBuckets(ctx, BucketOptions{}) // Buckets data are dispersed in multiple pools/sets, make @@ -452,19 +492,37 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint return err } - logger.Event(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed) + // if objects have failed healing, we attempt a retry to heal the drive upto 3 times before giving up. + if tracker.ItemsFailed > 0 && tracker.RetryAttempts < 4 { + tracker.RetryAttempts++ + + healingLogEvent(ctx, "Healing of drive '%s' is incomplete, retrying %s time (healed: %d, skipped: %d, failed: %d).", disk, + humanize.Ordinal(int(tracker.RetryAttempts)), tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed) - if len(tracker.QueuedBuckets) > 0 { - return fmt.Errorf("not all buckets were healed: %v", tracker.QueuedBuckets) + tracker.resetHealing() + bugLogIf(ctx, tracker.update(ctx)) + + return errRetryHealing } + if tracker.ItemsFailed > 0 { + healingLogEvent(ctx, "Healing of drive '%s' is incomplete, retried %d times (healed: %d, skipped: %d, failed: %d).", disk, + tracker.RetryAttempts, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed) + } else { + if tracker.RetryAttempts > 0 { + healingLogEvent(ctx, "Healing of drive '%s' is complete, retried %d times (healed: %d, skipped: %d).", disk, + tracker.RetryAttempts-1, tracker.ItemsHealed, tracker.ItemsSkipped) + } else { + healingLogEvent(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped) + } + } if serverDebugLog { tracker.printTo(os.Stdout) fmt.Printf("\n") } if tracker.HealID == "" { // HealID was empty only before Feb 2023 - logger.LogIf(ctx, tracker.delete(ctx)) + bugLogIf(ctx, tracker.delete(ctx)) return nil } @@ -482,12 +540,13 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint t, err := loadHealingTracker(ctx, disk) if err != nil { if !errors.Is(err, errFileNotFound) { - logger.LogIf(ctx, err) + healingLogIf(ctx, err) } continue } if t.HealID == tracker.HealID { - t.delete(ctx) + t.Finished = true + t.update(ctx) } } @@ -517,7 +576,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools) { // Reformat disks immediately _, err := z.HealFormat(context.Background(), false) if err != nil && !errors.Is(err, errNoHealRequired) { - logger.LogIf(ctx, err) + healingLogIf(ctx, err) // Reset for next interval. 
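The drive-healing hunks above add a bounded retry: when a pass leaves failed items and `RetryAttempts` is still below the limit, the tracker counters are reset and the sentinel `errRetryHealing` tells the caller to run the drive again. A simplified sketch of that retry-with-sentinel pattern, with a stand-in tracker type rather than MinIO's `healingTracker`:

package main

import (
	"errors"
	"fmt"
)

var errRetryHealing = errors.New("some items failed to heal, retrying")

type tracker struct {
	ItemsHealed   int
	ItemsFailed   int
	RetryAttempts int
}

func (t *tracker) reset() { t.ItemsHealed, t.ItemsFailed = 0, 0 }

// healOnce simulates one healing pass; failures is how many items still fail.
func healOnce(t *tracker, failures int) error {
	t.ItemsHealed += 10
	t.ItemsFailed = failures
	if t.ItemsFailed > 0 && t.RetryAttempts < 4 {
		t.RetryAttempts++
		t.reset()
		return errRetryHealing
	}
	return nil
}

func main() {
	t := &tracker{}
	remaining := 3
	for {
		if err := healOnce(t, remaining); !errors.Is(err, errRetryHealing) {
			break
		}
		remaining-- // each retry heals a few more of the previously failed items
	}
	fmt.Printf("done after %d retries, failed=%d\n", t.RetryAttempts, t.ItemsFailed)
}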
diskCheckTimer.Reset(defaultMonitorNewDiskInterval) continue @@ -529,7 +588,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools) { if err := healFreshDisk(ctx, z, disk); err != nil { globalBackgroundHealState.setDiskHealingStatus(disk, false) timedout := OperationTimedOut{} - if !errors.Is(err, context.Canceled) && !errors.As(err, &timedout) { + if !errors.Is(err, context.Canceled) && !errors.As(err, &timedout) && !errors.Is(err, errRetryHealing) { printEndpointError(disk, err, false) } return diff --git a/cmd/background-newdisks-heal-ops_gen.go b/cmd/background-newdisks-heal-ops_gen.go index 2cb5078132e9c..7c3eb6b8b9012 100644 --- a/cmd/background-newdisks-heal-ops_gen.go +++ b/cmd/background-newdisks-heal-ops_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) @@ -132,6 +132,12 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "ResumeItemsFailed") return } + case "ResumeItemsSkipped": + z.ResumeItemsSkipped, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "ResumeItemsSkipped") + return + } case "ResumeBytesDone": z.ResumeBytesDone, err = dc.ReadUint64() if err != nil { @@ -144,6 +150,12 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "ResumeBytesFailed") return } + case "ResumeBytesSkipped": + z.ResumeBytesSkipped, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "ResumeBytesSkipped") + return + } case "QueuedBuckets": var zb0002 uint32 zb0002, err = dc.ReadArrayHeader() @@ -200,6 +212,18 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "BytesSkipped") return } + case "RetryAttempts": + z.RetryAttempts, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "RetryAttempts") + return + } + case "Finished": + z.Finished, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "Finished") + return + } default: err = dc.Skip() if err != nil { @@ -213,9 +237,9 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 25 + // map header, size 29 // write "ID" - err = en.Append(0xde, 0x0, 0x19, 0xa2, 0x49, 0x44) + err = en.Append(0xde, 0x0, 0x1d, 0xa2, 0x49, 0x44) if err != nil { return } @@ -394,6 +418,16 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "ResumeItemsFailed") return } + // write "ResumeItemsSkipped" + err = en.Append(0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64) + if err != nil { + return + } + err = en.WriteUint64(z.ResumeItemsSkipped) + if err != nil { + err = msgp.WrapError(err, "ResumeItemsSkipped") + return + } // write "ResumeBytesDone" err = en.Append(0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65) if err != nil { @@ -414,6 +448,16 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "ResumeBytesFailed") return } + // write "ResumeBytesSkipped" + err = en.Append(0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64) + if err != nil { + return + } + err = en.WriteUint64(z.ResumeBytesSkipped) + if err != nil { + err = msgp.WrapError(err, "ResumeBytesSkipped") 
+ return + } // write "QueuedBuckets" err = en.Append(0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73) if err != nil { @@ -478,15 +522,35 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "BytesSkipped") return } + // write "RetryAttempts" + err = en.Append(0xad, 0x52, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteUint64(z.RetryAttempts) + if err != nil { + err = msgp.WrapError(err, "RetryAttempts") + return + } + // write "Finished" + err = en.Append(0xa8, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64) + if err != nil { + return + } + err = en.WriteBool(z.Finished) + if err != nil { + err = msgp.WrapError(err, "Finished") + return + } return } // MarshalMsg implements msgp.Marshaler func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 25 + // map header, size 29 // string "ID" - o = append(o, 0xde, 0x0, 0x19, 0xa2, 0x49, 0x44) + o = append(o, 0xde, 0x0, 0x1d, 0xa2, 0x49, 0x44) o = msgp.AppendString(o, z.ID) // string "PoolIndex" o = append(o, 0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78) @@ -539,12 +603,18 @@ func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) { // string "ResumeItemsFailed" o = append(o, 0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64) o = msgp.AppendUint64(o, z.ResumeItemsFailed) + // string "ResumeItemsSkipped" + o = append(o, 0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64) + o = msgp.AppendUint64(o, z.ResumeItemsSkipped) // string "ResumeBytesDone" o = append(o, 0xaf, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x6e, 0x65) o = msgp.AppendUint64(o, z.ResumeBytesDone) // string "ResumeBytesFailed" o = append(o, 0xb1, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x46, 0x61, 0x69, 0x6c, 0x65, 0x64) o = msgp.AppendUint64(o, z.ResumeBytesFailed) + // string "ResumeBytesSkipped" + o = append(o, 0xb2, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64) + o = msgp.AppendUint64(o, z.ResumeBytesSkipped) // string "QueuedBuckets" o = append(o, 0xad, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73) o = msgp.AppendArrayHeader(o, uint32(len(z.QueuedBuckets))) @@ -566,6 +636,12 @@ func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) { // string "BytesSkipped" o = append(o, 0xac, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64) o = msgp.AppendUint64(o, z.BytesSkipped) + // string "RetryAttempts" + o = append(o, 0xad, 0x52, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73) + o = msgp.AppendUint64(o, z.RetryAttempts) + // string "Finished" + o = append(o, 0xa8, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64) + o = msgp.AppendBool(o, z.Finished) return } @@ -695,6 +771,12 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "ResumeItemsFailed") return } + case "ResumeItemsSkipped": + z.ResumeItemsSkipped, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ResumeItemsSkipped") + return + } case "ResumeBytesDone": z.ResumeBytesDone, bts, err = msgp.ReadUint64Bytes(bts) if err != nil { @@ -707,6 +789,12 @@ func (z *healingTracker) 
UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "ResumeBytesFailed") return } + case "ResumeBytesSkipped": + z.ResumeBytesSkipped, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ResumeBytesSkipped") + return + } case "QueuedBuckets": var zb0002 uint32 zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) @@ -763,6 +851,18 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "BytesSkipped") return } + case "RetryAttempts": + z.RetryAttempts, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "RetryAttempts") + return + } + case "Finished": + z.Finished, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Finished") + return + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -777,7 +877,7 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *healingTracker) Msgsize() (s int) { - s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 10 + msgp.IntSize + 9 + msgp.IntSize + 10 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.Path) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 8 + msgp.TimeSize + 11 + msgp.TimeSize + 18 + msgp.Uint64Size + 17 + msgp.Uint64Size + 12 + msgp.Uint64Size + 12 + msgp.Uint64Size + 10 + msgp.Uint64Size + 12 + msgp.Uint64Size + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object) + 18 + msgp.Uint64Size + 18 + msgp.Uint64Size + 16 + msgp.Uint64Size + 18 + msgp.Uint64Size + 14 + msgp.ArrayHeaderSize + s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 10 + msgp.IntSize + 9 + msgp.IntSize + 10 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.Path) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 8 + msgp.TimeSize + 11 + msgp.TimeSize + 18 + msgp.Uint64Size + 17 + msgp.Uint64Size + 12 + msgp.Uint64Size + 12 + msgp.Uint64Size + 10 + msgp.Uint64Size + 12 + msgp.Uint64Size + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object) + 18 + msgp.Uint64Size + 18 + msgp.Uint64Size + 19 + msgp.Uint64Size + 16 + msgp.Uint64Size + 18 + msgp.Uint64Size + 19 + msgp.Uint64Size + 14 + msgp.ArrayHeaderSize for za0001 := range z.QueuedBuckets { s += msgp.StringPrefixSize + len(z.QueuedBuckets[za0001]) } @@ -785,6 +885,6 @@ func (z *healingTracker) Msgsize() (s int) { for za0002 := range z.HealedBuckets { s += msgp.StringPrefixSize + len(z.HealedBuckets[za0002]) } - s += 7 + msgp.StringPrefixSize + len(z.HealID) + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size + s += 7 + msgp.StringPrefixSize + len(z.HealID) + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size + 14 + msgp.Uint64Size + 9 + msgp.BoolSize return } diff --git a/cmd/background-newdisks-heal-ops_gen_test.go b/cmd/background-newdisks-heal-ops_gen_test.go index 177aa91ab4fb2..36e9eb71d0a04 100644 --- a/cmd/background-newdisks-heal-ops_gen_test.go +++ b/cmd/background-newdisks-heal-ops_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
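The byte-level edits in the generated msgp code above amount to the MessagePack map header growing from 25 to 29 entries after four fields were added (ResumeItemsSkipped, ResumeBytesSkipped, RetryAttempts, Finished): 0xde is the map16 marker followed by a big-endian 16-bit count, so 0x00 0x19 is 25 and 0x00 0x1d is 29. A small sketch that decodes those headers:

package main

import (
	"encoding/binary"
	"fmt"
)

// mapLen returns the entry count encoded in a msgpack map16 header.
func mapLen(header []byte) (uint16, error) {
	if len(header) < 3 || header[0] != 0xde { // 0xde == map16 marker
		return 0, fmt.Errorf("not a map16 header")
	}
	return binary.BigEndian.Uint16(header[1:3]), nil
}

func main() {
	old, _ := mapLen([]byte{0xde, 0x00, 0x19})
	cur, _ := mapLen([]byte{0xde, 0x00, 0x1d})
	fmt.Println(old, cur) // 25 29
}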
+package cmd + import ( "bytes" "testing" diff --git a/cmd/batch-expire.go b/cmd/batch-expire.go index b454f47440b04..fab592664a5d7 100644 --- a/cmd/batch-expire.go +++ b/cmd/batch-expire.go @@ -33,10 +33,10 @@ import ( "github.com/minio/minio/internal/bucket/versioning" xhttp "github.com/minio/minio/internal/http" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/env" - "github.com/minio/pkg/v2/wildcard" - "github.com/minio/pkg/v2/workers" + "github.com/minio/pkg/v3/env" + "github.com/minio/pkg/v3/wildcard" + "github.com/minio/pkg/v3/workers" + "github.com/minio/pkg/v3/xtime" "gopkg.in/yaml.v3" ) @@ -117,7 +117,7 @@ func (p BatchJobExpirePurge) Validate() error { // BatchJobExpireFilter holds all the filters currently supported for batch replication type BatchJobExpireFilter struct { line, col int - OlderThan time.Duration `yaml:"olderThan,omitempty" json:"olderThan"` + OlderThan xtime.Duration `yaml:"olderThan,omitempty" json:"olderThan"` CreatedBefore *time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"` Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"` Metadata []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"` @@ -156,14 +156,14 @@ func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool { } default: // we should never come here, Validate should have caught this. - logger.LogOnceIf(context.Background(), fmt.Errorf("invalid filter type: %s", ef.Type), ef.Type) + batchLogOnceIf(context.Background(), fmt.Errorf("invalid filter type: %s", ef.Type), ef.Type) return false } if len(ef.Name) > 0 && !wildcard.Match(ef.Name, obj.Name) { return false } - if ef.OlderThan > 0 && now.Sub(obj.ModTime) <= ef.OlderThan { + if ef.OlderThan > 0 && now.Sub(obj.ModTime) <= ef.OlderThan.D() { return false } @@ -195,8 +195,8 @@ func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool { return false } } - } + if len(ef.Metadata) > 0 && !obj.DeleteMarker { for _, kv := range ef.Metadata { // Object (version) must match all x-amz-meta and @@ -281,7 +281,7 @@ type BatchJobExpire struct { line, col int APIVersion string `yaml:"apiVersion" json:"apiVersion"` Bucket string `yaml:"bucket" json:"bucket"` - Prefix string `yaml:"prefix" json:"prefix"` + Prefix BatchJobPrefix `yaml:"prefix" json:"prefix"` NotificationCfg BatchJobNotification `yaml:"notify" json:"notify"` Retry BatchJobRetry `yaml:"retry" json:"retry"` Rules []BatchJobExpireFilter `yaml:"rules" json:"rules"` @@ -289,6 +289,16 @@ type BatchJobExpire struct { var _ yaml.Unmarshaler = &BatchJobExpire{} +// RedactSensitive will redact any sensitive information in b. +func (r *BatchJobExpire) RedactSensitive() { + if r == nil { + return + } + if r.NotificationCfg.Token != "" { + r.NotificationCfg.Token = redactedText + } +} + // UnmarshalYAML - BatchJobExpire extends default unmarshal to extract line, col information. 
func (r *BatchJobExpire) UnmarshalYAML(val *yaml.Node) error { type expireJob BatchJobExpire @@ -321,7 +331,7 @@ func (r BatchJobExpire) Notify(ctx context.Context, body io.Reader) error { req.Header.Set("Authorization", r.NotificationCfg.Token) } - clnt := http.Client{Transport: getRemoteInstanceTransport} + clnt := http.Client{Transport: getRemoteInstanceTransport()} resp, err := clnt.Do(req) if err != nil { return err @@ -341,8 +351,24 @@ func (r *BatchJobExpire) Expire(ctx context.Context, api ObjectLayer, vc *versio PrefixEnabledFn: vc.PrefixEnabled, VersionSuspended: vc.Suspended(), } - _, errs := api.DeleteObjects(ctx, r.Bucket, objsToDel, opts) - return errs + + allErrs := make([]error, 0, len(objsToDel)) + + for { + count := len(objsToDel) + if count == 0 { + break + } + if count > maxDeleteList { + count = maxDeleteList + } + _, errs := api.DeleteObjects(ctx, r.Bucket, objsToDel[:count], opts) + allErrs = append(allErrs, errs...) + // Next batch of deletion + objsToDel = objsToDel[count:] + } + + return allErrs } const ( @@ -372,9 +398,12 @@ func (oiCache objInfoCache) Get(toDel ObjectToDelete) (*ObjectInfo, bool) { func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo, job BatchJobRequest, api ObjectLayer, wk *workers.Workers, expireCh <-chan []expireObjInfo) { vc, _ := globalBucketVersioningSys.Get(r.Bucket) - retryAttempts := r.Retry.Attempts + retryAttempts := job.Expire.Retry.Attempts + if retryAttempts <= 0 { + retryAttempts = batchExpireJobDefaultRetries + } delay := job.Expire.Retry.Delay - if delay == 0 { + if delay <= 0 { delay = batchExpireJobDefaultRetryDelay } @@ -395,12 +424,12 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo go func(toExpire []expireObjInfo) { defer wk.Give() - toExpireAll := make([]ObjectInfo, 0, len(toExpire)) + toExpireAll := make([]expireObjInfo, 0, len(toExpire)) toDel := make([]ObjectToDelete, 0, len(toExpire)) oiCache := newObjInfoCache() for _, exp := range toExpire { if exp.ExpireAll { - toExpireAll = append(toExpireAll, exp.ObjectInfo) + toExpireAll = append(toExpireAll, exp) continue } // Cache ObjectInfo value via pointers for @@ -416,37 +445,31 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo oiCache.Add(od, &exp.ObjectInfo) } - var done bool // DeleteObject(deletePrefix: true) to expire all versions of an object for _, exp := range toExpireAll { var success bool for attempts := 1; attempts <= retryAttempts; attempts++ { select { case <-ctx.Done(): - done = true + ri.trackMultipleObjectVersions(exp, success) + return default: } stopFn := globalBatchJobsMetrics.trace(batchJobMetricExpire, ri.JobID, attempts) - _, err := api.DeleteObject(ctx, exp.Bucket, exp.Name, ObjectOptions{ - DeletePrefix: true, + _, err := api.DeleteObject(ctx, exp.Bucket, encodeDirObject(exp.Name), ObjectOptions{ + DeletePrefix: true, + DeletePrefixObject: true, // use prefix delete on exact object (this is an optimization to avoid fan-out calls) }) if err != nil { stopFn(exp, err) - logger.LogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", toExpire[i].Bucket, toExpire[i].Name, toExpire[i].VersionID, err, attempts)) + batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s due to %v (attempts=%d)", exp.Bucket, exp.Name, err, attempts)) } else { stopFn(exp, err) success = true break } } - ri.trackMultipleObjectVersions(r.Bucket, exp, success) - if done { - break - } - } - - if done { - return + ri.trackMultipleObjectVersions(exp, success) } 
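The reworked `BatchJobExpire.Expire` above no longer passes the whole object list to a single `DeleteObjects` call; it slices the list into chunks of at most `maxDeleteList` and appends the per-object errors in order. A generic sketch of that chunking (the real `maxDeleteList` value is defined elsewhere in the codebase; 1000 here is only an illustrative stand-in):

package main

import "fmt"

const maxDeleteList = 1000 // illustrative; not the value from this diff

func deleteInBatches(keys []string, deleteBatch func([]string) []error) []error {
	allErrs := make([]error, 0, len(keys))
	for len(keys) > 0 {
		n := min(len(keys), maxDeleteList)
		allErrs = append(allErrs, deleteBatch(keys[:n])...)
		keys = keys[n:] // next batch
	}
	return allErrs
}

func main() {
	keys := make([]string, 2500)
	calls := 0
	errs := deleteInBatches(keys, func(batch []string) []error {
		calls++
		return make([]error, len(batch))
	})
	fmt.Println(calls, len(errs)) // 3 2500
}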
// DeleteMultiple objects @@ -464,25 +487,25 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo copy(toDelCopy, toDel) var failed int errs := r.Expire(ctx, api, vc, toDel) - // reslice toDel in preparation for next retry - // attempt + // reslice toDel in preparation for next retry attempt toDel = toDel[:0] for i, err := range errs { if err != nil { stopFn(toDelCopy[i], err) - logger.LogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID, err, attempts)) + batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID, + err, attempts)) failed++ - if attempts == retryAttempts { // all retry attempts failed, record failure - if oi, ok := oiCache.Get(toDelCopy[i]); ok { - ri.trackCurrentBucketObject(r.Bucket, *oi, false) - } - } else { + if oi, ok := oiCache.Get(toDelCopy[i]); ok { + ri.trackCurrentBucketObject(r.Bucket, *oi, false, attempts) + } + if attempts != retryAttempts { + // retry toDel = append(toDel, toDelCopy[i]) } } else { stopFn(toDelCopy[i], nil) if oi, ok := oiCache.Get(toDelCopy[i]); ok { - ri.trackCurrentBucketObject(r.Bucket, *oi, true) + ri.trackCurrentBucketObject(r.Bucket, *oi, true, attempts) } } } @@ -504,7 +527,8 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo type expireObjInfo struct { ObjectInfo - ExpireAll bool + ExpireAll bool + DeleteMarkerCount int64 } // Start the batch expiration job, resumes if there was a pending job via "job.ID" @@ -514,7 +538,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo JobType: string(job.Type()), StartTime: job.Started, } - if err := ri.load(ctx, api, job); err != nil { + if err := ri.loadOrInit(ctx, api, job); err != nil { return err } @@ -534,40 +558,58 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo return err } - ctx, cancel := context.WithCancel(ctx) - defer cancel() + ctx, cancelCause := context.WithCancelCause(ctx) + defer cancelCause(nil) - results := make(chan ObjectInfo, workerSize) - if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{ - Marker: lastObject, - LatestOnly: false, // we need to visit all versions of the object to implement purge: retainVersions - VersionsSort: WalkVersionsSortDesc, - }); err != nil { - // Do not need to retry if we can't list objects on source. - return err - } + results := make(chan itemOrErr[ObjectInfo], workerSize) + go func() { + prefixes := r.Prefix.F() + if len(prefixes) == 0 { + prefixes = []string{""} + } + for _, prefix := range prefixes { + prefixResultCh := make(chan itemOrErr[ObjectInfo], workerSize) + err := api.Walk(ctx, r.Bucket, prefix, prefixResultCh, WalkOptions{ + Marker: lastObject, + LatestOnly: false, // we need to visit all versions of the object to implement purge: retainVersions + VersionsSort: WalkVersionsSortDesc, + }) + if err != nil { + cancelCause(err) + xioutil.SafeClose(results) + return + } + for result := range prefixResultCh { + results <- result + } + } + xioutil.SafeClose(results) + }() // Goroutine to periodically save batch-expire job's in-memory state saverQuitCh := make(chan struct{}) go func() { saveTicker := time.NewTicker(10 * time.Second) defer saveTicker.Stop() - for { + quit := false + after := time.Minute + for !quit { select { case <-saveTicker.C: - // persist in-memory state to disk after every 10secs. 
- logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) - case <-ctx.Done(): - // persist in-memory state immediately before exiting due to context cancellation. - logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job)) - return - + quit = true case <-saverQuitCh: - // persist in-memory state immediately to disk. - logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job)) - return + quit = true + } + + if quit { + // save immediately if we are quitting + after = 0 } + + ctx, cancel := context.WithTimeout(GlobalContext, 30*time.Second) // independent context + batchLogIf(ctx, ri.updateAfter(ctx, api, after, job)) + cancel() } }() @@ -583,69 +625,115 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo matchedFilter BatchJobExpireFilter versionsCount int toDel []expireObjInfo + failed bool + done bool ) - for result := range results { - // Apply filter to find the matching rule to apply expiry - // actions accordingly. - // nolint:gocritic - if result.IsLatest { - // send down filtered entries to be deleted using - // DeleteObjects method - if len(toDel) > 10 { // batch up to 10 objects/versions to be expired simultaneously. - xfer := make([]expireObjInfo, len(toDel)) - copy(xfer, toDel) - - var done bool - select { - case <-ctx.Done(): - done = true - case expireCh <- xfer: - toDel = toDel[:0] // resetting toDel - } - if done { - break - } + deleteMarkerCountMap := map[string]int64{} + pushToExpire := func() { + // set preObject deleteMarkerCount + if len(toDel) > 0 { + lastDelIndex := len(toDel) - 1 + lastDel := toDel[lastDelIndex] + if lastDel.ExpireAll { + toDel[lastDelIndex].DeleteMarkerCount = deleteMarkerCountMap[lastDel.Name] + // delete the key + delete(deleteMarkerCountMap, lastDel.Name) } - var match BatchJobExpireFilter - var found bool - for _, rule := range r.Rules { - if rule.Matches(result, now) { - match = rule - found = true - break - } + } + // send down filtered entries to be deleted using + // DeleteObjects method + if len(toDel) > 10 { // batch up to 10 objects/versions to be expired simultaneously. + xfer := make([]expireObjInfo, len(toDel)) + copy(xfer, toDel) + select { + case expireCh <- xfer: + toDel = toDel[:0] // resetting toDel + case <-ctx.Done(): + done = true + } + } + } + for { + select { + case result, ok := <-results: + if !ok { + done = true + break } - if !found { + if result.Err != nil { + failed = true + batchLogIf(ctx, result.Err) continue } + if result.Item.DeleteMarker { + deleteMarkerCountMap[result.Item.Name]++ + } + // Apply filter to find the matching rule to apply expiry + // actions accordingly. 
+ // nolint:gocritic + if result.Item.IsLatest { + var match BatchJobExpireFilter + var found bool + for _, rule := range r.Rules { + if rule.Matches(result.Item, now) { + match = rule + found = true + break + } + } + if !found { + continue + } + + if prevObj.Name != result.Item.Name { + // switch the object + pushToExpire() + } - prevObj = result - matchedFilter = match - versionsCount = 1 - // Include the latest version - if matchedFilter.Purge.RetainVersions == 0 { - toDel = append(toDel, expireObjInfo{ - ObjectInfo: result, - ExpireAll: true, - }) + prevObj = result.Item + matchedFilter = match + versionsCount = 1 + // Include the latest version + if matchedFilter.Purge.RetainVersions == 0 { + toDel = append(toDel, expireObjInfo{ + ObjectInfo: result.Item, + ExpireAll: true, + }) + continue + } + } else if prevObj.Name == result.Item.Name { + if matchedFilter.Purge.RetainVersions == 0 { + continue // including latest version in toDel suffices, skipping other versions + } + versionsCount++ + } else { + // switch the object + pushToExpire() + // a file switched with no LatestVersion, logging it + batchLogIf(ctx, fmt.Errorf("skipping object %s, no latest version found", result.Item.Name)) continue } - } else if prevObj.Name == result.Name { - if matchedFilter.Purge.RetainVersions == 0 { - continue // including latest version in toDel suffices, skipping other versions + + if versionsCount <= matchedFilter.Purge.RetainVersions { + continue // retain versions } - versionsCount++ - } else { - continue + toDel = append(toDel, expireObjInfo{ + ObjectInfo: result.Item, + }) + pushToExpire() + case <-ctx.Done(): + done = true } - - if versionsCount <= matchedFilter.Purge.RetainVersions { - continue // retain versions + if done { + break } - toDel = append(toDel, expireObjInfo{ - ObjectInfo: result, - }) } + + if context.Cause(ctx) != nil { + xioutil.SafeClose(expireCh) + return context.Cause(ctx) + } + pushToExpire() // Send any remaining objects downstream if len(toDel) > 0 { select { @@ -658,8 +746,8 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo <-expireDoneCh // waits for the expire goroutine to complete wk.Wait() // waits for all expire workers to retire - ri.Complete = ri.ObjectsFailed == 0 - ri.Failed = ri.ObjectsFailed > 0 + ri.Complete = !failed && ri.ObjectsFailed == 0 + ri.Failed = failed || ri.ObjectsFailed > 0 globalBatchJobsMetrics.save(job.ID, ri) // Close the saverQuitCh - this also triggers saving in-memory state @@ -669,7 +757,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo // Notify expire jobs final status to the configured endpoint buf, _ := json.Marshal(ri) if err := r.Notify(context.Background(), bytes.NewReader(buf)); err != nil { - logger.LogIf(context.Background(), fmt.Errorf("unable to notify %v", err)) + batchLogIf(context.Background(), fmt.Errorf("unable to notify %v", err)) } return nil diff --git a/cmd/batch-expire_gen.go b/cmd/batch-expire_gen.go index 12ce733a3c8f7..bb2021fca87ca 100644 --- a/cmd/batch-expire_gen.go +++ b/cmd/batch-expire_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package cmd + import ( "time" @@ -39,7 +39,7 @@ func (z *BatchJobExpire) DecodeMsg(dc *msgp.Reader) (err error) { return } case "Prefix": - z.Prefix, err = dc.ReadString() + err = z.Prefix.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Prefix") return @@ -114,7 +114,7 @@ func (z *BatchJobExpire) EncodeMsg(en *msgp.Writer) (err error) { if err != nil { return } - err = en.WriteString(z.Prefix) + err = z.Prefix.EncodeMsg(en) if err != nil { err = msgp.WrapError(err, "Prefix") return @@ -171,7 +171,11 @@ func (z *BatchJobExpire) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.AppendString(o, z.Bucket) // string "Prefix" o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78) - o = msgp.AppendString(o, z.Prefix) + o, err = z.Prefix.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Prefix") + return + } // string "NotificationCfg" o = append(o, 0xaf, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x66, 0x67) o, err = z.NotificationCfg.MarshalMsg(o) @@ -230,7 +234,7 @@ func (z *BatchJobExpire) UnmarshalMsg(bts []byte) (o []byte, err error) { return } case "Prefix": - z.Prefix, bts, err = msgp.ReadStringBytes(bts) + bts, err = z.Prefix.UnmarshalMsg(bts) if err != nil { err = msgp.WrapError(err, "Prefix") return @@ -280,7 +284,7 @@ func (z *BatchJobExpire) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BatchJobExpire) Msgsize() (s int) { - s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 16 + z.NotificationCfg.Msgsize() + 6 + z.Retry.Msgsize() + 6 + msgp.ArrayHeaderSize + s = 1 + 11 + msgp.StringPrefixSize + len(z.APIVersion) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + z.Prefix.Msgsize() + 16 + z.NotificationCfg.Msgsize() + 6 + z.Retry.Msgsize() + 6 + msgp.ArrayHeaderSize for za0001 := range z.Rules { s += z.Rules[za0001].Msgsize() } @@ -306,7 +310,7 @@ func (z *BatchJobExpireFilter) DecodeMsg(dc *msgp.Reader) (err error) { } switch msgp.UnsafeString(field) { case "OlderThan": - z.OlderThan, err = dc.ReadDuration() + err = z.OlderThan.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "OlderThan") return @@ -433,7 +437,7 @@ func (z *BatchJobExpireFilter) EncodeMsg(en *msgp.Writer) (err error) { if err != nil { return } - err = en.WriteDuration(z.OlderThan) + err = z.OlderThan.EncodeMsg(en) if err != nil { err = msgp.WrapError(err, "OlderThan") return @@ -544,7 +548,11 @@ func (z *BatchJobExpireFilter) MarshalMsg(b []byte) (o []byte, err error) { // map header, size 8 // string "OlderThan" o = append(o, 0x88, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e) - o = msgp.AppendDuration(o, z.OlderThan) + o, err = z.OlderThan.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "OlderThan") + return + } // string "CreatedBefore" o = append(o, 0xad, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65) if z.CreatedBefore == nil { @@ -613,7 +621,7 @@ func (z *BatchJobExpireFilter) UnmarshalMsg(bts []byte) (o []byte, err error) { } switch msgp.UnsafeString(field) { case "OlderThan": - z.OlderThan, bts, err = msgp.ReadDurationBytes(bts) + bts, err = z.OlderThan.UnmarshalMsg(bts) if err != nil { err = msgp.WrapError(err, "OlderThan") return @@ -734,7 +742,7 @@ func (z *BatchJobExpireFilter) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number 
of bytes occupied by the serialized message func (z *BatchJobExpireFilter) Msgsize() (s int) { - s = 1 + 10 + msgp.DurationSize + 14 + s = 1 + 10 + z.OlderThan.Msgsize() + 14 if z.CreatedBefore == nil { s += msgp.NilSize } else { diff --git a/cmd/batch-expire_gen_test.go b/cmd/batch-expire_gen_test.go index ed5eab6cadfd2..d30cc33738a81 100644 --- a/cmd/batch-expire_gen_test.go +++ b/cmd/batch-expire_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "bytes" "testing" diff --git a/cmd/batch-expire_test.go b/cmd/batch-expire_test.go index 65eb73d601d33..18f7150ddfce1 100644 --- a/cmd/batch-expire_test.go +++ b/cmd/batch-expire_test.go @@ -18,9 +18,10 @@ package cmd import ( + "slices" "testing" - "gopkg.in/yaml.v2" + "gopkg.in/yaml.v3" ) func TestParseBatchJobExpire(t *testing.T) { @@ -32,7 +33,7 @@ expire: # Expire objects that match a condition rules: - type: object # regular objects with zero or more older versions name: NAME # match object names that satisfy the wildcard expression. - olderThan: 70h # match objects older than this value + olderThan: 7d10h # match objects older than this value createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date" tags: - key: name @@ -64,8 +65,61 @@ expire: # Expire objects that match a condition delay: 500ms # least amount of delay between each retry ` var job BatchJobRequest - err := yaml.UnmarshalStrict([]byte(expireYaml), &job) + err := yaml.Unmarshal([]byte(expireYaml), &job) if err != nil { t.Fatal("Failed to parse batch-job-expire yaml", err) } + if !slices.Equal(job.Expire.Prefix.F(), []string{"myprefix"}) { + t.Fatal("Failed to parse batch-job-expire yaml") + } + + multiPrefixExpireYaml := ` +expire: # Expire objects that match a condition + apiVersion: v1 + bucket: mybucket # Bucket where this batch job will expire matching objects from + prefix: # (Optional) Prefix under which this job will expire objects matching the rules below. + - myprefix + - myprefix1 + rules: + - type: object # regular objects with zero or more older versions + name: NAME # match object names that satisfy the wildcard expression. + olderThan: 7d10h # match objects older than this value + createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date" + tags: + - key: name + value: pick* # match objects with tag 'name', all values starting with 'pick' + metadata: + - key: content-type + value: image/* # match objects with 'content-type', all values starting with 'image/' + size: + lessThan: "10MiB" # match objects with size less than this value (e.g. 10MiB) + greaterThan: 1MiB # match objects with size greater than this value (e.g. 1MiB) + purge: + # retainVersions: 0 # (default) delete all versions of the object. This option is the fastest. + # retainVersions: 5 # keep the latest 5 versions of the object. + + - type: deleted # objects with delete marker as their latest version + name: NAME # match object names that satisfy the wildcard expression. + olderThan: 10h # match objects older than this value (e.g. 7d10h31s) + createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date" + purge: + # retainVersions: 0 # (default) delete all versions of the object. This option is the fastest. + # retainVersions: 5 # keep the latest 5 versions of the object including delete markers. 
+ + notify: + endpoint: https://notify.endpoint # notification endpoint to receive job completion status + token: Bearer xxxxx # optional authentication token for the notification endpoint + + retry: + attempts: 10 # number of retries for the job before giving up + delay: 500ms # least amount of delay between each retry +` + var multiPrefixJob BatchJobRequest + err = yaml.Unmarshal([]byte(multiPrefixExpireYaml), &multiPrefixJob) + if err != nil { + t.Fatal("Failed to parse batch-job-expire yaml", err) + } + if !slices.Equal(multiPrefixJob.Expire.Prefix.F(), []string{"myprefix", "myprefix1"}) { + t.Fatal("Failed to parse batch-job-expire yaml") + } } diff --git a/cmd/batch-handlers.go b/cmd/batch-handlers.go index 4a932b8e3f0b9..484882e969f99 100644 --- a/cmd/batch-handlers.go +++ b/cmd/batch-handlers.go @@ -25,9 +25,11 @@ import ( "errors" "fmt" "io" + "maps" "math/rand" "net/http" "net/url" + "path/filepath" "runtime" "strconv" "strings" @@ -38,7 +40,6 @@ import ( "github.com/lithammer/shortuuid/v4" "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7" - miniogo "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/encrypt" "github.com/minio/minio-go/v7/pkg/tags" @@ -46,18 +47,23 @@ import ( "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/hash" xhttp "github.com/minio/minio/internal/http" - "github.com/minio/minio/internal/ioutil" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/console" - "github.com/minio/pkg/v2/env" - "github.com/minio/pkg/v2/policy" - "github.com/minio/pkg/v2/workers" + "github.com/minio/pkg/v3/console" + "github.com/minio/pkg/v3/env" + "github.com/minio/pkg/v3/policy" + "github.com/minio/pkg/v3/workers" "gopkg.in/yaml.v3" ) var globalBatchConfig batch.Config +const ( + // Keep the completed/failed job stats 3 days before removing it + oldJobsExpiration = 3 * 24 * time.Hour + + redactedText = "**REDACTED**" +) + // BatchJobRequest this is an internal data structure not for external consumption. type BatchJobRequest struct { ID string `yaml:"-" json:"name"` @@ -69,6 +75,29 @@ type BatchJobRequest struct { ctx context.Context `msg:"-"` } +// RedactSensitive will redact any sensitive information in b. +func (j *BatchJobRequest) RedactSensitive() { + j.Replicate.RedactSensitive() + j.Expire.RedactSensitive() + j.KeyRotate.RedactSensitive() +} + +// RedactSensitive will redact any sensitive information in b. +func (r *BatchJobReplicateV1) RedactSensitive() { + if r == nil { + return + } + if r.Target.Creds.SecretKey != "" { + r.Target.Creds.SecretKey = redactedText + } + if r.Target.Creds.SessionToken != "" { + r.Target.Creds.SessionToken = redactedText + } +} + +// RedactSensitive will redact any sensitive information in b. 
+func (r *BatchJobKeyRotateV1) RedactSensitive() {} + func notifyEndpoint(ctx context.Context, ri *batchJobInfo, endpoint, token string) error { if endpoint == "" { return nil @@ -92,7 +121,7 @@ func notifyEndpoint(ctx context.Context, ri *batchJobInfo, endpoint, token strin } req.Header.Set("Content-Type", "application/json") - clnt := http.Client{Transport: getRemoteInstanceTransport} + clnt := http.Client{Transport: getRemoteInstanceTransport()} resp, err := clnt.Do(req) if err != nil { return err @@ -112,7 +141,7 @@ func (r BatchJobReplicateV1) Notify(ctx context.Context, ri *batchJobInfo) error } // ReplicateFromSource - this is not implemented yet where source is 'remote' and target is local. -func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api ObjectLayer, core *miniogo.Core, srcObjInfo ObjectInfo, retry bool) error { +func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api ObjectLayer, core *minio.Core, srcObjInfo ObjectInfo, retry bool) error { srcBucket := r.Source.Bucket tgtBucket := r.Target.Bucket srcObject := srcObjInfo.Name @@ -159,7 +188,7 @@ func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api Objec } return r.copyWithMultipartfromSource(ctx, api, core, srcObjInfo, opts, partsCount) } - gopts := miniogo.GetObjectOptions{ + gopts := minio.GetObjectOptions{ VersionID: srcObjInfo.VersionID, } if err := gopts.SetMatchETag(srcObjInfo.ETag); err != nil { @@ -180,7 +209,7 @@ func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api Objec return err } -func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, api ObjectLayer, c *miniogo.Core, srcObjInfo ObjectInfo, opts ObjectOptions, partsCount int) (err error) { +func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, api ObjectLayer, c *minio.Core, srcObjInfo ObjectInfo, opts ObjectOptions, partsCount int) (err error) { srcBucket := r.Source.Bucket tgtBucket := r.Target.Bucket srcObject := srcObjInfo.Name @@ -206,7 +235,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a if aerr == nil { return } - logger.LogIf(ctx, + batchLogIf(ctx, fmt.Errorf("trying %s: Unable to cleanup failed multipart replication %s on remote %s/%s: %w - this may consume space on remote cluster", humanize.Ordinal(attempts), res.UploadID, tgtBucket, tgtObject, aerr)) attempts++ @@ -220,8 +249,8 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a pInfo PartInfo ) - for i := 0; i < partsCount; i++ { - gopts := miniogo.GetObjectOptions{ + for i := range partsCount { + gopts := minio.GetObjectOptions{ VersionID: srcObjInfo.VersionID, PartNumber: i + 1, } @@ -263,7 +292,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay JobType: string(job.Type()), StartTime: job.Started, } - if err := ri.load(ctx, api, job); err != nil { + if err := ri.loadOrInit(ctx, api, job); err != nil { return err } if ri.Complete { @@ -271,38 +300,42 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay } globalBatchJobsMetrics.save(job.ID, ri) + retryAttempts := job.Replicate.Flags.Retry.Attempts + if retryAttempts <= 0 { + retryAttempts = batchReplJobDefaultRetries + } delay := job.Replicate.Flags.Retry.Delay - if delay == 0 { + if delay <= 0 { delay = batchReplJobDefaultRetryDelay } rnd := rand.New(rand.NewSource(time.Now().UnixNano())) - isTags := len(r.Flags.Filter.Tags) != 0 + hasTags := len(r.Flags.Filter.Tags) != 0 isMetadata := 
len(r.Flags.Filter.Metadata) != 0 isStorageClassOnly := len(r.Flags.Filter.Metadata) == 1 && strings.EqualFold(r.Flags.Filter.Metadata[0].Key, xhttp.AmzStorageClass) skip := func(oi ObjectInfo) (ok bool) { - if r.Flags.Filter.OlderThan > 0 && time.Since(oi.ModTime) < r.Flags.Filter.OlderThan { + if r.Flags.Filter.OlderThan > 0 && time.Since(oi.ModTime) < r.Flags.Filter.OlderThan.D() { // skip all objects that are newer than specified older duration return true } - if r.Flags.Filter.NewerThan > 0 && time.Since(oi.ModTime) >= r.Flags.Filter.NewerThan { + if r.Flags.Filter.NewerThan > 0 && time.Since(oi.ModTime) >= r.Flags.Filter.NewerThan.D() { // skip all objects that are older than specified newer duration return true } - if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.Before(oi.ModTime) { + if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.After(oi.ModTime) { // skip all objects that are created before the specified time. return true } - if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.After(oi.ModTime) { + if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.Before(oi.ModTime) { // skip all objects that are created after the specified time. return true } - if isTags { + if hasTags { // Only parse object tags if tags filter is specified. tagMap := map[string]string{} tagStr := oi.UserTags @@ -348,10 +381,10 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay cred := r.Source.Creds - c, err := miniogo.New(u.Host, &miniogo.Options{ + c, err := minio.New(u.Host, &minio.Options{ Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken), Secure: u.Scheme == "https", - Transport: getRemoteInstanceTransport, + Transport: getRemoteInstanceTransport(), BucketLookup: lookupStyle(r.Source.Path), }) if err != nil { @@ -359,7 +392,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay } c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID) - core := &miniogo.Core{Client: c} + core := &minio.Core{Client: c} workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_REPLICATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2))) if err != nil { @@ -372,7 +405,6 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay return err } - retryAttempts := ri.RetryAttempts retry := false for attempts := 1; attempts <= retryAttempts; attempts++ { attempts := attempts @@ -380,12 +412,27 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay s3Type := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 minioSrc := r.Source.Type == BatchJobReplicateResourceMinIO ctx, cancel := context.WithCancel(ctx) - objInfoCh := c.ListObjects(ctx, r.Source.Bucket, miniogo.ListObjectsOptions{ - Prefix: r.Source.Prefix, - WithVersions: minioSrc, - Recursive: true, - WithMetadata: true, - }) + + objInfoCh := make(chan minio.ObjectInfo, 1) + go func() { + prefixes := r.Source.Prefix.F() + if len(prefixes) == 0 { + prefixes = []string{""} + } + for _, prefix := range prefixes { + prefixObjInfoCh := c.ListObjects(ctx, r.Source.Bucket, minio.ListObjectsOptions{ + Prefix: prefix, + WithVersions: minioSrc, + Recursive: true, + WithMetadata: true, + }) + for obj := range prefixObjInfoCh { + objInfoCh <- obj + } + } + xioutil.SafeClose(objInfoCh) + }() + prevObj := "" skipReplicate := false @@ -396,25 +443,25 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx 
context.Context, api ObjectLay // all user metadata or just storageClass. If its only storageClass // List() already returns relevant information for filter to be applied. if isMetadata && !isStorageClassOnly { - oi2, err := c.StatObject(ctx, r.Source.Bucket, obj.Key, miniogo.StatObjectOptions{}) + oi2, err := c.StatObject(ctx, r.Source.Bucket, obj.Key, minio.StatObjectOptions{}) if err == nil { oi = toObjectInfo(r.Source.Bucket, obj.Key, oi2) } else { if !isErrMethodNotAllowed(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) && !isErrObjectNotFound(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) } continue } } - if isTags { + if hasTags { tags, err := c.GetObjectTagging(ctx, r.Source.Bucket, obj.Key, minio.GetObjectTaggingOptions{}) if err == nil { oi.UserTags = tags.String() } else { if !isErrMethodNotAllowed(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) && !isErrObjectNotFound(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) } continue } @@ -443,15 +490,15 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay return } stopFn(oi, err) - logger.LogIf(ctx, err) + batchLogIf(ctx, err) success = false } else { stopFn(oi, nil) } - ri.trackCurrentBucketObject(r.Target.Bucket, oi, success) + ri.trackCurrentBucketObject(r.Target.Bucket, oi, success, attempts) globalBatchJobsMetrics.save(job.ID, ri) // persist in-memory state to disk after every 10secs. - logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) + batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) if wait := globalBatchConfig.ReplicationWait(); wait > 0 { time.Sleep(wait) @@ -466,10 +513,10 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay globalBatchJobsMetrics.save(job.ID, ri) // persist in-memory state to disk. 
- logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job)) + batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job)) if err := r.Notify(ctx, ri); err != nil { - logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err)) + batchLogIf(ctx, fmt.Errorf("unable to notify %v", err)) } cancel() @@ -492,7 +539,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay } // toObjectInfo converts minio.ObjectInfo to ObjectInfo -func toObjectInfo(bucket, object string, objInfo miniogo.ObjectInfo) ObjectInfo { +func toObjectInfo(bucket, object string, objInfo minio.ObjectInfo) ObjectInfo { tags, _ := tags.MapToObjectTags(objInfo.UserTags) oi := ObjectInfo{ Bucket: bucket, @@ -528,14 +575,12 @@ func toObjectInfo(bucket, object string, objInfo miniogo.ObjectInfo) ObjectInfo oi.UserDefined[xhttp.AmzStorageClass] = objInfo.StorageClass } - for k, v := range objInfo.UserMetadata { - oi.UserDefined[k] = v - } + maps.Copy(oi.UserDefined, objInfo.UserMetadata) return oi } -func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLayer, remoteClnt *minio.Client, entries []ObjectInfo) error { +func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLayer, remoteClnt *minio.Client, entries []ObjectInfo, prefix string) error { input := make(chan minio.SnowballObject, 1) opts := minio.SnowballOptions{ Opts: minio.PutObjectOptions{}, @@ -553,10 +598,14 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa VersionID: entry.VersionID, }) if err != nil { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) continue } + if prefix != "" { + entry.Name = pathJoin(prefix, entry.Name) + } + snowballObj := minio.SnowballObject{ // Create path to store objects within the bucket. Key: entry.Name, @@ -570,12 +619,12 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa }, } - opts, err := batchReplicationOpts(ctx, "", gr.ObjInfo) + opts, _, err := batchReplicationOpts(ctx, "", gr.ObjInfo) if err != nil { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) continue } - + // TODO: I am not sure we read it back, but we aren't sending whether checksums are single/multipart. 
for k, vals := range opts.Header() { for _, v := range vals { snowballObj.Headers.Add(k, v) @@ -591,7 +640,7 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa } // ReplicateToTarget read from source and replicate to configured target -func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectLayer, c *miniogo.Core, srcObjInfo ObjectInfo, retry bool) error { +func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectLayer, c *minio.Core, srcObjInfo ObjectInfo, retry bool) error { srcBucket := r.Source.Bucket tgtBucket := r.Target.Bucket tgtPrefix := r.Target.Prefix @@ -600,9 +649,9 @@ func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectL if srcObjInfo.DeleteMarker || !srcObjInfo.VersionPurgeStatus.Empty() { if retry && !s3Type { - if _, err := c.StatObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), miniogo.StatObjectOptions{ + if _, err := c.StatObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), minio.StatObjectOptions{ VersionID: srcObjInfo.VersionID, - Internal: miniogo.AdvancedGetOptions{ + Internal: minio.AdvancedGetOptions{ ReplicationProxyRequest: "false", }, }); isErrMethodNotAllowed(ErrorRespToObjectError(err, tgtBucket, pathJoin(tgtPrefix, srcObject))) { @@ -619,19 +668,19 @@ func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectL dmVersionID = "" versionID = "" } - return c.RemoveObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), miniogo.RemoveObjectOptions{ + return c.RemoveObject(ctx, tgtBucket, pathJoin(tgtPrefix, srcObject), minio.RemoveObjectOptions{ VersionID: versionID, - Internal: miniogo.AdvancedRemoveOptions{ + Internal: minio.AdvancedRemoveOptions{ ReplicationDeleteMarker: dmVersionID != "", ReplicationMTime: srcObjInfo.ModTime, - ReplicationStatus: miniogo.ReplicationStatusReplica, + ReplicationStatus: minio.ReplicationStatusReplica, ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside }, }) } if retry && !s3Type { // when we are retrying avoid copying if necessary. 
- gopts := miniogo.GetObjectOptions{} + gopts := minio.GetObjectOptions{} if err := gopts.SetMatchETag(srcObjInfo.ETag); err != nil { return err } @@ -660,14 +709,14 @@ func (r *BatchJobReplicateV1) ReplicateToTarget(ctx context.Context, api ObjectL return err } - putOpts, err := batchReplicationOpts(ctx, "", objInfo) + putOpts, isMP, err := batchReplicationOpts(ctx, "", objInfo) if err != nil { return err } if r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 { - putOpts.Internal = miniogo.AdvancedPutOptions{} + putOpts.Internal = minio.AdvancedPutOptions{} } - if objInfo.isMultipart() { + if isMP { if err := replicateObjectWithMultipart(ctx, c, tgtBucket, pathJoin(tgtPrefix, objInfo.Name), rd, objInfo, putOpts); err != nil { return err } @@ -691,6 +740,7 @@ type batchJobInfo struct { StartTime time.Time `json:"startTime" msg:"st"` LastUpdate time.Time `json:"lastUpdate" msg:"lu"` RetryAttempts int `json:"retryAttempts" msg:"ra"` + Attempts int `json:"attempts" msg:"at"` Complete bool `json:"complete" msg:"cmp"` Failed bool `json:"failed" msg:"fld"` @@ -719,63 +769,73 @@ const ( batchReplJobAPIVersion = "v1" batchReplJobDefaultRetries = 3 - batchReplJobDefaultRetryDelay = 250 * time.Millisecond + batchReplJobDefaultRetryDelay = time.Second ) -func getJobReportPath(job BatchJobRequest) string { +func getJobPath(job BatchJobRequest) string { + return pathJoin(batchJobPrefix, job.ID) +} + +func (ri *batchJobInfo) getJobReportPath() (string, error) { var fileName string - switch { - case job.Replicate != nil: + switch madmin.BatchJobType(ri.JobType) { + case madmin.BatchJobReplicate: fileName = batchReplName - case job.KeyRotate != nil: + case madmin.BatchJobKeyRotate: fileName = batchKeyRotationName - case job.Expire != nil: + case madmin.BatchJobExpire: fileName = batchExpireName + default: + return "", fmt.Errorf("unknown job type: %v", ri.JobType) } - return pathJoin(batchJobReportsPrefix, job.ID, fileName) + return pathJoin(batchJobReportsPrefix, ri.JobID, fileName), nil } -func getJobPath(job BatchJobRequest) string { - return pathJoin(batchJobPrefix, job.ID) +func (ri *batchJobInfo) loadOrInit(ctx context.Context, api ObjectLayer, job BatchJobRequest) error { + err := ri.load(ctx, api, job) + if errors.Is(err, errNoSuchJob) { + switch { + case job.Replicate != nil: + ri.Version = batchReplVersionV1 + case job.KeyRotate != nil: + ri.Version = batchKeyRotateVersionV1 + case job.Expire != nil: + ri.Version = batchExpireVersionV1 + } + return nil + } + return err } func (ri *batchJobInfo) load(ctx context.Context, api ObjectLayer, job BatchJobRequest) error { + path, err := job.getJobReportPath() + if err != nil { + batchLogIf(ctx, err) + return err + } + return ri.loadByPath(ctx, api, path) +} + +func (ri *batchJobInfo) loadByPath(ctx context.Context, api ObjectLayer, path string) error { var format, version uint16 - switch { - case job.Replicate != nil: + switch filepath.Base(path) { + case batchReplName: version = batchReplVersionV1 format = batchReplFormat - case job.KeyRotate != nil: + case batchKeyRotationName: version = batchKeyRotateVersionV1 format = batchKeyRotationFormat - case job.Expire != nil: + case batchExpireName: version = batchExpireVersionV1 format = batchExpireFormat default: return errors.New("no supported batch job request specified") } - data, err := readConfig(ctx, api, getJobReportPath(job)) + + data, err := readConfig(ctx, api, path) if err != nil { if errors.Is(err, errConfigNotFound) || isErrObjectNotFound(err) { - 
ri.Version = int(version) - switch { - case job.Replicate != nil: - ri.RetryAttempts = batchReplJobDefaultRetries - if job.Replicate.Flags.Retry.Attempts > 0 { - ri.RetryAttempts = job.Replicate.Flags.Retry.Attempts - } - case job.KeyRotate != nil: - ri.RetryAttempts = batchKeyRotateJobDefaultRetries - if job.KeyRotate.Flags.Retry.Attempts > 0 { - ri.RetryAttempts = job.KeyRotate.Flags.Retry.Attempts - } - case job.Expire != nil: - ri.RetryAttempts = batchExpireJobDefaultRetries - if job.Expire.Retry.Attempts > 0 { - ri.RetryAttempts = job.Expire.Retry.Attempts - } - } - return nil + return errNoSuchJob } return err } @@ -820,27 +880,31 @@ func (ri *batchJobInfo) clone() *batchJobInfo { defer ri.mu.RUnlock() return &batchJobInfo{ - Version: ri.Version, - JobID: ri.JobID, - JobType: ri.JobType, - RetryAttempts: ri.RetryAttempts, - Complete: ri.Complete, - Failed: ri.Failed, - StartTime: ri.StartTime, - LastUpdate: ri.LastUpdate, - Bucket: ri.Bucket, - Object: ri.Object, - Objects: ri.Objects, - ObjectsFailed: ri.ObjectsFailed, - BytesTransferred: ri.BytesTransferred, - BytesFailed: ri.BytesFailed, - } -} - -func (ri *batchJobInfo) countItem(size int64, dmarker, success bool) { + Version: ri.Version, + JobID: ri.JobID, + JobType: ri.JobType, + RetryAttempts: ri.RetryAttempts, + Complete: ri.Complete, + Failed: ri.Failed, + StartTime: ri.StartTime, + LastUpdate: ri.LastUpdate, + Bucket: ri.Bucket, + Object: ri.Object, + Objects: ri.Objects, + ObjectsFailed: ri.ObjectsFailed, + DeleteMarkers: ri.DeleteMarkers, + DeleteMarkersFailed: ri.DeleteMarkersFailed, + BytesTransferred: ri.BytesTransferred, + BytesFailed: ri.BytesFailed, + Attempts: ri.Attempts, + } +} + +func (ri *batchJobInfo) countItem(size int64, dmarker, success bool, attempt int) { if ri == nil { return } + ri.Attempts++ if success { if dmarker { ri.DeleteMarkers++ @@ -848,7 +912,19 @@ func (ri *batchJobInfo) countItem(size int64, dmarker, success bool) { ri.Objects++ ri.BytesTransferred += size } + if attempt > 1 { + if dmarker { + ri.DeleteMarkersFailed-- + } else { + ri.ObjectsFailed-- + ri.BytesFailed += size + } + } } else { + if attempt > 1 { + // Only count first attempt + return + } if dmarker { ri.DeleteMarkersFailed++ } else { @@ -905,7 +981,12 @@ func (ri *batchJobInfo) updateAfter(ctx context.Context, api ObjectLayer, durati if err != nil { return err } - return saveConfig(ctx, api, getJobReportPath(job), buf) + path, err := ri.getJobReportPath() + if err != nil { + batchLogIf(ctx, err) + return err + } + return saveConfig(ctx, api, path, buf) } ri.mu.Unlock() return nil @@ -914,15 +995,26 @@ func (ri *batchJobInfo) updateAfter(ctx context.Context, api ObjectLayer, durati // Note: to be used only with batch jobs that affect multiple versions through // a single action. e.g batch-expire has an option to expire all versions of an // object which matches the given filters. 
-func (ri *batchJobInfo) trackMultipleObjectVersions(bucket string, info ObjectInfo, success bool) { +func (ri *batchJobInfo) trackMultipleObjectVersions(info expireObjInfo, success bool) { + if ri == nil { + return + } + + ri.mu.Lock() + defer ri.mu.Unlock() + if success { - ri.Objects += int64(info.NumVersions) + ri.Bucket = info.Bucket + ri.Object = info.Name + ri.Objects += int64(info.NumVersions) - info.DeleteMarkerCount + ri.DeleteMarkers += info.DeleteMarkerCount } else { - ri.ObjectsFailed += int64(info.NumVersions) + ri.ObjectsFailed += int64(info.NumVersions) - info.DeleteMarkerCount + ri.DeleteMarkersFailed += info.DeleteMarkerCount } } -func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo, success bool) { +func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo, success bool, attempt int) { if ri == nil { return } @@ -930,9 +1022,11 @@ func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo, ri.mu.Lock() defer ri.mu.Unlock() - ri.Bucket = bucket - ri.Object = info.Name - ri.countItem(info.Size, info.DeleteMarker, success) + if success { + ri.Bucket = bucket + ri.Object = info.Name + } + ri.countItem(info.Size, info.DeleteMarker, success, attempt) } func (ri *batchJobInfo) trackCurrentBucketBatch(bucket string, batch []ObjectInfo) { @@ -946,7 +1040,7 @@ func (ri *batchJobInfo) trackCurrentBucketBatch(bucket string, batch []ObjectInf ri.Bucket = bucket for i := range batch { ri.Object = batch[i].Name - ri.countItem(batch[i].Size, batch[i].DeleteMarker, true) + ri.countItem(batch[i].Size, batch[i].DeleteMarker, true, 1) } } @@ -957,7 +1051,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba JobType: string(job.Type()), StartTime: job.Started, } - if err := ri.load(ctx, api, job); err != nil { + if err := ri.loadOrInit(ctx, api, job); err != nil { return err } if ri.Complete { @@ -966,29 +1060,34 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba globalBatchJobsMetrics.save(job.ID, ri) lastObject := ri.Object + retryAttempts := job.Replicate.Flags.Retry.Attempts + if retryAttempts <= 0 { + retryAttempts = batchReplJobDefaultRetries + } delay := job.Replicate.Flags.Retry.Delay - if delay == 0 { + if delay <= 0 { delay = batchReplJobDefaultRetryDelay } + rnd := rand.New(rand.NewSource(time.Now().UnixNano())) selectObj := func(info FileInfo) (ok bool) { - if r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan { + if r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan.D() { // skip all objects that are newer than specified older duration return false } - if r.Flags.Filter.NewerThan > 0 && time.Since(info.ModTime) >= r.Flags.Filter.NewerThan { + if r.Flags.Filter.NewerThan > 0 && time.Since(info.ModTime) >= r.Flags.Filter.NewerThan.D() { // skip all objects that are older than specified newer duration return false } - if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.Before(info.ModTime) { + if !r.Flags.Filter.CreatedAfter.IsZero() && r.Flags.Filter.CreatedAfter.After(info.ModTime) { // skip all objects that are created before the specified time. return false } - if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.After(info.ModTime) { + if !r.Flags.Filter.CreatedBefore.IsZero() && r.Flags.Filter.CreatedBefore.Before(info.ModTime) { // skip all objects that are created after the specified time. 
return false } @@ -1035,7 +1134,8 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba } // if one of source or target is non MinIO, just replicate the top most version like `mc mirror` - return !((r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3) && !info.IsLatest) + isSourceOrTargetS3 := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 + return !isSourceOrTargetS3 || info.IsLatest } u, err := url.Parse(r.Target.Endpoint) @@ -1045,10 +1145,10 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba cred := r.Target.Creds - c, err := miniogo.NewCore(u.Host, &miniogo.Options{ + c, err := minio.NewCore(u.Host, &minio.Options{ Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken), Secure: u.Scheme == "https", - Transport: getRemoteInstanceTransport, + Transport: getRemoteInstanceTransport(), BucketLookup: lookupStyle(r.Target.Path), }) if err != nil { @@ -1057,115 +1157,120 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID) - var ( - walkCh = make(chan ObjectInfo, 100) - slowCh = make(chan ObjectInfo, 100) - ) - - if !*r.Source.Snowball.Disable && r.Source.Type.isMinio() && r.Target.Type.isMinio() { - go func() { - defer xioutil.SafeClose(slowCh) - - // Snowball currently needs the high level minio-go Client, not the Core one - cl, err := miniogo.New(u.Host, &miniogo.Options{ - Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken), - Secure: u.Scheme == "https", - Transport: getRemoteInstanceTransport, - BucketLookup: lookupStyle(r.Target.Path), - }) - if err != nil { - logger.LogIf(ctx, err) - return - } - - // Already validated before arriving here - smallerThan, _ := humanize.ParseBytes(*r.Source.Snowball.SmallerThan) - - var ( - obj = ObjectInfo{} - batch = make([]ObjectInfo, 0, *r.Source.Snowball.Batch) - valid = true - ) - - for valid { - obj, valid = <-walkCh + retry := false + for attempts := 1; attempts <= retryAttempts; attempts++ { + attempts := attempts + var ( + walkCh = make(chan itemOrErr[ObjectInfo], 100) + slowCh = make(chan itemOrErr[ObjectInfo], 100) + ) - if !valid { - goto write + if r.Source.Snowball.Disable != nil && !*r.Source.Snowball.Disable && r.Source.Type.isMinio() && r.Target.Type.isMinio() { + go func() { + // Snowball currently needs the high level minio-go Client, not the Core one + cl, err := minio.New(u.Host, &minio.Options{ + Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken), + Secure: u.Scheme == "https", + Transport: getRemoteInstanceTransport(), + BucketLookup: lookupStyle(r.Target.Path), + }) + if err != nil { + batchLogOnceIf(ctx, err, job.ID+"minio.New") + return } - if obj.DeleteMarker || !obj.VersionPurgeStatus.Empty() || obj.Size >= int64(smallerThan) { - slowCh <- obj - continue + // Already validated before arriving here + smallerThan, _ := humanize.ParseBytes(*r.Source.Snowball.SmallerThan) + + batch := make([]ObjectInfo, 0, *r.Source.Snowball.Batch) + writeFn := func(batch []ObjectInfo) { + if len(batch) > 0 { + if err := r.writeAsArchive(ctx, api, cl, batch, r.Target.Prefix); err != nil { + batchLogOnceIf(ctx, err, job.ID+"writeAsArchive") + for _, b := range batch { + slowCh <- itemOrErr[ObjectInfo]{Item: b} + } + } else { + ri.trackCurrentBucketBatch(r.Source.Bucket, batch) + globalBatchJobsMetrics.save(job.ID, ri) + 
// persist in-memory state to disk after every 10secs. + batchLogOnceIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job), job.ID+"updateAfter") + } + } } + for obj := range walkCh { + if obj.Item.DeleteMarker || !obj.Item.VersionPurgeStatus.Empty() || obj.Item.Size >= int64(smallerThan) { + slowCh <- obj + continue + } - batch = append(batch, obj) + batch = append(batch, obj.Item) - if len(batch) < *r.Source.Snowball.Batch { - continue - } - - write: - if len(batch) > 0 { - if err := r.writeAsArchive(ctx, api, cl, batch); err != nil { - logger.LogIf(ctx, err) - for _, b := range batch { - slowCh <- b - } - } else { - ri.trackCurrentBucketBatch(r.Source.Bucket, batch) - globalBatchJobsMetrics.save(job.ID, ri) - // persist in-memory state to disk after every 10secs. - logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) + if len(batch) < *r.Source.Snowball.Batch { + continue } + writeFn(batch) batch = batch[:0] } - } - }() - } else { - slowCh = walkCh - } - - workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_REPLICATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2))) - if err != nil { - return err - } - - wk, err := workers.New(workerSize) - if err != nil { - // invalid worker size. - return err - } + writeFn(batch) + xioutil.SafeClose(slowCh) + }() + } else { + slowCh = walkCh + } - walkQuorum := env.Get("_MINIO_BATCH_REPLICATION_WALK_QUORUM", "strict") - if walkQuorum == "" { - walkQuorum = "strict" - } + workerSize, err := strconv.Atoi(env.Get("_MINIO_BATCH_REPLICATION_WORKERS", strconv.Itoa(runtime.GOMAXPROCS(0)/2))) + if err != nil { + return err + } - retryAttempts := ri.RetryAttempts - retry := false - for attempts := 1; attempts <= retryAttempts; attempts++ { - attempts := attempts + wk, err := workers.New(workerSize) + if err != nil { + // invalid worker size. + return err + } - ctx, cancel := context.WithCancel(ctx) + walkQuorum := env.Get("_MINIO_BATCH_REPLICATION_WALK_QUORUM", "strict") + if walkQuorum == "" { + walkQuorum = "strict" + } + ctx, cancelCause := context.WithCancelCause(ctx) // one of source/target is s3, skip delete marker and all versions under the same object name. s3Type := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3 - if err := api.Walk(ctx, r.Source.Bucket, r.Source.Prefix, walkCh, WalkOptions{ - Marker: lastObject, - Filter: selectObj, - AskDisks: walkQuorum, - }); err != nil { - cancel() - // Do not need to retry if we can't list objects on source. 
- return err - } + go func() { + prefixes := r.Source.Prefix.F() + if len(prefixes) == 0 { + prefixes = []string{""} + } + for _, prefix := range prefixes { + prefixWalkCh := make(chan itemOrErr[ObjectInfo], 100) + if err := api.Walk(ctx, r.Source.Bucket, prefix, prefixWalkCh, WalkOptions{ + Marker: lastObject, + Filter: selectObj, + AskDisks: walkQuorum, + }); err != nil { + cancelCause(err) + xioutil.SafeClose(walkCh) + return + } + for obj := range prefixWalkCh { + walkCh <- obj + } + } + xioutil.SafeClose(walkCh) + }() prevObj := "" skipReplicate := false - for result := range slowCh { - result := result + for res := range slowCh { + if res.Err != nil { + ri.Failed = true + batchLogOnceIf(ctx, res.Err, job.ID+"res.Err") + continue + } + result := res.Item if result.Name != prevObj { prevObj = result.Name skipReplicate = result.DeleteMarker && s3Type @@ -1180,7 +1285,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba stopFn := globalBatchJobsMetrics.trace(batchJobMetricReplication, job.ID, attempts) success := true if err := r.ReplicateToTarget(ctx, api, c, result, retry); err != nil { - if miniogo.ToErrorResponse(err).Code == "PreconditionFailed" { + if minio.ToErrorResponse(err).Code == "PreconditionFailed" { // pre-condition failed means we already have the object copied over. return } @@ -1189,15 +1294,15 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba return } stopFn(result, err) - logger.LogIf(ctx, err) + batchLogOnceIf(ctx, err, job.ID+"ReplicateToTarget") success = false } else { stopFn(result, nil) } - ri.trackCurrentBucketObject(r.Source.Bucket, result, success) + ri.trackCurrentBucketObject(r.Source.Bucket, result, success, attempts) globalBatchJobsMetrics.save(job.ID, ri) // persist in-memory state to disk after every 10secs. - logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) + batchLogOnceIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job), job.ID+"updateAfter2") if wait := globalBatchConfig.ReplicationWait(); wait > 0 { time.Sleep(wait) @@ -1205,20 +1310,23 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba }() } wk.Wait() - + // Do not need to retry if we can't list objects on source. + if context.Cause(ctx) != nil { + return context.Cause(ctx) + } ri.RetryAttempts = attempts ri.Complete = ri.ObjectsFailed == 0 ri.Failed = ri.ObjectsFailed > 0 globalBatchJobsMetrics.save(job.ID, ri) // persist in-memory state to disk. 
- logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job)) + batchLogOnceIf(ctx, ri.updateAfter(ctx, api, 0, job), job.ID+"updateAfter3") if err := r.Notify(ctx, ri); err != nil { - logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err)) + batchLogOnceIf(ctx, fmt.Errorf("unable to notify %v", err), job.ID+"notify") } - cancel() + cancelCause(nil) if ri.Failed { ri.ObjectsFailed = 0 ri.Bucket = "" @@ -1242,6 +1350,7 @@ type batchReplicationJobError struct { Code string Description string HTTPStatusCode int + ObjectSize int64 } func (e batchReplicationJobError) Error() string { @@ -1258,9 +1367,18 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest, return errInvalidArgument } - if r.Source.Bucket == "" { + if r.Source.Endpoint != "" && r.Target.Endpoint != "" { return errInvalidArgument } + + if r.Source.Creds.Empty() && r.Target.Creds.Empty() { + return errInvalidArgument + } + + if r.Source.Bucket == "" || r.Target.Bucket == "" { + return errInvalidArgument + } + var isRemoteToLocal bool localBkt := r.Source.Bucket if r.Source.Endpoint != "" { @@ -1285,9 +1403,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest, if err := r.Source.Snowball.Validate(); err != nil { return err } - if r.Source.Creds.Empty() && r.Target.Creds.Empty() { - return errInvalidArgument - } if !r.Source.Creds.Empty() { if err := r.Source.Creds.Validate(); err != nil { @@ -1309,9 +1424,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest, if r.Target.Endpoint != "" && !r.Target.Type.isMinio() && !r.Target.ValidPath() { return errInvalidArgument } - if r.Target.Bucket == "" { - return errInvalidArgument - } if !r.Target.Creds.Empty() { if err := r.Target.Creds.Validate(); err != nil { @@ -1319,10 +1431,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest, } } - if r.Source.Creds.Empty() && r.Target.Creds.Empty() { - return errInvalidArgument - } - if err := r.Target.Type.Validate(); err != nil { return err } @@ -1353,7 +1461,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest, cred = r.Source.Creds remoteBkt = r.Source.Bucket pathStyle = r.Source.Path - } u, err := url.Parse(remoteEp) @@ -1361,10 +1468,10 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest, return err } - c, err := miniogo.NewCore(u.Host, &miniogo.Options{ + c, err := minio.NewCore(u.Host, &minio.Options{ Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken), Secure: u.Scheme == "https", - Transport: getRemoteInstanceTransport, + Transport: getRemoteInstanceTransport(), BucketLookup: lookupStyle(pathStyle), }) if err != nil { @@ -1374,7 +1481,7 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest, vcfg, err := c.GetBucketVersioning(ctx, remoteBkt) if err != nil { - if miniogo.ToErrorResponse(err).Code == "NoSuchBucket" { + if minio.ToErrorResponse(err).Code == "NoSuchBucket" { return batchReplicationJobError{ Code: "NoSuchTargetBucket", Description: "The specified target bucket does not exist", @@ -1427,10 +1534,24 @@ func (j BatchJobRequest) Validate(ctx context.Context, o ObjectLayer) error { } func (j BatchJobRequest) delete(ctx context.Context, api ObjectLayer) { - deleteConfig(ctx, api, getJobReportPath(j)) deleteConfig(ctx, api, getJobPath(j)) } +func (j BatchJobRequest) getJobReportPath() (string, error) { + var fileName string + switch { + case j.Replicate != nil: + fileName = batchReplName + case 
j.KeyRotate != nil: + fileName = batchKeyRotationName + case j.Expire != nil: + fileName = batchExpireName + default: + return "", errors.New("unknown job type") + } + return pathJoin(batchJobReportsPrefix, j.ID, fileName), nil +} + func (j *BatchJobRequest) save(ctx context.Context, api ObjectLayer) error { if j.Replicate == nil && j.KeyRotate == nil && j.Expire == nil { return errInvalidArgument @@ -1465,19 +1586,19 @@ func (j *BatchJobRequest) load(ctx context.Context, api ObjectLayer, name string return err } -func batchReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts miniogo.PutObjectOptions, err error) { +func batchReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts minio.PutObjectOptions, isMP bool, err error) { // TODO: support custom storage class for remote replication - putOpts, err = putReplicationOpts(ctx, "", objInfo) + putOpts, isMP, err = putReplicationOpts(ctx, "", objInfo) if err != nil { - return putOpts, err + return putOpts, isMP, err } - putOpts.Internal = miniogo.AdvancedPutOptions{ + putOpts.Internal = minio.AdvancedPutOptions{ SourceVersionID: objInfo.VersionID, SourceMTime: objInfo.ModTime, SourceETag: objInfo.ETag, ReplicationRequest: true, } - return putOpts, nil + return putOpts, isMP, nil } // ListBatchJobs - lists all currently active batch jobs, optionally takes {jobType} @@ -1491,11 +1612,8 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request) } jobType := r.Form.Get("jobType") - if jobType == "" { - jobType = string(madmin.BatchJobReplicate) - } - resultCh := make(chan ObjectInfo) + resultCh := make(chan itemOrErr[ObjectInfo]) ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -1507,15 +1625,22 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request) listResult := madmin.ListBatchJobsResult{} for result := range resultCh { + if result.Err != nil { + writeErrorResponseJSON(ctx, w, toAPIError(ctx, result.Err), r.URL) + return + } + if strings.HasPrefix(result.Item.Name, batchJobReportsPrefix+slashSeparator) { + continue + } req := &BatchJobRequest{} - if err := req.load(ctx, objectAPI, result.Name); err != nil { + if err := req.load(ctx, objectAPI, result.Item.Name); err != nil { if !errors.Is(err, errNoSuchJob) { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) } continue } - if jobType == string(req.Type()) { + if jobType == string(req.Type()) || jobType == "" { listResult.Jobs = append(listResult.Jobs, madmin.BatchJobResult{ ID: req.ID, Type: req.Type(), @@ -1526,7 +1651,56 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request) } } - logger.LogIf(ctx, json.NewEncoder(w).Encode(&listResult)) + batchLogIf(ctx, json.NewEncoder(w).Encode(&listResult)) +} + +// BatchJobStatus - returns the status of a batch job saved in the disk +func (a adminAPIHandlers) BatchJobStatus(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + objectAPI, _ := validateAdminReq(ctx, w, r, policy.ListBatchJobsAction) + if objectAPI == nil { + return + } + + jobID := r.Form.Get("jobId") + if jobID == "" { + writeErrorResponseJSON(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL) + return + } + + req := BatchJobRequest{ID: jobID} + if i := strings.Index(jobID, "-"); i > 0 { + switch madmin.BatchJobType(jobID[:i]) { + case madmin.BatchJobReplicate: + req.Replicate = &BatchJobReplicateV1{} + case madmin.BatchJobKeyRotate: + req.KeyRotate = &BatchJobKeyRotateV1{} + case madmin.BatchJobExpire: + req.Expire = &BatchJobExpire{} + default: 
+ writeErrorResponseJSON(ctx, w, toAPIError(ctx, errors.New("job ID format unrecognized")), r.URL) + return + } + } + + ri := &batchJobInfo{} + if err := ri.load(ctx, objectAPI, req); err != nil { + if !errors.Is(err, errNoSuchJob) { + batchLogIf(ctx, err) + } + writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) + return + } + + buf, err := json.Marshal(madmin.BatchJobStatus{LastMetric: ri.metric()}) + if err != nil { + batchLogIf(ctx, err) + writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) + return + } + + w.Write(buf) } var errNoSuchJob = errors.New("no such job") @@ -1549,16 +1723,18 @@ func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Reques req := &BatchJobRequest{} if err := req.load(ctx, objectAPI, pathJoin(batchJobPrefix, jobID)); err != nil { if !errors.Is(err, errNoSuchJob) { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) } writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) return } + // Remove sensitive fields. + req.RedactSensitive() buf, err := yaml.Marshal(req) if err != nil { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) return } @@ -1566,7 +1742,7 @@ func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Reques w.Write(buf) } -// StarBatchJob queue a new job for execution +// StartBatchJob queue a new job for execution func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request) { ctx := r.Context() @@ -1575,7 +1751,7 @@ func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request) return } - buf, err := io.ReadAll(ioutil.HardLimitReader(r.Body, humanize.MiByte*4)) + buf, err := io.ReadAll(xioutil.HardLimitReader(r.Body, humanize.MiByte*4)) if err != nil { writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL) return @@ -1620,7 +1796,7 @@ func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request) return } - job.ID = fmt.Sprintf("%s:%d", shortuuid.New(), GetProxyEndpointLocalIndex(globalProxyEndpoints)) + job.ID = fmt.Sprintf("%s-%s%s%d", job.Type(), shortuuid.New(), getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints)) job.User = user job.Started = time.Now() @@ -1663,7 +1839,7 @@ func (a adminAPIHandlers) CancelBatchJob(w http.ResponseWriter, r *http.Request) return } - if _, success := proxyRequestByToken(ctx, w, r, jobID); success { + if _, proxied, _ := proxyRequestByToken(ctx, w, r, jobID, true); proxied { return } @@ -1708,26 +1884,82 @@ func newBatchJobPool(ctx context.Context, o ObjectLayer, workers int) *BatchJobP jobCancelers: make(map[string]context.CancelFunc), } jpool.ResizeWorkers(workers) - jpool.resume() + + randomWait := func() time.Duration { + // randomWait depends on the number of nodes to avoid triggering resume and cleanups at the same time. 
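// Illustrative aside, not part of the patch: rand.Float64() is uniform over
// [0, 1), so the surrounding closure yields a wait spread uniformly over
// [0, NEndpoints() hours). A standalone sketch with an assumed 4-node cluster:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	nEndpoints := 4 // assumed node count, for illustration only
	wait := time.Duration(rand.Float64() * float64(time.Duration(nEndpoints)*time.Hour))
	fmt.Println("staggered start in", wait) // somewhere in [0h, 4h)
}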
+ return time.Duration(rand.Float64() * float64(time.Duration(globalEndpoints.NEndpoints())*time.Hour)) + } + + go func() { + jpool.resume(randomWait) + jpool.cleanupReports(randomWait) + }() + return jpool } -func (j *BatchJobPool) resume() { - results := make(chan ObjectInfo, 100) +func (j *BatchJobPool) cleanupReports(randomWait func() time.Duration) { + t := time.NewTimer(randomWait()) + defer t.Stop() + + for { + select { + case <-GlobalContext.Done(): + return + case <-t.C: + results := make(chan itemOrErr[ObjectInfo], 100) + ctx, cancel := context.WithCancel(j.ctx) + defer cancel() + if err := j.objLayer.Walk(ctx, minioMetaBucket, batchJobReportsPrefix, results, WalkOptions{}); err != nil { + batchLogIf(j.ctx, err) + t.Reset(randomWait()) + continue + } + for result := range results { + if result.Err != nil { + batchLogIf(j.ctx, result.Err) + continue + } + ri := &batchJobInfo{} + if err := ri.loadByPath(ctx, j.objLayer, result.Item.Name); err != nil { + batchLogIf(ctx, err) + continue + } + if (ri.Complete || ri.Failed) && time.Since(ri.LastUpdate) > oldJobsExpiration { + deleteConfig(ctx, j.objLayer, result.Item.Name) + } + } + + t.Reset(randomWait()) + } + } +} + +func (j *BatchJobPool) resume(randomWait func() time.Duration) { + time.Sleep(randomWait()) + + results := make(chan itemOrErr[ObjectInfo], 100) ctx, cancel := context.WithCancel(j.ctx) defer cancel() if err := j.objLayer.Walk(ctx, minioMetaBucket, batchJobPrefix, results, WalkOptions{}); err != nil { - logger.LogIf(j.ctx, err) + batchLogIf(j.ctx, err) return } for result := range results { + if result.Err != nil { + batchLogIf(j.ctx, result.Err) + continue + } + if strings.HasPrefix(result.Item.Name, batchJobReportsPrefix+slashSeparator) { + continue + } // ignore batch-replicate.bin and batch-rotate.bin entries - if strings.HasSuffix(result.Name, slashSeparator) { + if strings.HasSuffix(result.Item.Name, slashSeparator) { continue } req := &BatchJobRequest{} - if err := req.load(ctx, j.objLayer, result.Name); err != nil { - logger.LogIf(ctx, err) + if err := req.load(ctx, j.objLayer, result.Item.Name); err != nil { + batchLogIf(ctx, err) continue } _, nodeIdx := parseRequestToken(req.ID) @@ -1736,7 +1968,7 @@ func (j *BatchJobPool) resume() { continue } if err := j.queueJob(req); err != nil { - logger.LogIf(ctx, err) + batchLogIf(ctx, err) continue } } @@ -1760,7 +1992,7 @@ func (j *BatchJobPool) AddWorker() { if job.Replicate.RemoteToLocal() { if err := job.Replicate.StartFromSource(job.ctx, j.objLayer, *job); err != nil { if !isErrBucketNotFound(err) { - logger.LogIf(j.ctx, err) + batchLogIf(j.ctx, err) j.canceler(job.ID, false) continue } @@ -1769,7 +2001,7 @@ func (j *BatchJobPool) AddWorker() { } else { if err := job.Replicate.Start(job.ctx, j.objLayer, *job); err != nil { if !isErrBucketNotFound(err) { - logger.LogIf(j.ctx, err) + batchLogIf(j.ctx, err) j.canceler(job.ID, false) continue } @@ -1779,19 +2011,18 @@ func (j *BatchJobPool) AddWorker() { case job.KeyRotate != nil: if err := job.KeyRotate.Start(job.ctx, j.objLayer, *job); err != nil { if !isErrBucketNotFound(err) { - logger.LogIf(j.ctx, err) + batchLogIf(j.ctx, err) continue } } case job.Expire != nil: if err := job.Expire.Start(job.ctx, j.objLayer, *job); err != nil { if !isErrBucketNotFound(err) { - logger.LogIf(j.ctx, err) + batchLogIf(j.ctx, err) continue } } } - job.delete(j.ctx, j.objLayer) j.canceler(job.ID, false) case <-j.workerKillCh: return @@ -1852,7 +2083,9 @@ func (j *BatchJobPool) canceler(jobID string, cancel bool) error { canceler() } 
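// Illustrative aside, not part of the patch: Walk now emits a generic
// item-or-error value instead of a bare ObjectInfo, which is why every consumer
// in this diff checks result.Err before touching result.Item. A minimal sketch of
// the presumed shape (the real itemOrErr definition lives elsewhere in the tree):

package main

import "fmt"

type itemOrErr[T any] struct {
	Item T
	Err  error
}

func main() {
	ch := make(chan itemOrErr[string], 2)
	ch <- itemOrErr[string]{Item: "testbucket/object-1"}
	ch <- itemOrErr[string]{Err: fmt.Errorf("listing interrupted")}
	close(ch)
	for res := range ch {
		if res.Err != nil {
			fmt.Println("walk error:", res.Err) // log and skip, as the callers above do
			continue
		}
		fmt.Println("item:", res.Item)
	}
}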
} - delete(j.jobCancelers, jobID) + if cancel { + delete(j.jobCancelers, jobID) + } return nil } @@ -1913,12 +2146,14 @@ func (ri *batchJobInfo) metric() madmin.JobMetric { switch ri.JobType { case string(madmin.BatchJobReplicate): m.Replicate = &madmin.ReplicateInfo{ - Bucket: ri.Bucket, - Object: ri.Object, - Objects: ri.Objects, - ObjectsFailed: ri.ObjectsFailed, - BytesTransferred: ri.BytesTransferred, - BytesFailed: ri.BytesFailed, + Bucket: ri.Bucket, + Object: ri.Object, + Objects: ri.Objects, + DeleteMarkers: ri.DeleteMarkers, + ObjectsFailed: ri.ObjectsFailed, + DeleteMarkersFailed: ri.DeleteMarkersFailed, + BytesTransferred: ri.BytesTransferred, + BytesFailed: ri.BytesFailed, } case string(madmin.BatchJobKeyRotate): m.KeyRotate = &madmin.KeyRotationInfo{ @@ -1929,10 +2164,12 @@ func (ri *batchJobInfo) metric() madmin.JobMetric { } case string(madmin.BatchJobExpire): m.Expired = &madmin.ExpirationInfo{ - Bucket: ri.Bucket, - Object: ri.Object, - Objects: ri.Objects, - ObjectsFailed: ri.ObjectsFailed, + Bucket: ri.Bucket, + Object: ri.Object, + Objects: ri.Objects, + DeleteMarkers: ri.DeleteMarkers, + ObjectsFailed: ri.ObjectsFailed, + DeleteMarkersFailed: ri.DeleteMarkersFailed, } } @@ -1971,16 +2208,47 @@ func (m *batchJobMetrics) purgeJobMetrics() { var toDeleteJobMetrics []string m.RLock() for id, metrics := range m.metrics { - if time.Since(metrics.LastUpdate) > 24*time.Hour && (metrics.Complete || metrics.Failed) { + if time.Since(metrics.LastUpdate) > oldJobsExpiration && (metrics.Complete || metrics.Failed) { toDeleteJobMetrics = append(toDeleteJobMetrics, id) } } m.RUnlock() for _, jobID := range toDeleteJobMetrics { m.delete(jobID) + j := BatchJobRequest{ + ID: jobID, + } + j.delete(GlobalContext, newObjectLayerFn()) + } + } + } +} + +// load metrics from disk on startup +func (m *batchJobMetrics) init(ctx context.Context, objectAPI ObjectLayer) error { + resultCh := make(chan itemOrErr[ObjectInfo]) + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + if err := objectAPI.Walk(ctx, minioMetaBucket, batchJobReportsPrefix, resultCh, WalkOptions{}); err != nil { + return err + } + + for result := range resultCh { + if result.Err != nil { + return result.Err + } + ri := &batchJobInfo{} + if err := ri.loadByPath(ctx, objectAPI, result.Item.Name); err != nil { + if !errors.Is(err, errNoSuchJob) { + batchLogIf(ctx, err) } + continue } + m.metrics[ri.JobID] = ri } + return nil } func (m *batchJobMetrics) delete(jobID string) { @@ -2047,16 +2315,42 @@ func (m *batchJobMetrics) trace(d batchJobMetric, job string, attempts int) func } } -func lookupStyle(s string) miniogo.BucketLookupType { - var lookup miniogo.BucketLookupType +func lookupStyle(s string) minio.BucketLookupType { + var lookup minio.BucketLookupType switch s { case "on": - lookup = miniogo.BucketLookupPath + lookup = minio.BucketLookupPath case "off": - lookup = miniogo.BucketLookupDNS + lookup = minio.BucketLookupDNS default: - lookup = miniogo.BucketLookupAuto - + lookup = minio.BucketLookupAuto } return lookup } + +// BatchJobPrefix - to support prefix field yaml unmarshalling with string or slice of strings +type BatchJobPrefix []string + +var _ yaml.Unmarshaler = &BatchJobPrefix{} + +// UnmarshalYAML - to support prefix field yaml unmarshalling with string or slice of strings +func (b *BatchJobPrefix) UnmarshalYAML(value *yaml.Node) error { + // try slice first + tmpSlice := []string{} + if err := value.Decode(&tmpSlice); err == nil { + *b = tmpSlice + return nil + } + // try string + tmpStr := "" + if 
err := value.Decode(&tmpStr); err == nil { + *b = []string{tmpStr} + return nil + } + return fmt.Errorf("unable to decode %s", value.Value) +} + +// F - return prefix(es) as slice +func (b *BatchJobPrefix) F() []string { + return *b +} diff --git a/cmd/batch-handlers_gen.go b/cmd/batch-handlers_gen.go index 09d3cec1245be..ad29932b4d21a 100644 --- a/cmd/batch-handlers_gen.go +++ b/cmd/batch-handlers_gen.go @@ -1,11 +1,94 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) +// DecodeMsg implements msgp.Decodable +func (z *BatchJobPrefix) DecodeMsg(dc *msgp.Reader) (err error) { + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if cap((*z)) >= int(zb0002) { + (*z) = (*z)[:zb0002] + } else { + (*z) = make(BatchJobPrefix, zb0002) + } + for zb0001 := range *z { + (*z)[zb0001], err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z BatchJobPrefix) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteArrayHeader(uint32(len(z))) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0003 := range z { + err = en.WriteString(z[zb0003]) + if err != nil { + err = msgp.WrapError(err, zb0003) + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z BatchJobPrefix) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendArrayHeader(o, uint32(len(z))) + for zb0003 := range z { + o = msgp.AppendString(o, z[zb0003]) + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *BatchJobPrefix) UnmarshalMsg(bts []byte) (o []byte, err error) { + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if cap((*z)) >= int(zb0002) { + (*z) = (*z)[:zb0002] + } else { + (*z) = make(BatchJobPrefix, zb0002) + } + for zb0001 := range *z { + (*z)[zb0001], bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, zb0001) + return + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z BatchJobPrefix) Msgsize() (s int) { + s = msgp.ArrayHeaderSize + for zb0003 := range z { + s += msgp.StringPrefixSize + len(z[zb0003]) + } + return +} + // DecodeMsg implements msgp.Decodable func (z *BatchJobRequest) DecodeMsg(dc *msgp.Reader) (err error) { var field []byte @@ -419,6 +502,12 @@ func (z *batchJobInfo) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "RetryAttempts") return } + case "at": + z.Attempts, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "Attempts") + return + } case "cmp": z.Complete, err = dc.ReadBool() if err != nil { @@ -492,9 +581,9 @@ func (z *batchJobInfo) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 16 + // map header, size 17 // write "v" - err = en.Append(0xde, 0x0, 0x10, 0xa1, 0x76) + err = en.Append(0xde, 0x0, 0x11, 0xa1, 0x76) if err != nil { return } @@ -553,6 +642,16 @@ func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "RetryAttempts") return } + // write "at" + err = en.Append(0xa2, 0x61, 0x74) + if err != nil { + return + } + err = en.WriteInt(z.Attempts) + if err != nil { + err = 
msgp.WrapError(err, "Attempts") + return + } // write "cmp" err = en.Append(0xa3, 0x63, 0x6d, 0x70) if err != nil { @@ -659,9 +758,9 @@ func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) { // MarshalMsg implements msgp.Marshaler func (z *batchJobInfo) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 16 + // map header, size 17 // string "v" - o = append(o, 0xde, 0x0, 0x10, 0xa1, 0x76) + o = append(o, 0xde, 0x0, 0x11, 0xa1, 0x76) o = msgp.AppendInt(o, z.Version) // string "jid" o = append(o, 0xa3, 0x6a, 0x69, 0x64) @@ -678,6 +777,9 @@ func (z *batchJobInfo) MarshalMsg(b []byte) (o []byte, err error) { // string "ra" o = append(o, 0xa2, 0x72, 0x61) o = msgp.AppendInt(o, z.RetryAttempts) + // string "at" + o = append(o, 0xa2, 0x61, 0x74) + o = msgp.AppendInt(o, z.Attempts) // string "cmp" o = append(o, 0xa3, 0x63, 0x6d, 0x70) o = msgp.AppendBool(o, z.Complete) @@ -765,6 +867,12 @@ func (z *batchJobInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "RetryAttempts") return } + case "at": + z.Attempts, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Attempts") + return + } case "cmp": z.Complete, bts, err = msgp.ReadBoolBytes(bts) if err != nil { @@ -839,6 +947,6 @@ func (z *batchJobInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *batchJobInfo) Msgsize() (s int) { - s = 3 + 2 + msgp.IntSize + 4 + msgp.StringPrefixSize + len(z.JobID) + 3 + msgp.StringPrefixSize + len(z.JobType) + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.IntSize + 4 + msgp.BoolSize + 4 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.Bucket) + 5 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.Int64Size + s = 3 + 2 + msgp.IntSize + 4 + msgp.StringPrefixSize + len(z.JobID) + 3 + msgp.StringPrefixSize + len(z.JobType) + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.IntSize + 3 + msgp.IntSize + 4 + msgp.BoolSize + 4 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.Bucket) + 5 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.Int64Size return } diff --git a/cmd/batch-handlers_gen_test.go b/cmd/batch-handlers_gen_test.go index 64a04ca6fc7d4..be48e477491b8 100644 --- a/cmd/batch-handlers_gen_test.go +++ b/cmd/batch-handlers_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
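// Illustrative aside, not part of the patch: the Go convention for generated
// files (golang.org/s/generatedcode) is that the "Code generated ... DO NOT
// EDIT." line appears before the first non-comment, non-blank text, which is
// presumably why every *_gen.go and *_gen_test.go hunk in this diff moves the
// marker above the package clause. The conventional header, as a minimal file:

// Code generated by github.com/tinylib/msgp DO NOT EDIT.

package cmd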
+package cmd + import ( "bytes" "testing" @@ -9,6 +9,119 @@ import ( "github.com/tinylib/msgp/msgp" ) +func TestMarshalUnmarshalBatchJobPrefix(t *testing.T) { + v := BatchJobPrefix{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgBatchJobPrefix(b *testing.B) { + v := BatchJobPrefix{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgBatchJobPrefix(b *testing.B) { + v := BatchJobPrefix{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalBatchJobPrefix(b *testing.B) { + v := BatchJobPrefix{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeBatchJobPrefix(t *testing.T) { + v := BatchJobPrefix{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeBatchJobPrefix Msgsize() is inaccurate") + } + + vn := BatchJobPrefix{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeBatchJobPrefix(b *testing.B) { + v := BatchJobPrefix{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeBatchJobPrefix(b *testing.B) { + v := BatchJobPrefix{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + func TestMarshalUnmarshalBatchJobRequest(t *testing.T) { v := BatchJobRequest{} bts, err := v.MarshalMsg(nil) diff --git a/cmd/batch-handlers_test.go b/cmd/batch-handlers_test.go new file mode 100644 index 0000000000000..213e76703e6b7 --- /dev/null +++ b/cmd/batch-handlers_test.go @@ -0,0 +1,75 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package cmd + +import ( + "slices" + "testing" + + "gopkg.in/yaml.v3" +) + +func TestBatchJobPrefix_UnmarshalYAML(t *testing.T) { + type args struct { + yamlStr string + } + type PrefixTemp struct { + Prefix BatchJobPrefix `yaml:"prefix"` + } + tests := []struct { + name string + b PrefixTemp + args args + want []string + wantErr bool + }{ + { + name: "test1", + b: PrefixTemp{}, + args: args{ + yamlStr: ` +prefix: "foo" +`, + }, + want: []string{"foo"}, + wantErr: false, + }, + { + name: "test2", + b: PrefixTemp{}, + args: args{ + yamlStr: ` +prefix: + - "foo" + - "bar" +`, + }, + want: []string{"foo", "bar"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := yaml.Unmarshal([]byte(tt.args.yamlStr), &tt.b); (err != nil) != tt.wantErr { + t.Errorf("UnmarshalYAML() error = %v, wantErr %v", err, tt.wantErr) + } + if !slices.Equal(tt.b.Prefix.F(), tt.want) { + t.Errorf("UnmarshalYAML() = %v, want %v", tt.b.Prefix.F(), tt.want) + } + }) + } +} diff --git a/cmd/batch-job-common-types.go b/cmd/batch-job-common-types.go index 3c256378b738d..d02a71dd53509 100644 --- a/cmd/batch-job-common-types.go +++ b/cmd/batch-job-common-types.go @@ -23,7 +23,7 @@ import ( "time" "github.com/dustin/go-humanize" - "github.com/minio/pkg/v2/wildcard" + "github.com/minio/pkg/v3/wildcard" "gopkg.in/yaml.v3" ) @@ -275,7 +275,7 @@ func (sf BatchJobSizeFilter) Validate() error { type BatchJobSize int64 // UnmarshalYAML to parse humanized byte values -func (s *BatchJobSize) UnmarshalYAML(unmarshal func(interface{}) error) error { +func (s *BatchJobSize) UnmarshalYAML(unmarshal func(any) error) error { var batchExpireSz string err := unmarshal(&batchExpireSz) if err != nil { diff --git a/cmd/batch-job-common-types_gen.go b/cmd/batch-job-common-types_gen.go index dc3ee6e70c8c4..12edddf434376 100644 --- a/cmd/batch-job-common-types_gen.go +++ b/cmd/batch-job-common-types_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) diff --git a/cmd/batch-job-common-types_gen_test.go b/cmd/batch-job-common-types_gen_test.go index 96d79ef5812ed..3e0684a8c5e96 100644 --- a/cmd/batch-job-common-types_gen_test.go +++ b/cmd/batch-job-common-types_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package cmd + import ( "bytes" "testing" diff --git a/cmd/batch-replicate.go b/cmd/batch-replicate.go index 2e90b0f364f4d..37a1834d489e5 100644 --- a/cmd/batch-replicate.go +++ b/cmd/batch-replicate.go @@ -21,8 +21,8 @@ import ( "time" miniogo "github.com/minio/minio-go/v7" - "github.com/minio/minio/internal/auth" + "github.com/minio/pkg/v3/xtime" ) //go:generate msgp -file $GOFILE @@ -65,12 +65,12 @@ import ( // BatchReplicateFilter holds all the filters currently supported for batch replication type BatchReplicateFilter struct { - NewerThan time.Duration `yaml:"newerThan,omitempty" json:"newerThan"` - OlderThan time.Duration `yaml:"olderThan,omitempty" json:"olderThan"` - CreatedAfter time.Time `yaml:"createdAfter,omitempty" json:"createdAfter"` - CreatedBefore time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"` - Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"` - Metadata []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"` + NewerThan xtime.Duration `yaml:"newerThan,omitempty" json:"newerThan"` + OlderThan xtime.Duration `yaml:"olderThan,omitempty" json:"olderThan"` + CreatedAfter time.Time `yaml:"createdAfter,omitempty" json:"createdAfter"` + CreatedBefore time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"` + Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"` + Metadata []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"` } // BatchJobReplicateFlags various configurations for replication job definition currently includes @@ -151,7 +151,7 @@ func (t BatchJobReplicateTarget) ValidPath() bool { type BatchJobReplicateSource struct { Type BatchJobReplicateResourceType `yaml:"type" json:"type"` Bucket string `yaml:"bucket" json:"bucket"` - Prefix string `yaml:"prefix" json:"prefix"` + Prefix BatchJobPrefix `yaml:"prefix" json:"prefix"` Endpoint string `yaml:"endpoint" json:"endpoint"` Path string `yaml:"path" json:"path"` Creds BatchJobReplicateCredentials `yaml:"credentials" json:"credentials"` diff --git a/cmd/batch-replicate_gen.go b/cmd/batch-replicate_gen.go index 26a433ddffbd2..16f3eed44ac09 100644 --- a/cmd/batch-replicate_gen.go +++ b/cmd/batch-replicate_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
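// Illustrative aside, not part of the patch: BatchReplicateFilter above moves
// NewerThan/OlderThan from time.Duration to xtime.Duration (minio/pkg/v3/xtime),
// presumably because the YAML fixtures later in this diff use day-suffixed values
// such as "7d10h31s", which the standard library parser rejects:

package main

import (
	"fmt"
	"time"
)

func main() {
	if _, err := time.ParseDuration("7d10h31s"); err != nil {
		fmt.Println(err) // time: unknown unit "d" in duration "7d10h31s"
	}
}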
+package cmd + import ( "github.com/tinylib/msgp/msgp" ) @@ -411,7 +411,7 @@ func (z *BatchJobReplicateSource) DecodeMsg(dc *msgp.Reader) (err error) { return } case "Prefix": - z.Prefix, err = dc.ReadString() + err = z.Prefix.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Prefix") return @@ -514,7 +514,7 @@ func (z *BatchJobReplicateSource) EncodeMsg(en *msgp.Writer) (err error) { if err != nil { return } - err = en.WriteString(z.Prefix) + err = z.Prefix.EncodeMsg(en) if err != nil { err = msgp.WrapError(err, "Prefix") return @@ -600,7 +600,11 @@ func (z *BatchJobReplicateSource) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.AppendString(o, z.Bucket) // string "Prefix" o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78) - o = msgp.AppendString(o, z.Prefix) + o, err = z.Prefix.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Prefix") + return + } // string "Endpoint" o = append(o, 0xa8, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74) o = msgp.AppendString(o, z.Endpoint) @@ -664,7 +668,7 @@ func (z *BatchJobReplicateSource) UnmarshalMsg(bts []byte) (o []byte, err error) return } case "Prefix": - z.Prefix, bts, err = msgp.ReadStringBytes(bts) + bts, err = z.Prefix.UnmarshalMsg(bts) if err != nil { err = msgp.WrapError(err, "Prefix") return @@ -742,7 +746,7 @@ func (z *BatchJobReplicateSource) UnmarshalMsg(bts []byte) (o []byte, err error) // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BatchJobReplicateSource) Msgsize() (s int) { - s = 1 + 5 + msgp.StringPrefixSize + len(string(z.Type)) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 5 + msgp.StringPrefixSize + len(z.Path) + 6 + 1 + 10 + msgp.StringPrefixSize + len(z.Creds.AccessKey) + 10 + msgp.StringPrefixSize + len(z.Creds.SecretKey) + 13 + msgp.StringPrefixSize + len(z.Creds.SessionToken) + 9 + z.Snowball.Msgsize() + s = 1 + 5 + msgp.StringPrefixSize + len(string(z.Type)) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + z.Prefix.Msgsize() + 9 + msgp.StringPrefixSize + len(z.Endpoint) + 5 + msgp.StringPrefixSize + len(z.Path) + 6 + 1 + 10 + msgp.StringPrefixSize + len(z.Creds.AccessKey) + 10 + msgp.StringPrefixSize + len(z.Creds.SecretKey) + 13 + msgp.StringPrefixSize + len(z.Creds.SessionToken) + 9 + z.Snowball.Msgsize() return } @@ -1409,13 +1413,13 @@ func (z *BatchReplicateFilter) DecodeMsg(dc *msgp.Reader) (err error) { } switch msgp.UnsafeString(field) { case "NewerThan": - z.NewerThan, err = dc.ReadDuration() + err = z.NewerThan.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "NewerThan") return } case "OlderThan": - z.OlderThan, err = dc.ReadDuration() + err = z.OlderThan.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "OlderThan") return @@ -1489,7 +1493,7 @@ func (z *BatchReplicateFilter) EncodeMsg(en *msgp.Writer) (err error) { if err != nil { return } - err = en.WriteDuration(z.NewerThan) + err = z.NewerThan.EncodeMsg(en) if err != nil { err = msgp.WrapError(err, "NewerThan") return @@ -1499,7 +1503,7 @@ func (z *BatchReplicateFilter) EncodeMsg(en *msgp.Writer) (err error) { if err != nil { return } - err = en.WriteDuration(z.OlderThan) + err = z.OlderThan.EncodeMsg(en) if err != nil { err = msgp.WrapError(err, "OlderThan") return @@ -1567,10 +1571,18 @@ func (z *BatchReplicateFilter) MarshalMsg(b []byte) (o []byte, err error) { // map header, size 6 // string "NewerThan" o = append(o, 0x86, 0xa9, 0x4e, 0x65, 0x77, 0x65, 0x72, 
0x54, 0x68, 0x61, 0x6e) - o = msgp.AppendDuration(o, z.NewerThan) + o, err = z.NewerThan.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "NewerThan") + return + } // string "OlderThan" o = append(o, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e) - o = msgp.AppendDuration(o, z.OlderThan) + o, err = z.OlderThan.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "OlderThan") + return + } // string "CreatedAfter" o = append(o, 0xac, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72) o = msgp.AppendTime(o, z.CreatedAfter) @@ -1619,13 +1631,13 @@ func (z *BatchReplicateFilter) UnmarshalMsg(bts []byte) (o []byte, err error) { } switch msgp.UnsafeString(field) { case "NewerThan": - z.NewerThan, bts, err = msgp.ReadDurationBytes(bts) + bts, err = z.NewerThan.UnmarshalMsg(bts) if err != nil { err = msgp.WrapError(err, "NewerThan") return } case "OlderThan": - z.OlderThan, bts, err = msgp.ReadDurationBytes(bts) + bts, err = z.OlderThan.UnmarshalMsg(bts) if err != nil { err = msgp.WrapError(err, "OlderThan") return @@ -1694,7 +1706,7 @@ func (z *BatchReplicateFilter) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BatchReplicateFilter) Msgsize() (s int) { - s = 1 + 10 + msgp.DurationSize + 10 + msgp.DurationSize + 13 + msgp.TimeSize + 14 + msgp.TimeSize + 5 + msgp.ArrayHeaderSize + s = 1 + 10 + z.NewerThan.Msgsize() + 10 + z.OlderThan.Msgsize() + 13 + msgp.TimeSize + 14 + msgp.TimeSize + 5 + msgp.ArrayHeaderSize for za0001 := range z.Tags { s += z.Tags[za0001].Msgsize() } diff --git a/cmd/batch-replicate_gen_test.go b/cmd/batch-replicate_gen_test.go index f59a7fe5270ad..68ab18650efae 100644 --- a/cmd/batch-replicate_gen_test.go +++ b/cmd/batch-replicate_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "bytes" "testing" diff --git a/cmd/batch-replicate_test.go b/cmd/batch-replicate_test.go new file mode 100644 index 0000000000000..e84c59cf96589 --- /dev/null +++ b/cmd/batch-replicate_test.go @@ -0,0 +1,182 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package cmd + +import ( + "slices" + "testing" + + "gopkg.in/yaml.v3" +) + +func TestParseBatchJobReplicate(t *testing.T) { + replicateYaml := ` +replicate: + apiVersion: v1 + # source of the objects to be replicated + source: + type: minio # valid values are "s3" or "minio" + bucket: mytest + prefix: object-prefix1 # 'PREFIX' is optional + # If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted + # Either the 'source' or 'remote' *must* be the "local" deployment +# endpoint: "http://127.0.0.1:9000" +# # path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto" +# credentials: +# accessKey: minioadmin # Required +# secretKey: minioadmin # Required +# # sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used + snowball: # automatically activated if the source is local + disable: true # optionally turn-off snowball archive transfer +# batch: 100 # upto this many objects per archive +# inmemory: true # indicates if the archive must be staged locally or in-memory +# compress: false # S2/Snappy compressed archive +# smallerThan: 5MiB # create archive for all objects smaller than 5MiB +# skipErrs: false # skips any source side read() errors + + # target where the objects must be replicated + target: + type: minio # valid values are "s3" or "minio" + bucket: mytest + prefix: stage # 'PREFIX' is optional + # If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted + + # Either the 'source' or 'remote' *must* be the "local" deployment + endpoint: "http://127.0.0.1:9001" + # path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto" + credentials: + accessKey: minioadmin + secretKey: minioadmin + # sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used + + # NOTE: All flags are optional + # - filtering criteria only applies for all source objects match the criteria + # - configurable notification endpoints + # - configurable retries for the job (each retry skips successfully previously replaced objects) + flags: + filter: + newerThan: "7d10h31s" # match objects newer than this value (e.g. 7d10h31s) + olderThan: "7d" # match objects older than this value (e.g. 7d10h31s) +# createdAfter: "date" # match objects created after "date" +# createdBefore: "date" # match objects created before "date" + + ## NOTE: tags are not supported when "source" is remote. 
+ tags: + - key: "name" + value: "pick*" # match objects with tag 'name', with all values starting with 'pick' + + metadata: + - key: "content-type" + value: "image/*" # match objects with 'content-type', with all values starting with 'image/' + +# notify: +# endpoint: "https://notify.endpoint" # notification endpoint to receive job status events +# token: "Bearer xxxxx" # optional authentication token for the notification endpoint +# +# retry: +# attempts: 10 # number of retries for the job before giving up +# delay: "500ms" # least amount of delay between each retry + +` + var job BatchJobRequest + err := yaml.Unmarshal([]byte(replicateYaml), &job) + if err != nil { + t.Fatal("Failed to parse batch-job-replicate yaml", err) + } + if !slices.Equal(job.Replicate.Source.Prefix.F(), []string{"object-prefix1"}) { + t.Fatal("Failed to parse batch-job-replicate yaml", err) + } + multiPrefixReplicateYaml := ` +replicate: + apiVersion: v1 + # source of the objects to be replicated + source: + type: minio # valid values are "s3" or "minio" + bucket: mytest + prefix: # 'PREFIX' is optional + - object-prefix1 + - object-prefix2 + # If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted + # Either the 'source' or 'remote' *must* be the "local" deployment +# endpoint: "http://127.0.0.1:9000" +# # path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto" +# credentials: +# accessKey: minioadmin # Required +# secretKey: minioadmin # Required +# # sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used + snowball: # automatically activated if the source is local + disable: true # optionally turn-off snowball archive transfer +# batch: 100 # upto this many objects per archive +# inmemory: true # indicates if the archive must be staged locally or in-memory +# compress: false # S2/Snappy compressed archive +# smallerThan: 5MiB # create archive for all objects smaller than 5MiB +# skipErrs: false # skips any source side read() errors + + # target where the objects must be replicated + target: + type: minio # valid values are "s3" or "minio" + bucket: mytest + prefix: stage # 'PREFIX' is optional + # If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted + + # Either the 'source' or 'remote' *must* be the "local" deployment + endpoint: "http://127.0.0.1:9001" + # path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto" + credentials: + accessKey: minioadmin + secretKey: minioadmin + # sessionToken: SESSION-TOKEN # Optional only available when rotating credentials are used + + # NOTE: All flags are optional + # - filtering criteria only applies for all source objects match the criteria + # - configurable notification endpoints + # - configurable retries for the job (each retry skips successfully previously replaced objects) + flags: + filter: + newerThan: "7d10h31s" # match objects newer than this value (e.g. 7d10h31s) + olderThan: "7d" # match objects older than this value (e.g. 7d10h31s) +# createdAfter: "date" # match objects created after "date" +# createdBefore: "date" # match objects created before "date" + + ## NOTE: tags are not supported when "source" is remote. 
+ tags: + - key: "name" + value: "pick*" # match objects with tag 'name', with all values starting with 'pick' + + metadata: + - key: "content-type" + value: "image/*" # match objects with 'content-type', with all values starting with 'image/' + +# notify: +# endpoint: "https://notify.endpoint" # notification endpoint to receive job status events +# token: "Bearer xxxxx" # optional authentication token for the notification endpoint +# +# retry: +# attempts: 10 # number of retries for the job before giving up +# delay: "500ms" # least amount of delay between each retry + +` + var multiPrefixJob BatchJobRequest + err = yaml.Unmarshal([]byte(multiPrefixReplicateYaml), &multiPrefixJob) + if err != nil { + t.Fatal("Failed to parse batch-job-replicate yaml", err) + } + if !slices.Equal(multiPrefixJob.Replicate.Source.Prefix.F(), []string{"object-prefix1", "object-prefix2"}) { + t.Fatal("Failed to parse batch-job-replicate yaml") + } +} diff --git a/cmd/batch-rotate.go b/cmd/batch-rotate.go index 4918262464127..3e8f18fafc4c2 100644 --- a/cmd/batch-rotate.go +++ b/cmd/batch-rotate.go @@ -21,6 +21,7 @@ import ( "context" "encoding/base64" "fmt" + "maps" "math/rand" "net/http" "runtime" @@ -33,9 +34,8 @@ import ( "github.com/minio/minio/internal/crypto" xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/kms" - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/env" - "github.com/minio/pkg/v2/workers" + "github.com/minio/pkg/v3/env" + "github.com/minio/pkg/v3/workers" ) // keyrotate: @@ -96,6 +96,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error { if e.Type == ssekms && spaces { return crypto.ErrInvalidEncryptionKeyID } + if e.Type == ssekms && GlobalKMS != nil { ctx := kms.Context{} if e.Context != "" { @@ -110,11 +111,9 @@ func (e BatchJobKeyRotateEncryption) Validate() error { } } e.kmsContext = kms.Context{} - for k, v := range ctx { - e.kmsContext[k] = v - } + maps.Copy(e.kmsContext, ctx) ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation - if _, err := GlobalKMS.GenerateKey(GlobalContext, e.Key, ctx); err != nil { + if _, err := GlobalKMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{Name: e.Key, AssociatedData: ctx}); err != nil { return err } } @@ -225,9 +224,7 @@ func (r *BatchJobKeyRotateV1) KeyRotate(ctx context.Context, api ObjectLayer, ob // Since we are rotating the keys, make sure to update the metadata. 
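// Illustrative aside, not part of the patch: the batch-rotate.go hunks here swap
// manual range-copy loops for maps.Copy (standard library since Go 1.21), which
// writes every key/value pair of src into dst, overwriting keys that already exist:

package main

import (
	"fmt"
	"maps"
)

func main() {
	dst := map[string]string{"existing": "kept", "x-amz-meta-a": "old"}
	src := map[string]string{"x-amz-meta-a": "new", "x-amz-meta-b": "added"}
	maps.Copy(dst, src) // equivalent to: for k, v := range src { dst[k] = v }
	fmt.Println(dst)    // map[existing:kept x-amz-meta-a:new x-amz-meta-b:added]
}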
oi.metadataOnly = true oi.keyRotation = true - for k, v := range encMetadata { - oi.UserDefined[k] = v - } + maps.Copy(oi.UserDefined, encMetadata) if _, err := api.CopyObject(ctx, r.Bucket, oi.Name, r.Bucket, oi.Name, oi, ObjectOptions{ VersionID: oi.VersionID, }, ObjectOptions{ @@ -257,7 +254,7 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba JobType: string(job.Type()), StartTime: job.Started, } - if err := ri.load(ctx, api, job); err != nil { + if err := ri.loadOrInit(ctx, api, job); err != nil { return err } if ri.Complete { @@ -267,14 +264,18 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba globalBatchJobsMetrics.save(job.ID, ri) lastObject := ri.Object + retryAttempts := job.KeyRotate.Flags.Retry.Attempts + if retryAttempts <= 0 { + retryAttempts = batchKeyRotateJobDefaultRetries + } delay := job.KeyRotate.Flags.Retry.Delay - if delay == 0 { + if delay <= 0 { delay = batchKeyRotateJobDefaultRetryDelay } rnd := rand.New(rand.NewSource(time.Now().UnixNano())) - skip := func(info FileInfo) (ok bool) { + selectObj := func(info FileInfo) (ok bool) { if r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan { // skip all objects that are newer than specified older duration return false @@ -354,21 +355,25 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba return err } - retryAttempts := ri.RetryAttempts ctx, cancel := context.WithCancel(ctx) - results := make(chan ObjectInfo, 100) + results := make(chan itemOrErr[ObjectInfo], 100) if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{ Marker: lastObject, - Filter: skip, + Filter: selectObj, }); err != nil { cancel() // Do not need to retry if we can't list objects on source. return err } - - for result := range results { - result := result + failed := false + for res := range results { + if res.Err != nil { + failed = true + batchLogIf(ctx, res.Err) + break + } + result := res.Item sseKMS := crypto.S3KMS.IsEncrypted(result.UserDefined) sseS3 := crypto.S3.IsEncrypted(result.UserDefined) if !sseKMS && !sseS3 { // neither sse-s3 nor sse-kms disallowed @@ -378,21 +383,30 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba go func() { defer wk.Give() for attempts := 1; attempts <= retryAttempts; attempts++ { - attempts := attempts stopFn := globalBatchJobsMetrics.trace(batchJobMetricKeyRotation, job.ID, attempts) success := true if err := r.KeyRotate(ctx, api, result); err != nil { stopFn(result, err) - logger.LogIf(ctx, err) + batchLogIf(ctx, err) success = false + if attempts >= retryAttempts { + auditOptions := AuditLogOptions{ + Event: "KeyRotate", + APIName: "StartBatchJob", + Bucket: result.Bucket, + Object: result.Name, + VersionID: result.VersionID, + Error: err.Error(), + } + auditLogInternal(ctx, auditOptions) + } } else { stopFn(result, nil) } - ri.trackCurrentBucketObject(r.Bucket, result, success) - ri.RetryAttempts = attempts + ri.trackCurrentBucketObject(r.Bucket, result, success, attempts) globalBatchJobsMetrics.save(job.ID, ri) // persist in-memory state to disk after every 10secs. 
- logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) + batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job)) if success { break } @@ -408,14 +422,14 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba } wk.Wait() - ri.Complete = ri.ObjectsFailed == 0 - ri.Failed = ri.ObjectsFailed > 0 + ri.Complete = !failed && ri.ObjectsFailed == 0 + ri.Failed = failed || ri.ObjectsFailed > 0 globalBatchJobsMetrics.save(job.ID, ri) // persist in-memory state to disk. - logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job)) + batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job)) if err := r.Notify(ctx, ri); err != nil { - logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err)) + batchLogIf(ctx, fmt.Errorf("unable to notify %v", err)) } cancel() @@ -476,8 +490,5 @@ func (r *BatchJobKeyRotateV1) Validate(ctx context.Context, job BatchJobRequest, } } - if err := r.Flags.Retry.Validate(); err != nil { - return err - } - return nil + return r.Flags.Retry.Validate() } diff --git a/cmd/batch-rotate_gen.go b/cmd/batch-rotate_gen.go index 7ddeb5edaa995..e714e1c865c87 100644 --- a/cmd/batch-rotate_gen.go +++ b/cmd/batch-rotate_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) diff --git a/cmd/batch-rotate_gen_test.go b/cmd/batch-rotate_gen_test.go index 5da8f31745284..906b793856708 100644 --- a/cmd/batch-rotate_gen_test.go +++ b/cmd/batch-rotate_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "bytes" "testing" diff --git a/cmd/benchmark-utils_test.go b/cmd/benchmark-utils_test.go index 08bad89e10681..0f7025a9b734b 100644 --- a/cmd/benchmark-utils_test.go +++ b/cmd/benchmark-utils_test.go @@ -35,7 +35,7 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) { // obtains random bucket name. bucket := getRandomBucketName() // create bucket. - err = obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{}) + err = obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{}) if err != nil { b.Fatal(err) } @@ -51,10 +51,10 @@ func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) { // benchmark utility which helps obtain number of allocations and bytes allocated per ops. b.ReportAllocs() // the actual benchmark for PutObject starts here. Reset the benchmark timer. - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for i := 0; b.Loop(); i++ { // insert the object. - objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i), + objInfo, err := obj.PutObject(b.Context(), bucket, "object"+strconv.Itoa(i), mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{}) if err != nil { b.Fatal(err) @@ -76,7 +76,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) { object := getRandomObjectName() // create bucket. - err = obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{}) + err = obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{}) if err != nil { b.Fatal(err) } @@ -90,7 +90,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) { textData := generateBytesData(objSize) // generate md5sum for the generated data. // md5sum of the data to written is required as input for NewMultipartUpload. 
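// Illustrative aside, not part of the patch: the benchmark rewrites in this file
// lean on newer testing APIs: B.Loop (Go 1.24) replaces the manual
// b.ResetTimer()/b.N loop, B.Context (Go 1.24) replaces context.Background(),
// and ranging over an integer is Go 1.22+. A minimal benchmark in that style
// (it belongs in a _test.go file):

package cmd_test

import "testing"

func BenchmarkSketch(b *testing.B) {
	ctx := b.Context() // canceled automatically when the benchmark finishes
	var n int
	for b.Loop() { // starts/stops the timer for you; no b.ResetTimer() needed
		for j := range 3 { // range over an integer
			n += j
		}
	}
	_, _ = ctx, n
}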
- res, err := obj.NewMultipartUpload(context.Background(), bucket, object, ObjectOptions{}) + res, err := obj.NewMultipartUpload(b.Context(), bucket, object, ObjectOptions{}) if err != nil { b.Fatal(err) } @@ -101,11 +101,11 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) { // benchmark utility which helps obtain number of allocations and bytes allocated per ops. b.ReportAllocs() // the actual benchmark for PutObjectPart starts here. Reset the benchmark timer. - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for i := 0; b.Loop(); i++ { // insert the object. totalPartsNR := int(math.Ceil(float64(objSize) / float64(partSize))) - for j := 0; j < totalPartsNR; j++ { + for j := range totalPartsNR { if j < totalPartsNR-1 { textPartData = textData[j*partSize : (j+1)*partSize-1] } else { @@ -113,7 +113,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) { } md5hex := getMD5Hash(textPartData) var partInfo PartInfo - partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, res.UploadID, j, + partInfo, err = obj.PutObjectPart(b.Context(), bucket, object, res.UploadID, j, mustGetPutObjReader(b, bytes.NewReader(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{}) if err != nil { b.Fatal(err) @@ -130,7 +130,7 @@ func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) { // creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function. func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) { // create a temp Erasure/FS backend. - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(b.Context()) defer cancel() objLayer, disks, err := prepareTestBackend(ctx, instanceType) if err != nil { @@ -146,7 +146,7 @@ func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) { // creates Erasure/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function. func benchmarkPutObject(b *testing.B, instanceType string, objSize int) { // create a temp Erasure/FS backend. - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(b.Context()) defer cancel() objLayer, disks, err := prepareTestBackend(ctx, instanceType) if err != nil { @@ -162,7 +162,7 @@ func benchmarkPutObject(b *testing.B, instanceType string, objSize int) { // creates Erasure/FS backend setup, obtains the object layer and runs parallel benchmark for put object. func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) { // create a temp Erasure/FS backend. - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(b.Context()) defer cancel() objLayer, disks, err := prepareTestBackend(ctx, instanceType) if err != nil { @@ -196,7 +196,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) { // obtains random bucket name. bucket := getRandomBucketName() // create bucket. - err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{}) + err := obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{}) if err != nil { b.Fatal(err) } @@ -218,7 +218,7 @@ func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) { i := 0 for pb.Next() { // insert the object. 
- objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i), + objInfo, err := obj.PutObject(b.Context(), bucket, "object"+strconv.Itoa(i), mustGetPutObjReader(b, bytes.NewReader(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{}) if err != nil { b.Fatal(err) diff --git a/cmd/bitrot-streaming.go b/cmd/bitrot-streaming.go index de8b0831da98f..7c1a313b77349 100644 --- a/cmd/bitrot-streaming.go +++ b/cmd/bitrot-streaming.go @@ -20,27 +20,40 @@ package cmd import ( "bytes" "context" + "errors" "hash" "io" "sync" xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/ioutil" + "github.com/minio/minio/internal/ringbuffer" ) // Calculates bitrot in chunks and writes the hash into the stream. type streamingBitrotWriter struct { iow io.WriteCloser - closeWithErr func(err error) error + closeWithErr func(err error) h hash.Hash shardSize int64 canClose *sync.WaitGroup + byteBuf []byte + finished bool } func (b *streamingBitrotWriter) Write(p []byte) (int, error) { if len(p) == 0 { return 0, nil } + if b.finished { + return 0, errors.New("bitrot write not allowed") + } + if int64(len(p)) > b.shardSize { + return 0, errors.New("unexpected bitrot buffer size") + } + if int64(len(p)) < b.shardSize { + b.finished = true + } b.h.Reset() b.h.Write(p) hashBytes := b.h.Sum(nil) @@ -62,7 +75,10 @@ func (b *streamingBitrotWriter) Write(p []byte) (int, error) { } func (b *streamingBitrotWriter) Close() error { + // Close the underlying writer. + // This will also flush the ring buffer if used. err := b.iow.Close() + // Wait for all data to be written before returning else it causes race conditions. // Race condition is because of io.PipeWriter implementation. i.e consider the following // sequent of operations: @@ -73,29 +89,34 @@ func (b *streamingBitrotWriter) Close() error { if b.canClose != nil { b.canClose.Wait() } + + // Recycle the buffer. + if b.byteBuf != nil { + globalBytePoolCap.Load().Put(b.byteBuf) + b.byteBuf = nil + } return err } // newStreamingBitrotWriterBuffer returns streaming bitrot writer implementation. // The output is written to the supplied writer w. func newStreamingBitrotWriterBuffer(w io.Writer, algo BitrotAlgorithm, shardSize int64) io.Writer { - return &streamingBitrotWriter{iow: ioutil.NopCloser(w), h: algo.New(), shardSize: shardSize, canClose: nil, closeWithErr: func(err error) error { - // Similar to CloseWithError on pipes we always return nil. - return nil - }} + return &streamingBitrotWriter{iow: ioutil.NopCloser(w), h: algo.New(), shardSize: shardSize, canClose: nil, closeWithErr: func(err error) {}} } // Returns streaming bitrot writer implementation. func newStreamingBitrotWriter(disk StorageAPI, origvolume, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer { - r, w := io.Pipe() h := algo.New() + buf := globalBytePoolCap.Load().Get() + rb := ringbuffer.NewBuffer(buf[:cap(buf)]).SetBlocking(true) bw := &streamingBitrotWriter{ - iow: ioutil.NewDeadlineWriter(w, globalDriveConfig.GetMaxTimeout()), - closeWithErr: w.CloseWithError, + iow: ioutil.NewDeadlineWriter(rb.WriteCloser(), globalDriveConfig.GetMaxTimeout()), + closeWithErr: rb.CloseWithError, h: h, shardSize: shardSize, canClose: &sync.WaitGroup{}, + byteBuf: buf, } bw.canClose.Add(1) go func() { @@ -106,7 +127,7 @@ func newStreamingBitrotWriter(disk StorageAPI, origvolume, volume, filePath stri bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums. 
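// Illustrative aside, not part of the patch: the streaming bitrot layout stores
// one hash per shard alongside the shard data, so the on-disk size works out to
// ceil(length/shardSize)*hashSize + length. With the sizes used by the bitrot
// test further below (length 35, shard size 10) and an assumed 32-byte hash:

package main

import "fmt"

// ceilDiv is a plain ceiling division standing in for the package's ceilFrac helper.
func ceilDiv(num, den int64) int64 {
	return (num + den - 1) / den
}

func main() {
	length, shardSize, hashSize := int64(35), int64(10), int64(32)
	total := ceilDiv(length, shardSize)*hashSize + length
	fmt.Println(total) // 4 shards -> 4*32 + 35 = 163 bytes on disk
}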
totalFileSize = bitrotSumsTotalSize + length } - r.CloseWithError(disk.CreateFile(context.TODO(), origvolume, volume, filePath, totalFileSize, r)) + rb.CloseWithError(disk.CreateFile(context.TODO(), origvolume, volume, filePath, totalFileSize, rb)) }() return bw } @@ -131,13 +152,7 @@ func (b *streamingBitrotReader) Close() error { } if closer, ok := b.rc.(io.Closer); ok { // drain the body for connection reuse at network layer. - xhttp.DrainBody(struct { - io.Reader - io.Closer - }{ - Reader: b.rc, - Closer: closeWrapper(func() error { return nil }), - }) + xhttp.DrainBody(io.NopCloser(b.rc)) return closer.Close() } return nil diff --git a/cmd/bitrot.go b/cmd/bitrot.go index f858f1b486857..f1b2a0e4e890e 100644 --- a/cmd/bitrot.go +++ b/cmd/bitrot.go @@ -99,7 +99,7 @@ func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) { return alg } } - return + return a } func newBitrotWriter(disk StorageAPI, origvolume, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer { @@ -128,14 +128,20 @@ func closeBitrotReaders(rs []io.ReaderAt) { } // Close all the writers. -func closeBitrotWriters(ws []io.Writer) { - for _, w := range ws { - if w != nil { - if bw, ok := w.(io.Closer); ok { - bw.Close() - } +func closeBitrotWriters(ws []io.Writer) []error { + errs := make([]error, len(ws)) + for i, w := range ws { + if w == nil { + errs[i] = errDiskNotFound + continue + } + if bw, ok := w.(io.Closer); ok { + errs[i] = bw.Close() + } else { + errs[i] = nil } } + return errs } // Returns hash sum for whole-bitrot, nil for streaming-bitrot. @@ -178,7 +184,7 @@ func bitrotVerify(r io.Reader, wantSize, partSize int64, algo BitrotAlgorithm, w return errFileCorrupt } - bufp := xioutil.ODirectPoolSmall.Get().(*[]byte) + bufp := xioutil.ODirectPoolSmall.Get() defer xioutil.ODirectPoolSmall.Put(bufp) for left > 0 { diff --git a/cmd/bitrot_test.go b/cmd/bitrot_test.go index 9f2ebde3ad2ac..636d18736e77c 100644 --- a/cmd/bitrot_test.go +++ b/cmd/bitrot_test.go @@ -18,7 +18,6 @@ package cmd import ( - "context" "io" "testing" ) @@ -34,7 +33,7 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) { t.Fatal(err) } - disk.MakeVol(context.Background(), volume) + disk.MakeVol(t.Context(), volume) writer := newBitrotWriter(disk, "", volume, filePath, 35, bitrotAlgo, 10) diff --git a/cmd/bootstrap-messages.go b/cmd/bootstrap-messages.go index 39d82e5f0b70b..c01b4529f52bb 100644 --- a/cmd/bootstrap-messages.go +++ b/cmd/bootstrap-messages.go @@ -48,9 +48,7 @@ func (bs *bootstrapTracer) Events() []madmin.TraceInfo { traceInfo := make([]madmin.TraceInfo, 0, bootstrapTraceLimit) bs.mu.RLock() - for _, i := range bs.info { - traceInfo = append(traceInfo, i) - } + traceInfo = append(traceInfo, bs.info...) bs.mu.RUnlock() return traceInfo diff --git a/cmd/bootstrap-peer-server.go b/cmd/bootstrap-peer-server.go index 52dc772981b9b..4fb179bb26b54 100644 --- a/cmd/bootstrap-peer-server.go +++ b/cmd/bootstrap-peer-server.go @@ -19,9 +19,13 @@ package cmd import ( "context" + "crypto/md5" + "encoding/hex" "errors" "fmt" + "io" "math/rand" + "os" "reflect" "strings" "sync" @@ -30,7 +34,7 @@ import ( "github.com/minio/minio-go/v7/pkg/set" "github.com/minio/minio/internal/grid" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) // To abstract a node over network. 
@@ -43,10 +47,15 @@ type ServerSystemConfig struct { NEndpoints int CmdLines []string MinioEnv map[string]string + Checksum string } // Diff - returns error on first difference found in two configs. func (s1 *ServerSystemConfig) Diff(s2 *ServerSystemConfig) error { + if s1.Checksum != s2.Checksum { + return fmt.Errorf("Expected MinIO binary checksum: %s, seen: %s", s1.Checksum, s2.Checksum) + } + ns1 := s1.NEndpoints ns2 := s2.NEndpoints if ns1 != ns2 { @@ -82,7 +91,7 @@ func (s1 *ServerSystemConfig) Diff(s2 *ServerSystemConfig) error { extra = append(extra, k) } } - msg := "Expected same MINIO_ environment variables and values across all servers: " + msg := "Expected MINIO_* environment name and values across all servers to be same: " if len(missing) > 0 { msg += fmt.Sprintf(`Missing environment values: %v. `, missing) } @@ -97,14 +106,17 @@ func (s1 *ServerSystemConfig) Diff(s2 *ServerSystemConfig) error { } var skipEnvs = map[string]struct{}{ - "MINIO_OPTS": {}, - "MINIO_CERT_PASSWD": {}, - "MINIO_SERVER_DEBUG": {}, - "MINIO_DSYNC_TRACE": {}, - "MINIO_ROOT_USER": {}, - "MINIO_ROOT_PASSWORD": {}, - "MINIO_ACCESS_KEY": {}, - "MINIO_SECRET_KEY": {}, + "MINIO_OPTS": {}, + "MINIO_CERT_PASSWD": {}, + "MINIO_SERVER_DEBUG": {}, + "MINIO_DSYNC_TRACE": {}, + "MINIO_ROOT_USER": {}, + "MINIO_ROOT_PASSWORD": {}, + "MINIO_ACCESS_KEY": {}, + "MINIO_SECRET_KEY": {}, + "MINIO_OPERATOR_VERSION": {}, + "MINIO_VSPHERE_PLUGIN_VERSION": {}, + "MINIO_CI_CD": {}, } func getServerSystemCfg() *ServerSystemConfig { @@ -120,7 +132,7 @@ func getServerSystemCfg() *ServerSystemConfig { } envValues[envK] = logger.HashString(env.Get(envK, "")) } - scfg := &ServerSystemConfig{NEndpoints: globalEndpoints.NEndpoints(), MinioEnv: envValues} + scfg := &ServerSystemConfig{NEndpoints: globalEndpoints.NEndpoints(), MinioEnv: envValues, Checksum: binaryChecksum} var cmdLines []string for _, ep := range globalEndpoints { cmdLines = append(cmdLines, ep.CmdLine) @@ -167,6 +179,26 @@ func (client *bootstrapRESTClient) String() string { return client.gridConn.String() } +var binaryChecksum = getBinaryChecksum() + +func getBinaryChecksum() string { + mw := md5.New() + binPath, err := os.Executable() + if err != nil { + logger.Error("Calculating checksum failed: %s", err) + return "00000000000000000000000000000000" + } + b, err := os.Open(binPath) + if err != nil { + logger.Error("Calculating checksum failed: %s", err) + return "00000000000000000000000000000000" + } + + defer b.Close() + io.Copy(mw, b) + return hex.EncodeToString(mw.Sum(nil)) +} + func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointServerPools, gm *grid.Manager) error { srcCfg := getServerSystemCfg() clnts := newBootstrapRESTClients(endpointServerPools, gm) @@ -196,9 +228,9 @@ func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointS err := clnt.Verify(ctx, srcCfg) mu.Lock() if err != nil { - bootstrapTraceMsg(fmt.Sprintf("clnt.Verify: %v, endpoint: %s", err, clnt)) + bootstrapTraceMsg(fmt.Sprintf("bootstrapVerify: %v, endpoint: %s", err, clnt)) if !isNetworkError(err) { - logger.LogOnceIf(context.Background(), fmt.Errorf("%s has incorrect configuration: %w", clnt, err), "incorrect_"+clnt.String()) + bootLogOnceIf(context.Background(), fmt.Errorf("%s has incorrect configuration: %w", clnt, err), "incorrect_"+clnt.String()) incorrectConfigs = append(incorrectConfigs, fmt.Errorf("%s has incorrect configuration: %w", clnt, err)) } else { offlineEndpoints = append(offlineEndpoints, fmt.Errorf("%s is unreachable: %w", clnt, 
err)) diff --git a/cmd/bootstrap-peer-server_gen.go b/cmd/bootstrap-peer-server_gen.go index 77f4cde1d7720..79b35bbbe44b2 100644 --- a/cmd/bootstrap-peer-server_gen.go +++ b/cmd/bootstrap-peer-server_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) @@ -59,19 +59,17 @@ func (z *ServerSystemConfig) DecodeMsg(dc *msgp.Reader) (err error) { if z.MinioEnv == nil { z.MinioEnv = make(map[string]string, zb0003) } else if len(z.MinioEnv) > 0 { - for key := range z.MinioEnv { - delete(z.MinioEnv, key) - } + clear(z.MinioEnv) } for zb0003 > 0 { zb0003-- var za0002 string - var za0003 string za0002, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "MinioEnv") return } + var za0003 string za0003, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "MinioEnv", za0002) @@ -79,6 +77,12 @@ func (z *ServerSystemConfig) DecodeMsg(dc *msgp.Reader) (err error) { } z.MinioEnv[za0002] = za0003 } + case "Checksum": + z.Checksum, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Checksum") + return + } default: err = dc.Skip() if err != nil { @@ -92,9 +96,9 @@ func (z *ServerSystemConfig) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z *ServerSystemConfig) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 3 + // map header, size 4 // write "NEndpoints" - err = en.Append(0x83, 0xaa, 0x4e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73) + err = en.Append(0x84, 0xaa, 0x4e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73) if err != nil { return } @@ -142,15 +146,25 @@ func (z *ServerSystemConfig) EncodeMsg(en *msgp.Writer) (err error) { return } } + // write "Checksum" + err = en.Append(0xa8, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d) + if err != nil { + return + } + err = en.WriteString(z.Checksum) + if err != nil { + err = msgp.WrapError(err, "Checksum") + return + } return } // MarshalMsg implements msgp.Marshaler func (z *ServerSystemConfig) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 3 + // map header, size 4 // string "NEndpoints" - o = append(o, 0x83, 0xaa, 0x4e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73) + o = append(o, 0x84, 0xaa, 0x4e, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73) o = msgp.AppendInt(o, z.NEndpoints) // string "CmdLines" o = append(o, 0xa8, 0x43, 0x6d, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x73) @@ -165,6 +179,9 @@ func (z *ServerSystemConfig) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.AppendString(o, za0002) o = msgp.AppendString(o, za0003) } + // string "Checksum" + o = append(o, 0xa8, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d) + o = msgp.AppendString(o, z.Checksum) return } @@ -221,14 +238,12 @@ func (z *ServerSystemConfig) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.MinioEnv == nil { z.MinioEnv = make(map[string]string, zb0003) } else if len(z.MinioEnv) > 0 { - for key := range z.MinioEnv { - delete(z.MinioEnv, key) - } + clear(z.MinioEnv) } for zb0003 > 0 { - var za0002 string var za0003 string zb0003-- + var za0002 string za0002, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "MinioEnv") @@ -241,6 +256,12 @@ func (z *ServerSystemConfig) UnmarshalMsg(bts []byte) (o []byte, err error) { } z.MinioEnv[za0002] = za0003 } + case "Checksum": + z.Checksum, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Checksum") + 
return + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -266,5 +287,6 @@ func (z *ServerSystemConfig) Msgsize() (s int) { s += msgp.StringPrefixSize + len(za0002) + msgp.StringPrefixSize + len(za0003) } } + s += 9 + msgp.StringPrefixSize + len(z.Checksum) return } diff --git a/cmd/bootstrap-peer-server_gen_test.go b/cmd/bootstrap-peer-server_gen_test.go index 1446451de5320..efb0ee0cd319b 100644 --- a/cmd/bootstrap-peer-server_gen_test.go +++ b/cmd/bootstrap-peer-server_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "bytes" "testing" diff --git a/cmd/bucket-encryption-handlers.go b/cmd/bucket-encryption-handlers.go index 7b5c0cad845c4..1fe7631de6a0d 100644 --- a/cmd/bucket-encryption-handlers.go +++ b/cmd/bucket-encryption-handlers.go @@ -30,7 +30,7 @@ import ( "github.com/minio/minio/internal/kms" "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) const ( @@ -85,7 +85,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r kmsKey := encConfig.KeyID() if kmsKey != "" { kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation - _, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext) + _, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{Name: kmsKey, AssociatedData: kmsContext}) if err != nil { if errors.Is(err, kes.ErrKeyNotFound) { writeErrorResponse(ctx, w, toAPIError(ctx, errKMSKeyNotFound), r.URL) @@ -114,7 +114,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r // We encode the xml bytes as base64 to ensure there are no encoding // errors. cfgStr := base64.StdEncoding.EncodeToString(configData) - logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ Type: madmin.SRBucketMetaTypeSSEConfig, Bucket: bucket, SSEConfig: &cfgStr, @@ -203,7 +203,7 @@ func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter } // Call site replication hook. 
- logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ Type: madmin.SRBucketMetaTypeSSEConfig, Bucket: bucket, SSEConfig: nil, diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index ab46893d85b75..564572f2a94a6 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -28,6 +28,7 @@ import ( "errors" "fmt" "io" + "mime" "mime/multipart" "net/http" "net/textproto" @@ -51,9 +52,9 @@ import ( sse "github.com/minio/minio/internal/bucket/encryption" objectlock "github.com/minio/minio/internal/bucket/object/lock" "github.com/minio/minio/internal/bucket/replication" - "github.com/minio/minio/internal/config/cache" "github.com/minio/minio/internal/config/dns" "github.com/minio/minio/internal/crypto" + "github.com/minio/minio/internal/etag" "github.com/minio/minio/internal/event" "github.com/minio/minio/internal/handlers" "github.com/minio/minio/internal/hash" @@ -61,8 +62,8 @@ import ( "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/kms" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/policy" - "github.com/minio/pkg/v2/sync/errgroup" + "github.com/minio/pkg/v3/policy" + "github.com/minio/pkg/v3/sync/errgroup" ) const ( @@ -72,6 +73,8 @@ const ( xMinIOErrCodeHeader = "x-minio-error-code" xMinIOErrDescHeader = "x-minio-error-desc" + + postPolicyBucketTagging = "tagging" ) // Check if there are buckets on server without corresponding entry in etcd backend and @@ -90,7 +93,7 @@ const ( // -- If IP of the entry doesn't match, this means entry is // // for another instance. Log an error to console. -func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) { +func initFederatorBackend(buckets []string, objLayer ObjectLayer) { if len(buckets) == 0 { return } @@ -98,7 +101,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) { // Get buckets in the DNS dnsBuckets, err := globalDNSConfig.List() if err != nil && !IsErrIgnored(err, dns.ErrNoEntriesFound, dns.ErrNotImplemented, dns.ErrDomainMissing) { - logger.LogIf(GlobalContext, err) + dnsLogIf(GlobalContext, err) return } @@ -111,10 +114,10 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) { domainMissing := err == dns.ErrDomainMissing if dnsBuckets != nil { for _, bucket := range buckets { - bucketsSet.Add(bucket.Name) - r, ok := dnsBuckets[bucket.Name] + bucketsSet.Add(bucket) + r, ok := dnsBuckets[bucket] if !ok { - bucketsToBeUpdated.Add(bucket.Name) + bucketsToBeUpdated.Add(bucket) continue } if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(r)...)).IsEmpty() { @@ -133,7 +136,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) { // but if we do see a difference with local domain IPs with // hostSlice from etcd then we should update with newer // domainIPs, we proceed to do that here. - bucketsToBeUpdated.Add(bucket.Name) + bucketsToBeUpdated.Add(bucket) continue } @@ -142,7 +145,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) { // bucket names are globally unique in federation at a given // path prefix, name collision is not allowed. We simply log // an error and continue. 
- bucketsInConflict.Add(bucket.Name) + bucketsInConflict.Add(bucket) } } @@ -151,7 +154,6 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) { g := errgroup.WithNErrs(len(bucketsToBeUpdatedSlice)).WithConcurrency(50) for index := range bucketsToBeUpdatedSlice { - index := index g.Go(func() error { return globalDNSConfig.Put(bucketsToBeUpdatedSlice[index]) }, index) @@ -160,13 +162,13 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) { ctx := GlobalContext for _, err := range g.Wait() { if err != nil { - logger.LogIf(ctx, err) + dnsLogIf(ctx, err) return } } for _, bucket := range bucketsInConflict.ToSlice() { - logger.LogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice())) + dnsLogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice())) } var wg sync.WaitGroup @@ -187,7 +189,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) { // We go to here, so we know the bucket no longer exists, // but is registered in DNS to this server if err := globalDNSConfig.Delete(bucket); err != nil { - logger.LogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w", + dnsLogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w", bucket, err)) } }(bucket) @@ -227,7 +229,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r * // Generate response. encodedSuccessResponse := encodeResponse(LocationResponse{}) // Get current region. - region := globalSite.Region + region := globalSite.Region() if region != globalMinioDefaultRegion { encodedSuccessResponse = encodeResponse(LocationResponse{ Location: region, @@ -341,11 +343,9 @@ func (api objectAPIHandlers) ListBucketsHandler(w http.ResponseWriter, r *http.R Created: dnsRecords[0].CreationDate, }) } - sort.Slice(bucketsInfo, func(i, j int) bool { return bucketsInfo[i].Name < bucketsInfo[j].Name }) - } else { // Invoke the list buckets. 
var err error @@ -426,7 +426,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, // Content-Md5 is required should be set // http://docs.aws.amazon.com/AmazonS3/latest/API/multiobjectdeleteapi.html - if _, ok := r.Header[xhttp.ContentMD5]; !ok { + if !validateLengthAndChecksum(r) { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL) return } @@ -558,7 +558,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, }, goi, opts, gerr) if dsc.ReplicateAny() { if object.VersionID != "" { - object.VersionPurgeStatus = Pending + object.VersionPurgeStatus = replication.VersionPurgePending object.VersionPurgeStatuses = dsc.PendingStatus() } else { object.DeleteMarkerReplicationStatus = dsc.PendingStatus() @@ -592,7 +592,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, output[idx] = obj idx++ } - return + return output } // Disable timeouts and cancellation @@ -668,9 +668,7 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, continue } - defer globalCacheConfig.Delete(bucket, dobj.ObjectName) - - if replicateDeletes && (dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == Pending) { + if replicateDeletes && (dobj.DeleteMarkerReplicationStatus() == replication.Pending || dobj.VersionPurgeStatus() == replication.VersionPurgePending) { // copy so we can re-add null ID. dobj := dobj if isDirObject(dobj.ObjectName) && dobj.VersionID == "" { @@ -790,7 +788,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req // check if client is attempting to create more buckets, complain about it. if currBuckets := globalBucketMetadataSys.Count(); currBuckets+1 > maxBuckets { - logger.LogIf(ctx, fmt.Errorf("Please avoid creating more buckets %d beyond recommended %d", currBuckets+1, maxBuckets)) + internalLogIf(ctx, fmt.Errorf("Please avoid creating more buckets %d beyond recommended %d", currBuckets+1, maxBuckets), logger.WarningKind) } opts := MakeBucketOptions{ @@ -840,7 +838,6 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req } writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return - } apiErr := ErrBucketAlreadyExists if !globalDomainIPs.Intersection(set.CreateStringSet(getHostsSlice(sr)...)).IsEmpty() { @@ -871,7 +868,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket) // Call site replication hook - logger.LogIf(ctx, globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts)) + replLogIf(ctx, globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts)) // Make sure to add Location information here only for bucket w.Header().Set(xhttp.Location, pathJoin(SlashSeparator, bucket)) @@ -888,6 +885,30 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req }) } +// multipartReader is just like https://pkg.go.dev/net/http#Request.MultipartReader but +// rejects multipart/mixed as its not supported in S3 API. 
+func multipartReader(r *http.Request) (*multipart.Reader, error) { + v := r.Header.Get("Content-Type") + if v == "" { + return nil, http.ErrNotMultipart + } + if r.Body == nil { + return nil, errors.New("missing form body") + } + d, params, err := mime.ParseMediaType(v) + if err != nil { + return nil, http.ErrNotMultipart + } + if d != "multipart/form-data" { + return nil, http.ErrNotMultipart + } + boundary, ok := params["boundary"] + if !ok { + return nil, http.ErrMissingBoundary + } + return multipart.NewReader(r.Body, boundary), nil +} + // PostPolicyBucketHandler - POST policy // ---------- // This implementation of the POST operation handles object creation with a specified @@ -921,9 +942,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h return } + if r.ContentLength <= 0 { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrEmptyRequestBody), r.URL) + return + } + // Here the parameter is the size of the form data that should // be loaded in memory, the remaining being put in temporary files. - mp, err := r.MultipartReader() + mp, err := multipartReader(r) if err != nil { apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest) apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err) @@ -935,7 +961,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h var ( reader io.Reader - fileSize int64 = -1 + actualSize int64 = -1 fileName string fanOutEntries = make([]minio.PutObjectFanOutEntry, 0, 100) ) @@ -943,6 +969,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h maxParts := 1000 // Canonicalize the form values into http.Header. formValues := make(http.Header) + var headerLen int64 for { part, err := mp.NextRawPart() if errors.Is(err, io.EOF) { @@ -984,7 +1011,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h return } - var b bytes.Buffer + headerLen += int64(len(name)) + int64(len(fileName)) if name != "file" { if http.CanonicalHeaderKey(name) == http.CanonicalHeaderKey("x-minio-fanout-list") { dec := json.NewDecoder(part) @@ -995,7 +1022,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h if err := dec.Decode(&m); err != nil { part.Close() apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest) - apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, multipart.ErrMessageTooLarge) + apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err) writeErrorResponse(ctx, w, apiErr, r.URL) return } @@ -1005,8 +1032,12 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h continue } + buf := bytebufferpool.Get() // value, store as string in memory - n, err := io.CopyN(&b, part, maxMemoryBytes+1) + n, err := io.CopyN(buf, part, maxMemoryBytes+1) + value := buf.String() + buf.Reset() + bytebufferpool.Put(buf) part.Close() if err != nil && err != io.EOF { @@ -1028,7 +1059,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h writeErrorResponse(ctx, w, apiErr, r.URL) return } - formValues[http.CanonicalHeaderKey(name)] = append(formValues[http.CanonicalHeaderKey(name)], b.String()) + headerLen += n + formValues[http.CanonicalHeaderKey(name)] = append(formValues[http.CanonicalHeaderKey(name)], value) continue } @@ -1037,10 +1069,33 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h // The file or text content must be the last field in the form. // You cannot upload more than one file at a time. 
reader = part + + possibleShardSize := (r.ContentLength - headerLen) + if globalStorageClass.ShouldInline(possibleShardSize, false) { // keep versioned false for this check + var b bytes.Buffer + n, err := io.Copy(&b, reader) + if err != nil { + apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest) + apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, err) + writeErrorResponse(ctx, w, apiErr, r.URL) + return + } + reader = &b + actualSize = n + } + // we have found the File part of the request we are done processing multipart-form break } + // check if have a file + if reader == nil { + apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest) + apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The file or text content is missing")) + writeErrorResponse(ctx, w, apiErr, r.URL) + return + } + if keyName, ok := formValues["Key"]; !ok { apiErr := errorCodes.ToAPIErr(ErrMalformedPOSTRequest) apiErr.Description = fmt.Sprintf("%s (%v)", apiErr.Description, errors.New("The name of the uploaded key is missing")) @@ -1138,11 +1193,33 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h return } - hashReader, err := hash.NewReader(ctx, reader, fileSize, "", "", fileSize) + clientETag, err := etag.FromContentMD5(formValues) + if err != nil { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDigest), r.URL) + return + } + + var forceMD5 []byte + // Optimization: If SSE-KMS and SSE-C did not request Content-Md5. Use uuid as etag. Optionally enable this also + // for server that is started with `--no-compat`. + kind, _ := crypto.IsRequested(formValues) + if !etag.ContentMD5Requested(formValues) && (kind == crypto.SSEC || kind == crypto.S3KMS || !globalServerCtxt.StrictS3Compat) { + forceMD5 = mustGetUUIDBytes() + } + + hashReader, err := hash.NewReaderWithOpts(ctx, reader, hash.Options{ + Size: actualSize, + MD5Hex: clientETag.String(), + SHA256Hex: "", + ActualSize: actualSize, + DisableMD5: false, + ForceMD5: forceMD5, + }) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } + if checksum != nil && checksum.Valid() { if err = hashReader.AddChecksumNoTrailer(formValues, false); err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) @@ -1199,9 +1276,9 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h writeErrorResponseHeadersOnly(w, toAPIError(ctx, err)) return } + opts.WantChecksum = checksum fanOutOpts := fanOutOptions{Checksum: checksum} - if crypto.Requested(formValues) { if crypto.SSECopy.IsRequested(r.Header) { writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParameters), r.URL) @@ -1218,11 +1295,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h return } - if crypto.SSEC.IsRequested(r.Header) && isReplicationEnabled(ctx, bucket) { - writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParametersSSEC), r.URL) - return - } - var ( reader io.Reader keyID string @@ -1251,8 +1323,15 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } + + wantSize := int64(-1) + if actualSize >= 0 { + info := ObjectInfo{Size: actualSize} + wantSize = info.EncryptedSize() + } + // do not try to verify encrypted content/ - hashReader, err = hash.NewReader(ctx, reader, -1, "", "", -1) + hashReader, err = hash.NewReader(ctx, reader, wantSize, "", "", actualSize) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, 
err), r.URL) return @@ -1263,6 +1342,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h return } } + opts.EncryptFn = metadataEncrypter(objectEncryptionKey) pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) @@ -1306,10 +1386,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h // Set the correct hex md5sum for the fan-out stream. fanOutOpts.MD5Hex = hex.EncodeToString(md5w.Sum(nil)) - concurrentSize := 100 - if runtime.GOMAXPROCS(0) < concurrentSize { - concurrentSize = runtime.GOMAXPROCS(0) - } + concurrentSize := min(runtime.GOMAXPROCS(0), 100) fanOutResp := make([]minio.PutObjectFanOutResponse, 0, len(fanOutEntries)) eventArgsList := make([]eventArgs, 0, len(fanOutEntries)) @@ -1332,7 +1409,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h Key: objInfo.Name, Error: errs[i].Error(), }) - eventArgsList = append(eventArgsList, eventArgs{ EventName: event.ObjectCreatedPost, BucketName: objInfo.Bucket, @@ -1345,22 +1421,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h continue } - asize, err := objInfo.GetActualSize() - if err != nil { - asize = objInfo.Size - } - - globalCacheConfig.Set(&cache.ObjectInfo{ - Key: objInfo.Name, - Bucket: objInfo.Bucket, - ETag: getDecryptedETag(formValues, objInfo, false), - ModTime: objInfo.ModTime, - Expires: objInfo.Expires.UTC().Format(http.TimeFormat), - CacheControl: objInfo.CacheControl, - Metadata: cleanReservedKeys(objInfo.UserDefined), - Size: asize, - }) - fanOutResp = append(fanOutResp, minio.PutObjectFanOutResponse{ Key: objInfo.Name, ETag: getDecryptedETag(formValues, objInfo, false), @@ -1420,6 +1480,19 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h return } + if formValues.Get(postPolicyBucketTagging) != "" { + tags, err := tags.ParseObjectXML(strings.NewReader(formValues.Get(postPolicyBucketTagging))) + if err != nil { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL) + return + } + tagsStr := tags.String() + opts.UserDefined[xhttp.AmzObjectTagging] = tagsStr + } else { + // avoid user set an invalid tag using `X-Amz-Tagging` + delete(opts.UserDefined, xhttp.AmzObjectTagging) + } + objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, opts) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) @@ -1442,22 +1515,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h w.Header().Set(xhttp.Location, obj) } - asize, err := objInfo.GetActualSize() - if err != nil { - asize = objInfo.Size - } - - defer globalCacheConfig.Set(&cache.ObjectInfo{ - Key: objInfo.Name, - Bucket: objInfo.Bucket, - ETag: etag, - ModTime: objInfo.ModTime, - Expires: objInfo.ExpiresStr(), - CacheControl: objInfo.CacheControl, - Metadata: cleanReservedKeys(objInfo.UserDefined), - Size: asize, - }) - // Notify object created event. 
defer sendEvent(eventArgs{ EventName: event.ObjectCreatedPost, @@ -1600,9 +1657,11 @@ func (api objectAPIHandlers) HeadBucketHandler(w http.ResponseWriter, r *http.Re return } - if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone { - writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error)) - return + if s3Error := checkRequestAuthType(ctx, r, policy.HeadBucketAction, bucket, ""); s3Error != ErrNone { + if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone { + writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error)) + return + } } getBucketInfo := objectAPI.GetBucketInfo @@ -1666,7 +1725,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http. writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL) return } - case rcfg.HasActiveRules("", true): + case rcfg != nil && rcfg.HasActiveRules("", true): writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL) return } @@ -1698,17 +1757,17 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http. if globalDNSConfig != nil { if err := globalDNSConfig.Delete(bucket); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually, bucket on MinIO no longer exists", err)) + dnsLogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually, bucket on MinIO no longer exists", err)) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } } globalNotificationSys.DeleteBucketMetadata(ctx, bucket) - globalReplicationPool.deleteResyncMetadata(ctx, bucket) + globalReplicationPool.Get().deleteResyncMetadata(ctx, bucket) // Call site replication hook. - logger.LogIf(ctx, globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete)) + replLogIf(ctx, globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete)) // Write success response. writeSuccessNoContent(w) @@ -1754,6 +1813,10 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri return } + // Audit log tags. + reqInfo := logger.GetReqInfo(ctx) + reqInfo.SetTags("retention", config.String()) + configData, err := xml.Marshal(config) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) @@ -1781,7 +1844,7 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri // We encode the xml bytes as base64 to ensure there are no encoding // errors. cfgStr := base64.StdEncoding.EncodeToString(configData) - logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ Type: madmin.SRBucketMetaTypeObjectLockConfig, Bucket: bucket, ObjectLockConfig: &cfgStr, @@ -1885,7 +1948,7 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h // We encode the xml bytes as base64 to ensure there are no encoding // errors. 
cfgStr := base64.StdEncoding.EncodeToString(configData) - logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ Type: madmin.SRBucketMetaTypeTags, Bucket: bucket, Tags: &cfgStr, @@ -1961,7 +2024,7 @@ func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r return } - logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ Type: madmin.SRBucketMetaTypeTags, Bucket: bucket, UpdatedAt: updatedAt, diff --git a/cmd/bucket-handlers_test.go b/cmd/bucket-handlers_test.go index 7a0bbaf34f010..0adb9b8f15389 100644 --- a/cmd/bucket-handlers_test.go +++ b/cmd/bucket-handlers_test.go @@ -32,7 +32,7 @@ import ( // Wrapper for calling RemoveBucket HTTP handler tests for both Erasure multiple disks and single node setup. func TestRemoveBucketHandler(t *testing.T) { - ExecObjectLayerAPITest(t, testRemoveBucketHandler, []string{"RemoveBucket"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testRemoveBucketHandler, endpoints: []string{"RemoveBucket"}}) } func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, @@ -78,7 +78,7 @@ func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, a // Wrapper for calling GetBucketPolicy HTTP handler tests for both Erasure multiple disks and single node setup. func TestGetBucketLocationHandler(t *testing.T) { - ExecObjectLayerAPITest(t, testGetBucketLocationHandler, []string{"GetBucketLocation"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testGetBucketLocationHandler, endpoints: []string{"GetBucketLocation"}}) } func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, @@ -188,7 +188,6 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri if errorResponse.Code != testCase.errorResponse.Code { t.Errorf("Test %d: %s: Expected the error code to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Code, errorResponse.Code) } - } // Test for Anonymous/unsigned http request. @@ -220,7 +219,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri // Wrapper for calling HeadBucket HTTP handler tests for both Erasure multiple disks and single node setup. func TestHeadBucketHandler(t *testing.T) { - ExecObjectLayerAPITest(t, testHeadBucketHandler, []string{"HeadBucket"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testHeadBucketHandler, endpoints: []string{"HeadBucket"}}) } func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, @@ -290,7 +289,6 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api if recV2.Code != testCase.expectedRespStatus { t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code) } - } // Test for Anonymous/unsigned http request. @@ -322,7 +320,7 @@ func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, api // Wrapper for calling TestListMultipartUploadsHandler tests for both Erasure multiple disks and single node setup. 
func TestListMultipartUploadsHandler(t *testing.T) { - ExecObjectLayerAPITest(t, testListMultipartUploadsHandler, []string{"ListMultipartUploads"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testListMultipartUploadsHandler, endpoints: []string{"ListMultipartUploads"}}) } // testListMultipartUploadsHandler - Tests validate listing of multipart uploads. @@ -558,7 +556,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s // Wrapper for calling TestListBucketsHandler tests for both Erasure multiple disks and single node setup. func TestListBucketsHandler(t *testing.T) { - ExecObjectLayerAPITest(t, testListBucketsHandler, []string{"ListBuckets"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testListBucketsHandler, endpoints: []string{"ListBuckets"}}) } // testListBucketsHandler - Tests validate listing of buckets. @@ -649,7 +647,7 @@ func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, ap // Wrapper for calling DeleteMultipleObjects HTTP handler tests for both Erasure multiple disks and single node setup. func TestAPIDeleteMultipleObjectsHandler(t *testing.T) { - ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects", "PutBucketPolicy"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testAPIDeleteMultipleObjectsHandler, endpoints: []string{"DeleteMultipleObjects", "PutBucketPolicy"}}) } func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, @@ -659,7 +657,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa sha256sum := "" var objectNames []string - for i := 0; i < 10; i++ { + for i := range 10 { contentBytes := []byte("hello") objectName := "test-object-" + strconv.Itoa(i) if i == 0 { @@ -689,7 +687,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa // The following block will create a bucket policy with delete object to 'public/*'. 
This is // to test a mixed response of a successful & failure while deleting objects in a single request - policyBytes := []byte(fmt.Sprintf(`{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName)) + policyBytes := fmt.Appendf(nil, `{"Id": "Policy1637752602639", "Version": "2012-10-17", "Statement": [{"Sid": "Stmt1637752600730", "Action": "s3:DeleteObject", "Effect": "Allow", "Resource": "arn:aws:s3:::%s/public/*", "Principal": "*"}]}`, bucketName) rec := httptest.NewRecorder() req, err := newTestSignedRequestV4(http.MethodPut, getPutPolicyURL("", bucketName), int64(len(policyBytes)), bytes.NewReader(policyBytes), credentials.AccessKey, credentials.SecretKey, nil) diff --git a/cmd/bucket-lifecycle-audit.go b/cmd/bucket-lifecycle-audit.go index e4c5134d239ec..1fa76e090aeb2 100644 --- a/cmd/bucket-lifecycle-audit.go +++ b/cmd/bucket-lifecycle-audit.go @@ -17,12 +17,16 @@ package cmd -import "github.com/minio/minio/internal/bucket/lifecycle" +import ( + "strconv" + + "github.com/minio/minio/internal/bucket/lifecycle" +) //go:generate stringer -type lcEventSrc -trimprefix lcEventSrc_ $GOFILE type lcEventSrc uint8 -//revive:disable:var-naming Underscores is used here to indicate where common prefix ends and the enumeration name begins +//nolint:staticcheck,revive // Underscores are used here to indicate where common prefix ends and the enumeration name begins const ( lcEventSrc_None lcEventSrc = iota lcEventSrc_Heal @@ -43,7 +47,7 @@ type lcAuditEvent struct { source lcEventSrc } -func (lae lcAuditEvent) Tags() map[string]interface{} { +func (lae lcAuditEvent) Tags() map[string]string { event := lae.Event src := lae.source const ( @@ -55,7 +59,7 @@ func (lae lcAuditEvent) Tags() map[string]interface{} { ilmNewerNoncurrentVersions = "ilm-newer-noncurrent-versions" ilmNoncurrentDays = "ilm-noncurrent-days" ) - tags := make(map[string]interface{}, 5) + tags := make(map[string]string, 5) if src > lcEventSrc_None { tags[ilmSrc] = src.String() } @@ -63,7 +67,7 @@ func (lae lcAuditEvent) Tags() map[string]interface{} { tags[ilmRuleID] = event.RuleID if !event.Due.IsZero() { - tags[ilmDue] = event.Due + tags[ilmDue] = event.Due.Format(iso8601Format) } // rule with Transition/NoncurrentVersionTransition in effect @@ -73,10 +77,10 @@ func (lae lcAuditEvent) Tags() map[string]interface{} { // rule with NewernoncurrentVersions in effect if event.NewerNoncurrentVersions > 0 { - tags[ilmNewerNoncurrentVersions] = event.NewerNoncurrentVersions + tags[ilmNewerNoncurrentVersions] = strconv.Itoa(event.NewerNoncurrentVersions) } if event.NoncurrentDays > 0 { - tags[ilmNoncurrentDays] = event.NoncurrentDays + tags[ilmNoncurrentDays] = strconv.Itoa(event.NoncurrentDays) } return tags } diff --git a/cmd/bucket-lifecycle-handlers.go b/cmd/bucket-lifecycle-handlers.go index d983f9b6ed890..e917c9adf53dc 100644 --- a/cmd/bucket-lifecycle-handlers.go +++ b/cmd/bucket-lifecycle-handlers.go @@ -19,7 +19,6 @@ package cmd import ( "encoding/xml" - "io" "net/http" "strconv" "time" @@ -28,7 +27,7 @@ import ( xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) const ( @@ -53,7 +52,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r bucket := vars["bucket"] // PutBucketLifecycle always needs a Content-Md5 - 
if _, ok := r.Header[xhttp.ContentMD5]; !ok { + if !validateLengthAndChecksum(r) { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL) return } @@ -64,19 +63,20 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r } // Check if bucket exists. - if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil { + rcfg, err := globalBucketObjectLockSys.Get(bucket) + if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } - bucketLifecycle, err := lifecycle.ParseLifecycleConfigWithID(io.LimitReader(r.Body, r.ContentLength)) + bucketLifecycle, err := lifecycle.ParseLifecycleConfigWithID(r.Body) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } // Validate the received bucket policy document - if err = bucketLifecycle.Validate(); err != nil { + if err = bucketLifecycle.Validate(rcfg); err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } diff --git a/cmd/bucket-lifecycle-handlers_test.go b/cmd/bucket-lifecycle-handlers_test.go index f101f2ef6f3f9..7dc2cf020c727 100644 --- a/cmd/bucket-lifecycle-handlers_test.go +++ b/cmd/bucket-lifecycle-handlers_test.go @@ -29,7 +29,7 @@ import ( // Test S3 Bucket lifecycle APIs with wrong credentials func TestBucketLifecycleWrongCredentials(t *testing.T) { - ExecObjectLayerAPITest(t, testBucketLifecycleHandlersWrongCredentials, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testBucketLifecycleHandlersWrongCredentials, endpoints: []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"}}) } // Test for authentication @@ -145,7 +145,7 @@ func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType, // Test S3 Bucket lifecycle APIs func TestBucketLifecycle(t *testing.T) { - ExecObjectLayerAPITest(t, testBucketLifecycleHandlers, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testBucketLifecycleHandlers, endpoints: []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"}}) } // Simple tests of bucket lifecycle: PUT, GET, DELETE. 
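Editor's note: the PutBucketLifecycleHandler hunk above replaces the bare Content-MD5 header-presence check with validateLengthAndChecksum and validates the parsed lifecycle against the bucket's object-lock configuration. The helper itself is defined elsewhere in the tree; as a rough, self-contained illustration of what verifying a body against its Content-MD5 header involves (names below are placeholders, not MinIO's actual helper):

package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"io"
	"net/http"
)

// checkContentMD5 reads the request body, recomputes its MD5 and compares it
// with the Content-MD5 header (base64-encoded digest, as S3 clients send it).
// It returns the body bytes so the caller can continue to use them.
func checkContentMD5(r *http.Request) ([]byte, error) {
	want := r.Header.Get("Content-Md5")
	if want == "" {
		return nil, fmt.Errorf("missing Content-Md5 header")
	}
	body, err := io.ReadAll(r.Body)
	if err != nil {
		return nil, err
	}
	sum := md5.Sum(body)
	if got := base64.StdEncoding.EncodeToString(sum[:]); got != want {
		return nil, fmt.Errorf("content-md5 mismatch: got %s, want %s", got, want)
	}
	return body, nil
}

func main() {
	payload := []byte(`<LifecycleConfiguration></LifecycleConfiguration>`)
	sum := md5.Sum(payload)
	req, _ := http.NewRequest(http.MethodPut, "http://localhost/bucket?lifecycle", bytes.NewReader(payload))
	req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sum[:]))
	if _, err := checkContentMD5(req); err != nil {
		fmt.Println("rejected:", err)
		return
	}
	fmt.Println("Content-MD5 verified")
}

The real helper also accepts the newer x-amz checksum headers, which is why the handler no longer keys on the Content-MD5 header alone.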
diff --git a/cmd/bucket-lifecycle.go b/cmd/bucket-lifecycle.go index d34cec4319ed3..24fdc67d15fb4 100644 --- a/cmd/bucket-lifecycle.go +++ b/cmd/bucket-lifecycle.go @@ -23,6 +23,7 @@ import ( "errors" "fmt" "io" + "maps" "net/http" "strconv" "strings" @@ -40,7 +41,7 @@ import ( xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/s3select" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" "github.com/zeebo/xxh3" ) @@ -71,7 +72,12 @@ func NewLifecycleSys() *LifecycleSys { return &LifecycleSys{} } -func ilmTrace(startTime time.Time, duration time.Duration, oi ObjectInfo, event string) madmin.TraceInfo { +func ilmTrace(startTime time.Time, duration time.Duration, oi ObjectInfo, event string, metadata map[string]string, err string) madmin.TraceInfo { + sz, _ := oi.GetActualSize() + if metadata == nil { + metadata = make(map[string]string) + } + metadata["version-id"] = oi.VersionID return madmin.TraceInfo{ TraceType: madmin.TraceILM, Time: startTime, @@ -79,18 +85,23 @@ func ilmTrace(startTime time.Time, duration time.Duration, oi ObjectInfo, event FuncName: event, Duration: duration, Path: pathJoin(oi.Bucket, oi.Name), - Error: "", + Bytes: sz, + Error: err, Message: getSource(4), - Custom: map[string]string{"version-id": oi.VersionID}, + Custom: metadata, } } -func (sys *LifecycleSys) trace(oi ObjectInfo) func(event string) { +func (sys *LifecycleSys) trace(oi ObjectInfo) func(event string, metadata map[string]string, err error) { startTime := time.Now() - return func(event string) { + return func(event string, metadata map[string]string, err error) { duration := time.Since(startTime) if globalTrace.NumSubscribers(madmin.TraceILM) > 0 { - globalTrace.Publish(ilmTrace(startTime, duration, oi, event)) + e := "" + if err != nil { + e = err.Error() + } + globalTrace.Publish(ilmTrace(startTime, duration, oi, event, metadata, e)) } } } @@ -145,8 +156,8 @@ func (f freeVersionTask) OpHash() uint64 { return xxh3.HashString(f.TransitionedObject.Tier + f.TransitionedObject.Name) } -func (n newerNoncurrentTask) OpHash() uint64 { - return xxh3.HashString(n.bucket + n.versions[0].ObjectV.ObjectName) +func (n noncurrentVersionsTask) OpHash() uint64 { + return xxh3.HashString(n.bucket + n.versions[0].ObjectName) } func (j jentry) OpHash() uint64 { @@ -230,10 +241,16 @@ func (es *expiryState) enqueueByDays(oi ObjectInfo, event lifecycle.Event, src l } } -// enqueueByNewerNoncurrent enqueues object versions expired by -// NewerNoncurrentVersions limit for expiry. -func (es *expiryState) enqueueByNewerNoncurrent(bucket string, versions []ObjectToDelete, lcEvent lifecycle.Event) { - task := newerNoncurrentTask{bucket: bucket, versions: versions, event: lcEvent} +func (es *expiryState) enqueueNoncurrentVersions(bucket string, versions []ObjectToDelete, events []lifecycle.Event) { + if len(versions) == 0 { + return + } + + task := noncurrentVersionsTask{ + bucket: bucket, + versions: versions, + events: events, + } wrkr := es.getWorkerCh(task.OpHash()) if wrkr == nil { es.stats.missedExpiryTasks.Add(1) @@ -273,6 +290,10 @@ func (es *expiryState) getWorkerCh(h uint64) chan<- expiryOp { } func (es *expiryState) ResizeWorkers(n int) { + if n == 0 { + n = 100 + } + // Lock to avoid multiple resizes to happen at the same time. 
es.mu.Lock() defer es.mu.Unlock() @@ -329,16 +350,16 @@ func (es *expiryState) Worker(input <-chan expiryOp) { } else { applyExpiryOnNonTransitionedObjects(es.ctx, es.objAPI, v.objInfo, v.event, v.src) } - case newerNoncurrentTask: - deleteObjectVersions(es.ctx, es.objAPI, v.bucket, v.versions, v.event) + case noncurrentVersionsTask: + deleteObjectVersions(es.ctx, es.objAPI, v.bucket, v.versions, v.events) case jentry: - logger.LogIf(es.ctx, deleteObjectFromRemoteTier(es.ctx, v.ObjName, v.VersionID, v.TierName)) + transitionLogIf(es.ctx, deleteObjectFromRemoteTier(es.ctx, v.ObjName, v.VersionID, v.TierName)) case freeVersionTask: oi := v.ObjectInfo traceFn := globalLifecycleSys.trace(oi) if !oi.TransitionedObject.FreeVersion { // nothing to be done - return + continue } ignoreNotFoundErr := func(err error) error { @@ -351,8 +372,9 @@ func (es *expiryState) Worker(input <-chan expiryOp) { // Remove the remote object err := deleteObjectFromRemoteTier(es.ctx, oi.TransitionedObject.Name, oi.TransitionedObject.VersionID, oi.TransitionedObject.Tier) if ignoreNotFoundErr(err) != nil { - logger.LogIf(es.ctx, err) - return + transitionLogIf(es.ctx, err) + traceFn(ILMFreeVersionDelete, nil, err) + continue } // Remove this free version @@ -364,10 +386,10 @@ func (es *expiryState) Worker(input <-chan expiryOp) { auditLogLifecycle(es.ctx, oi, ILMFreeVersionDelete, nil, traceFn) } if ignoreNotFoundErr(err) != nil { - logger.LogIf(es.ctx, err) + transitionLogIf(es.ctx, err) } default: - logger.LogIf(es.ctx, fmt.Errorf("Invalid work type - %v", v)) + bugLogIf(es.ctx, fmt.Errorf("Invalid work type - %v", v)) } } } @@ -377,12 +399,10 @@ func initBackgroundExpiry(ctx context.Context, objectAPI ObjectLayer) { globalExpiryState = newExpiryState(ctx, objectAPI, globalILMConfig.getExpirationWorkers()) } -// newerNoncurrentTask encapsulates arguments required by worker to expire objects -// by NewerNoncurrentVersions -type newerNoncurrentTask struct { +type noncurrentVersionsTask struct { bucket string versions []ObjectToDelete - event lifecycle.Event + events []lifecycle.Event } type transitionTask struct { @@ -482,7 +502,7 @@ func (t *transitionState) worker(objectAPI ObjectLayer) { if err := transitionObject(t.ctx, objectAPI, task.objInfo, newLifecycleAuditEvent(task.src, task.event)); err != nil { if !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !xnet.IsNetworkOrHostDown(err, false) { if !strings.Contains(err.Error(), "use of closed network connection") { - logger.LogIf(t.ctx, fmt.Errorf("Transition to %s failed for %s/%s version:%s with %w", + transitionLogIf(t.ctx, fmt.Errorf("Transition to %s failed for %s/%s version:%s with %w", task.event.StorageClass, task.objInfo.Bucket, task.objInfo.Name, task.objInfo.VersionID, err)) } } @@ -534,6 +554,10 @@ func (t *transitionState) UpdateWorkers(n int) { } func (t *transitionState) updateWorkers(n int) { + if n == 0 { + n = 100 + } + for t.numWorkers < n { go t.worker(t.objAPI) t.numWorkers++ @@ -569,6 +593,10 @@ func enqueueTransitionImmediate(obj ObjectInfo, src lcEventSrc) { if lc, err := globalLifecycleSys.Get(obj.Bucket); err == nil { switch event := lc.Eval(obj.ToLifecycleOpts()); event.Action { case lifecycle.TransitionAction, lifecycle.TransitionVersionAction: + if obj.DeleteMarker || obj.IsDir { + // nothing to transition + return + } globalTransitionState.queueTransitionTask(obj, event, src) } } @@ -610,7 +638,7 @@ func expireTransitionedObject(ctx context.Context, objectAPI ObjectLayer, oi *Ob // remote object opts.SkipFreeVersion = true } 
else { - logger.LogIf(ctx, err) + transitionLogIf(ctx, err) } // Now, delete object from hot-tier namespace @@ -659,11 +687,12 @@ func genTransitionObjName(bucket string) (string, error) { // is moved to the transition tier. Note that in the case of encrypted objects, entire encrypted stream is moved // to the transition tier without decrypting or re-encrypting. func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo, lae lcAuditEvent) (err error) { + timeILM := globalScannerMetrics.timeILM(lae.Action) defer func() { if err != nil { return } - globalScannerMetrics.timeILM(lae.Action)(1) + timeILM(1) }() opts := ObjectOptions{ @@ -688,6 +717,11 @@ type auditTierOp struct { Error string `json:"error,omitempty"` } +func (op auditTierOp) String() string { + // flattening the auditTierOp{} for audit + return fmt.Sprintf("tier:%s,respNS:%d,tx:%d,err:%s", op.Tier, op.TimeToResponseNS, op.OutputBytes, op.Error) +} + func auditTierActions(ctx context.Context, tier string, bytes int64) func(err error) { startTime := time.Now() return func(err error) { @@ -711,18 +745,18 @@ func auditTierActions(ctx context.Context, tier string, bytes int64) func(err er globalTierMetrics.logFailure(tier) } - logger.GetReqInfo(ctx).AppendTags("tierStats", op) + logger.GetReqInfo(ctx).AppendTags("tierStats", op.String()) } } // getTransitionedObjectReader returns a reader from the transitioned tier. func getTransitionedObjectReader(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, oi ObjectInfo, opts ObjectOptions) (gr *GetObjectReader, err error) { - tgtClient, err := globalTierConfigMgr.getDriver(oi.TransitionedObject.Tier) + tgtClient, err := globalTierConfigMgr.getDriver(ctx, oi.TransitionedObject.Tier) if err != nil { - return nil, fmt.Errorf("transition storage class not configured") + return nil, fmt.Errorf("transition storage class not configured: %w", err) } - fn, off, length, err := NewGetObjectReader(rs, oi, opts) + fn, off, length, err := NewGetObjectReader(rs, oi, opts, h) if err != nil { return nil, ErrorRespToObjectError(err, bucket, object) } @@ -875,7 +909,7 @@ func postRestoreOpts(ctx context.Context, r *http.Request, bucket, object string if vid != "" && vid != nullVersionID { _, err := uuid.Parse(vid) if err != nil { - logger.LogIf(ctx, err) + s3LogIf(ctx, err) return opts, InvalidVersionID{ Bucket: bucket, Object: object, @@ -926,9 +960,7 @@ func putRestoreOpts(bucket, object string, rreq *RestoreObjectRequest, objInfo O UserDefined: meta, } } - for k, v := range objInfo.UserDefined { - meta[k] = v - } + maps.Copy(meta, objInfo.UserDefined) if len(objInfo.UserTags) != 0 { meta[xhttp.AmzObjectTagging] = objInfo.UserTags } @@ -979,7 +1011,7 @@ func ongoingRestoreObj() restoreObjStatus { } } -// completeRestoreObj constructs restoreObjStatus for a completed restore-object with given expiry. +// completedRestoreObj constructs restoreObjStatus for a completed restore-object with given expiry. func completedRestoreObj(expiry time.Time) restoreObjStatus { return restoreObjStatus{ ongoing: false, @@ -1075,17 +1107,20 @@ func isRestoredObjectOnDisk(meta map[string]string) (onDisk bool) { // ToLifecycleOpts returns lifecycle.ObjectOpts value for oi. 
func (oi ObjectInfo) ToLifecycleOpts() lifecycle.ObjectOpts { return lifecycle.ObjectOpts{ - Name: oi.Name, - UserTags: oi.UserTags, - VersionID: oi.VersionID, - ModTime: oi.ModTime, - Size: oi.Size, - IsLatest: oi.IsLatest, - NumVersions: oi.NumVersions, - DeleteMarker: oi.DeleteMarker, - SuccessorModTime: oi.SuccessorModTime, - RestoreOngoing: oi.RestoreOngoing, - RestoreExpires: oi.RestoreExpires, - TransitionStatus: oi.TransitionedObject.Status, + Name: oi.Name, + UserTags: oi.UserTags, + VersionID: oi.VersionID, + ModTime: oi.ModTime, + Size: oi.Size, + IsLatest: oi.IsLatest, + NumVersions: oi.NumVersions, + DeleteMarker: oi.DeleteMarker, + SuccessorModTime: oi.SuccessorModTime, + RestoreOngoing: oi.RestoreOngoing, + RestoreExpires: oi.RestoreExpires, + TransitionStatus: oi.TransitionedObject.Status, + UserDefined: oi.UserDefined, + VersionPurgeStatus: oi.VersionPurgeStatus, + ReplicationStatus: oi.ReplicationStatus, } } diff --git a/cmd/bucket-listobjects-handlers.go b/cmd/bucket-listobjects-handlers.go index 934343320bd56..0afd76c43cc25 100644 --- a/cmd/bucket-listobjects-handlers.go +++ b/cmd/bucket-listobjects-handlers.go @@ -26,7 +26,7 @@ import ( "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) // Validate all the ListObjects query arguments, returns an APIErrorCode @@ -124,7 +124,7 @@ func (api objectAPIHandlers) listObjectVersionsHandler(w http.ResponseWriter, r writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } - response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo, checkObjMeta) + response := generateListVersionsResponse(ctx, bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo, checkObjMeta) // Write success response. writeSuccessResponseXML(w, encodeResponseList(response)) @@ -202,7 +202,7 @@ func (api objectAPIHandlers) listObjectsV2Handler(ctx context.Context, w http.Re if r.Header.Get(xMinIOExtract) == "true" && strings.Contains(prefix, archivePattern) { // Initiate a list objects operation inside a zip file based in the input params - listObjectsV2Info, err = listObjectsV2InArchive(ctx, objectAPI, bucket, prefix, token, delimiter, maxKeys, fetchOwner, startAfter) + listObjectsV2Info, err = listObjectsV2InArchive(ctx, objectAPI, bucket, prefix, token, delimiter, maxKeys, startAfter, r.Header) } else { // Initiate a list objects operation based on the input params. 
// On success would return back ListObjectsInfo object to be @@ -219,7 +219,7 @@ func (api objectAPIHandlers) listObjectsV2Handler(ctx context.Context, w http.Re return } - response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter, + response := generateListObjectsV2Response(ctx, bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter, delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated, maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes, checkObjMeta) @@ -231,7 +231,7 @@ func parseRequestToken(token string) (subToken string, nodeIndex int) { if token == "" { return token, -1 } - i := strings.Index(token, ":") + i := strings.Index(token, getKeySeparator()) if i < 0 { return token, -1 } @@ -243,26 +243,26 @@ func parseRequestToken(token string) (subToken string, nodeIndex int) { return subToken, nodeIndex } -func proxyRequestByToken(ctx context.Context, w http.ResponseWriter, r *http.Request, token string) (string, bool) { - subToken, nodeIndex := parseRequestToken(token) - if nodeIndex > 0 { - return subToken, proxyRequestByNodeIndex(ctx, w, r, nodeIndex) +func proxyRequestByToken(ctx context.Context, w http.ResponseWriter, r *http.Request, token string, returnErr bool) (subToken string, proxied bool, success bool) { + var nodeIndex int + if subToken, nodeIndex = parseRequestToken(token); nodeIndex >= 0 { + proxied, success = proxyRequestByNodeIndex(ctx, w, r, nodeIndex, returnErr) } - return subToken, false + return subToken, proxied, success } -func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http.Request, index int) (success bool) { +func proxyRequestByNodeIndex(ctx context.Context, w http.ResponseWriter, r *http.Request, index int, returnErr bool) (proxied, success bool) { if len(globalProxyEndpoints) == 0 { - return false + return proxied, success } if index < 0 || index >= len(globalProxyEndpoints) { - return false + return proxied, success } ep := globalProxyEndpoints[index] if ep.IsLocal { - return false + return proxied, success } - return proxyRequest(ctx, w, r, ep) + return true, proxyRequest(ctx, w, r, ep, returnErr) } // ListObjectsV1Handler - GET Bucket (List Objects) Version 1. @@ -318,7 +318,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http return } - response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo) + response := generateListObjectsV1Response(ctx, bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo) // Write success response. writeSuccessResponseXML(w, encodeResponseList(response)) diff --git a/cmd/bucket-metadata-sys.go b/cmd/bucket-metadata-sys.go index 86517177301a0..20be4ffd3e36a 100644 --- a/cmd/bucket-metadata-sys.go +++ b/cmd/bucket-metadata-sys.go @@ -37,8 +37,9 @@ import ( "github.com/minio/minio/internal/event" "github.com/minio/minio/internal/kms" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/policy" - "github.com/minio/pkg/v2/sync/errgroup" + "github.com/minio/pkg/v3/policy" + "github.com/minio/pkg/v3/sync/errgroup" + "golang.org/x/sync/singleflight" ) // BucketMetadataSys captures all bucket metadata for a given cluster. 
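Editor's note: the cmd/bucket-metadata-sys.go changes starting here pull in golang.org/x/sync/singleflight; the following hunks store a *singleflight.Group on BucketMetadataSys, call group.Forget on Remove, and route GetConfig cache misses through group.Do so that concurrent requests for the same bucket share one load instead of each hitting the backend. A minimal, self-contained sketch of that pattern (the loader and key are placeholders, not MinIO's loadBucketMetadata):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/sync/singleflight"
)

var loads atomic.Int64

// slowLoad stands in for an expensive, backend-hitting metadata load.
func slowLoad(bucket string) (string, error) {
	loads.Add(1)
	time.Sleep(50 * time.Millisecond)
	return "metadata-for-" + bucket, nil
}

func main() {
	var g singleflight.Group
	var wg sync.WaitGroup

	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// All eight goroutines ask for the same key; singleflight runs the
			// loader once and hands every waiter the same result.
			v, err, shared := g.Do("testbucket", func() (any, error) {
				return slowLoad("testbucket")
			})
			fmt.Println(v, err, "shared:", shared)
		}()
	}
	wg.Wait()
	fmt.Println("backend loads:", loads.Load()) // typically 1, not 8
}

Forgetting the key when a bucket is removed (as the Remove hunk does) ensures a later request triggers a fresh load rather than reusing an in-flight or stale result.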
@@ -46,6 +47,8 @@ type BucketMetadataSys struct { objAPI ObjectLayer sync.RWMutex + initialized bool + group *singleflight.Group metadataMap map[string]BucketMetadata } @@ -61,6 +64,7 @@ func (sys *BucketMetadataSys) Count() int { func (sys *BucketMetadataSys) Remove(buckets ...string) { sys.Lock() for _, bucket := range buckets { + sys.group.Forget(bucket) delete(sys.metadataMap, bucket) globalBucketMonitor.DeleteBucket(bucket) } @@ -120,6 +124,7 @@ func (sys *BucketMetadataSys) updateAndParse(ctx context.Context, bucket string, meta.PolicyConfigUpdatedAt = updatedAt case bucketNotificationConfig: meta.NotificationConfigXML = configData + meta.NotificationConfigUpdatedAt = updatedAt case bucketLifecycleConfig: meta.LifecycleConfigXML = configData meta.LifecycleConfigUpdatedAt = updatedAt @@ -149,12 +154,13 @@ func (sys *BucketMetadataSys) updateAndParse(ctx context.Context, bucket string, if err != nil { return updatedAt, fmt.Errorf("Error encrypting bucket target metadata %w", err) } + meta.BucketTargetsConfigUpdatedAt = updatedAt + meta.BucketTargetsConfigMetaUpdatedAt = updatedAt default: return updatedAt, fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile) } - err = sys.save(ctx, meta) - return updatedAt, err + return updatedAt, sys.save(ctx, meta) } func (sys *BucketMetadataSys) save(ctx context.Context, meta BucketMetadata) error { @@ -261,6 +267,21 @@ func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Ve return meta.versioningConfig, meta.VersioningConfigUpdatedAt, nil } +// GetBucketPolicy returns configured bucket policy +func (sys *BucketMetadataSys) GetBucketPolicy(bucket string) (*policy.BucketPolicy, time.Time, error) { + meta, _, err := sys.GetConfig(GlobalContext, bucket) + if err != nil { + if errors.Is(err, errConfigNotFound) { + return nil, time.Time{}, BucketPolicyNotFound{Bucket: bucket} + } + return nil, time.Time{}, err + } + if meta.policyConfig == nil { + return nil, time.Time{}, BucketPolicyNotFound{Bucket: bucket} + } + return meta.policyConfig, meta.PolicyConfigUpdatedAt, nil +} + // GetTaggingConfig returns configured tagging config // The returned object may not be modified. func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, time.Time, error) { @@ -391,9 +412,7 @@ func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket s return nil, time.Time{}, BucketReplicationConfigNotFound{Bucket: bucket} } if reloaded { - globalBucketTargetSys.set(BucketInfo{ - Name: bucket, - }, meta) + globalBucketTargetSys.set(bucket, meta) } return meta.replicationConfig, meta.ReplicationConfigUpdatedAt, nil } @@ -412,9 +431,7 @@ func (sys *BucketMetadataSys) GetBucketTargetsConfig(bucket string) (*madmin.Buc return nil, BucketRemoteTargetNotFound{Bucket: bucket} } if reloaded { - globalBucketTargetSys.set(BucketInfo{ - Name: bucket, - }, meta) + globalBucketTargetSys.set(bucket, meta) } return meta.bucketTargetConfig, nil } @@ -433,6 +450,8 @@ func (sys *BucketMetadataSys) GetConfigFromDisk(ctx context.Context, bucket stri return loadBucketMetadata(ctx, objAPI, bucket) } +var errBucketMetadataNotInitialized = errors.New("bucket metadata not initialized yet") + // GetConfig returns a specific configuration from the bucket metadata. // The returned object may not be modified. 
// reloaded will be true if metadata refreshed from disk @@ -452,9 +471,20 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (met if ok { return meta, reloaded, nil } - meta, err = loadBucketMetadata(ctx, objAPI, bucket) + + val, err, _ := sys.group.Do(bucket, func() (val any, err error) { + meta, err = loadBucketMetadata(ctx, objAPI, bucket) + if err != nil { + if !sys.Initialized() { + // bucket metadata not yet initialized + return newBucketMetadata(bucket), errBucketMetadataNotInitialized + } + } + return meta, err + }) + meta, _ = val.(BucketMetadata) if err != nil { - return meta, reloaded, err + return meta, false, err } sys.Lock() sys.metadataMap[bucket] = meta @@ -464,7 +494,7 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (met } // Init - initializes bucket metadata system for all buckets. -func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error { +func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []string, objAPI ObjectLayer) error { if objAPI == nil { return errServerNotInitialized } @@ -477,18 +507,17 @@ func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, ob } // concurrently load bucket metadata to speed up loading bucket metadata. -func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []BucketInfo, failedBuckets map[string]struct{}) { +func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []string) { g := errgroup.WithNErrs(len(buckets)) bucketMetas := make([]BucketMetadata, len(buckets)) for index := range buckets { - index := index g.Go(func() error { // Sleep and stagger to avoid blocked CPU and thundering // herd upon start up sequence. time.Sleep(25*time.Millisecond + time.Duration(rand.Int63n(int64(100*time.Millisecond)))) - _, _ = sys.objAPI.HealBucket(ctx, buckets[index].Name, madmin.HealOpts{Recreate: true}) - meta, err := loadBucketMetadata(ctx, sys.objAPI, buckets[index].Name) + _, _ = sys.objAPI.HealBucket(ctx, buckets[index], madmin.HealOpts{Recreate: true}) + meta, err := loadBucketMetadata(ctx, sys.objAPI, buckets[index]) if err != nil { return err } @@ -498,9 +527,10 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck } errs := g.Wait() - for _, err := range errs { + for index, err := range errs { if err != nil { - logger.LogIf(ctx, err) + internalLogOnceIf(ctx, fmt.Errorf("Unable to load bucket metadata, will be retried: %w", err), + "load-bucket-metadata-"+buckets[index], logger.WarningKind) } } @@ -511,16 +541,12 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck if errs[i] != nil { continue } - sys.metadataMap[buckets[i].Name] = meta + sys.metadataMap[buckets[i]] = meta } sys.Unlock() for i, meta := range bucketMetas { if errs[i] != nil { - if failedBuckets == nil { - failedBuckets = make(map[string]struct{}) - } - failedBuckets[buckets[i].Name] = struct{}{} continue } globalEventNotifier.set(buckets[i], meta) // set notification targets @@ -528,7 +554,7 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck } } -func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, failedBuckets map[string]struct{}) { +func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context) { const bucketMetadataRefresh = 15 * time.Minute sleeper := newDynamicSleeper(2, 150*time.Millisecond, false) @@ -540,9 +566,9 @@ func (sys *BucketMetadataSys) 
refreshBucketsMetadataLoop(ctx context.Context, fa case <-ctx.Done(): return case <-t.C: - buckets, err := sys.objAPI.ListBuckets(ctx, BucketOptions{}) + buckets, err := sys.objAPI.ListBuckets(ctx, BucketOptions{NoMetadata: true}) if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) break } @@ -558,22 +584,27 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa for i := range buckets { wait := sleeper.Timer(ctx) - meta, err := loadBucketMetadata(ctx, sys.objAPI, buckets[i].Name) + bucket := buckets[i].Name + updated := false + + meta, err := loadBucketMetadata(ctx, sys.objAPI, bucket) if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) wait() // wait to proceed to next entry. continue } sys.Lock() - sys.metadataMap[buckets[i].Name] = meta + // Update if the bucket metadata in the memory is older than on-disk one + if lu := sys.metadataMap[bucket].lastUpdate(); lu.Before(meta.lastUpdate()) { + updated = true + sys.metadataMap[bucket] = meta + } sys.Unlock() - // Initialize the failed buckets - if _, ok := failedBuckets[buckets[i].Name]; ok { - globalEventNotifier.set(buckets[i], meta) - globalBucketTargetSys.set(buckets[i], meta) - delete(failedBuckets, buckets[i].Name) + if updated { + globalEventNotifier.set(bucket, meta) + globalBucketTargetSys.set(bucket, meta) } wait() // wait to proceed to next entry. @@ -583,30 +614,39 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa } } +// Initialized indicates if bucket metadata sys is initialized atleast once. +func (sys *BucketMetadataSys) Initialized() bool { + sys.RLock() + defer sys.RUnlock() + + return sys.initialized +} + // Loads bucket metadata for all buckets into BucketMetadataSys. -func (sys *BucketMetadataSys) init(ctx context.Context, buckets []BucketInfo) { - count := 100 // load 100 bucket metadata at a time. - failedBuckets := make(map[string]struct{}) +func (sys *BucketMetadataSys) init(ctx context.Context, buckets []string) { + count := globalEndpoints.ESCount() * 10 for { if len(buckets) < count { - sys.concurrentLoad(ctx, buckets, failedBuckets) + sys.concurrentLoad(ctx, buckets) break } - sys.concurrentLoad(ctx, buckets[:count], failedBuckets) + sys.concurrentLoad(ctx, buckets[:count]) buckets = buckets[count:] } + sys.Lock() + sys.initialized = true + sys.Unlock() + if globalIsDistErasure { - go sys.refreshBucketsMetadataLoop(ctx, failedBuckets) + go sys.refreshBucketsMetadataLoop(ctx) } } // Reset the state of the BucketMetadataSys. 
func (sys *BucketMetadataSys) Reset() { sys.Lock() - for k := range sys.metadataMap { - delete(sys.metadataMap, k) - } + clear(sys.metadataMap) sys.Unlock() } @@ -614,5 +654,6 @@ func (sys *BucketMetadataSys) Reset() { func NewBucketMetadataSys() *BucketMetadataSys { return &BucketMetadataSys{ metadataMap: make(map[string]BucketMetadata), + group: &singleflight.Group{}, } } diff --git a/cmd/bucket-metadata.go b/cmd/bucket-metadata.go index 848962cc6472b..e78118175e58b 100644 --- a/cmd/bucket-metadata.go +++ b/cmd/bucket-metadata.go @@ -38,10 +38,9 @@ import ( "github.com/minio/minio/internal/bucket/versioning" "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/event" - "github.com/minio/minio/internal/fips" "github.com/minio/minio/internal/kms" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" "github.com/minio/sio" ) @@ -81,14 +80,19 @@ type BucketMetadata struct { ReplicationConfigXML []byte BucketTargetsConfigJSON []byte BucketTargetsConfigMetaJSON []byte - PolicyConfigUpdatedAt time.Time - ObjectLockConfigUpdatedAt time.Time - EncryptionConfigUpdatedAt time.Time - TaggingConfigUpdatedAt time.Time - QuotaConfigUpdatedAt time.Time - ReplicationConfigUpdatedAt time.Time - VersioningConfigUpdatedAt time.Time - LifecycleConfigUpdatedAt time.Time + + PolicyConfigUpdatedAt time.Time + ObjectLockConfigUpdatedAt time.Time + EncryptionConfigUpdatedAt time.Time + TaggingConfigUpdatedAt time.Time + QuotaConfigUpdatedAt time.Time + ReplicationConfigUpdatedAt time.Time + VersioningConfigUpdatedAt time.Time + LifecycleConfigUpdatedAt time.Time + NotificationConfigUpdatedAt time.Time + BucketTargetsConfigUpdatedAt time.Time + BucketTargetsConfigMetaUpdatedAt time.Time + // Add a new UpdatedAt field and update lastUpdate function // Unexported fields. Must be updated atomically. policyConfig *policy.BucketPolicy @@ -120,6 +124,46 @@ func newBucketMetadata(name string) BucketMetadata { } } +// Return the last update of this bucket metadata, which +// means, the last update of any policy document. +func (b BucketMetadata) lastUpdate() (t time.Time) { + if b.PolicyConfigUpdatedAt.After(t) { + t = b.PolicyConfigUpdatedAt + } + if b.ObjectLockConfigUpdatedAt.After(t) { + t = b.ObjectLockConfigUpdatedAt + } + if b.EncryptionConfigUpdatedAt.After(t) { + t = b.EncryptionConfigUpdatedAt + } + if b.TaggingConfigUpdatedAt.After(t) { + t = b.TaggingConfigUpdatedAt + } + if b.QuotaConfigUpdatedAt.After(t) { + t = b.QuotaConfigUpdatedAt + } + if b.ReplicationConfigUpdatedAt.After(t) { + t = b.ReplicationConfigUpdatedAt + } + if b.VersioningConfigUpdatedAt.After(t) { + t = b.VersioningConfigUpdatedAt + } + if b.LifecycleConfigUpdatedAt.After(t) { + t = b.LifecycleConfigUpdatedAt + } + if b.NotificationConfigUpdatedAt.After(t) { + t = b.NotificationConfigUpdatedAt + } + if b.BucketTargetsConfigUpdatedAt.After(t) { + t = b.BucketTargetsConfigUpdatedAt + } + if b.BucketTargetsConfigMetaUpdatedAt.After(t) { + t = b.BucketTargetsConfigMetaUpdatedAt + } + + return t +} + // Versioning returns true if versioning is enabled func (b BucketMetadata) Versioning() bool { return b.LockEnabled || (b.versioningConfig != nil && b.versioningConfig.Enabled()) || (b.objectLockConfig != nil && b.objectLockConfig.Enabled()) @@ -145,7 +189,7 @@ func (b *BucketMetadata) SetCreatedAt(createdAt time.Time) { // If an error is returned the returned metadata will be default initialized. 
func readBucketMetadata(ctx context.Context, api ObjectLayer, name string) (BucketMetadata, error) { if name == "" { - logger.LogIf(ctx, errors.New("bucket name cannot be empty")) + internalLogIf(ctx, errors.New("bucket name cannot be empty"), logger.WarningKind) return BucketMetadata{}, errInvalidArgument } b := newBucketMetadata(name) @@ -400,7 +444,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj for legacyFile := range configs { configFile := path.Join(bucketMetaPrefix, b.Name, legacyFile) if err := deleteConfig(ctx, objectAPI, configFile); err != nil && !errors.Is(err, errConfigNotFound) { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) } } @@ -440,6 +484,18 @@ func (b *BucketMetadata) defaultTimestamps() { if b.LifecycleConfigUpdatedAt.IsZero() { b.LifecycleConfigUpdatedAt = b.Created } + + if b.NotificationConfigUpdatedAt.IsZero() { + b.NotificationConfigUpdatedAt = b.Created + } + + if b.BucketTargetsConfigUpdatedAt.IsZero() { + b.BucketTargetsConfigUpdatedAt = b.Created + } + + if b.BucketTargetsConfigMetaUpdatedAt.IsZero() { + b.BucketTargetsConfigMetaUpdatedAt = b.Created + } } // Save config to supplied ObjectLayer api. @@ -486,26 +542,26 @@ func (b *BucketMetadata) migrateTargetConfig(ctx context.Context, objectAPI Obje func encryptBucketMetadata(ctx context.Context, bucket string, input []byte, kmsContext kms.Context) (output, metabytes []byte, err error) { if GlobalKMS == nil { output = input - return + return output, metabytes, err } metadata := make(map[string]string) - key, err := GlobalKMS.GenerateKey(ctx, "", kmsContext) + key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{AssociatedData: kmsContext}) if err != nil { - return + return output, metabytes, err } outbuf := bytes.NewBuffer(nil) objectKey := crypto.GenerateKey(key.Plaintext, rand.Reader) sealedKey := objectKey.Seal(key.Plaintext, crypto.GenerateIV(rand.Reader), crypto.S3.String(), bucket, "") crypto.S3.CreateMetadata(metadata, key.KeyID, key.Ciphertext, sealedKey) - _, err = sio.Encrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()}) + _, err = sio.Encrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20}) if err != nil { return output, metabytes, err } metabytes, err = json.Marshal(metadata) if err != nil { - return + return output, metabytes, err } return outbuf.Bytes(), metabytes, nil } @@ -519,7 +575,11 @@ func decryptBucketMetadata(input []byte, bucket string, meta map[string]string, if err != nil { return nil, err } - extKey, err := GlobalKMS.DecryptKey(keyID, kmsKey, kmsContext) + extKey, err := GlobalKMS.Decrypt(context.TODO(), &kms.DecryptRequest{ + Name: keyID, + Ciphertext: kmsKey, + AssociatedData: kmsContext, + }) if err != nil { return nil, err } @@ -529,6 +589,6 @@ func decryptBucketMetadata(input []byte, bucket string, meta map[string]string, } outbuf := bytes.NewBuffer(nil) - _, err = sio.Decrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()}) + _, err = sio.Decrypt(outbuf, bytes.NewBuffer(input), sio.Config{Key: objectKey[:], MinVersion: sio.Version20}) return outbuf.Bytes(), err } diff --git a/cmd/bucket-metadata_gen.go b/cmd/bucket-metadata_gen.go index 3e86a80e4c81d..0407b66ea8db8 100644 --- a/cmd/bucket-metadata_gen.go +++ b/cmd/bucket-metadata_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO 
NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) @@ -156,6 +156,24 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "LifecycleConfigUpdatedAt") return } + case "NotificationConfigUpdatedAt": + z.NotificationConfigUpdatedAt, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "NotificationConfigUpdatedAt") + return + } + case "BucketTargetsConfigUpdatedAt": + z.BucketTargetsConfigUpdatedAt, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "BucketTargetsConfigUpdatedAt") + return + } + case "BucketTargetsConfigMetaUpdatedAt": + z.BucketTargetsConfigMetaUpdatedAt, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "BucketTargetsConfigMetaUpdatedAt") + return + } default: err = dc.Skip() if err != nil { @@ -169,9 +187,9 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 22 + // map header, size 25 // write "Name" - err = en.Append(0xde, 0x0, 0x16, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + err = en.Append(0xde, 0x0, 0x19, 0xa4, 0x4e, 0x61, 0x6d, 0x65) if err != nil { return } @@ -390,15 +408,45 @@ func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "LifecycleConfigUpdatedAt") return } + // write "NotificationConfigUpdatedAt" + err = en.Append(0xbb, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) + if err != nil { + return + } + err = en.WriteTime(z.NotificationConfigUpdatedAt) + if err != nil { + err = msgp.WrapError(err, "NotificationConfigUpdatedAt") + return + } + // write "BucketTargetsConfigUpdatedAt" + err = en.Append(0xbc, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) + if err != nil { + return + } + err = en.WriteTime(z.BucketTargetsConfigUpdatedAt) + if err != nil { + err = msgp.WrapError(err, "BucketTargetsConfigUpdatedAt") + return + } + // write "BucketTargetsConfigMetaUpdatedAt" + err = en.Append(0xd9, 0x20, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) + if err != nil { + return + } + err = en.WriteTime(z.BucketTargetsConfigMetaUpdatedAt) + if err != nil { + err = msgp.WrapError(err, "BucketTargetsConfigMetaUpdatedAt") + return + } return } // MarshalMsg implements msgp.Marshaler func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 22 + // map header, size 25 // string "Name" - o = append(o, 0xde, 0x0, 0x16, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + o = append(o, 0xde, 0x0, 0x19, 0xa4, 0x4e, 0x61, 0x6d, 0x65) o = msgp.AppendString(o, z.Name) // string "Created" o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64) @@ -463,6 +511,15 @@ func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) { // string "LifecycleConfigUpdatedAt" o = append(o, 0xb8, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) o = msgp.AppendTime(o, z.LifecycleConfigUpdatedAt) + // string "NotificationConfigUpdatedAt" + o = append(o, 0xbb, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) + o = msgp.AppendTime(o, z.NotificationConfigUpdatedAt) + // string "BucketTargetsConfigUpdatedAt" + o = append(o, 0xbc, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) + o = msgp.AppendTime(o, z.BucketTargetsConfigUpdatedAt) + // string "BucketTargetsConfigMetaUpdatedAt" + o = append(o, 0xd9, 0x20, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) + o = msgp.AppendTime(o, z.BucketTargetsConfigMetaUpdatedAt) return } @@ -616,6 +673,24 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "LifecycleConfigUpdatedAt") return } + case "NotificationConfigUpdatedAt": + z.NotificationConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NotificationConfigUpdatedAt") + return + } + case "BucketTargetsConfigUpdatedAt": + z.BucketTargetsConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "BucketTargetsConfigUpdatedAt") + return + } + case "BucketTargetsConfigMetaUpdatedAt": + z.BucketTargetsConfigMetaUpdatedAt, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "BucketTargetsConfigMetaUpdatedAt") + return + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -630,6 +705,6 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BucketMetadata) Msgsize() (s int) { - s = 3 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) + 21 + msgp.BytesPrefixSize + len(z.ReplicationConfigXML) + 24 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigJSON) + 28 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigMetaJSON) + 22 + msgp.TimeSize + 26 + msgp.TimeSize + 26 + msgp.TimeSize + 23 + msgp.TimeSize + 21 + msgp.TimeSize + 27 + msgp.TimeSize + 26 + msgp.TimeSize + 25 + msgp.TimeSize + s = 3 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) + 21 + msgp.BytesPrefixSize + len(z.ReplicationConfigXML) + 24 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigJSON) + 28 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigMetaJSON) + 22 + msgp.TimeSize + 26 + msgp.TimeSize + 26 + msgp.TimeSize + 23 + msgp.TimeSize 
+ 21 + msgp.TimeSize + 27 + msgp.TimeSize + 26 + msgp.TimeSize + 25 + msgp.TimeSize + 28 + msgp.TimeSize + 29 + msgp.TimeSize + 34 + msgp.TimeSize return } diff --git a/cmd/bucket-metadata_gen_test.go b/cmd/bucket-metadata_gen_test.go index 066a68d166cb6..4c2d25c7723d7 100644 --- a/cmd/bucket-metadata_gen_test.go +++ b/cmd/bucket-metadata_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "bytes" "testing" diff --git a/cmd/bucket-notification-handlers.go b/cmd/bucket-notification-handlers.go index 128986c3a76dc..c41823b4e35f3 100644 --- a/cmd/bucket-notification-handlers.go +++ b/cmd/bucket-notification-handlers.go @@ -26,7 +26,7 @@ import ( "github.com/minio/minio/internal/event" "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) const ( @@ -66,8 +66,9 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } - config.SetRegion(globalSite.Region) - if err = config.Validate(globalSite.Region, globalEventNotifier.targetList); err != nil { + region := globalSite.Region() + config.SetRegion(region) + if err = config.Validate(region, globalEventNotifier.targetList); err != nil { arnErr, ok := err.(*event.ErrARNNotFound) if ok { for i, queue := range config.QueueList { @@ -134,7 +135,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, return } - config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalSite.Region, globalEventNotifier.targetList) + config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalSite.Region(), globalEventNotifier.targetList) if err != nil { apiErr := errorCodes.ToAPIErr(ErrMalformedXML) if event.IsEventError(err) { diff --git a/cmd/bucket-object-lock.go b/cmd/bucket-object-lock.go index 842458fea23a2..d0ad85144ada8 100644 --- a/cmd/bucket-object-lock.go +++ b/cmd/bucket-object-lock.go @@ -28,7 +28,7 @@ import ( "github.com/minio/minio/internal/bucket/replication" xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) // BucketObjectLockSys - map of bucket and retention configuration. @@ -44,7 +44,6 @@ func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention, if errors.Is(err, errInvalidArgument) { return r, err } - logger.CriticalIf(context.Background(), err) return r, err } return config.ToRetention(), nil @@ -66,7 +65,7 @@ func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locke if ret.Mode.Valid() && (ret.Mode == objectlock.RetCompliance || ret.Mode == objectlock.RetGovernance) { t, err := objectlock.UTCNowNTP() if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) return true } if ret.RetainUntilDate.After(t) { @@ -114,7 +113,7 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke // duration of the retention period. 
t, err := objectlock.UTCNowNTP() if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) return ObjectLocked{} } @@ -140,7 +139,7 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke if !byPassSet { t, err := objectlock.UTCNowNTP() if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) return ObjectLocked{} } @@ -170,7 +169,7 @@ func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, oi Objec t, err := objectlock.UTCNowNTP() if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID} } @@ -277,7 +276,7 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob r := objectlock.GetObjectRetentionMeta(objInfo.UserDefined) t, err := objectlock.UTCNowNTP() if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) return mode, retainDate, legalHold, ErrObjectLocked } if r.Mode == objectlock.RetCompliance && r.RetainUntilDate.After(t) { @@ -296,7 +295,10 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob if legalHoldRequested { var lerr error if legalHold, lerr = objectlock.ParseObjectLockLegalHoldHeaders(rq.Header); lerr != nil { - return mode, retainDate, legalHold, toAPIErrorCode(ctx, err) + return mode, retainDate, legalHold, toAPIErrorCode(ctx, lerr) + } + if legalHoldPermErr != ErrNone { + return mode, retainDate, legalHold, legalHoldPermErr } } @@ -306,7 +308,7 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob return mode, retainDate, legalHold, toAPIErrorCode(ctx, err) } rMode, rDate, err := objectlock.ParseObjectLockRetentionHeaders(rq.Header) - if err != nil && !(replica && rMode == "" && rDate.IsZero()) { + if err != nil && (!replica || rMode != "" || !rDate.IsZero()) { return mode, retainDate, legalHold, toAPIErrorCode(ctx, err) } if retentionPermErr != ErrNone { @@ -324,7 +326,7 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob t, err := objectlock.UTCNowNTP() if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err, logger.WarningKind) return mode, retainDate, legalHold, ErrObjectLocked } diff --git a/cmd/bucket-policy-handlers.go b/cmd/bucket-policy-handlers.go index 321635b731561..994b0b0da18e0 100644 --- a/cmd/bucket-policy-handlers.go +++ b/cmd/bucket-policy-handlers.go @@ -27,7 +27,7 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) const ( @@ -113,7 +113,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht } // Call site replication hook. - logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ Type: madmin.SRBucketMetaTypePolicy, Bucket: bucket, Policy: bucketPolicyBytes, @@ -157,7 +157,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r } // Call site replication hook. 
- logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ Type: madmin.SRBucketMetaTypePolicy, Bucket: bucket, UpdatedAt: updatedAt, diff --git a/cmd/bucket-policy-handlers_test.go b/cmd/bucket-policy-handlers_test.go index f299ebc5bb627..e506aceb06325 100644 --- a/cmd/bucket-policy-handlers_test.go +++ b/cmd/bucket-policy-handlers_test.go @@ -29,8 +29,8 @@ import ( "testing" "github.com/minio/minio/internal/auth" - "github.com/minio/pkg/v2/policy" - "github.com/minio/pkg/v2/policy/condition" + "github.com/minio/pkg/v3/policy" + "github.com/minio/pkg/v3/policy/condition" ) func getAnonReadOnlyBucketPolicy(bucketName string) *policy.BucketPolicy { @@ -107,7 +107,7 @@ func getAnonWriteOnlyObjectPolicy(bucketName, prefix string) *policy.BucketPolic // Wrapper for calling Create Bucket and ensure we get one and only one success. func TestCreateBucket(t *testing.T) { - ExecObjectLayerAPITest(t, testCreateBucket, []string{"MakeBucket"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testCreateBucket, endpoints: []string{"MakeBucket"}}) } // testCreateBucket - Test for calling Create Bucket and ensure we get one and only one success. @@ -122,7 +122,7 @@ func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRoute var wg sync.WaitGroup var mu sync.Mutex wg.Add(n) - for i := 0; i < n; i++ { + for range n { go func() { defer wg.Done() // Sync start. @@ -154,7 +154,7 @@ func testCreateBucket(obj ObjectLayer, instanceType, bucketName string, apiRoute // Wrapper for calling Put Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup. func TestPutBucketPolicyHandler(t *testing.T) { - ExecObjectLayerAPITest(t, testPutBucketPolicyHandler, []string{"PutBucketPolicy"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testPutBucketPolicyHandler, endpoints: []string{"PutBucketPolicy"}}) } // testPutBucketPolicyHandler - Test for Bucket policy end point. @@ -187,7 +187,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string // Test case - 1. { bucketName: bucketName, - bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))), + bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)), policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)), accessKey: credentials.AccessKey, @@ -199,7 +199,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string // Expecting StatusBadRequest (400). { bucketName: bucketName, - bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))), + bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)), policyLen: maxBucketPolicySize + 1, accessKey: credentials.AccessKey, @@ -211,7 +211,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string // Expecting the HTTP response status to be StatusLengthRequired (411). 
{ bucketName: bucketName, - bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))), + bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)), policyLen: 0, accessKey: credentials.AccessKey, @@ -258,7 +258,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string // checkBucketPolicyResources should fail. { bucketName: bucketName1, - bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))), + bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, bucketName, bucketName)), policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)), accessKey: credentials.AccessKey, @@ -271,7 +271,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string // should result in 404 StatusNotFound { bucketName: "non-existent-bucket", - bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, "non-existent-bucket", "non-existent-bucket"))), + bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, "non-existent-bucket", "non-existent-bucket")), policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)), accessKey: credentials.AccessKey, @@ -284,7 +284,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string // should result in 404 StatusNotFound { bucketName: ".invalid-bucket", - bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, ".invalid-bucket", ".invalid-bucket"))), + bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplate, ".invalid-bucket", ".invalid-bucket")), policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)), accessKey: credentials.AccessKey, @@ -297,7 +297,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string // should result in 400 StatusBadRequest. { bucketName: bucketName, - bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplateWithoutVersion, bucketName, bucketName))), + bucketPolicyReader: bytes.NewReader(fmt.Appendf(nil, bucketPolicyTemplateWithoutVersion, bucketName, bucketName)), policyLen: len(fmt.Sprintf(bucketPolicyTemplateWithoutVersion, bucketName, bucketName)), accessKey: credentials.AccessKey, @@ -373,7 +373,7 @@ func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string // Wrapper for calling Get Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup. func TestGetBucketPolicyHandler(t *testing.T) { - ExecObjectLayerAPITest(t, testGetBucketPolicyHandler, []string{"PutBucketPolicy", "GetBucketPolicy"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testGetBucketPolicyHandler, endpoints: []string{"PutBucketPolicy", "GetBucketPolicy"}}) } // testGetBucketPolicyHandler - Test for end point which fetches the access policy json of the given bucket. @@ -577,7 +577,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string // Wrapper for calling Delete Bucket Policy HTTP handler tests for both Erasure multiple disks and single node setup. 
func TestDeleteBucketPolicyHandler(t *testing.T) { - ExecObjectLayerAPITest(t, testDeleteBucketPolicyHandler, []string{"PutBucketPolicy", "DeleteBucketPolicy"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testDeleteBucketPolicyHandler, endpoints: []string{"PutBucketPolicy", "DeleteBucketPolicy"}}) } // testDeleteBucketPolicyHandler - Test for Delete bucket policy end point. diff --git a/cmd/bucket-policy.go b/cmd/bucket-policy.go index 9e1330b63776c..0bcbe2e665096 100644 --- a/cmd/bucket-policy.go +++ b/cmd/bucket-policy.go @@ -19,6 +19,7 @@ package cmd import ( "encoding/json" + "maps" "net/http" "net/url" "strconv" @@ -32,7 +33,7 @@ import ( "github.com/minio/minio/internal/handlers" xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) // PolicySys - policy subsystem. @@ -53,7 +54,7 @@ func (sys *PolicySys) IsAllowed(args policy.BucketPolicyArgs) bool { // Log unhandled errors. if _, ok := err.(BucketPolicyNotFound); !ok { - logger.LogIf(GlobalContext, err) + internalLogIf(GlobalContext, err, logger.WarningKind) } // As policy is not available for given bucket name, returns IsOwner i.e. @@ -187,9 +188,7 @@ func getConditionValues(r *http.Request, lc string, cred auth.Credentials) map[s } cloneURLValues := make(url.Values, len(r.Form)) - for k, v := range r.Form { - cloneURLValues[k] = v - } + maps.Copy(cloneURLValues, r.Form) for _, objLock := range []string{ xhttp.AmzObjectLockMode, @@ -224,7 +223,7 @@ func getConditionValues(r *http.Request, lc string, cred auth.Credentials) map[s // Add groups claim which could be a list. This will ensure that the claim // `jwt:groups` works. if grpsVal, ok := claims["groups"]; ok { - if grpsIs, ok := grpsVal.([]interface{}); ok { + if grpsIs, ok := grpsVal.([]any); ok { grps := []string{} for _, gI := range grpsIs { if g, ok := gI.(string); ok { diff --git a/cmd/bucket-quota.go b/cmd/bucket-quota.go index 78eabfa0b9dc2..d9779c21ab1fb 100644 --- a/cmd/bucket-quota.go +++ b/cmd/bucket-quota.go @@ -49,8 +49,11 @@ var bucketStorageCache = cachevalue.New[DataUsageInfo]() func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) { bucketStorageCache.InitOnce(10*time.Second, cachevalue.Opts{ReturnLastGood: true, NoWait: true}, - func() (DataUsageInfo, error) { - ctx, done := context.WithTimeout(context.Background(), 2*time.Second) + func(ctx context.Context) (DataUsageInfo, error) { + if objAPI == nil { + return DataUsageInfo{}, errServerNotInitialized + } + ctx, done := context.WithTimeout(ctx, 2*time.Second) defer done() return loadDataUsageFromBackend(ctx, objAPI) @@ -59,24 +62,26 @@ func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) { } // GetBucketUsageInfo return bucket usage info for a given bucket -func (sys *BucketQuotaSys) GetBucketUsageInfo(bucket string) (BucketUsageInfo, error) { - dui, err := bucketStorageCache.Get() +func (sys *BucketQuotaSys) GetBucketUsageInfo(ctx context.Context, bucket string) BucketUsageInfo { + sys.Init(newObjectLayerFn()) + + dui, err := bucketStorageCache.GetWithCtx(ctx) timedout := OperationTimedOut{} if err != nil && !errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &timedout) { if len(dui.BucketsUsage) > 0 { - logger.LogOnceIf(GlobalContext, fmt.Errorf("unable to retrieve usage information for bucket: %s, relying on older value cached in-memory: err(%v)", bucket, err), "bucket-usage-cache-"+bucket) + internalLogOnceIf(GlobalContext, fmt.Errorf("unable to retrieve usage information 
for bucket: %s, relying on older value cached in-memory: err(%v)", bucket, err), "bucket-usage-cache-"+bucket) } else { - logger.LogOnceIf(GlobalContext, errors.New("unable to retrieve usage information for bucket: %s, no reliable usage value available - quota will not be enforced"), "bucket-usage-empty-"+bucket) + internalLogOnceIf(GlobalContext, errors.New("unable to retrieve usage information for bucket: %s, no reliable usage value available - quota will not be enforced"), "bucket-usage-empty-"+bucket) } } if len(dui.BucketsUsage) > 0 { bui, ok := dui.BucketsUsage[bucket] if ok { - return bui, nil + return bui } } - return BucketUsageInfo{}, nil + return BucketUsageInfo{} } // parseBucketQuota parses BucketQuota from json @@ -87,12 +92,12 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota, } if !quotaCfg.IsValid() { if quotaCfg.Type == "fifo" { - logger.LogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc admin bucket quota alias/bucket --clear' and use 'mc ilm add' for expiration of objects")) + internalLogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc quota clear alias/bucket' and use 'mc ilm add' for expiration of objects"), logger.WarningKind) return quotaCfg, fmt.Errorf("invalid quota type 'fifo'") } return quotaCfg, fmt.Errorf("Invalid quota config %#v", quotaCfg) } - return + return quotaCfg, err } func (sys *BucketQuotaSys) enforceQuotaHard(ctx context.Context, bucket string, size int64) error { @@ -118,11 +123,7 @@ func (sys *BucketQuotaSys) enforceQuotaHard(ctx context.Context, bucket string, return BucketQuotaExceeded{Bucket: bucket} } - bui, err := sys.GetBucketUsageInfo(bucket) - if err != nil { - return err - } - + bui := sys.GetBucketUsageInfo(ctx, bucket) if bui.Size > 0 && ((bui.Size + uint64(size)) >= quotaSize) { return BucketQuotaExceeded{Bucket: bucket} } diff --git a/cmd/bucket-replication-handlers.go b/cmd/bucket-replication-handlers.go index 29c066a08f5b4..05a9d2e859c1a 100644 --- a/cmd/bucket-replication-handlers.go +++ b/cmd/bucket-replication-handlers.go @@ -34,7 +34,7 @@ import ( xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) // PutBucketReplicationConfigHandler - PUT Bucket replication configuration. 
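The bucket-quota hunks above make usage lookups best-effort: GetBucketUsageInfo now returns whatever cached value is available instead of an error, and enforceQuotaHard only enforces the hard quota when a positive cached size exists. A rough sketch of that enforcement decision, with illustrative types standing in for DataUsageInfo and BucketQuotaSys:

```go
package main

import (
	"errors"
	"fmt"
)

// bucketUsage is a stand-in for the cached data-usage snapshot the scanner produces.
type bucketUsage struct {
	Size uint64 // current bytes used; 0 means "no reliable value available"
}

var errQuotaExceeded = errors.New("bucket quota exceeded")

// enforceHardQuota mirrors the shape of enforceQuotaHard in the diff:
// the hard quota is only enforced when a positive cached usage value exists,
// otherwise the write is allowed and the cache miss is logged elsewhere.
func enforceHardQuota(quota uint64, usage bucketUsage, incoming int64) error {
	if quota == 0 {
		return nil // no quota configured
	}
	if uint64(incoming) >= quota {
		return errQuotaExceeded // the incoming object alone would exceed the quota
	}
	if usage.Size > 0 && usage.Size+uint64(incoming) >= quota {
		return errQuotaExceeded
	}
	return nil
}

func main() {
	quota := uint64(100 << 20) // 100 MiB hard quota
	fmt.Println(enforceHardQuota(quota, bucketUsage{Size: 90 << 20}, 20<<20)) // bucket quota exceeded
	fmt.Println(enforceHardQuota(quota, bucketUsage{Size: 0}, 20<<20))        // <nil>: usage unknown, not enforced
}
```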
@@ -75,7 +75,7 @@ func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWr writeErrorResponse(ctx, w, apiErr, r.URL) return } - sameTarget, apiErr := validateReplicationDestination(ctx, bucket, replicationConfig, true) + sameTarget, apiErr := validateReplicationDestination(ctx, bucket, replicationConfig, &validateReplicationDestinationOptions{CheckRemoteBucket: true}) if apiErr != noError { writeErrorResponse(ctx, w, apiErr, r.URL) return @@ -230,7 +230,7 @@ func (api objectAPIHandlers) GetBucketReplicationMetricsHandler(w http.ResponseW w.Header().Set(xhttp.ContentType, string(mimeJSON)) enc := json.NewEncoder(w) - stats := globalReplicationStats.getLatestReplicationStats(bucket) + stats := globalReplicationStats.Load().getLatestReplicationStats(bucket) bwRpt := globalNotificationSys.GetBandwidthReports(ctx, bucket) bwMap := bwRpt.BucketStats for arn, st := range stats.ReplicationStats.Stats { @@ -286,7 +286,7 @@ func (api objectAPIHandlers) GetBucketReplicationMetricsV2Handler(w http.Respons w.Header().Set(xhttp.ContentType, string(mimeJSON)) enc := json.NewEncoder(w) - stats := globalReplicationStats.getLatestReplicationStats(bucket) + stats := globalReplicationStats.Load().getLatestReplicationStats(bucket) bwRpt := globalNotificationSys.GetBandwidthReports(ctx, bucket) bwMap := bwRpt.BucketStats for arn, st := range stats.ReplicationStats.Stats { @@ -422,7 +422,7 @@ func (api objectAPIHandlers) ResetBucketReplicationStartHandler(w http.ResponseW return } - if err := globalReplicationPool.resyncer.start(ctx, objectAPI, resyncOpts{ + if err := globalReplicationPool.Get().resyncer.start(ctx, objectAPI, resyncOpts{ bucket: bucket, arn: arn, resyncID: resetID, @@ -559,7 +559,7 @@ func (api objectAPIHandlers) ValidateBucketReplicationCredsHandler(w http.Respon lockEnabled = lcfg.Enabled() } - sameTarget, apiErr := validateReplicationDestination(ctx, bucket, replicationConfig, true) + sameTarget, apiErr := validateReplicationDestination(ctx, bucket, replicationConfig, &validateReplicationDestinationOptions{CheckRemoteBucket: true}) if apiErr != noError { writeErrorResponse(ctx, w, apiErr, r.URL) return diff --git a/cmd/bucket-replication-metrics.go b/cmd/bucket-replication-metrics.go index 3b3b56af6fd04..aa4cfb963f303 100644 --- a/cmd/bucket-replication-metrics.go +++ b/cmd/bucket-replication-metrics.go @@ -119,7 +119,7 @@ func (a *ActiveWorkerStat) update() { if a == nil { return } - a.Curr = globalReplicationPool.ActiveWorkers() + a.Curr = globalReplicationPool.Get().ActiveWorkers() a.hist.Update(int64(a.Curr)) a.Avg = float32(a.hist.Mean()) a.Max = int(a.hist.Max()) diff --git a/cmd/bucket-replication-metrics_gen.go b/cmd/bucket-replication-metrics_gen.go index f0443ab8ac1fa..f8eba540c6d14 100644 --- a/cmd/bucket-replication-metrics_gen.go +++ b/cmd/bucket-replication-metrics_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package cmd + import ( "github.com/tinylib/msgp/msgp" ) @@ -584,6 +584,7 @@ func (z *InQueueStats) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z InQueueStats) EncodeMsg(en *msgp.Writer) (err error) { // map header, size 0 + _ = z err = en.Append(0x80) if err != nil { return @@ -595,6 +596,7 @@ func (z InQueueStats) EncodeMsg(en *msgp.Writer) (err error) { func (z InQueueStats) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) // map header, size 0 + _ = z o = append(o, 0x80) return } diff --git a/cmd/bucket-replication-metrics_gen_test.go b/cmd/bucket-replication-metrics_gen_test.go index 629649e76f661..7342472fdf606 100644 --- a/cmd/bucket-replication-metrics_gen_test.go +++ b/cmd/bucket-replication-metrics_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "bytes" "testing" diff --git a/cmd/bucket-replication-stats.go b/cmd/bucket-replication-stats.go index 1df4a68271426..2971792848805 100644 --- a/cmd/bucket-replication-stats.go +++ b/cmd/bucket-replication-stats.go @@ -87,6 +87,9 @@ func (r *ReplicationStats) updateMovingAvg() { // ActiveWorkers returns worker stats func (r *ReplicationStats) ActiveWorkers() ActiveWorkerStat { + if r == nil { + return ActiveWorkerStat{} + } r.wlock.RLock() defer r.wlock.RUnlock() w := r.workers.get() @@ -109,7 +112,6 @@ func (r *ReplicationStats) collectWorkerMetrics(ctx context.Context) { r.wlock.Lock() r.workers.update() r.wlock.Unlock() - } } } @@ -351,6 +353,9 @@ func NewReplicationStats(ctx context.Context, objectAPI ObjectLayer) *Replicatio } func (r *ReplicationStats) getAllLatest(bucketsUsage map[string]BucketUsageInfo) (bucketsReplicationStats map[string]BucketStats) { + if r == nil { + return nil + } peerBucketStatsList := globalNotificationSys.GetClusterAllBucketStats(GlobalContext) bucketsReplicationStats = make(map[string]BucketStats, len(bucketsUsage)) @@ -460,6 +465,9 @@ func (r *ReplicationStats) calculateBucketReplicationStats(bucket string, bucket // get the most current of in-memory replication stats and data usage info from crawler. 
func (r *ReplicationStats) getLatestReplicationStats(bucket string) (s BucketStats) { + if r == nil { + return s + } bucketStats := globalNotificationSys.GetClusterBucketStats(GlobalContext, bucket) return r.calculateBucketReplicationStats(bucket, bucketStats) } @@ -495,9 +503,14 @@ func (r *ReplicationStats) decQ(bucket string, sz int64, isDelMarker bool, opTyp // incProxy increments proxy metrics for proxied calls func (r *ReplicationStats) incProxy(bucket string, api replProxyAPI, isErr bool) { - r.pCache.inc(bucket, api, isErr) + if r != nil { + r.pCache.inc(bucket, api, isErr) + } } func (r *ReplicationStats) getProxyStats(bucket string) ProxyMetric { + if r == nil { + return ProxyMetric{} + } return r.pCache.getBucketStats(bucket) } diff --git a/cmd/bucket-replication-utils.go b/cmd/bucket-replication-utils.go index 349a4ed7e8307..41b5dbc639797 100644 --- a/cmd/bucket-replication-utils.go +++ b/cmd/bucket-replication-utils.go @@ -21,6 +21,7 @@ import ( "bytes" "context" "fmt" + "maps" "net/http" "net/url" "regexp" @@ -125,16 +126,16 @@ func (ri replicatedInfos) VersionPurgeStatus() VersionPurgeStatusType { completed := 0 for _, v := range ri.Targets { switch v.VersionPurgeStatus { - case Failed: - return Failed - case Complete: + case replication.VersionPurgeFailed: + return replication.VersionPurgeFailed + case replication.VersionPurgeComplete: completed++ } } if completed == len(ri.Targets) { - return Complete + return replication.VersionPurgeComplete } - return Pending + return replication.VersionPurgePending } func (ri replicatedInfos) VersionPurgeStatusInternal() string { @@ -171,13 +172,13 @@ func (ri ReplicateObjectInfo) TargetReplicationStatus(arn string) (status replic repStatMatches := replStatusRegex.FindAllStringSubmatch(ri.ReplicationStatusInternal, -1) for _, repStatMatch := range repStatMatches { if len(repStatMatch) != 3 { - return + return status } if repStatMatch[1] == arn { return replication.StatusType(repStatMatch[2]) } } - return + return status } // TargetReplicationStatus - returns replication status of a target @@ -185,13 +186,13 @@ func (o ObjectInfo) TargetReplicationStatus(arn string) (status replication.Stat repStatMatches := replStatusRegex.FindAllStringSubmatch(o.ReplicationStatusInternal, -1) for _, repStatMatch := range repStatMatches { if len(repStatMatch) != 3 { - return + return status } if repStatMatch[1] == arn { return replication.StatusType(repStatMatch[2]) } } - return + return status } type replicateTargetDecision struct { @@ -309,9 +310,9 @@ func parseReplicateDecision(ctx context.Context, bucket, s string) (r ReplicateD targetsMap: make(map[string]replicateTargetDecision), } if len(s) == 0 { - return + return r, err } - for _, p := range strings.Split(s, ",") { + for p := range strings.SplitSeq(s, ",") { if p == "" { continue } @@ -326,7 +327,7 @@ func parseReplicateDecision(ctx context.Context, bucket, s string) (r ReplicateD } r.targetsMap[slc[0]] = replicateTargetDecision{Replicate: tgt[0] == "true", Synchronous: tgt[1] == "true", Arn: tgt[2], ID: tgt[3]} } - return + return r, err } // ReplicationState represents internal replication state @@ -373,14 +374,14 @@ func (rs *ReplicationState) CompositeReplicationStatus() (st replication.StatusT case !rs.ReplicaStatus.Empty(): return rs.ReplicaStatus default: - return + return st } } // CompositeVersionPurgeStatus returns overall replication purge status for the permanent delete being replicated. 
func (rs *ReplicationState) CompositeVersionPurgeStatus() VersionPurgeStatusType { switch VersionPurgeStatusType(rs.VersionPurgeStatusInternal) { - case Pending, Complete, Failed: // for backward compatibility + case replication.VersionPurgePending, replication.VersionPurgeComplete, replication.VersionPurgeFailed: // for backward compatibility return VersionPurgeStatusType(rs.VersionPurgeStatusInternal) default: return getCompositeVersionPurgeStatus(rs.PurgeTargets) @@ -478,16 +479,16 @@ func getCompositeVersionPurgeStatus(m map[string]VersionPurgeStatusType) Version completed := 0 for _, v := range m { switch v { - case Failed: - return Failed - case Complete: + case replication.VersionPurgeFailed: + return replication.VersionPurgeFailed + case replication.VersionPurgeComplete: completed++ } } if completed == len(m) { - return Complete + return replication.VersionPurgeComplete } - return Pending + return replication.VersionPurgePending } // getHealReplicateObjectInfo returns info needed by heal replication in ReplicateObjectInfo @@ -534,7 +535,7 @@ func getHealReplicateObjectInfo(oi ObjectInfo, rcfg replicationConfig) Replicate rstate.ReplicateDecisionStr = dsc.String() asz, _ := oi.GetActualSize() - return ReplicateObjectInfo{ + r := ReplicateObjectInfo{ Name: oi.Name, Size: oi.Size, ActualSize: asz, @@ -558,6 +559,10 @@ func getHealReplicateObjectInfo(oi ObjectInfo, rcfg replicationConfig) Replicate SSEC: crypto.SSEC.IsEncrypted(oi.UserDefined), UserTags: oi.UserTags, } + if r.SSEC { + r.Checksum = oi.Checksum + } + return r } // ReplicationState - returns replication state using other internal replication metadata in ObjectInfo @@ -631,28 +636,7 @@ type ResyncTarget struct { } // VersionPurgeStatusType represents status of a versioned delete or permanent delete w.r.t bucket replication -type VersionPurgeStatusType string - -const ( - // Pending - versioned delete replication is pending. - Pending VersionPurgeStatusType = "PENDING" - - // Complete - versioned delete replication is now complete, erase version on disk. - Complete VersionPurgeStatusType = "COMPLETE" - - // Failed - versioned delete replication failed. - Failed VersionPurgeStatusType = "FAILED" -) - -// Empty returns true if purge status was not set. -func (v VersionPurgeStatusType) Empty() bool { - return string(v) == "" -} - -// Pending returns true if the version is pending purge. -func (v VersionPurgeStatusType) Pending() bool { - return v == Pending || v == Failed -} +type VersionPurgeStatusType = replication.VersionPurgeStatusType type replicationResyncer struct { // map of bucket to their resync status @@ -752,10 +736,8 @@ type BucketReplicationResyncStatus struct { func (rs *BucketReplicationResyncStatus) cloneTgtStats() (m map[string]TargetReplicationResyncStatus) { m = make(map[string]TargetReplicationResyncStatus) - for arn, st := range rs.TargetsMap { - m[arn] = st - } - return + maps.Copy(m, rs.TargetsMap) + return m } func newBucketResyncStatus(bucket string) BucketReplicationResyncStatus { @@ -792,7 +774,7 @@ func extractReplicateDiffOpts(q url.Values) (opts madmin.ReplDiffOpts) { opts.Verbose = q.Get("verbose") == "true" opts.ARN = q.Get("arn") opts.Prefix = q.Get("prefix") - return + return opts } const ( diff --git a/cmd/bucket-replication-utils_gen.go b/cmd/bucket-replication-utils_gen.go index 4a1078fd6ef51..933869213635d 100644 --- a/cmd/bucket-replication-utils_gen.go +++ b/cmd/bucket-replication-utils_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package cmd + import ( "github.com/minio/minio/internal/bucket/replication" "github.com/tinylib/msgp/msgp" @@ -41,19 +41,17 @@ func (z *BucketReplicationResyncStatus) DecodeMsg(dc *msgp.Reader) (err error) { if z.TargetsMap == nil { z.TargetsMap = make(map[string]TargetReplicationResyncStatus, zb0002) } else if len(z.TargetsMap) > 0 { - for key := range z.TargetsMap { - delete(z.TargetsMap, key) - } + clear(z.TargetsMap) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 TargetReplicationResyncStatus za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "TargetsMap") return } + var za0002 TargetReplicationResyncStatus err = za0002.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "TargetsMap", za0001) @@ -203,14 +201,12 @@ func (z *BucketReplicationResyncStatus) UnmarshalMsg(bts []byte) (o []byte, err if z.TargetsMap == nil { z.TargetsMap = make(map[string]TargetReplicationResyncStatus, zb0002) } else if len(z.TargetsMap) > 0 { - for key := range z.TargetsMap { - delete(z.TargetsMap, key) - } + clear(z.TargetsMap) } for zb0002 > 0 { - var za0001 string var za0002 TargetReplicationResyncStatus zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "TargetsMap") @@ -288,19 +284,17 @@ func (z *MRFReplicateEntries) DecodeMsg(dc *msgp.Reader) (err error) { if z.Entries == nil { z.Entries = make(map[string]MRFReplicateEntry, zb0002) } else if len(z.Entries) > 0 { - for key := range z.Entries { - delete(z.Entries, key) - } + clear(z.Entries) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 MRFReplicateEntry za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Entries") return } + var za0002 MRFReplicateEntry var zb0003 uint32 zb0003, err = dc.ReadMapHeader() if err != nil { @@ -478,14 +472,12 @@ func (z *MRFReplicateEntries) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.Entries == nil { z.Entries = make(map[string]MRFReplicateEntry, zb0002) } else if len(z.Entries) > 0 { - for key := range z.Entries { - delete(z.Entries, key) - } + clear(z.Entries) } for zb0002 > 0 { - var za0001 string var za0002 MRFReplicateEntry zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Entries") @@ -749,6 +741,7 @@ func (z *ReplicateDecision) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z ReplicateDecision) EncodeMsg(en *msgp.Writer) (err error) { // map header, size 0 + _ = z err = en.Append(0x80) if err != nil { return @@ -760,6 +753,7 @@ func (z ReplicateDecision) EncodeMsg(en *msgp.Writer) (err error) { func (z ReplicateDecision) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) // map header, size 0 + _ = z o = append(o, 0x80) return } @@ -870,19 +864,17 @@ func (z *ReplicationState) DecodeMsg(dc *msgp.Reader) (err error) { if z.Targets == nil { z.Targets = make(map[string]replication.StatusType, zb0002) } else if len(z.Targets) > 0 { - for key := range z.Targets { - delete(z.Targets, key) - } + clear(z.Targets) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 replication.StatusType za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Targets") return } + var za0002 replication.StatusType err = za0002.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Targets", za0001) @@ -900,53 +892,45 @@ func (z *ReplicationState) DecodeMsg(dc *msgp.Reader) (err error) { if z.PurgeTargets == nil { z.PurgeTargets = 
make(map[string]VersionPurgeStatusType, zb0003) } else if len(z.PurgeTargets) > 0 { - for key := range z.PurgeTargets { - delete(z.PurgeTargets, key) - } + clear(z.PurgeTargets) } for zb0003 > 0 { zb0003-- var za0003 string - var za0004 VersionPurgeStatusType za0003, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "PurgeTargets") return } - { - var zb0004 string - zb0004, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "PurgeTargets", za0003) - return - } - za0004 = VersionPurgeStatusType(zb0004) + var za0004 VersionPurgeStatusType + err = za0004.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "PurgeTargets", za0003) + return } z.PurgeTargets[za0003] = za0004 } case "ResetStatusesMap": - var zb0005 uint32 - zb0005, err = dc.ReadMapHeader() + var zb0004 uint32 + zb0004, err = dc.ReadMapHeader() if err != nil { err = msgp.WrapError(err, "ResetStatusesMap") return } if z.ResetStatusesMap == nil { - z.ResetStatusesMap = make(map[string]string, zb0005) + z.ResetStatusesMap = make(map[string]string, zb0004) } else if len(z.ResetStatusesMap) > 0 { - for key := range z.ResetStatusesMap { - delete(z.ResetStatusesMap, key) - } + clear(z.ResetStatusesMap) } - for zb0005 > 0 { - zb0005-- + for zb0004 > 0 { + zb0004-- var za0005 string - var za0006 string za0005, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "ResetStatusesMap") return } + var za0006 string za0006, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "ResetStatusesMap", za0005) @@ -1076,7 +1060,7 @@ func (z *ReplicationState) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "PurgeTargets") return } - err = en.WriteString(string(za0004)) + err = za0004.EncodeMsg(en) if err != nil { err = msgp.WrapError(err, "PurgeTargets", za0003) return @@ -1152,7 +1136,11 @@ func (z *ReplicationState) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.AppendMapHeader(o, uint32(len(z.PurgeTargets))) for za0003, za0004 := range z.PurgeTargets { o = msgp.AppendString(o, za0003) - o = msgp.AppendString(o, string(za0004)) + o, err = za0004.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "PurgeTargets", za0003) + return + } } // string "ResetStatusesMap" o = append(o, 0xb0, 0x52, 0x65, 0x73, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x4d, 0x61, 0x70) @@ -1234,14 +1222,12 @@ func (z *ReplicationState) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.Targets == nil { z.Targets = make(map[string]replication.StatusType, zb0002) } else if len(z.Targets) > 0 { - for key := range z.Targets { - delete(z.Targets, key) - } + clear(z.Targets) } for zb0002 > 0 { - var za0001 string var za0002 replication.StatusType zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Targets") @@ -1264,48 +1250,40 @@ func (z *ReplicationState) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.PurgeTargets == nil { z.PurgeTargets = make(map[string]VersionPurgeStatusType, zb0003) } else if len(z.PurgeTargets) > 0 { - for key := range z.PurgeTargets { - delete(z.PurgeTargets, key) - } + clear(z.PurgeTargets) } for zb0003 > 0 { - var za0003 string var za0004 VersionPurgeStatusType zb0003-- + var za0003 string za0003, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "PurgeTargets") return } - { - var zb0004 string - zb0004, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "PurgeTargets", za0003) - return - } - za0004 = 
VersionPurgeStatusType(zb0004) + bts, err = za0004.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "PurgeTargets", za0003) + return } z.PurgeTargets[za0003] = za0004 } case "ResetStatusesMap": - var zb0005 uint32 - zb0005, bts, err = msgp.ReadMapHeaderBytes(bts) + var zb0004 uint32 + zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "ResetStatusesMap") return } if z.ResetStatusesMap == nil { - z.ResetStatusesMap = make(map[string]string, zb0005) + z.ResetStatusesMap = make(map[string]string, zb0004) } else if len(z.ResetStatusesMap) > 0 { - for key := range z.ResetStatusesMap { - delete(z.ResetStatusesMap, key) - } + clear(z.ResetStatusesMap) } - for zb0005 > 0 { - var za0005 string + for zb0004 > 0 { var za0006 string - zb0005-- + zb0004-- + var za0005 string za0005, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "ResetStatusesMap") @@ -1343,7 +1321,7 @@ func (z *ReplicationState) Msgsize() (s int) { if z.PurgeTargets != nil { for za0003, za0004 := range z.PurgeTargets { _ = za0004 - s += msgp.StringPrefixSize + len(za0003) + msgp.StringPrefixSize + len(string(za0004)) + s += msgp.StringPrefixSize + len(za0003) + za0004.Msgsize() } } s += 17 + msgp.MapHeaderSize @@ -1388,6 +1366,7 @@ func (z *ResyncDecision) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z ResyncDecision) EncodeMsg(en *msgp.Writer) (err error) { // map header, size 0 + _ = z err = en.Append(0x80) if err != nil { return @@ -1399,6 +1378,7 @@ func (z ResyncDecision) EncodeMsg(en *msgp.Writer) (err error) { func (z ResyncDecision) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) // map header, size 0 + _ = z o = append(o, 0x80) return } @@ -2503,55 +2483,3 @@ func (z *TargetReplicationResyncStatus) Msgsize() (s int) { s = 1 + 3 + msgp.TimeSize + 4 + msgp.TimeSize + 3 + msgp.StringPrefixSize + len(z.ResyncID) + 4 + msgp.TimeSize + 4 + msgp.IntSize + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.StringPrefixSize + len(z.Bucket) + 4 + msgp.StringPrefixSize + len(z.Object) return } - -// DecodeMsg implements msgp.Decodable -func (z *VersionPurgeStatusType) DecodeMsg(dc *msgp.Reader) (err error) { - { - var zb0001 string - zb0001, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err) - return - } - (*z) = VersionPurgeStatusType(zb0001) - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z VersionPurgeStatusType) EncodeMsg(en *msgp.Writer) (err error) { - err = en.WriteString(string(z)) - if err != nil { - err = msgp.WrapError(err) - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z VersionPurgeStatusType) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - o = msgp.AppendString(o, string(z)) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *VersionPurgeStatusType) UnmarshalMsg(bts []byte) (o []byte, err error) { - { - var zb0001 string - zb0001, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - (*z) = VersionPurgeStatusType(zb0001) - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z VersionPurgeStatusType) Msgsize() (s int) { - s = msgp.StringPrefixSize + len(string(z)) - return -} diff --git a/cmd/bucket-replication-utils_gen_test.go b/cmd/bucket-replication-utils_gen_test.go index 
9a8fd1ea7042e..ec7dd41c55487 100644 --- a/cmd/bucket-replication-utils_gen_test.go +++ b/cmd/bucket-replication-utils_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "bytes" "testing" diff --git a/cmd/bucket-replication-utils_test.go b/cmd/bucket-replication-utils_test.go index 43a1c356742a5..19c1b4b14ccda 100644 --- a/cmd/bucket-replication-utils_test.go +++ b/cmd/bucket-replication-utils_test.go @@ -18,7 +18,6 @@ package cmd import ( - "context" "testing" "github.com/minio/minio/internal/bucket/replication" @@ -184,7 +183,7 @@ var parseReplicationDecisionTest = []struct { func TestParseReplicateDecision(t *testing.T) { for i, test := range parseReplicationDecisionTest { - dsc, err := parseReplicateDecision(context.Background(), "bucket", test.expDsc.String()) + dsc, err := parseReplicateDecision(t.Context(), "bucket", test.expDsc.String()) if err != nil { if test.expErr != err { t.Errorf("Test%d (%s): Expected parse error got %t , want %t", i+1, test.name, err, test.expErr) diff --git a/cmd/bucket-replication.go b/cmd/bucket-replication.go index 5ab136497618f..5f6268cb66cd5 100644 --- a/cmd/bucket-replication.go +++ b/cmd/bucket-replication.go @@ -19,10 +19,12 @@ package cmd import ( "context" + "encoding/base64" "encoding/binary" "errors" "fmt" "io" + "maps" "math/rand" "net/http" "net/url" @@ -48,7 +50,9 @@ import ( "github.com/minio/minio/internal/hash" xhttp "github.com/minio/minio/internal/http" xioutil "github.com/minio/minio/internal/ioutil" + "github.com/minio/minio/internal/kms" "github.com/minio/minio/internal/logger" + "github.com/minio/minio/internal/once" "github.com/tinylib/msgp/msgp" "github.com/zeebo/xxh3" ) @@ -72,30 +76,26 @@ const ( ObjectLockRetentionTimestamp = "objectlock-retention-timestamp" // ObjectLockLegalHoldTimestamp - the last time a legal hold metadata modification happened on this cluster for this object version ObjectLockLegalHoldTimestamp = "objectlock-legalhold-timestamp" - // ReplicationWorkerMultiplier is suggested worker multiplier if traffic exceeds replication worker capacity - ReplicationWorkerMultiplier = 1.5 -) -func isReplicationEnabled(ctx context.Context, bucketName string) bool { - rc, _ := getReplicationConfig(ctx, bucketName) - return rc != nil -} + // ReplicationSsecChecksumHeader - the encrypted checksum of the SSE-C encrypted object. + ReplicationSsecChecksumHeader = "X-Minio-Replication-Ssec-Crc" +) // gets replication config associated to a given bucket name. func getReplicationConfig(ctx context.Context, bucketName string) (rc *replication.Config, err error) { rCfg, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucketName) - if err != nil { - if errors.Is(err, BucketReplicationConfigNotFound{Bucket: bucketName}) || errors.Is(err, errInvalidArgument) { - return rCfg, err - } - logger.CriticalIf(ctx, err) + if err != nil && !errors.Is(err, BucketReplicationConfigNotFound{Bucket: bucketName}) { + return rCfg, err } - return rCfg, err + return rCfg, nil } // validateReplicationDestination returns error if replication destination bucket missing or not configured // It also returns true if replication destination is same as this server. 
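// Illustrative sketch (not part of the patch): the test hunk above replaces
// context.Background() with t.Context(), added in Go 1.24. The returned
// context is cancelled shortly before the test finishes, so anything the test
// starts with it stops instead of leaking into later tests. The test and
// helper names below are hypothetical, not MinIO code.
package cmd

import (
	"context"
	"testing"
)

func parseDecisionStub(ctx context.Context, in string) (string, error) { return in, ctx.Err() }

func TestDecisionRoundTripSketch(t *testing.T) {
	ctx := t.Context() // cancelled automatically when this test ends
	if _, err := parseDecisionStub(ctx, "example-input"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}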
-func validateReplicationDestination(ctx context.Context, bucket string, rCfg *replication.Config, checkRemote bool) (bool, APIError) { +func validateReplicationDestination(ctx context.Context, bucket string, rCfg *replication.Config, opts *validateReplicationDestinationOptions) (bool, APIError) { + if opts == nil { + opts = &validateReplicationDestinationOptions{} + } var arns []string if rCfg.RoleArn != "" { arns = append(arns, rCfg.RoleArn) @@ -117,7 +117,7 @@ func validateReplicationDestination(ctx context.Context, bucket string, rCfg *re if clnt == nil { return sameTarget, toAPIError(ctx, BucketRemoteTargetNotFound{Bucket: bucket}) } - if checkRemote { // validate remote bucket + if opts.CheckRemoteBucket { // validate remote bucket found, err := clnt.BucketExists(ctx, arn.Bucket) if err != nil { return sameTarget, errorCodes.ToAPIErrWithErr(ErrRemoteDestinationNotFoundError, err) @@ -137,24 +137,30 @@ func validateReplicationDestination(ctx context.Context, bucket string, rCfg *re } } } - // validate replication ARN against target endpoint - c := globalBucketTargetSys.GetRemoteTargetClient(bucket, arnStr) - if c != nil { - if err := checkRemoteEndpoint(ctx, c.EndpointURL()); err != nil { - switch err.(type) { - case BucketRemoteIdenticalToSource: - return true, errorCodes.ToAPIErrWithErr(ErrBucketRemoteIdenticalToSource, fmt.Errorf("remote target endpoint %s is self referential", c.EndpointURL().String())) - default: + // if checked bucket, then check the ready is unnecessary + if !opts.CheckRemoteBucket && opts.CheckReady { + endpoint := clnt.EndpointURL().String() + if errInt, ok := opts.checkReadyErr.Load(endpoint); !ok { + err = checkRemoteEndpoint(ctx, clnt.EndpointURL()) + opts.checkReadyErr.Store(endpoint, err) + } else { + if errInt == nil { + err = nil + } else { + err, _ = errInt.(error) } } - if c.EndpointURL().String() == clnt.EndpointURL().String() { - selfTarget, _ := isLocalHost(clnt.EndpointURL().Hostname(), clnt.EndpointURL().Port(), globalMinioPort) - if !sameTarget { - sameTarget = selfTarget - } - continue + switch err.(type) { + case BucketRemoteIdenticalToSource: + return true, errorCodes.ToAPIErrWithErr(ErrBucketRemoteIdenticalToSource, fmt.Errorf("remote target endpoint %s is self referential", clnt.EndpointURL().String())) + default: } } + // validate replication ARN against target endpoint + selfTarget, _ := isLocalHost(clnt.EndpointURL().Hostname(), clnt.EndpointURL().Port(), globalMinioPort) + if !sameTarget { + sameTarget = selfTarget + } } if len(arns) == 0 { @@ -247,27 +253,33 @@ func getMustReplicateOptions(userDefined map[string]string, userTags string, sta func mustReplicate(ctx context.Context, bucket, object string, mopts mustReplicateOptions) (dsc ReplicateDecision) { // object layer not initialized we return with no decision. if newObjectLayerFn() == nil { - return + return dsc } // Disable server-side replication on object prefixes which are excluded // from versioning via the MinIO bucket versioning extension. 
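// Illustrative sketch (not part of the patch): validateReplicationDestination
// now probes each remote endpoint at most once per call by caching the result
// in opts.checkReadyErr (Load/Store semantics, presumably a sync.Map or an
// equivalent concurrent map). The general shape, with a hypothetical probe
// function in place of MinIO's checkRemoteEndpoint:
package main

import (
	"errors"
	"fmt"
	"sync"
)

type readinessCache struct {
	results sync.Map // endpoint -> error (nil means the endpoint answered)
}

// check runs probe once per endpoint and replays the cached outcome afterwards.
func (c *readinessCache) check(endpoint string, probe func(string) error) error {
	if v, ok := c.results.Load(endpoint); ok {
		err, _ := v.(error) // a cached nil stays nil
		return err
	}
	err := probe(endpoint)
	c.results.Store(endpoint, err)
	return err
}

func main() {
	var cache readinessCache
	probe := func(ep string) error {
		fmt.Println("probing", ep)
		if ep == "" {
			return errors.New("empty endpoint")
		}
		return nil
	}
	_ = cache.check("https://replica.example.net", probe) // probes the endpoint
	_ = cache.check("https://replica.example.net", probe) // answered from the cache
}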
if !globalBucketVersioningSys.PrefixEnabled(bucket, object) { - return + return dsc } replStatus := mopts.ReplicationStatus() if replStatus == replication.Replica && !mopts.isMetadataReplication() { - return + return dsc } if mopts.replicationRequest { // incoming replication request on target cluster - return + return dsc } + cfg, err := getReplicationConfig(ctx, bucket) if err != nil { - return + replLogOnceIf(ctx, err, bucket) + return dsc + } + if cfg == nil { + return dsc } + opts := replication.ObjectOpts{ Name: object, SSEC: crypto.SSEC.IsEncrypted(mopts.meta), @@ -315,6 +327,7 @@ var standardHeaders = []string{ func hasReplicationRules(ctx context.Context, bucket string, objects []ObjectToDelete) bool { c, err := getReplicationConfig(ctx, bucket) if err != nil || c == nil { + replLogOnceIf(ctx, err, bucket) return false } for _, obj := range objects { @@ -334,16 +347,17 @@ func isStandardHeader(matchHeaderKey string) bool { func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, delOpts ObjectOptions, gerr error) (dsc ReplicateDecision) { rcfg, err := getReplicationConfig(ctx, bucket) if err != nil || rcfg == nil { - return + replLogOnceIf(ctx, err, bucket) + return dsc } // If incoming request is a replication request, it does not need to be re-replicated. if delOpts.ReplicationRequest { - return + return dsc } // Skip replication if this object's prefix is excluded from being // versioned. if !delOpts.Versioned { - return + return dsc } opts := replication.ObjectOpts{ Name: dobj.ObjectName, @@ -377,7 +391,7 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet // can be the case that other cluster is down and duplicate `mc rm --vid` // is issued - this still needs to be replicated back to the other target if !oi.VersionPurgeStatus.Empty() { - replicate = oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed + replicate = oi.VersionPurgeStatus == replication.VersionPurgePending || oi.VersionPurgeStatus == replication.VersionPurgeFailed dsc.Set(newReplicateTargetDecision(tgtArn, replicate, sync)) } continue @@ -426,7 +440,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj rcfg, err := getReplicationConfig(ctx, bucket) if err != nil || rcfg == nil { - logger.LogOnceIf(ctx, fmt.Errorf("unable to obtain replication config for bucket: %s: err: %s", bucket, err), bucket) + replLogOnceIf(ctx, fmt.Errorf("unable to obtain replication config for bucket: %s: err: %s", bucket, err), bucket) sendEvent(eventArgs{ BucketName: bucket, Object: ObjectInfo{ @@ -443,7 +457,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj } dsc, err := parseReplicateDecision(ctx, bucket, dobj.ReplicationState.ReplicateDecisionStr) if err != nil { - logger.LogOnceIf(ctx, fmt.Errorf("unable to parse replication decision parameters for bucket: %s, err: %s, decision: %s", + replLogOnceIf(ctx, fmt.Errorf("unable to parse replication decision parameters for bucket: %s, err: %s, decision: %s", bucket, err, dobj.ReplicationState.ReplicateDecisionStr), dobj.ReplicationState.ReplicateDecisionStr) sendEvent(eventArgs{ BucketName: bucket, @@ -465,7 +479,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj lk := objectAPI.NewNSLock(bucket, "/[replicate]/"+dobj.ObjectName) lkctx, err := lk.GetLock(ctx, globalOperationTimeout) if err != nil { - globalReplicationPool.queueMRFSave(dobj.ToMRFEntry()) + 
globalReplicationPool.Get().queueMRFSave(dobj.ToMRFEntry()) sendEvent(eventArgs{ BucketName: bucket, Object: ObjectInfo{ @@ -497,7 +511,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj tgtClnt := globalBucketTargetSys.GetRemoteTargetClient(bucket, tgtEntry.Arn) if tgtClnt == nil { // Skip stale targets if any and log them to be missing at least once. - logger.LogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtEntry.Arn), tgtEntry.Arn) + replLogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtEntry.Arn), tgtEntry.Arn) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -535,7 +549,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj // to decrement pending count later. for _, rinfo := range rinfos.Targets { if rinfo.ReplicationStatus != rinfo.PrevReplicationStatus { - globalReplicationStats.Update(dobj.Bucket, rinfo, replicationStatus, + globalReplicationStats.Load().Update(dobj.Bucket, rinfo, replicationStatus, prevStatus) } } @@ -543,7 +557,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj eventName := event.ObjectReplicationComplete if replicationStatus == replication.Failed { eventName = event.ObjectReplicationFailed - globalReplicationPool.queueMRFSave(dobj.ToMRFEntry()) + globalReplicationPool.Get().queueMRFSave(dobj.ToMRFEntry()) } drs := getReplicationState(rinfos, dobj.ReplicationState, dobj.VersionID) if replicationStatus != prevStatus { @@ -603,13 +617,13 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI if dobj.VersionID == "" && rinfo.PrevReplicationStatus == replication.Completed && dobj.OpType != replication.ExistingObjectReplicationType { rinfo.ReplicationStatus = rinfo.PrevReplicationStatus - return + return rinfo } - if dobj.VersionID != "" && rinfo.VersionPurgeStatus == Complete { - return + if dobj.VersionID != "" && rinfo.VersionPurgeStatus == replication.VersionPurgeComplete { + return rinfo } if globalBucketTargetSys.isOffline(tgt.EndpointURL()) { - logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN), "replication-target-offline-delete-"+tgt.ARN) + replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN), "replication-target-offline-delete-"+tgt.ARN) sendEvent(eventArgs{ BucketName: dobj.Bucket, Object: ObjectInfo{ @@ -625,9 +639,9 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI if dobj.VersionID == "" { rinfo.ReplicationStatus = replication.Failed } else { - rinfo.VersionPurgeStatus = Failed + rinfo.VersionPurgeStatus = replication.VersionPurgeFailed } - return + return rinfo } // early return if already replicated delete marker for existing object replication/ healing delete markers if dobj.DeleteMarkerVersionID != "" { @@ -644,13 +658,13 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI // delete marker already replicated if dobj.VersionID == "" && rinfo.VersionPurgeStatus.Empty() { rinfo.ReplicationStatus = replication.Completed - return + return rinfo } case isErrObjectNotFound(serr), isErrVersionNotFound(serr): // version being purged is already not found on target. 
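// Illustrative sketch (not part of the patch): the package-level pool and
// stats are no longer plain pointers; callers reach them through
// globalReplicationPool.Get() and globalReplicationStats.Load(). A minimal
// stand-in for that set-once / read-many shape, built only on the standard
// library (MinIO's internal once.Singleton is not reproduced here):
package main

import (
	"fmt"
	"sync/atomic"
)

// lazyGlobal holds a value that is installed once at startup and then read
// from many goroutines without further locking.
type lazyGlobal[T any] struct{ p atomic.Pointer[T] }

func (g *lazyGlobal[T]) Set(v *T) { g.p.Store(v) }
func (g *lazyGlobal[T]) Get() *T  { return g.p.Load() } // nil until Set is called

type pool struct{ workers int }

var globalPool lazyGlobal[pool]

func main() {
	globalPool.Set(&pool{workers: 8}) // done once during initialization
	if p := globalPool.Get(); p != nil {
		fmt.Println("workers:", p.workers)
	}
}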
if !rinfo.VersionPurgeStatus.Empty() { - rinfo.VersionPurgeStatus = Complete - return + rinfo.VersionPurgeStatus = replication.VersionPurgeComplete + return rinfo } case isErrReadQuorum(serr), isErrWriteQuorum(serr): // destination has some quorum issues, perform removeObject() anyways @@ -664,7 +678,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI if err != nil && !toi.ReplicationReady { rinfo.ReplicationStatus = replication.Failed rinfo.Err = err - return + return rinfo } } } @@ -682,9 +696,9 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI if dobj.VersionID == "" { rinfo.ReplicationStatus = replication.Failed } else { - rinfo.VersionPurgeStatus = Failed + rinfo.VersionPurgeStatus = replication.VersionPurgeFailed } - logger.LogIf(ctx, fmt.Errorf("unable to replicate delete marker to %s: %s/%s(%s): %w", tgt.EndpointURL(), tgt.Bucket, dobj.ObjectName, versionID, rmErr)) + replLogIf(ctx, fmt.Errorf("unable to replicate delete marker to %s: %s/%s(%s): %w", tgt.EndpointURL(), tgt.Bucket, dobj.ObjectName, versionID, rmErr)) if rmErr != nil && minio.IsNetworkOrHostDown(rmErr, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) { globalBucketTargetSys.markOffline(tgt.EndpointURL()) } @@ -692,10 +706,10 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI if dobj.VersionID == "" { rinfo.ReplicationStatus = replication.Completed } else { - rinfo.VersionPurgeStatus = Complete + rinfo.VersionPurgeStatus = replication.VersionPurgeComplete } } - return + return rinfo } func getCopyObjMetadata(oi ObjectInfo, sc string) map[string]string { @@ -761,16 +775,43 @@ func (m caseInsensitiveMap) Lookup(key string) (string, bool) { return "", false } -func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts minio.PutObjectOptions, err error) { +func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts minio.PutObjectOptions, isMP bool, err error) { meta := make(map[string]string) + isSSEC := crypto.SSEC.IsEncrypted(objInfo.UserDefined) + for k, v := range objInfo.UserDefined { - if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) { - continue + _, isValidSSEHeader := validSSEReplicationHeaders[k] + // In case of SSE-C objects copy the allowed internal headers as well + if !isSSEC || !isValidSSEHeader { + if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) { + continue + } + if isStandardHeader(k) { + continue + } } - if isStandardHeader(k) { - continue + if isValidSSEHeader { + meta[validSSEReplicationHeaders[k]] = v + } else { + meta[k] = v + } + } + isMP = objInfo.isMultipart() + if len(objInfo.Checksum) > 0 { + // Add encrypted CRC to metadata for SSE-C objects. + if isSSEC { + meta[ReplicationSsecChecksumHeader] = base64.StdEncoding.EncodeToString(objInfo.Checksum) + } else { + cs, mp := getCRCMeta(objInfo, 0, nil) + // Set object checksum. + maps.Copy(meta, cs) + isMP = mp + if !objInfo.isMultipart() && cs[xhttp.AmzChecksumType] == xhttp.AmzChecksumTypeFullObject { + // For objects where checksum is full object, it will be the same. + // Therefore, we use the cheaper PutObject replication. 
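// Illustrative sketch (not part of the patch): for SSE-C objects the checksum
// stored with the object covers the encrypted payload and cannot be recomputed
// by the target, so the hunk above forwards it verbatim, base64-encoded, in the
// X-Minio-Replication-Ssec-Crc metadata header. The round trip is ordinary
// encoding/base64; the checksum bytes below are made up.
package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
)

const replicationSsecChecksumHeader = "X-Minio-Replication-Ssec-Crc"

func main() {
	raw := []byte{0xde, 0xad, 0xbe, 0xef} // stand-in for objInfo.Checksum
	meta := map[string]string{
		replicationSsecChecksumHeader: base64.StdEncoding.EncodeToString(raw),
	}

	// The receiving side decodes the header back to the original bytes.
	got, err := base64.StdEncoding.DecodeString(meta[replicationSsecChecksumHeader])
	if err != nil {
		panic(err)
	}
	fmt.Println("checksum preserved:", bytes.Equal(raw, got)) // true
}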
+ isMP = false + } } - meta[k] = v } if sc == "" && (objInfo.StorageClass == storageclass.STANDARD || objInfo.StorageClass == storageclass.RRS) { @@ -799,7 +840,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put if tagTmstampStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+TaggingTimestamp]; ok { tagTimestamp, err = time.Parse(time.RFC3339Nano, tagTmstampStr) if err != nil { - return putOpts, err + return putOpts, false, err } } putOpts.Internal.TaggingTimestamp = tagTimestamp @@ -823,7 +864,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put if retainDateStr, ok := lkMap.Lookup(xhttp.AmzObjectLockRetainUntilDate); ok { rdate, err := amztime.ISO8601Parse(retainDateStr) if err != nil { - return putOpts, err + return putOpts, false, err } putOpts.RetainUntilDate = rdate // set retention timestamp in opts @@ -831,7 +872,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put if retainTmstampStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockRetentionTimestamp]; ok { retTimestamp, err = time.Parse(time.RFC3339Nano, retainTmstampStr) if err != nil { - return putOpts, err + return putOpts, false, err } } putOpts.Internal.RetentionTimestamp = retTimestamp @@ -843,7 +884,7 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put if lholdTmstampStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockLegalHoldTimestamp]; ok { lholdTimestamp, err = time.Parse(time.RFC3339Nano, lholdTmstampStr) if err != nil { - return putOpts, err + return putOpts, false, err } } putOpts.Internal.LegalholdTimestamp = lholdTimestamp @@ -851,7 +892,25 @@ func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (put if crypto.S3.IsEncrypted(objInfo.UserDefined) { putOpts.ServerSideEncryption = encrypt.NewSSE() } - return + + if crypto.S3KMS.IsEncrypted(objInfo.UserDefined) { + // If KMS key ID replication is enabled (as by default) + // we include the object's KMS key ID. In any case, we + // always set the SSE-KMS header. If no KMS key ID is + // specified, MinIO is supposed to use whatever default + // config applies on the site or bucket. 
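// Illustrative sketch (not part of the patch): when the source version is
// SSE-KMS encrypted, the replication PUT carries an SSE-KMS option and, if key
// ID replication is enabled, pins the same key ID; an empty key ID lets the
// target fall back to its own default KMS configuration. A minimal minio-go
// wiring of that option (the key ID value and the propagate flag are
// assumptions for this sketch):
package main

import (
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/encrypt"
)

func ssekmsPutOptions(propagateKeyID bool, sourceKeyID string) (minio.PutObjectOptions, error) {
	var opts minio.PutObjectOptions

	keyID := "" // empty: let the remote site pick its default key
	if propagateKeyID {
		keyID = sourceKeyID
	}
	sse, err := encrypt.NewSSEKMS(keyID, nil) // nil: no encryption context
	if err != nil {
		return opts, err
	}
	opts.ServerSideEncryption = sse
	return opts, nil
}

func main() {
	opts, err := ssekmsPutOptions(true, "replica-site-key") // hypothetical key ID
	if err != nil {
		log.Fatal(err)
	}
	log.Println("SSE option set:", opts.ServerSideEncryption != nil)
}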
+ var keyID string + if kms.ReplicateKeyID() { + keyID = objInfo.KMSKeyID() + } + + sseEnc, err := encrypt.NewSSEKMS(keyID, nil) + if err != nil { + return putOpts, false, err + } + putOpts.ServerSideEncryption = sseEnc + } + return putOpts, isMP, err } type replicationAction string @@ -908,7 +967,9 @@ func getReplicationAction(oi1 ObjectInfo, oi2 minio.ObjectInfo, opType replicati } t, _ := tags.ParseObjectTags(oi1.UserTags) - if (oi2.UserTagCount > 0 && !reflect.DeepEqual(oi2.UserTags, t.ToMap())) || (oi2.UserTagCount != len(t.ToMap())) { + oi2Map := make(map[string]string) + maps.Copy(oi2Map, oi2.UserTags) + if (oi2.UserTagCount > 0 && !reflect.DeepEqual(oi2Map, t.ToMap())) || (oi2.UserTagCount != len(t.ToMap())) { return replicateMetadata } @@ -989,8 +1050,8 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje object := ri.Name cfg, err := getReplicationConfig(ctx, bucket) - if err != nil { - logger.LogOnceIf(ctx, err, "get-replication-config-"+bucket) + if err != nil || cfg == nil { + replLogOnceIf(ctx, err, "get-replication-config-"+bucket) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1017,7 +1078,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje UserAgent: "Internal: [Replication]", Host: globalLocalNodeName, }) - globalReplicationPool.queueMRFSave(ri.ToMRFEntry()) + globalReplicationPool.Get().queueMRFSave(ri.ToMRFEntry()) return } ctx = lkctx.Context() @@ -1029,7 +1090,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje for _, tgtArn := range tgtArns { tgt := globalBucketTargetSys.GetRemoteTargetClient(bucket, tgtArn) if tgt == nil { - logger.LogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtArn), tgtArn) + replLogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtArn), tgtArn) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1102,7 +1163,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje for _, rinfo := range rinfos.Targets { if rinfo.ReplicationStatus != rinfo.PrevReplicationStatus { rinfo.OpType = opType // update optype to reflect correct operation. 
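// Illustrative sketch (not part of the patch): several manual copy loops in
// this file become maps.Copy from the standard "maps" package, and the
// generated msgp code earlier in the patch swaps its delete-every-key loops
// for the clear builtin; both arrived with Go 1.21 and behave exactly like the
// loops they replace.
package main

import (
	"fmt"
	"maps"
)

func main() {
	meta := map[string]string{"x-amz-storage-class": "STANDARD"}
	checksums := map[string]string{"x-amz-checksum-crc32c": "6Le+Qw=="} // made-up value

	maps.Copy(meta, checksums) // same as: for k, v := range checksums { meta[k] = v }
	fmt.Println(len(meta))     // 2

	clear(meta)            // same as: for k := range meta { delete(meta, k) }
	fmt.Println(len(meta)) // 0
}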
- globalReplicationStats.Update(bucket, rinfo, rinfo.ReplicationStatus, rinfo.PrevReplicationStatus) + globalReplicationStats.Load().Update(bucket, rinfo, rinfo.ReplicationStatus, rinfo.PrevReplicationStatus) } } } @@ -1122,7 +1183,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje ri.EventType = ReplicateMRF ri.ReplicationStatusInternal = rinfos.ReplicationStatusInternal() ri.RetryCount++ - globalReplicationPool.queueMRFSave(ri.ToMRFEntry()) + globalReplicationPool.Get().queueMRFSave(ri.ToMRFEntry()) } } @@ -1147,11 +1208,11 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj if ri.TargetReplicationStatus(tgt.ARN) == replication.Completed && !ri.ExistingObjResync.Empty() && !ri.ExistingObjResync.mustResyncTarget(tgt.ARN) { rinfo.ReplicationStatus = replication.Completed rinfo.ReplicationResynced = true - return + return rinfo } if globalBucketTargetSys.isOffline(tgt.EndpointURL()) { - logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline"+tgt.ARN) + replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline"+tgt.ARN) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1159,16 +1220,17 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj UserAgent: "Internal: [Replication]", Host: globalLocalNodeName, }) - return + return rinfo } versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object) versionSuspended := globalBucketVersioningSys.PrefixSuspended(bucket, object) gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, ObjectOptions{ - VersionID: ri.VersionID, - Versioned: versioned, - VersionSuspended: versionSuspended, + VersionID: ri.VersionID, + Versioned: versioned, + VersionSuspended: versionSuspended, + ReplicationRequest: true, }) if err != nil { if !isErrVersionNotFound(err) && !isErrObjectNotFound(err) { @@ -1180,9 +1242,9 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj UserAgent: "Internal: [Replication]", Host: globalLocalNodeName, }) - logger.LogOnceIf(ctx, fmt.Errorf("unable to read source object %s/%s(%s): %w", bucket, object, objInfo.VersionID, err), object+":"+objInfo.VersionID) + replLogOnceIf(ctx, fmt.Errorf("unable to read source object %s/%s(%s): %w", bucket, object, objInfo.VersionID, err), object+":"+objInfo.VersionID) } - return + return rinfo } defer gr.Close() @@ -1191,21 +1253,27 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj // make sure we have the latest metadata for metrics calculation rinfo.PrevReplicationStatus = objInfo.TargetReplicationStatus(tgt.ARN) - size, err := objInfo.GetActualSize() - if err != nil { - logger.LogIf(ctx, err) - sendEvent(eventArgs{ - EventName: event.ObjectReplicationNotTracked, - BucketName: bucket, - Object: objInfo, - UserAgent: "Internal: [Replication]", - Host: globalLocalNodeName, - }) - return + // Set the encrypted size for SSE-C objects + var size int64 + if crypto.SSEC.IsEncrypted(objInfo.UserDefined) { + size = objInfo.Size + } else { + size, err = objInfo.GetActualSize() + if err != nil { + replLogIf(ctx, err) + sendEvent(eventArgs{ + EventName: event.ObjectReplicationNotTracked, + BucketName: bucket, + Object: objInfo, + UserAgent: "Internal: [Replication]", + Host: globalLocalNodeName, + }) + 
return rinfo + } } if tgt.Bucket == "" { - logger.LogIf(ctx, fmt.Errorf("unable to replicate object %s(%s), bucket is empty for target %s", objInfo.Name, objInfo.VersionID, tgt.EndpointURL())) + replLogIf(ctx, fmt.Errorf("unable to replicate object %s(%s), bucket is empty for target %s", objInfo.Name, objInfo.VersionID, tgt.EndpointURL())) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1229,9 +1297,9 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj // use core client to avoid doing multipart on PUT c := &minio.Core{Client: tgt.Client} - putOpts, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo) + putOpts, isMP, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo) if err != nil { - logger.LogIf(ctx, fmt.Errorf("failure setting options for replication bucket:%s err:%w", bucket, err)) + replLogIf(ctx, fmt.Errorf("failure setting options for replication bucket:%s err:%w", bucket, err)) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1239,7 +1307,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj UserAgent: "Internal: [Replication]", Host: globalLocalNodeName, }) - return + return rinfo } var headerSize int @@ -1255,32 +1323,28 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj HeaderSize: headerSize, } newCtx := ctx - if globalBucketMonitor.IsThrottled(bucket, tgt.ARN) { + if globalBucketMonitor.IsThrottled(bucket, tgt.ARN) && objInfo.Size < minLargeObjSize { var cancel context.CancelFunc newCtx, cancel = context.WithTimeout(ctx, throttleDeadline) defer cancel() } r := bandwidth.NewMonitoredReader(newCtx, globalBucketMonitor, gr, opts) - if objInfo.isMultipart() { - if rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object, - r, objInfo, putOpts); rinfo.Err != nil { - if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" { - rinfo.ReplicationStatus = replication.Failed - logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s (target: %s)", bucket, objInfo.Name, objInfo.VersionID, rinfo.Err, tgt.EndpointURL())) - } - } + if isMP { + rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object, r, objInfo, putOpts) } else { - if _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts); rinfo.Err != nil { - if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" { - rinfo.ReplicationStatus = replication.Failed - logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s (target: %s)", bucket, objInfo.Name, objInfo.VersionID, rinfo.Err, tgt.EndpointURL())) - } - } + _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts) } - if rinfo.Err != nil && minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) { - globalBucketTargetSys.markOffline(tgt.EndpointURL()) + if rinfo.Err != nil { + if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" { + rinfo.ReplicationStatus = replication.Failed + replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): to (target: %s): %w", + bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err)) + } + if minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) { + globalBucketTargetSys.markOffline(tgt.EndpointURL()) + } } - return + return rinfo } // replicateAll replicates metadata for specified version of the object to destination 
bucket @@ -1308,7 +1372,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object } if globalBucketTargetSys.isOffline(tgt.EndpointURL()) { - logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline-heal"+tgt.ARN) + replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline-heal"+tgt.ARN) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1316,17 +1380,19 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object UserAgent: "Internal: [Replication]", Host: globalLocalNodeName, }) - return + return rinfo } versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object) versionSuspended := globalBucketVersioningSys.PrefixSuspended(bucket, object) - gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, ObjectOptions{ - VersionID: ri.VersionID, - Versioned: versioned, - VersionSuspended: versionSuspended, - }) + gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, + ObjectOptions{ + VersionID: ri.VersionID, + Versioned: versioned, + VersionSuspended: versionSuspended, + ReplicationRequest: true, + }) if err != nil { if !isErrVersionNotFound(err) && !isErrObjectNotFound(err) { objInfo := ri.ToObjectInfo() @@ -1337,13 +1403,14 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object UserAgent: "Internal: [Replication]", Host: globalLocalNodeName, }) - logger.LogIf(ctx, fmt.Errorf("unable to replicate to target %s for %s/%s(%s): %w", tgt.EndpointURL(), bucket, object, objInfo.VersionID, err)) + replLogIf(ctx, fmt.Errorf("unable to replicate to target %s for %s/%s(%s): %w", tgt.EndpointURL(), bucket, object, objInfo.VersionID, err)) } - return + return rinfo } defer gr.Close() objInfo := gr.ObjInfo + // make sure we have the latest metadata for metrics calculation rinfo.PrevReplicationStatus = objInfo.TargetReplicationStatus(tgt.ARN) @@ -1351,12 +1418,12 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object if objInfo.TargetReplicationStatus(tgt.ARN) == replication.Completed && !ri.ExistingObjResync.Empty() && !ri.ExistingObjResync.mustResyncTarget(tgt.ARN) { rinfo.ReplicationStatus = replication.Completed rinfo.ReplicationResynced = true - return + return rinfo } size, err := objInfo.GetActualSize() if err != nil { - logger.LogIf(ctx, err) + replLogIf(ctx, err) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1364,11 +1431,17 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object UserAgent: "Internal: [Replication]", Host: globalLocalNodeName, }) - return + return rinfo + } + + // Set the encrypted size for SSE-C objects + isSSEC := crypto.SSEC.IsEncrypted(objInfo.UserDefined) + if isSSEC { + size = objInfo.Size } if tgt.Bucket == "" { - logger.LogIf(ctx, fmt.Errorf("unable to replicate object %s(%s) to %s, target bucket is missing", objInfo.Name, objInfo.VersionID, tgt.EndpointURL())) + replLogIf(ctx, fmt.Errorf("unable to replicate object %s(%s) to %s, target bucket is missing", objInfo.Name, objInfo.VersionID, tgt.EndpointURL())) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1385,20 +1458,21 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object } rinfo.Duration = 
time.Since(startTime) }() - - oi, cerr := tgt.StatObject(ctx, tgt.Bucket, object, minio.StatObjectOptions{ + sOpts := minio.StatObjectOptions{ VersionID: objInfo.VersionID, Internal: minio.AdvancedGetOptions{ ReplicationProxyRequest: "false", }, - }) + } + sOpts.Set(xhttp.AmzTagDirective, "ACCESS") + oi, cerr := tgt.StatObject(ctx, tgt.Bucket, object, sOpts) if cerr == nil { rAction = getReplicationAction(objInfo, oi, ri.OpType) rinfo.ReplicationStatus = replication.Completed if rAction == replicateNone { if ri.OpType == replication.ExistingObjectReplicationType && objInfo.ModTime.Unix() > oi.LastModified.Unix() && objInfo.VersionID == nullVersionID { - logger.LogIf(ctx, fmt.Errorf("unable to replicate %s/%s (null). Newer version exists on target %s", bucket, object, tgt.EndpointURL())) + replLogIf(ctx, fmt.Errorf("unable to replicate %s/%s (null). Newer version exists on target %s", bucket, object, tgt.EndpointURL())) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1420,9 +1494,16 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object rinfo.ReplicationAction = rAction rinfo.ReplicationStatus = replication.Completed } - return + return rinfo } } else { + // SSEC objects will refuse HeadObject without the decryption key. + // Ignore the error, since we know the object exists and versioning prevents overwriting existing versions. + if isSSEC && strings.Contains(cerr.Error(), errorCodes[ErrSSEEncryptedObject].Description) { + rinfo.ReplicationStatus = replication.Completed + rinfo.ReplicationAction = replicateNone + goto applyAction + } // if target returns error other than NoSuchKey, defer replication attempt if minio.IsNetworkOrHostDown(cerr, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) { globalBucketTargetSys.markOffline(tgt.EndpointURL()) @@ -1438,7 +1519,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object rAction = replicateAll default: rinfo.Err = cerr - logger.LogIf(ctx, fmt.Errorf("unable to replicate %s/%s (%s). Target (%s) returned %s error on HEAD", + replLogIf(ctx, fmt.Errorf("unable to replicate %s/%s (%s). 
Target (%s) returned %s error on HEAD", bucket, object, objInfo.VersionID, tgt.EndpointURL(), cerr)) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, @@ -1447,9 +1528,10 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object UserAgent: "Internal: [Replication]", Host: globalLocalNodeName, }) - return + return rinfo } } +applyAction: rinfo.ReplicationStatus = replication.Completed rinfo.Size = size rinfo.ReplicationAction = rAction @@ -1468,19 +1550,30 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object ReplicationRequest: true, // always set this to distinguish between `mc mirror` replication and serverside }, } - if tagTmStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+TaggingTimestamp]; ok { + // default timestamps to ModTime unless present in metadata + lkMap := caseInsensitiveMap(objInfo.UserDefined) + if _, ok := lkMap.Lookup(xhttp.AmzObjectLockLegalHold); ok { + dstOpts.Internal.LegalholdTimestamp = objInfo.ModTime + } + if _, ok := lkMap.Lookup(xhttp.AmzObjectLockRetainUntilDate); ok { + dstOpts.Internal.RetentionTimestamp = objInfo.ModTime + } + if objInfo.UserTags != "" { + dstOpts.Internal.TaggingTimestamp = objInfo.ModTime + } + if tagTmStr, ok := lkMap.Lookup(ReservedMetadataPrefixLower + TaggingTimestamp); ok { ondiskTimestamp, err := time.Parse(time.RFC3339, tagTmStr) if err == nil { dstOpts.Internal.TaggingTimestamp = ondiskTimestamp } } - if retTmStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockRetentionTimestamp]; ok { + if retTmStr, ok := lkMap.Lookup(ReservedMetadataPrefixLower + ObjectLockRetentionTimestamp); ok { ondiskTimestamp, err := time.Parse(time.RFC3339, retTmStr) if err == nil { dstOpts.Internal.RetentionTimestamp = ondiskTimestamp } } - if lholdTmStr, ok := objInfo.UserDefined[ReservedMetadataPrefixLower+ObjectLockLegalHoldTimestamp]; ok { + if lholdTmStr, ok := lkMap.Lookup(ReservedMetadataPrefixLower + ObjectLockLegalHoldTimestamp); ok { ondiskTimestamp, err := time.Parse(time.RFC3339, lholdTmStr) if err == nil { dstOpts.Internal.LegalholdTimestamp = ondiskTimestamp @@ -1488,13 +1581,12 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object } if _, rinfo.Err = c.CopyObject(ctx, tgt.Bucket, object, tgt.Bucket, object, getCopyObjMetadata(objInfo, tgt.StorageClass), srcOpts, dstOpts); rinfo.Err != nil { rinfo.ReplicationStatus = replication.Failed - logger.LogIf(ctx, fmt.Errorf("unable to replicate metadata for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err)) + replLogIf(ctx, fmt.Errorf("unable to replicate metadata for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err)) } } else { - var putOpts minio.PutObjectOptions - putOpts, err = putReplicationOpts(ctx, tgt.StorageClass, objInfo) + putOpts, isMP, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo) if err != nil { - logger.LogIf(ctx, fmt.Errorf("failed to set replicate options for object %s/%s(%s) (target %s) err:%w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), err)) + replLogIf(ctx, fmt.Errorf("failed to set replicate options for object %s/%s(%s) (target %s) err:%w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), err)) sendEvent(eventArgs{ EventName: event.ObjectReplicationNotTracked, BucketName: bucket, @@ -1502,7 +1594,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object UserAgent: 
"Internal: [Replication]", Host: globalLocalNodeName, }) - return + return rinfo } var headerSize int for k, v := range putOpts.Header() { @@ -1517,37 +1609,29 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object HeaderSize: headerSize, } newCtx := ctx - if globalBucketMonitor.IsThrottled(bucket, tgt.ARN) { + if globalBucketMonitor.IsThrottled(bucket, tgt.ARN) && objInfo.Size < minLargeObjSize { var cancel context.CancelFunc newCtx, cancel = context.WithTimeout(ctx, throttleDeadline) defer cancel() } r := bandwidth.NewMonitoredReader(newCtx, globalBucketMonitor, gr, opts) - if objInfo.isMultipart() { - if rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object, - r, objInfo, putOpts); rinfo.Err != nil { - if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" { - rinfo.ReplicationStatus = replication.Failed - logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err)) - } else { - rinfo.ReplicationStatus = replication.Completed - } - } + if isMP { + rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object, r, objInfo, putOpts) } else { - if _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts); rinfo.Err != nil { - if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" { - rinfo.ReplicationStatus = replication.Failed - logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err)) - } else { - rinfo.ReplicationStatus = replication.Completed - } - } + _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts) } - if rinfo.Err != nil && minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) { - globalBucketTargetSys.markOffline(tgt.EndpointURL()) + if rinfo.Err != nil { + if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" { + rinfo.ReplicationStatus = replication.Failed + replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", + bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err)) + } + if minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) { + globalBucketTargetSys.markOffline(tgt.EndpointURL()) + } } } - return + return rinfo } func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, object string, r io.Reader, objInfo ObjectInfo, opts minio.PutObjectOptions) (err error) { @@ -1563,6 +1647,9 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob if err == nil { break } + if minio.ToErrorResponse(err).Code == "PreconditionFailed" { + return nil + } attempts++ time.Sleep(time.Duration(rand.Int63n(int64(time.Second)))) } @@ -1577,14 +1664,10 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob for attempts <= 3 { actx, acancel := context.WithTimeout(ctx, time.Minute) aerr := c.AbortMultipartUpload(actx, bucket, object, uploadID) + acancel() if aerr == nil { - acancel() return } - acancel() - logger.LogIf(actx, - fmt.Errorf("trying %s: Unable to cleanup failed multipart replication %s on remote %s/%s: %w - this may consume space on remote cluster", - humanize.Ordinal(attempts), uploadID, bucket, object, aerr)) attempts++ time.Sleep(time.Duration(rand.Int63n(int64(time.Second)))) } @@ -1592,37 +1675,80 @@ func replicateObjectWithMultipart(ctx 
context.Context, c *minio.Core, bucket, ob }() var ( - hr *hash.Reader - pInfo minio.ObjectPart + hr *hash.Reader + isSSEC = crypto.SSEC.IsEncrypted(objInfo.UserDefined) ) + var objectSize int64 for _, partInfo := range objInfo.Parts { - hr, err = hash.NewReader(ctx, io.LimitReader(r, partInfo.ActualSize), partInfo.ActualSize, "", "", partInfo.ActualSize) + if isSSEC { + hr, err = hash.NewReader(ctx, io.LimitReader(r, partInfo.Size), partInfo.Size, "", "", partInfo.ActualSize) + } else { + hr, err = hash.NewReader(ctx, io.LimitReader(r, partInfo.ActualSize), partInfo.ActualSize, "", "", partInfo.ActualSize) + } if err != nil { return err } + cHeader := http.Header{} + cHeader.Add(xhttp.MinIOSourceReplicationRequest, "true") + if !isSSEC { + cs, _ := getCRCMeta(objInfo, partInfo.Number, nil) + for k, v := range cs { + cHeader.Add(k, v) + } + } popts := minio.PutObjectPartOptions{ - SSE: opts.ServerSideEncryption, + SSE: opts.ServerSideEncryption, + CustomHeader: cHeader, } - pInfo, err = c.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, hr, partInfo.ActualSize, popts) + var size int64 + if isSSEC { + size = partInfo.Size + } else { + size = partInfo.ActualSize + } + objectSize += size + pInfo, err := c.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, hr, size, popts) if err != nil { return err } - if pInfo.Size != partInfo.ActualSize { - return fmt.Errorf("Part size mismatch: got %d, want %d", pInfo.Size, partInfo.ActualSize) + if pInfo.Size != size { + return fmt.Errorf("ssec(%t): Part size mismatch: got %d, want %d", isSSEC, pInfo.Size, size) } uploadedParts = append(uploadedParts, minio.CompletePart{ - PartNumber: pInfo.PartNumber, - ETag: pInfo.ETag, + PartNumber: pInfo.PartNumber, + ETag: pInfo.ETag, + ChecksumCRC32: pInfo.ChecksumCRC32, + ChecksumCRC32C: pInfo.ChecksumCRC32C, + ChecksumSHA1: pInfo.ChecksumSHA1, + ChecksumSHA256: pInfo.ChecksumSHA256, + ChecksumCRC64NVME: pInfo.ChecksumCRC64NVME, }) } + userMeta := map[string]string{ + xhttp.MinIOReplicationActualObjectSize: objInfo.UserDefined[ReservedMetadataPrefix+"actual-size"], + } + if isSSEC && objInfo.UserDefined[ReplicationSsecChecksumHeader] != "" { + userMeta[ReplicationSsecChecksumHeader] = objInfo.UserDefined[ReplicationSsecChecksumHeader] + } + + // really big value but its okay on heavily loaded systems. This is just tail end timeout. 
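// Illustrative sketch (not part of the patch): errors from the remote are
// classified with minio-go helpers in the hunks above. "PreconditionFailed"
// means the target already holds a version that must not be overwritten, so it
// is not counted as a replication failure; network/host-down errors mark the
// target offline so further work goes to the retry queue instead. markOffline
// below is a hypothetical stand-in for the bucket-target bookkeeping.
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7"
)

func classifyReplicationErr(err error, markOffline func()) (failed bool) {
	if err == nil {
		return false
	}
	if minio.ToErrorResponse(err).Code == "PreconditionFailed" {
		return false // benign: the destination already holds this version
	}
	if minio.IsNetworkOrHostDown(err, true) {
		markOffline() // stop hammering an unreachable endpoint
	}
	return true
}

func main() {
	offline := func() { fmt.Println("target marked offline") }
	fmt.Println(classifyReplicationErr(nil, offline)) // false
}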
cctx, ccancel := context.WithTimeout(ctx, 10*time.Minute) defer ccancel() + + if len(objInfo.Checksum) > 0 { + cs, _ := getCRCMeta(objInfo, 0, nil) + for k, v := range cs { + userMeta[k] = strings.Split(v, "-")[0] + } + } _, err = c.CompleteMultipartUpload(cctx, bucket, object, uploadID, uploadedParts, minio.PutObjectOptions{ + UserMetadata: userMeta, Internal: minio.AdvancedPutOptions{ SourceMTime: objInfo.ModTime, + SourceETag: objInfo.ETag, // always set this to distinguish between `mc mirror` replication and serverside ReplicationRequest: true, }, @@ -1641,9 +1767,7 @@ func filterReplicationStatusMetadata(metadata map[string]string) map[string]stri } if !copied { dst = make(map[string]string, len(metadata)) - for k, v := range metadata { - dst[k] = v - } + maps.Copy(dst, metadata) copied = true } delete(dst, key) @@ -1705,23 +1829,27 @@ const ( ) var ( - globalReplicationPool *ReplicationPool - globalReplicationStats *ReplicationStats + globalReplicationPool = once.NewSingleton[ReplicationPool]() + globalReplicationStats atomic.Pointer[ReplicationStats] ) // ReplicationPool describes replication pool type ReplicationPool struct { // atomic ops: activeWorkers int32 + activeLrgWorkers int32 activeMRFWorkers int32 - objLayer ObjectLayer - ctx context.Context - priority string - maxWorkers int - mu sync.RWMutex - mrfMU sync.Mutex - resyncer *replicationResyncer + objLayer ObjectLayer + ctx context.Context + priority string + maxWorkers int + maxLWorkers int + stats *ReplicationStats + + mu sync.RWMutex + mrfMU sync.Mutex + resyncer *replicationResyncer // workers: workers []chan ReplicationWorkerOperation @@ -1764,7 +1892,7 @@ const ( ) // NewReplicationPool creates a pool of replication workers of specified size -func NewReplicationPool(ctx context.Context, o ObjectLayer, opts replicationPoolOpts) *ReplicationPool { +func NewReplicationPool(ctx context.Context, o ObjectLayer, opts replicationPoolOpts, stats *ReplicationStats) *ReplicationPool { var workers, failedWorkers int priority := "auto" maxWorkers := WorkerMaxLimit @@ -1792,9 +1920,13 @@ func NewReplicationPool(ctx context.Context, o ObjectLayer, opts replicationPool if maxWorkers > 0 && failedWorkers > maxWorkers { failedWorkers = maxWorkers } + maxLWorkers := LargeWorkerCount + if opts.MaxLWorkers > 0 { + maxLWorkers = opts.MaxLWorkers + } pool := &ReplicationPool{ workers: make([]chan ReplicationWorkerOperation, 0, workers), - lrgworkers: make([]chan ReplicationWorkerOperation, 0, LargeWorkerCount), + lrgworkers: make([]chan ReplicationWorkerOperation, 0, maxLWorkers), mrfReplicaCh: make(chan ReplicationWorkerOperation, 100000), mrfWorkerKillCh: make(chan struct{}, failedWorkers), resyncer: newresyncer(), @@ -1802,11 +1934,13 @@ func NewReplicationPool(ctx context.Context, o ObjectLayer, opts replicationPool mrfStopCh: make(chan struct{}, 1), ctx: ctx, objLayer: o, + stats: stats, priority: priority, maxWorkers: maxWorkers, + maxLWorkers: maxLWorkers, } - pool.AddLargeWorkers() + pool.ResizeLrgWorkers(maxLWorkers, 0) pool.ResizeWorkers(workers, 0) pool.ResizeFailedWorkers(failedWorkers) go pool.resyncer.PersistToDisk(ctx, o) @@ -1828,14 +1962,14 @@ func (p *ReplicationPool) AddMRFWorker() { } switch v := oi.(type) { case ReplicateObjectInfo: - globalReplicationStats.incQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType) + p.stats.incQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType) atomic.AddInt32(&p.activeMRFWorkers, 1) replicateObject(p.ctx, v, p.objLayer) atomic.AddInt32(&p.activeMRFWorkers, -1) - globalReplicationStats.decQ(v.Bucket, 
v.Size, v.DeleteMarker, v.OpType) + p.stats.decQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType) default: - logger.LogOnceIf(p.ctx, fmt.Errorf("unknown mrf replication type: %T", oi), "unknown-mrf-replicate-type") + bugLogIf(p.ctx, fmt.Errorf("unknown mrf replication type: %T", oi), "unknown-mrf-replicate-type") } case <-p.mrfWorkerKillCh: return @@ -1860,9 +1994,9 @@ func (p *ReplicationPool) AddWorker(input <-chan ReplicationWorkerOperation, opT if opTracker != nil { atomic.AddInt32(opTracker, 1) } - globalReplicationStats.incQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType) + p.stats.incQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType) replicateObject(p.ctx, v, p.objLayer) - globalReplicationStats.decQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType) + p.stats.decQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType) if opTracker != nil { atomic.AddInt32(opTracker, -1) } @@ -1870,38 +2004,23 @@ func (p *ReplicationPool) AddWorker(input <-chan ReplicationWorkerOperation, opT if opTracker != nil { atomic.AddInt32(opTracker, 1) } - globalReplicationStats.incQ(v.Bucket, 0, true, v.OpType) + p.stats.incQ(v.Bucket, 0, true, v.OpType) replicateDelete(p.ctx, v, p.objLayer) - globalReplicationStats.decQ(v.Bucket, 0, true, v.OpType) + p.stats.decQ(v.Bucket, 0, true, v.OpType) if opTracker != nil { atomic.AddInt32(opTracker, -1) } default: - logger.LogOnceIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type") + bugLogIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type") } } } } -// AddLargeWorkers adds a static number of workers to handle large uploads -func (p *ReplicationPool) AddLargeWorkers() { - for i := 0; i < LargeWorkerCount; i++ { - p.lrgworkers = append(p.lrgworkers, make(chan ReplicationWorkerOperation, 100000)) - i := i - go p.AddLargeWorker(p.lrgworkers[i]) - } - go func() { - <-p.ctx.Done() - for i := 0; i < LargeWorkerCount; i++ { - xioutil.SafeClose(p.lrgworkers[i]) - } - }() -} - // AddLargeWorker adds a replication worker to the static pool for large uploads. -func (p *ReplicationPool) AddLargeWorker(input <-chan ReplicationWorkerOperation) { +func (p *ReplicationPool) AddLargeWorker(input <-chan ReplicationWorkerOperation, opTracker *int32) { for { select { case <-p.ctx.Done(): @@ -1912,18 +2031,54 @@ func (p *ReplicationPool) AddLargeWorker(input <-chan ReplicationWorkerOperation } switch v := oi.(type) { case ReplicateObjectInfo: - globalReplicationStats.incQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType) + if opTracker != nil { + atomic.AddInt32(opTracker, 1) + } + p.stats.incQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType) replicateObject(p.ctx, v, p.objLayer) - globalReplicationStats.decQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType) + p.stats.decQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType) + if opTracker != nil { + atomic.AddInt32(opTracker, -1) + } case DeletedObjectReplicationInfo: + if opTracker != nil { + atomic.AddInt32(opTracker, 1) + } replicateDelete(p.ctx, v, p.objLayer) + if opTracker != nil { + atomic.AddInt32(opTracker, -1) + } default: - logger.LogOnceIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type") + bugLogIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type") } } } } +// ResizeLrgWorkers sets replication workers pool for large transfers(>=128MiB) to new size. +// checkOld can be set to an expected value. 
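// Illustrative sketch (not part of the patch): AddLargeWorker above drains a
// channel of operations and increments an atomic counter around each job,
// which is what ActiveLrgWorkers reports; closing a worker's channel is how
// the pool later shrinks. A stripped-down version of that loop, with a plain
// func() standing in for ReplicationWorkerOperation:
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func worker(jobs <-chan func(), active *int32, wg *sync.WaitGroup) {
	defer wg.Done()
	for job := range jobs { // returns when the channel is closed
		atomic.AddInt32(active, 1)
		job()
		atomic.AddInt32(active, -1)
	}
}

func main() {
	var (
		active int32
		wg     sync.WaitGroup
	)
	jobs := make(chan func(), 4)
	wg.Add(1)
	go worker(jobs, &active, &wg)

	jobs <- func() { fmt.Println("replicating one large object (pretend)") }
	close(jobs) // signals the worker to exit
	wg.Wait()
	fmt.Println("in flight now:", atomic.LoadInt32(&active)) // 0
}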
+// If the worker count changed +func (p *ReplicationPool) ResizeLrgWorkers(n, checkOld int) { + p.mu.Lock() + defer p.mu.Unlock() + + if (checkOld > 0 && len(p.lrgworkers) != checkOld) || n == len(p.lrgworkers) || n < 1 { + // Either already satisfied or worker count changed while we waited for the lock. + return + } + for len(p.lrgworkers) < n { + input := make(chan ReplicationWorkerOperation, 100000) + p.lrgworkers = append(p.lrgworkers, input) + + go p.AddLargeWorker(input, &p.activeLrgWorkers) + } + for len(p.lrgworkers) > n { + worker := p.lrgworkers[len(p.lrgworkers)-1] + p.lrgworkers = p.lrgworkers[:len(p.lrgworkers)-1] + xioutil.SafeClose(worker) + } +} + // ActiveWorkers returns the number of active workers handling replication traffic. func (p *ReplicationPool) ActiveWorkers() int { return int(atomic.LoadInt32(&p.activeWorkers)) @@ -1934,6 +2089,11 @@ func (p *ReplicationPool) ActiveMRFWorkers() int { return int(atomic.LoadInt32(&p.activeMRFWorkers)) } +// ActiveLrgWorkers returns the number of active workers handling traffic > 128MiB object size. +func (p *ReplicationPool) ActiveLrgWorkers() int { + return int(atomic.LoadInt32(&p.activeLrgWorkers)) +} + // ResizeWorkers sets replication workers pool to new size. // checkOld can be set to an expected value. // If the worker count changed @@ -1959,7 +2119,7 @@ func (p *ReplicationPool) ResizeWorkers(n, checkOld int) { } // ResizeWorkerPriority sets replication failed workers pool size -func (p *ReplicationPool) ResizeWorkerPriority(pri string, maxWorkers int) { +func (p *ReplicationPool) ResizeWorkerPriority(pri string, maxWorkers, maxLWorkers int) { var workers, mrfWorkers int p.mu.Lock() switch pri { @@ -1986,11 +2146,15 @@ func (p *ReplicationPool) ResizeWorkerPriority(pri string, maxWorkers int) { if maxWorkers > 0 && mrfWorkers > maxWorkers { mrfWorkers = maxWorkers } + if maxLWorkers <= 0 { + maxLWorkers = LargeWorkerCount + } p.priority = pri p.maxWorkers = maxWorkers p.mu.Unlock() p.ResizeWorkers(workers, 0) p.ResizeFailedWorkers(mrfWorkers) + p.ResizeLrgWorkers(maxLWorkers, 0) } // ResizeFailedWorkers sets replication failed workers pool size @@ -2034,9 +2198,18 @@ func (p *ReplicationPool) queueReplicaTask(ri ReplicateObjectInfo) { h := xxh3.HashString(ri.Bucket + ri.Name) select { case <-p.ctx.Done(): - case p.lrgworkers[h%LargeWorkerCount] <- ri: + case p.lrgworkers[h%uint64(len(p.lrgworkers))] <- ri: default: - globalReplicationPool.queueMRFSave(ri.ToMRFEntry()) + p.queueMRFSave(ri.ToMRFEntry()) + p.mu.RLock() + maxLWorkers := p.maxLWorkers + existing := len(p.lrgworkers) + p.mu.RUnlock() + maxLWorkers = min(maxLWorkers, LargeWorkerCount) + if p.ActiveLrgWorkers() < maxLWorkers { + workers := min(existing+1, maxLWorkers) + p.ResizeLrgWorkers(workers, existing) + } } return } @@ -2058,16 +2231,16 @@ func (p *ReplicationPool) queueReplicaTask(ri ReplicateObjectInfo) { case healCh <- ri: case ch <- ri: default: - globalReplicationPool.queueMRFSave(ri.ToMRFEntry()) + globalReplicationPool.Get().queueMRFSave(ri.ToMRFEntry()) p.mu.RLock() prio := p.priority maxWorkers := p.maxWorkers p.mu.RUnlock() switch prio { case "fast": - logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming traffic"), string(replicationSubsystem)) + replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming traffic"), string(replicationSubsystem), logger.WarningKind) case "slow": - logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming traffic - we recommend increasing 
replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem)) + replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming traffic - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem), logger.WarningKind) default: maxWorkers = min(maxWorkers, WorkerMaxLimit) if p.ActiveWorkers() < maxWorkers { @@ -2094,7 +2267,7 @@ func queueReplicateDeletesWrapper(doi DeletedObjectReplicationInfo, existingObje doi.ResetID = v.ResetID doi.TargetArn = k - globalReplicationPool.queueReplicaDeleteTask(doi) + globalReplicationPool.Get().queueReplicaDeleteTask(doi) } } } @@ -2115,16 +2288,16 @@ func (p *ReplicationPool) queueReplicaDeleteTask(doi DeletedObjectReplicationInf case <-p.ctx.Done(): case ch <- doi: default: - globalReplicationPool.queueMRFSave(doi.ToMRFEntry()) + p.queueMRFSave(doi.ToMRFEntry()) p.mu.RLock() prio := p.priority maxWorkers := p.maxWorkers p.mu.RUnlock() switch prio { case "fast": - logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming deletes"), string(replicationSubsystem)) + replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming deletes"), string(replicationSubsystem), logger.WarningKind) case "slow": - logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming deletes - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem)) + replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming deletes - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem), logger.WarningKind) default: maxWorkers = min(maxWorkers, WorkerMaxLimit) if p.ActiveWorkers() < maxWorkers { @@ -2139,14 +2312,16 @@ func (p *ReplicationPool) queueReplicaDeleteTask(doi DeletedObjectReplicationInf } type replicationPoolOpts struct { - Priority string - MaxWorkers int + Priority string + MaxWorkers int + MaxLWorkers int } func initBackgroundReplication(ctx context.Context, objectAPI ObjectLayer) { - globalReplicationPool = NewReplicationPool(ctx, objectAPI, globalAPIConfig.getReplicationOpts()) - globalReplicationStats = NewReplicationStats(ctx, objectAPI) - go globalReplicationStats.trackEWMA() + stats := NewReplicationStats(ctx, objectAPI) + globalReplicationPool.Set(NewReplicationPool(ctx, objectAPI, globalAPIConfig.getReplicationOpts(), stats)) + globalReplicationStats.Store(stats) + go stats.trackEWMA() } type proxyResult struct { @@ -2156,12 +2331,12 @@ type proxyResult struct { // get Reader from replication target if active-active replication is in place and // this node returns a 404 -func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, _ http.Header, opts ObjectOptions, proxyTargets *madmin.BucketTargets) (gr *GetObjectReader, proxy proxyResult, err error) { +func proxyGetToReplicationTarget(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions, proxyTargets *madmin.BucketTargets) (gr *GetObjectReader, proxy proxyResult, err error) { tgt, oi, proxy := proxyHeadToRepTarget(ctx, bucket, object, rs, opts, proxyTargets) if !proxy.Proxy { return nil, proxy, nil } - fn, _, _, err := NewGetObjectReader(nil, oi, opts) + fn, _, _, err := NewGetObjectReader(nil, oi, opts, h) if err != nil { return nil, proxy, err } @@ -2216,6 +2391,8 @@ func 
getProxyTargets(ctx context.Context, bucket, object string, opts ObjectOpti } cfg, err := getReplicationConfig(ctx, bucket) if err != nil || cfg == nil { + replLogOnceIf(ctx, err, bucket) + return &madmin.BucketTargets{} } topts := replication.ObjectOpts{Name: object} @@ -2257,7 +2434,7 @@ func proxyHeadToRepTarget(ctx context.Context, bucket, object string, rs *HTTPRa if rs != nil { h, err := rs.ToHeader() if err != nil { - logger.LogIf(ctx, fmt.Errorf("invalid range header for %s/%s(%s) - %w", bucket, object, opts.VersionID, err)) + replLogIf(ctx, fmt.Errorf("invalid range header for %s/%s(%s) - %w", bucket, object, opts.VersionID, err)) continue } gopts.Set(xhttp.Range, h) @@ -2344,11 +2521,13 @@ func scheduleReplication(ctx context.Context, oi ObjectInfo, o ObjectLayer, dsc SSEC: crypto.SSEC.IsEncrypted(oi.UserDefined), UserTags: oi.UserTags, } - + if ri.SSEC { + ri.Checksum = oi.Checksum + } if dsc.Synchronous() { replicateObject(ctx, ri, o) } else { - globalReplicationPool.queueReplicaTask(ri) + globalReplicationPool.Get().queueReplicaTask(ri) } } @@ -2476,9 +2655,9 @@ func proxyGetTaggingToRepTarget(ctx context.Context, bucket, object string, opts } func scheduleReplicationDelete(ctx context.Context, dv DeletedObjectReplicationInfo, o ObjectLayer) { - globalReplicationPool.queueReplicaDeleteTask(dv) + globalReplicationPool.Get().queueReplicaDeleteTask(dv) for arn := range dv.ReplicationState.Targets { - globalReplicationStats.Update(dv.Bucket, replicatedTargetInfo{Arn: arn, Size: 0, Duration: 0, OpType: replication.DeleteReplicationType}, replication.Pending, replication.StatusType("")) + globalReplicationStats.Load().Update(dv.Bucket, replicatedTargetInfo{Arn: arn, Size: 0, Duration: 0, OpType: replication.DeleteReplicationType}, replication.Pending, replication.StatusType("")) } } @@ -2498,7 +2677,7 @@ func (c replicationConfig) Replicate(opts replication.ObjectOpts) bool { // Resync returns true if replication reset is requested func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc ReplicateDecision, tgtStatuses map[string]replication.StatusType) (r ResyncDecision) { if c.Empty() { - return + return r } // Now overlay existing object replication choices for target @@ -2514,7 +2693,7 @@ func (c replicationConfig) Resync(ctx context.Context, oi ObjectInfo, dsc Replic tgtArns := c.Config.FilterTargetArns(opts) // indicates no matching target with Existing object replication enabled. if len(tgtArns) == 0 { - return + return r } for _, t := range tgtArns { opts.TargetArn = t @@ -2540,7 +2719,7 @@ func (c replicationConfig) resync(oi ObjectInfo, dsc ReplicateDecision, tgtStatu targets: make(map[string]ResyncTargetDecision, len(dsc.targetsMap)), } if c.remotes == nil { - return + return r } for _, tgt := range c.remotes.Targets { d, ok := dsc.targetsMap[tgt.Arn] @@ -2552,7 +2731,7 @@ func (c replicationConfig) resync(oi ObjectInfo, dsc ReplicateDecision, tgtStatu } r.targets[d.Arn] = resyncTarget(oi, tgt.Arn, tgt.ResetID, tgt.ResetBeforeDate, tgtStatuses[tgt.Arn]) } - return + return r } func targetResetHeader(arn string) string { @@ -2571,28 +2750,28 @@ func resyncTarget(oi ObjectInfo, arn string, resetID string, resetBeforeDate tim if !ok { // existing object replication is enabled and object version is unreplicated so far. 
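The ResizeLrgWorkers and queueReplicaTask hunks above move the large-transfer pool from a fixed worker count to one that grows under backpressure and shrinks by closing a retired worker's channel. A minimal, self-contained sketch of that resize pattern follows; the names are hypothetical and it is not the MinIO implementation.

    package main

    import "sync"

    // workerPool dispatches jobs to a resizable set of channel-fed workers.
    type workerPool struct {
        mu      sync.Mutex
        workers []chan int
    }

    // resize grows or shrinks the pool; a worker exits when its channel closes.
    func (p *workerPool) resize(n int) {
        p.mu.Lock()
        defer p.mu.Unlock()
        for len(p.workers) < n {
            ch := make(chan int, 128)
            p.workers = append(p.workers, ch)
            go func() {
                for job := range ch { // stops when ch is closed
                    _ = job // process job here
                }
            }()
        }
        for len(p.workers) > n {
            last := p.workers[len(p.workers)-1]
            p.workers = p.workers[:len(p.workers)-1]
            close(last)
        }
    }

    // dispatch picks a worker by hash and adds one worker if the queue is full.
    // Assumes resize was called at least once before the first dispatch.
    func (p *workerPool) dispatch(hash uint64, job, maxWorkers int) {
        p.mu.Lock()
        ch := p.workers[hash%uint64(len(p.workers))]
        n := len(p.workers)
        p.mu.Unlock()
        select {
        case ch <- job:
        default: // backpressure: grow the pool, up to the cap
            if n < maxWorkers {
                p.resize(n + 1)
            }
        }
    }

    func main() {
        p := &workerPool{}
        p.resize(2)
        p.dispatch(42, 7, 8)
    }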
if resetID != "" && oi.ModTime.Before(resetBeforeDate) { // trigger replication if `mc replicate reset` requested rd.Replicate = true - return + return rd } // For existing object reset - this condition is needed rd.Replicate = tgtStatus == "" - return + return rd } if resetID == "" || resetBeforeDate.Equal(timeSentinel) { // no reset in progress - return + return rd } // if already replicated, return true if a new reset was requested. splits := strings.SplitN(rs, ";", 2) if len(splits) != 2 { - return + return rd } newReset := splits[1] != resetID if !newReset && tgtStatus == replication.Completed { // already replicated and no reset requested - return + return rd } rd.Replicate = newReset && oi.ModTime.Before(resetBeforeDate) - return + return rd } const resyncTimeInterval = time.Minute * 1 @@ -2625,7 +2804,7 @@ func (s *replicationResyncer) PersistToDisk(ctx context.Context, objectAPI Objec } if updt { if err := saveResyncStatus(ctx, bucket, brs, objectAPI); err != nil { - logger.LogIf(ctx, fmt.Errorf("could not save resync metadata to drive for %s - %w", bucket, err)) + replLogIf(ctx, fmt.Errorf("could not save resync metadata to drive for %s - %w", bucket, err)) } else { lastResyncStatusSave[bucket] = brs.LastUpdate } @@ -2710,15 +2889,15 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object s.workerCh <- struct{}{} }() // Allocate new results channel to receive ObjectInfo. - objInfoCh := make(chan ObjectInfo) + objInfoCh := make(chan itemOrErr[ObjectInfo]) cfg, err := getReplicationConfig(ctx, opts.bucket) if err != nil { - logger.LogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed with %w", opts.bucket, opts.arn, err)) + replLogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed with %w", opts.bucket, opts.arn, err)) return } tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, opts.bucket) if err != nil { - logger.LogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed %w", opts.bucket, opts.arn, err)) + replLogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed %w", opts.bucket, opts.arn, err)) return } rcfg := replicationConfig{ @@ -2731,12 +2910,12 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object TargetArn: opts.arn, }) if len(tgtArns) != 1 { - logger.LogIf(ctx, fmt.Errorf("replication resync failed for %s - arn specified %s is missing in the replication config", opts.bucket, opts.arn)) + replLogIf(ctx, fmt.Errorf("replication resync failed for %s - arn specified %s is missing in the replication config", opts.bucket, opts.arn)) return } tgt := globalBucketTargetSys.GetRemoteTargetClient(opts.bucket, opts.arn) if tgt == nil { - logger.LogIf(ctx, fmt.Errorf("replication resync failed for %s - target could not be created for arn %s", opts.bucket, opts.arn)) + replLogIf(ctx, fmt.Errorf("replication resync failed for %s - target could not be created for arn %s", opts.bucket, opts.arn)) return } // mark resync status as resync started @@ -2747,7 +2926,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object // Walk through all object versions - Walk() is always in ascending order needed to ensure // delete marker replicated to target after object version is first created. 
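Throughout this file the bare globals are now reached through holders (globalReplicationPool.Get()/Set(), globalReplicationStats.Load()/Store()). The holder types are not shown in these hunks; a minimal sketch of the idea, assuming an atomic.Pointer-style wrapper:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // pool stands in for the replication pool; the real type is far larger.
    type pool struct{ workers int }

    // Holder publishes a lazily-initialized singleton safely across goroutines.
    type Holder[T any] struct{ p atomic.Pointer[T] }

    func (h *Holder[T]) Set(v *T) { h.p.Store(v) }
    func (h *Holder[T]) Get() *T  { return h.p.Load() } // may be nil before init

    var globalPool Holder[pool]

    func main() {
        globalPool.Set(&pool{workers: 4})
        if p := globalPool.Get(); p != nil {
            fmt.Println("workers:", p.workers)
        }
    }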
if err := objectAPI.Walk(ctx, opts.bucket, "", objInfoCh, WalkOptions{}); err != nil { - logger.LogIf(ctx, err) + replLogIf(ctx, err) return } @@ -2770,7 +2949,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object }() var wg sync.WaitGroup - for i := 0; i < resyncParallelRoutines; i++ { + for i := range resyncParallelRoutines { wg.Add(1) workers[i] = make(chan ReplicateObjectInfo, 100) i := i @@ -2824,17 +3003,19 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object ReplicationProxyRequest: "false", }, }) + sz := roi.Size if err != nil { if roi.DeleteMarker && isErrMethodNotAllowed(ErrorRespToObjectError(err, opts.bucket, roi.Name)) { st.ReplicatedCount++ } else { st.FailedCount++ } + sz = 0 } else { st.ReplicatedCount++ st.ReplicatedSize += roi.Size } - traceFn(err) + traceFn(sz, err) select { case <-ctx.Done(): return @@ -2845,7 +3026,12 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object } }(ctx, i) } - for obj := range objInfoCh { + for res := range objInfoCh { + if res.Err != nil { + resyncStatus = ResyncFailed + replLogIf(ctx, res.Err) + return + } select { case <-s.resyncCancelCh: resyncStatus = ResyncCanceled @@ -2854,11 +3040,11 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object return default: } - if heal && lastCheckpoint != "" && lastCheckpoint != obj.Name { + if heal && lastCheckpoint != "" && lastCheckpoint != res.Item.Name { continue } lastCheckpoint = "" - roi := getHealReplicateObjectInfo(obj, rcfg) + roi := getHealReplicateObjectInfo(res.Item, rcfg) if !roi.ExistingObjResync.mustResync() { continue } @@ -2872,7 +3058,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object workers[h%uint64(resyncParallelRoutines)] <- roi } } - for i := 0; i < resyncParallelRoutines; i++ { + for i := range resyncParallelRoutines { xioutil.SafeClose(workers[i]) } wg.Wait() @@ -2901,9 +3087,9 @@ func (s *replicationResyncer) start(ctx context.Context, objAPI ObjectLayer, opt if len(tgtArns) == 0 { return fmt.Errorf("arn %s specified for resync not found in replication config", opts.arn) } - globalReplicationPool.resyncer.RLock() - data, ok := globalReplicationPool.resyncer.statusMap[opts.bucket] - globalReplicationPool.resyncer.RUnlock() + globalReplicationPool.Get().resyncer.RLock() + data, ok := globalReplicationPool.Get().resyncer.statusMap[opts.bucket] + globalReplicationPool.Get().resyncer.RUnlock() if !ok { data, err = loadBucketResyncMetadata(ctx, opts.bucket, objAPI) if err != nil { @@ -2929,9 +3115,9 @@ func (s *replicationResyncer) start(ctx context.Context, objAPI ObjectLayer, opt return err } - globalReplicationPool.resyncer.Lock() - defer globalReplicationPool.resyncer.Unlock() - brs, ok := globalReplicationPool.resyncer.statusMap[opts.bucket] + globalReplicationPool.Get().resyncer.Lock() + defer globalReplicationPool.Get().resyncer.Unlock() + brs, ok := globalReplicationPool.Get().resyncer.statusMap[opts.bucket] if !ok { brs = BucketReplicationResyncStatus{ Version: resyncMetaVersion, @@ -2939,22 +3125,22 @@ func (s *replicationResyncer) start(ctx context.Context, objAPI ObjectLayer, opt } } brs.TargetsMap[opts.arn] = status - globalReplicationPool.resyncer.statusMap[opts.bucket] = brs - go globalReplicationPool.resyncer.resyncBucket(GlobalContext, objAPI, false, opts) + globalReplicationPool.Get().resyncer.statusMap[opts.bucket] = brs + go globalReplicationPool.Get().resyncer.resyncBucket(GlobalContext, objAPI, false, opts) 
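The resync walker above now receives itemOrErr[ObjectInfo] values instead of bare ObjectInfo, so a listing error travels down the same channel and aborts the resync rather than being dropped. A small sketch of that pattern; the wrapper's shape is assumed from how res.Item and res.Err are used in the hunk:

    package main

    import (
        "errors"
        "fmt"
    )

    // itemOrErr carries either a result or the error that ended the stream.
    type itemOrErr[T any] struct {
        Item T
        Err  error
    }

    func walk(out chan<- itemOrErr[string]) {
        defer close(out)
        out <- itemOrErr[string]{Item: "bucket/object-1"}
        out <- itemOrErr[string]{Err: errors.New("listing failed")} // terminal error
    }

    func main() {
        ch := make(chan itemOrErr[string], 4)
        go walk(ch)
        for res := range ch {
            if res.Err != nil {
                fmt.Println("abort:", res.Err) // mirrors resyncStatus = ResyncFailed
                return
            }
            fmt.Println("process:", res.Item)
        }
    }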
return nil } -func (s *replicationResyncer) trace(resyncID string, path string) func(err error) { +func (s *replicationResyncer) trace(resyncID string, path string) func(sz int64, err error) { startTime := time.Now() - return func(err error) { + return func(sz int64, err error) { duration := time.Since(startTime) if globalTrace.NumSubscribers(madmin.TraceReplicationResync) > 0 { - globalTrace.Publish(replicationResyncTrace(resyncID, startTime, duration, path, err)) + globalTrace.Publish(replicationResyncTrace(resyncID, startTime, duration, path, err, sz)) } } } -func replicationResyncTrace(resyncID string, startTime time.Time, duration time.Duration, path string, err error) madmin.TraceInfo { +func replicationResyncTrace(resyncID string, startTime time.Time, duration time.Duration, path string, err error, sz int64) madmin.TraceInfo { var errStr string if err != nil { errStr = err.Error() @@ -2968,6 +3154,7 @@ func replicationResyncTrace(resyncID string, startTime time.Time, duration time. Duration: duration, Path: path, Error: errStr, + Bytes: sz, } } @@ -2984,7 +3171,7 @@ func (p *ReplicationPool) deleteResyncMetadata(ctx context.Context, bucket strin } // initResync - initializes bucket replication resync for all buckets. -func (p *ReplicationPool) initResync(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error { +func (p *ReplicationPool) initResync(ctx context.Context, buckets []string, objAPI ObjectLayer) error { if objAPI == nil { return errServerNotInitialized } @@ -2993,7 +3180,7 @@ func (p *ReplicationPool) initResync(ctx context.Context, buckets []BucketInfo, return nil } -func (p *ReplicationPool) startResyncRoutine(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) { +func (p *ReplicationPool) startResyncRoutine(ctx context.Context, buckets []string, objAPI ObjectLayer) { r := rand.New(rand.NewSource(time.Now().UnixNano())) // Run the replication resync in a loop for { @@ -3001,28 +3188,26 @@ func (p *ReplicationPool) startResyncRoutine(ctx context.Context, buckets []Buck <-ctx.Done() return } - duration := time.Duration(r.Float64() * float64(time.Minute)) - if duration < time.Second { + duration := max(time.Duration(r.Float64()*float64(time.Minute)), // Make sure to sleep at least a second to avoid high CPU ticks. - duration = time.Second - } + time.Second) time.Sleep(duration) } } // Loads bucket replication resync statuses into memory. -func (p *ReplicationPool) loadResync(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error { +func (p *ReplicationPool) loadResync(ctx context.Context, buckets []string, objAPI ObjectLayer) error { // Make sure only one node running resync on the cluster. 
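startResyncRoutine above (and the callhome loop later in this diff) now uses the built-in max to keep the randomized retry sleep at no less than one second. A short sketch of that clamped jitter, assuming math/rand is acceptable for scheduling noise:

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // jitteredSleep returns a random duration below upper, floored at one second.
    func jitteredSleep(r *rand.Rand, upper time.Duration) time.Duration {
        return max(time.Duration(r.Float64()*float64(upper)), time.Second)
    }

    func main() {
        r := rand.New(rand.NewSource(time.Now().UnixNano()))
        fmt.Println(jitteredSleep(r, time.Minute))
    }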
ctx, cancel := globalLeaderLock.GetLock(ctx) defer cancel() for index := range buckets { - bucket := buckets[index].Name + bucket := buckets[index] meta, err := loadBucketResyncMetadata(ctx, bucket, objAPI) if err != nil { if !errors.Is(err, errVolumeNotFound) { - logger.LogIf(ctx, err) + replLogIf(ctx, err) } continue } @@ -3109,18 +3294,18 @@ func saveResyncStatus(ctx context.Context, bucket string, brs BucketReplicationR func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string, opts madmin.ReplDiffOpts) (chan madmin.DiffInfo, error) { cfg, err := getReplicationConfig(ctx, bucket) if err != nil { - logger.LogIf(ctx, err) + replLogOnceIf(ctx, err, bucket) return nil, err } tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket) if err != nil { - logger.LogIf(ctx, err) + replLogIf(ctx, err) return nil, err } - objInfoCh := make(chan ObjectInfo, 10) + objInfoCh := make(chan itemOrErr[ObjectInfo], 10) if err := objAPI.Walk(ctx, bucket, opts.Prefix, objInfoCh, WalkOptions{}); err != nil { - logger.LogIf(ctx, err) + replLogIf(ctx, err) return nil, err } rcfg := replicationConfig{ @@ -3130,11 +3315,17 @@ func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string, diffCh := make(chan madmin.DiffInfo, 4000) go func() { defer xioutil.SafeClose(diffCh) - for obj := range objInfoCh { + for res := range objInfoCh { + if res.Err != nil { + diffCh <- madmin.DiffInfo{Err: res.Err} + return + } if contextCanceled(ctx) { // Just consume input... continue } + obj := res.Item + // Ignore object prefixes which are excluded // from versioning via the MinIO bucket versioning extension. if globalBucketVersioningSys.PrefixSuspended(bucket, obj.Name) { @@ -3165,7 +3356,7 @@ func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string, } for arn, st := range roi.TargetPurgeStatuses { if opts.ARN == "" || opts.ARN == arn { - if !opts.Verbose && st == Complete { + if !opts.Verbose && st == replication.VersionPurgeComplete { continue } t, ok := tgtsMap[arn] @@ -3202,7 +3393,11 @@ func QueueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, ret if oi.ModTime.IsZero() { return } - rcfg, _ := getReplicationConfig(ctx, bucket) + rcfg, err := getReplicationConfig(ctx, bucket) + if err != nil { + replLogOnceIf(ctx, err, bucket) + return + } tgts, _ := globalBucketTargetSys.ListBucketTargets(ctx, bucket) queueReplicationHeal(ctx, bucket, oi, replicationConfig{ Config: rcfg, @@ -3227,12 +3422,12 @@ func queueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, rcf roi = getHealReplicateObjectInfo(oi, rcfg) roi.RetryCount = uint32(retryCount) if !roi.Dsc.ReplicateAny() { - return + return roi } // early return if replication already done, otherwise we need to determine if this // version is an existing object that needs healing. 
if oi.ReplicationStatus == replication.Completed && oi.VersionPurgeStatus.Empty() && !roi.ExistingObjResync.mustResync() { - return + return roi } if roi.DeleteMarker || !roi.VersionPurgeStatus.Empty() { @@ -3260,16 +3455,16 @@ func queueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, rcf // heal delete marker replication failure or versioned delete replication failure if roi.ReplicationStatus == replication.Pending || roi.ReplicationStatus == replication.Failed || - roi.VersionPurgeStatus == Failed || roi.VersionPurgeStatus == Pending { - globalReplicationPool.queueReplicaDeleteTask(dv) - return + roi.VersionPurgeStatus == replication.VersionPurgeFailed || roi.VersionPurgeStatus == replication.VersionPurgePending { + globalReplicationPool.Get().queueReplicaDeleteTask(dv) + return roi } // if replication status is Complete on DeleteMarker and existing object resync required if roi.ExistingObjResync.mustResync() && (roi.ReplicationStatus == replication.Completed || roi.ReplicationStatus.Empty()) { queueReplicateDeletesWrapper(dv, roi.ExistingObjResync) - return + return roi } - return + return roi } if roi.ExistingObjResync.mustResync() { roi.OpType = replication.ExistingObjectReplicationType @@ -3277,14 +3472,14 @@ func queueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, rcf switch roi.ReplicationStatus { case replication.Pending, replication.Failed: roi.EventType = ReplicateHeal - globalReplicationPool.queueReplicaTask(roi) - return + globalReplicationPool.Get().queueReplicaTask(roi) + return roi } if roi.ExistingObjResync.mustResync() { roi.EventType = ReplicateExisting - globalReplicationPool.queueReplicaTask(roi) + globalReplicationPool.Get().queueReplicaTask(roi) } - return + return roi } const ( @@ -3347,8 +3542,8 @@ func (p *ReplicationPool) queueMRFSave(entry MRFReplicateEntry) { return } if entry.RetryCount > mrfRetryLimit { // let scanner catch up if retry count exceeded - atomic.AddUint64(&globalReplicationStats.mrfStats.TotalDroppedCount, 1) - atomic.AddUint64(&globalReplicationStats.mrfStats.TotalDroppedBytes, uint64(entry.sz)) + atomic.AddUint64(&p.stats.mrfStats.TotalDroppedCount, 1) + atomic.AddUint64(&p.stats.mrfStats.TotalDroppedBytes, uint64(entry.sz)) return } @@ -3361,8 +3556,8 @@ func (p *ReplicationPool) queueMRFSave(entry MRFReplicateEntry) { select { case p.mrfSaveCh <- entry: default: - atomic.AddUint64(&globalReplicationStats.mrfStats.TotalDroppedCount, 1) - atomic.AddUint64(&globalReplicationStats.mrfStats.TotalDroppedBytes, uint64(entry.sz)) + atomic.AddUint64(&p.stats.mrfStats.TotalDroppedCount, 1) + atomic.AddUint64(&p.stats.mrfStats.TotalDroppedBytes, uint64(entry.sz)) } } } @@ -3393,7 +3588,7 @@ func (p *ReplicationPool) persistToDrive(ctx context.Context, v MRFReplicateEntr } globalLocalDrivesMu.RLock() - localDrives := cloneDrives(globalLocalDrives) + localDrives := cloneDrives(globalLocalDrivesMap) globalLocalDrivesMu.RUnlock() for _, localDrive := range localDrives { @@ -3411,7 +3606,7 @@ func (p *ReplicationPool) saveMRFEntries(ctx context.Context, entries map[string if !p.initialized() { return } - atomic.StoreUint64(&globalReplicationStats.mrfStats.LastFailedCount, uint64(len(entries))) + atomic.StoreUint64(&p.stats.mrfStats.LastFailedCount, uint64(len(entries))) if len(entries) == 0 { return } @@ -3460,7 +3655,7 @@ func (p *ReplicationPool) loadMRF() (mrfRec MRFReplicateEntries, err error) { } globalLocalDrivesMu.RLock() - localDrives := cloneDrives(globalLocalDrives) + localDrives := 
cloneDrives(globalLocalDrivesMap) globalLocalDrivesMu.RUnlock() for _, localDrive := range localDrives { @@ -3504,7 +3699,7 @@ func (p *ReplicationPool) processMRF() { continue } if err := p.queueMRFHeal(); err != nil && !osIsNotExist(err) { - logger.LogIf(p.ctx, err) + replLogIf(p.ctx, err) } pTimer.Reset(mrfQueueInterval) case <-p.ctx.Done(): @@ -3548,7 +3743,7 @@ func (p *ReplicationPool) queueMRFHeal() error { } func (p *ReplicationPool) initialized() bool { - return !(p == nil || p.objLayer == nil) + return p != nil && p.objLayer != nil } // getMRF returns MRF entries for this node. @@ -3581,3 +3776,28 @@ func (p *ReplicationPool) getMRF(ctx context.Context, bucket string) (ch <-chan return mrfCh, nil } + +// validateReplicationDestinationOptions is used to configure the validation of the replication destination. +// validateReplicationDestination uses this to configure the validation. +type validateReplicationDestinationOptions struct { + CheckRemoteBucket bool + CheckReady bool + + checkReadyErr sync.Map +} + +func getCRCMeta(oi ObjectInfo, partNum int, h http.Header) (cs map[string]string, isMP bool) { + meta := make(map[string]string) + cs, isMP = oi.decryptChecksums(partNum, h) + for k, v := range cs { + if k == xhttp.AmzChecksumType { + continue + } + cktype := hash.ChecksumStringToType(k) + if cktype.IsSet() { + meta[cktype.Key()] = v + meta[xhttp.AmzChecksumAlgo] = cktype.String() + } + } + return meta, isMP +} diff --git a/cmd/bucket-replication_test.go b/cmd/bucket-replication_test.go index b16cdacfea961..ada944d20bb1b 100644 --- a/cmd/bucket-replication_test.go +++ b/cmd/bucket-replication_test.go @@ -18,7 +18,6 @@ package cmd import ( - "context" "fmt" "net/http" "testing" @@ -86,7 +85,7 @@ var replicationConfigTests = []struct { } func TestReplicationResync(t *testing.T) { - ctx := context.Background() + ctx := t.Context() for i, test := range replicationConfigTests { if sync := test.rcfg.Resync(ctx, test.info, test.dsc, test.tgtStatuses); sync.mustResync() != test.expectedSync { t.Errorf("Test%d (%s): Resync got %t , want %t", i+1, test.name, sync.mustResync(), test.expectedSync) diff --git a/cmd/bucket-stats.go b/cmd/bucket-stats.go index d691af93b7a5f..e2eb00aa4ebc6 100644 --- a/cmd/bucket-stats.go +++ b/cmd/bucket-stats.go @@ -19,7 +19,9 @@ package cmd import ( "fmt" + "maps" "math" + "sync/atomic" "time" "github.com/minio/madmin-go/v3" @@ -36,7 +38,7 @@ type ReplicationLatency struct { // Merge two replication latency into a new one func (rl ReplicationLatency) merge(other ReplicationLatency) (newReplLatency ReplicationLatency) { newReplLatency.UploadHistogram = rl.UploadHistogram.Merge(other.UploadHistogram) - return + return newReplLatency } // Get upload latency of each object size range @@ -47,7 +49,7 @@ func (rl ReplicationLatency) getUploadLatency() (ret map[string]uint64) { // Convert nanoseconds to milliseconds ret[sizeTagToString(k)] = uint64(v.avg() / time.Millisecond) } - return + return ret } // Update replication upload latency with a new value @@ -62,7 +64,7 @@ type ReplicationLastMinute struct { func (rl ReplicationLastMinute) merge(other ReplicationLastMinute) (nl ReplicationLastMinute) { nl = ReplicationLastMinute{rl.LastMinute.merge(other.LastMinute)} - return + return nl } func (rl *ReplicationLastMinute) addsize(n int64) { @@ -107,18 +109,18 @@ func (l ReplicationLastHour) merge(o ReplicationLastHour) (merged ReplicationLas // Add a new duration data func (l *ReplicationLastHour) addsize(sz int64) { - min := time.Now().Unix() / 60 - 
l.forwardTo(min) - winIdx := min % 60 - l.Totals[winIdx].merge(AccElem{Total: min, Size: sz, N: 1}) - l.LastMin = min + minutes := time.Now().Unix() / 60 + l.forwardTo(minutes) + winIdx := minutes % 60 + l.Totals[winIdx].merge(AccElem{Total: minutes, Size: sz, N: 1}) + l.LastMin = minutes } // Merge all recorded counts of last hour into one func (l *ReplicationLastHour) getTotal() AccElem { var res AccElem - min := time.Now().Unix() / 60 - l.forwardTo(min) + minutes := time.Now().Unix() / 60 + l.forwardTo(minutes) for _, elem := range l.Totals[:] { res.merge(elem) } @@ -127,8 +129,7 @@ func (l *ReplicationLastHour) getTotal() AccElem { // forwardTo time t, clearing any entries in between. func (l *ReplicationLastHour) forwardTo(t int64) { - tMin := t / 60 - if l.LastMin >= tMin { + if l.LastMin >= t { return } if t-l.LastMin >= 60 { @@ -221,9 +222,7 @@ func (brs BucketReplicationStats) Clone() (c BucketReplicationStats) { } if s.Failed.ErrCounts == nil { s.Failed.ErrCounts = make(map[string]int) - for k, v := range st.Failed.ErrCounts { - s.Failed.ErrCounts[k] = v - } + maps.Copy(s.Failed.ErrCounts, st.Failed.ErrCounts) } c.Stats[arn] = &s } @@ -310,10 +309,18 @@ type ReplQNodeStats struct { func (r *ReplicationStats) getNodeQueueStats(bucket string) (qs ReplQNodeStats) { qs.NodeName = globalLocalNodeName qs.Uptime = UTCNow().Unix() - globalBootTime.Unix() - qs.ActiveWorkers = globalReplicationStats.ActiveWorkers() + grs := globalReplicationStats.Load() + if grs != nil { + qs.ActiveWorkers = grs.ActiveWorkers() + } else { + qs.ActiveWorkers = ActiveWorkerStat{} + } qs.XferStats = make(map[RMetricName]XferStats) qs.QStats = r.qCache.getBucketStats(bucket) qs.TgtXferStats = make(map[string]map[RMetricName]XferStats) + qs.MRFStats = ReplicationMRFStats{ + LastFailedCount: atomic.LoadUint64(&r.mrfStats.LastFailedCount), + } r.RLock() defer r.RUnlock() @@ -399,10 +406,12 @@ func (r *ReplicationStats) getNodeQueueStats(bucket string) (qs ReplQNodeStats) func (r *ReplicationStats) getNodeQueueStatsSummary() (qs ReplQNodeStats) { qs.NodeName = globalLocalNodeName qs.Uptime = UTCNow().Unix() - globalBootTime.Unix() - qs.ActiveWorkers = globalReplicationStats.ActiveWorkers() + qs.ActiveWorkers = globalReplicationStats.Load().ActiveWorkers() qs.XferStats = make(map[RMetricName]XferStats) qs.QStats = r.qCache.getSiteStats() - + qs.MRFStats = ReplicationMRFStats{ + LastFailedCount: atomic.LoadUint64(&r.mrfStats.LastFailedCount), + } r.RLock() defer r.RUnlock() tx := newXferStats() diff --git a/cmd/bucket-stats_gen.go b/cmd/bucket-stats_gen.go index 1fca700f1fc31..201c574ce3ce4 100644 --- a/cmd/bucket-stats_gen.go +++ b/cmd/bucket-stats_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package cmd + import ( "github.com/tinylib/msgp/msgp" ) @@ -617,19 +617,17 @@ func (z *BucketReplicationStats) DecodeMsg(dc *msgp.Reader) (err error) { if z.Stats == nil { z.Stats = make(map[string]*BucketReplicationStat, zb0002) } else if len(z.Stats) > 0 { - for key := range z.Stats { - delete(z.Stats, key) - } + clear(z.Stats) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 *BucketReplicationStat za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Stats") return } + var za0002 *BucketReplicationStat if dc.IsNil() { err = dc.ReadNil() if err != nil { @@ -943,14 +941,12 @@ func (z *BucketReplicationStats) UnmarshalMsg(bts []byte) (o []byte, err error) if z.Stats == nil { z.Stats = make(map[string]*BucketReplicationStat, zb0002) } else if len(z.Stats) > 0 { - for key := range z.Stats { - delete(z.Stats, key) - } + clear(z.Stats) } for zb0002 > 0 { - var za0001 string var za0002 *BucketReplicationStat zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Stats") @@ -1402,19 +1398,17 @@ func (z *BucketStatsMap) DecodeMsg(dc *msgp.Reader) (err error) { if z.Stats == nil { z.Stats = make(map[string]BucketStats, zb0002) } else if len(z.Stats) > 0 { - for key := range z.Stats { - delete(z.Stats, key) - } + clear(z.Stats) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 BucketStats za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Stats") return } + var za0002 BucketStats err = za0002.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Stats", za0001) @@ -1526,14 +1520,12 @@ func (z *BucketStatsMap) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.Stats == nil { z.Stats = make(map[string]BucketStats, zb0002) } else if len(z.Stats) > 0 { - for key := range z.Stats { - delete(z.Stats, key) - } + clear(z.Stats) } for zb0002 > 0 { - var za0001 string var za0002 BucketStats zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Stats") diff --git a/cmd/bucket-stats_gen_test.go b/cmd/bucket-stats_gen_test.go index 2116c19eb43ab..18344b4392d8f 100644 --- a/cmd/bucket-stats_gen_test.go +++ b/cmd/bucket-stats_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package cmd + import ( "bytes" "testing" diff --git a/cmd/bucket-targets.go b/cmd/bucket-targets.go index ca901f8e4ceeb..9bd67a05aea88 100644 --- a/cmd/bucket-targets.go +++ b/cmd/bucket-targets.go @@ -20,7 +20,7 @@ package cmd import ( "context" "errors" - "fmt" + "maps" "net/url" "sync" "time" @@ -32,7 +32,6 @@ import ( "github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/kms" - "github.com/minio/minio/internal/logger" ) const ( @@ -131,7 +130,7 @@ func (sys *BucketTargetSys) initHC(ep *url.URL) { func newHCClient() *madmin.AnonymousClient { clnt, e := madmin.NewAnonymousClientNoEndpoint() if e != nil { - logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to initialize health check client"), string(replicationSubsystem)) + bugLogIf(GlobalContext, errors.New("Unable to initialize health check client")) return nil } clnt.SetCustomTransport(globalRemoteTargetTransport) @@ -238,9 +237,7 @@ func (sys *BucketTargetSys) healthStats() map[string]epHealth { sys.hMutex.RLock() defer sys.hMutex.RUnlock() m := make(map[string]epHealth, len(sys.hc)) - for k, v := range sys.hc { - m[k] = v - } + maps.Copy(m, sys.hc) return m } @@ -288,7 +285,7 @@ func (sys *BucketTargetSys) ListTargets(ctx context.Context, bucket, arnType str } } } - return + return targets } // ListBucketTargets - gets list of bucket targets for this bucket. @@ -430,7 +427,7 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str if arn.Type == madmin.ReplicationService { // reject removal of remote target if replication configuration is present rcfg, err := getReplicationConfig(ctx, bucket) - if err == nil { + if err == nil && rcfg != nil { for _, tgtArn := range rcfg.FilterTargetArns(replication.ObjectOpts{OpType: replication.AllReplicationType}) { if err == nil && (tgtArn == arnStr || rcfg.RoleArn == arnStr) { sys.RLock() @@ -614,7 +611,7 @@ func (sys *BucketTargetSys) UpdateAllTargets(bucket string, tgts *madmin.BucketT } // create minio-go clients for buckets having remote targets -func (sys *BucketTargetSys) set(bucket BucketInfo, meta BucketMetadata) { +func (sys *BucketTargetSys) set(bucket string, meta BucketMetadata) { cfg := meta.bucketTargetConfig if cfg == nil || cfg.Empty() { return @@ -624,13 +621,13 @@ func (sys *BucketTargetSys) set(bucket BucketInfo, meta BucketMetadata) { for _, tgt := range cfg.Targets { tgtClient, err := sys.getRemoteTargetClient(&tgt) if err != nil { - logger.LogIf(GlobalContext, err) + replLogIf(GlobalContext, err) continue } sys.arnRemotesMap[tgt.Arn] = arnTarget{Client: tgtClient} - sys.updateBandwidthLimit(bucket.Name, tgt.Arn, tgt.BandwidthLimit) + sys.updateBandwidthLimit(bucket, tgt.Arn, tgt.BandwidthLimit) } - sys.targetsMap[bucket.Name] = cfg.Targets + sys.targetsMap[bucket] = cfg.Targets } // Returns a minio-go Client configured to access remote host described in replication target config. @@ -671,7 +668,7 @@ func (sys *BucketTargetSys) getRemoteTargetClient(tcfg *madmin.BucketTarget) (*T // getRemoteARN gets existing ARN for an endpoint or generates a new one. 
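The regenerated msgp decoders and the bucket-targets changes above swap hand-rolled map loops for the clear built-in and maps.Copy (both Go 1.21), which do the same work with less code. A brief sketch:

    package main

    import (
        "fmt"
        "maps"
    )

    func main() {
        src := map[string]int{"a": 1, "b": 2}
        dst := map[string]int{"stale": 99}

        clear(dst)          // removes every key, keeping the allocated map
        maps.Copy(dst, src) // copies all key/value pairs from src into dst

        fmt.Println(dst) // map[a:1 b:2]
    }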
func (sys *BucketTargetSys) getRemoteARN(bucket string, target *madmin.BucketTarget, deplID string) (arn string, exists bool) { if target == nil { - return + return arn, exists } sys.RLock() defer sys.RUnlock() @@ -685,7 +682,7 @@ func (sys *BucketTargetSys) getRemoteARN(bucket string, target *madmin.BucketTar } } if !target.Type.IsValid() { - return + return arn, exists } return generateARN(target, deplID), false } diff --git a/cmd/bucket-versioning-handler.go b/cmd/bucket-versioning-handler.go index 64728a88bf04d..92b2c1466e38e 100644 --- a/cmd/bucket-versioning-handler.go +++ b/cmd/bucket-versioning-handler.go @@ -28,7 +28,7 @@ import ( "github.com/minio/minio/internal/bucket/versioning" "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) const ( @@ -82,7 +82,7 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r }, r.URL) return } - if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() { + if rc, _ := getReplicationConfig(ctx, bucket); rc != nil && v.Suspended() { writeErrorResponse(ctx, w, APIError{ Code: "InvalidBucketState", Description: "A replication configuration is present on this bucket, bucket wide versioning cannot be suspended.", @@ -108,7 +108,7 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r // We encode the xml bytes as base64 to ensure there are no encoding // errors. cfgStr := base64.StdEncoding.EncodeToString(configData) - logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ + replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{ Type: madmin.SRBucketMetaTypeVersionConfig, Bucket: bucket, Versioning: &cfgStr, diff --git a/cmd/build-constants.go b/cmd/build-constants.go index 8777fba05237e..7f46baff3ced7 100644 --- a/cmd/build-constants.go +++ b/cmd/build-constants.go @@ -49,8 +49,11 @@ var ( // MinioOSARCH - OS and ARCH. minioOSARCH = runtime.GOOS + "-" + runtime.GOARCH + // MinioReleaseBaseURL - release url without os and arch. + MinioReleaseBaseURL = "https://dl.min.io/server/minio/release/" + // MinioReleaseURL - release URL. - MinioReleaseURL = "https://dl.min.io/server/minio/release/" + minioOSARCH + SlashSeparator + MinioReleaseURL = MinioReleaseBaseURL + minioOSARCH + SlashSeparator // MinioStoreName - MinIO store name. MinioStoreName = "MinIO" @@ -62,5 +65,5 @@ var ( MinioBannerName = "MinIO Object Storage Server" // MinioLicense - MinIO server license. - MinioLicense = "GNU AGPLv3 " + MinioLicense = "GNU AGPLv3 - https://www.gnu.org/licenses/agpl-3.0.html" ) diff --git a/cmd/callhome.go b/cmd/callhome.go index f49e6ad5e579a..2a6d6695b6285 100644 --- a/cmd/callhome.go +++ b/cmd/callhome.go @@ -29,7 +29,6 @@ import ( "time" "github.com/minio/madmin-go/v3" - "github.com/minio/minio/internal/logger" ) var callhomeLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second) @@ -58,11 +57,9 @@ func initCallhome(ctx context.Context, objAPI ObjectLayer) { // callhome running on a different node. // sleep for some time and try again. - duration := time.Duration(r.Float64() * float64(globalCallhomeConfig.FrequencyDur())) - if duration < time.Second { + duration := max(time.Duration(r.Float64()*float64(globalCallhomeConfig.FrequencyDur())), // Make sure to sleep at least a second to avoid high CPU ticks. 
- duration = time.Second - } + time.Second) time.Sleep(duration) } }() @@ -81,6 +78,9 @@ func runCallhome(ctx context.Context, objAPI ObjectLayer) bool { ctx = lkctx.Context() defer locker.Unlock(lkctx) + // Perform callhome once and then keep running it at regular intervals. + performCallhome(ctx) + callhomeTimer := time.NewTimer(globalCallhomeConfig.FrequencyDur()) defer callhomeTimer.Stop() @@ -112,7 +112,7 @@ func performCallhome(ctx context.Context) { deadline := 10 * time.Second // Default deadline is 10secs for callhome objectAPI := newObjectLayerFn() if objectAPI == nil { - logger.LogIf(ctx, errors.New("Callhome: object layer not ready")) + internalLogIf(ctx, errors.New("Callhome: object layer not ready")) return } @@ -142,11 +142,14 @@ func performCallhome(ctx context.Context) { select { case hi, hasMore := <-healthInfoCh: if !hasMore { + auditOptions := AuditLogOptions{Event: "callhome:diagnostics"} // Received all data. Send to SUBNET and return err := sendHealthInfo(ctx, healthInfo) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to perform callhome: %w", err)) + internalLogIf(ctx, fmt.Errorf("Unable to perform callhome: %w", err)) + auditOptions.Error = err.Error() } + auditLogInternal(ctx, auditOptions) return } healthInfo = hi @@ -180,12 +183,12 @@ func createHealthJSONGzip(ctx context.Context, healthInfo madmin.HealthInfo) []b enc := json.NewEncoder(gzWriter) if e := enc.Encode(header); e != nil { - logger.LogIf(ctx, fmt.Errorf("Could not encode health info header: %w", e)) + internalLogIf(ctx, fmt.Errorf("Could not encode health info header: %w", e)) return nil } if e := enc.Encode(healthInfo); e != nil { - logger.LogIf(ctx, fmt.Errorf("Could not encode health info: %w", e)) + internalLogIf(ctx, fmt.Errorf("Could not encode health info: %w", e)) return nil } diff --git a/cmd/common-main.go b/cmd/common-main.go index 2a06e02b34c5b..e57e190f961d0 100644 --- a/cmd/common-main.go +++ b/cmd/common-main.go @@ -21,10 +21,8 @@ import ( "bufio" "bytes" "context" - "crypto/tls" "crypto/x509" "encoding/gob" - "encoding/pem" "errors" "fmt" "net" @@ -58,19 +56,28 @@ import ( "github.com/minio/minio/internal/config" "github.com/minio/minio/internal/kms" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/certs" - "github.com/minio/pkg/v2/console" - "github.com/minio/pkg/v2/ellipses" - "github.com/minio/pkg/v2/env" - xnet "github.com/minio/pkg/v2/net" + "github.com/minio/pkg/v3/certs" + "github.com/minio/pkg/v3/console" + "github.com/minio/pkg/v3/env" + xnet "github.com/minio/pkg/v3/net" + "golang.org/x/term" ) // serverDebugLog will enable debug printing -var serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn - -var currentReleaseTime time.Time +var ( + serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn + currentReleaseTime time.Time + orchestrated = IsKubernetes() || IsDocker() +) func init() { + if !term.IsTerminal(int(os.Stdout.Fd())) || !term.IsTerminal(int(os.Stderr.Fd())) { + color.TurnOff() + } + if env.Get("NO_COLOR", "") != "" || env.Get("TERM", "") == "dumb" { + color.TurnOff() + } + if runtime.GOOS == "windows" { if mousetrap.StartedByExplorer() { fmt.Printf("Don't double-click %s\n", os.Args[0]) @@ -85,9 +92,6 @@ func init() { logger.Init(GOPATH, GOROOT) logger.RegisterError(config.FmtError) - globalBatchJobsMetrics = batchJobMetrics{metrics: make(map[string]*batchJobInfo)} - go globalBatchJobsMetrics.purgeJobMetrics() - t, _ := minioVersionToReleaseTime(Version) if !t.IsZero() { 
globalVersionUnix = uint64(t.Unix()) @@ -101,7 +105,7 @@ func init() { gob.Register(madmin.TimeInfo{}) gob.Register(madmin.XFSErrorConfigs{}) gob.Register(map[string]string{}) - gob.Register(map[string]interface{}{}) + gob.Register(map[string]any{}) // All minio-go and madmin-go API operations shall be performed only once, // another way to look at this is we are turning off retries. @@ -129,6 +133,12 @@ func minioConfigToConsoleFeatures() { os.Setenv("CONSOLE_LOG_QUERY_AUTH_TOKEN", value) } } + if value := env.Get(config.EnvBrowserRedirectURL, ""); value != "" { + os.Setenv("CONSOLE_BROWSER_REDIRECT_URL", value) + } + if value := env.Get(config.EnvConsoleDebugLogLevel, ""); value != "" { + os.Setenv("CONSOLE_DEBUG_LOGLEVEL", value) + } // pass the console subpath configuration if globalBrowserRedirectURL != nil { subPath := path.Clean(pathJoin(strings.TrimSpace(globalBrowserRedirectURL.Path), SlashSeparator)) @@ -167,7 +177,10 @@ func minioConfigToConsoleFeatures() { os.Setenv("CONSOLE_STS_DURATION", valueSession) } - os.Setenv("CONSOLE_MINIO_REGION", globalSite.Region) + os.Setenv("CONSOLE_MINIO_SITE_NAME", globalSite.Name()) + os.Setenv("CONSOLE_MINIO_SITE_REGION", globalSite.Region()) + os.Setenv("CONSOLE_MINIO_REGION", globalSite.Region()) + os.Setenv("CONSOLE_CERT_PASSWD", env.Get("MINIO_CERT_PASSWD", "")) // This section sets Browser (console) stored config @@ -245,7 +258,7 @@ func initConsoleServer() (*consoleapi.Server, error) { if !serverDebugLog { // Disable console logging if server debug log is not enabled - noLog := func(string, ...interface{}) {} + noLog := func(string, ...any) {} consoleapi.LogInfo = noLog consoleapi.LogError = noLog @@ -361,8 +374,16 @@ func buildServerCtxt(ctx *cli.Context, ctxt *serverCtxt) (err error) { ctxt.ConsoleAddr = ctx.String("console-address") } + if cxml := ctx.String("crossdomain-xml"); cxml != "" { + buf, err := os.ReadFile(cxml) + if err != nil { + return err + } + ctxt.CrossDomainXML = string(buf) + } + // Check "no-compat" flag from command line argument. 
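The new init logic above turns off colored output when stdout or stderr is not a terminal, or when NO_COLOR or TERM=dumb is set. A standalone sketch of that detection using golang.org/x/term (the same package the diff imports):

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/term"
    )

    // colorAllowed reports whether colored output should be emitted.
    func colorAllowed() bool {
        if !term.IsTerminal(int(os.Stdout.Fd())) || !term.IsTerminal(int(os.Stderr.Fd())) {
            return false
        }
        if os.Getenv("NO_COLOR") != "" || os.Getenv("TERM") == "dumb" {
            return false
        }
        return true
    }

    func main() {
        fmt.Println("color:", colorAllowed())
    }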
- ctxt.StrictS3Compat = !(ctx.IsSet("no-compat") || ctx.GlobalIsSet("no-compat")) + ctxt.StrictS3Compat = !ctx.IsSet("no-compat") && !ctx.GlobalIsSet("no-compat") switch { case ctx.IsSet("config-dir"): @@ -382,20 +403,37 @@ func buildServerCtxt(ctx *cli.Context, ctxt *serverCtxt) (err error) { ctxt.certsDirSet = true } + memAvailable := availableMemory() + if ctx.IsSet("memlimit") || ctx.GlobalIsSet("memlimit") { + memlimit := ctx.String("memlimit") + if memlimit == "" { + memlimit = ctx.GlobalString("memlimit") + } + mlimit, err := humanize.ParseBytes(memlimit) + if err != nil { + return err + } + if mlimit > memAvailable { + logger.Info("WARNING: maximum memory available (%s) smaller than specified --memlimit=%s, ignoring --memlimit value", + humanize.IBytes(memAvailable), memlimit) + } + ctxt.MemLimit = mlimit + } else { + ctxt.MemLimit = memAvailable + } + + if memAvailable < ctxt.MemLimit { + ctxt.MemLimit = memAvailable + } + ctxt.FTP = ctx.StringSlice("ftp") ctxt.SFTP = ctx.StringSlice("sftp") - ctxt.Interface = ctx.String("interface") ctxt.UserTimeout = ctx.Duration("conn-user-timeout") - ctxt.ConnReadDeadline = ctx.Duration("conn-read-deadline") - ctxt.ConnWriteDeadline = ctx.Duration("conn-write-deadline") - ctxt.ConnClientReadDeadline = ctx.Duration("conn-client-read-deadline") - ctxt.ConnClientWriteDeadline = ctx.Duration("conn-client-write-deadline") - - ctxt.ShutdownTimeout = ctx.Duration("shutdown-timeout") + ctxt.SendBufSize = ctx.Int("send-buf-size") + ctxt.RecvBufSize = ctx.Int("recv-buf-size") ctxt.IdleTimeout = ctx.Duration("idle-timeout") - ctxt.ReadHeaderTimeout = ctx.Duration("read-header-timeout") - ctxt.MaxIdleConnsPerHost = ctx.Int("max-idle-conns-per-host") + ctxt.UserTimeout = ctx.Duration("conn-user-timeout") if conf := ctx.String("config"); len(conf) > 0 { err = mergeServerCtxtFromConfigFile(conf, ctxt) @@ -424,25 +462,27 @@ func handleCommonArgs(ctxt serverCtxt) { certsDir := ctxt.CertsDir certsSet := ctxt.certsDirSet - if consoleAddr == "" { - p, err := xnet.GetFreePort() - if err != nil { - logger.FatalIf(err, "Unable to get free port for Console UI on the host") - } - // hold the port - l, err := net.Listen("TCP", fmt.Sprintf(":%s", p.String())) - if err == nil { - defer l.Close() + if globalBrowserEnabled { + if consoleAddr == "" { + p, err := xnet.GetFreePort() + if err != nil { + logger.FatalIf(err, "Unable to get free port for Console UI on the host") + } + // hold the port + l, err := net.Listen("TCP", fmt.Sprintf(":%s", p.String())) + if err == nil { + defer l.Close() + } + consoleAddr = net.JoinHostPort("", p.String()) } - consoleAddr = net.JoinHostPort("", p.String()) - } - if _, _, err := net.SplitHostPort(consoleAddr); err != nil { - logger.FatalIf(err, "Unable to start listening on console port") - } + if _, _, err := net.SplitHostPort(consoleAddr); err != nil { + logger.FatalIf(err, "Unable to start listening on console port") + } - if consoleAddr == addr { - logger.FatalIf(errors.New("--console-address cannot be same as --address"), "Unable to start the server") + if consoleAddr == addr { + logger.FatalIf(errors.New("--console-address cannot be same as --address"), "Unable to start the server") + } } globalMinioHost, globalMinioPort = mustSplitHostPort(addr) @@ -455,7 +495,9 @@ func handleCommonArgs(ctxt serverCtxt) { globalDynamicAPIPort = true } - globalMinioConsoleHost, globalMinioConsolePort = mustSplitHostPort(consoleAddr) + if globalBrowserEnabled { + globalMinioConsoleHost, globalMinioConsolePort = mustSplitHostPort(consoleAddr) + } if 
globalMinioPort == globalMinioConsolePort { logger.FatalIf(errors.New("--console-address port cannot be same as --address port"), "Unable to start the server") @@ -486,7 +528,11 @@ func runDNSCache(ctx *cli.Context) { dnsTTL := ctx.Duration("dns-cache-ttl") // Check if we have configured a custom DNS cache TTL. if dnsTTL <= 0 { - dnsTTL = 10 * time.Minute + if orchestrated { + dnsTTL = 30 * time.Second + } else { + dnsTTL = 10 * time.Minute + } } // Call to refresh will refresh names in cache. @@ -644,16 +690,6 @@ func loadEnvVarsFromFiles() { } } - if env.IsSet(kms.EnvKMSSecretKeyFile) { - kmsSecret, err := readFromSecret(env.Get(kms.EnvKMSSecretKeyFile, "")) - if err != nil { - logger.Fatal(err, "Unable to read the KMS secret key inherited from secret file") - } - if kmsSecret != "" { - os.Setenv(kms.EnvKMSSecretKey, kmsSecret) - } - } - if env.IsSet(config.EnvConfigEnvFile) { ekvs, err := minioEnvironFromFile(env.Get(config.EnvConfigEnvFile, "")) if err != nil && !os.IsNotExist(err) { @@ -665,12 +701,16 @@ func loadEnvVarsFromFiles() { } } -func serverHandleEnvVars() { +func serverHandleEarlyEnvVars() { var err error globalBrowserEnabled, err = config.ParseBool(env.Get(config.EnvBrowser, config.EnableOn)) if err != nil { logger.Fatal(config.ErrInvalidBrowserValue(err), "Invalid MINIO_BROWSER value in environment variable") } +} + +func serverHandleEnvVars() { + var err error if globalBrowserEnabled { if redirectURL := env.Get(config.EnvBrowserRedirectURL, ""); redirectURL != "" { u, err := xnet.ParseHTTPURL(redirectURL) @@ -678,9 +718,7 @@ func serverHandleEnvVars() { logger.Fatal(err, "Invalid MINIO_BROWSER_REDIRECT_URL value in environment variable") } // Look for if URL has invalid values and return error. - if !((u.Scheme == "http" || u.Scheme == "https") && - u.Opaque == "" && - !u.ForceQuery && u.RawQuery == "" && u.Fragment == "") { + if !isValidURLEndpoint((*url.URL)(u)) { err := fmt.Errorf("URL contains unexpected resources, expected URL to be one of http(s)://console.example.com or as a subpath via API endpoint http(s)://minio.example.com/minio format: %v", u) logger.Fatal(err, "Invalid MINIO_BROWSER_REDIRECT_URL value is environment variable") } @@ -695,9 +733,7 @@ func serverHandleEnvVars() { logger.Fatal(err, "Invalid MINIO_SERVER_URL value in environment variable") } // Look for if URL has invalid values and return error. 
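buildServerCtxt above accepts a human-readable --memlimit value, warns when it exceeds what the host offers, and clamps the effective limit to available memory. A sketch of that parse-and-clamp step, assuming go-humanize for the size parsing and a caller-supplied available-memory figure:

    package main

    import (
        "fmt"

        "github.com/dustin/go-humanize"
    )

    // effectiveMemLimit parses a value such as "4GiB" and caps it at available.
    func effectiveMemLimit(flag string, available uint64) (uint64, error) {
        if flag == "" {
            return available, nil
        }
        limit, err := humanize.ParseBytes(flag)
        if err != nil {
            return 0, err
        }
        if limit > available {
            fmt.Printf("WARNING: only %s available, ignoring --memlimit=%s\n",
                humanize.IBytes(available), flag)
            return available, nil
        }
        return limit, nil
    }

    func main() {
        limit, err := effectiveMemLimit("4GiB", 8<<30)
        fmt.Println(limit, err)
    }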
- if !((u.Scheme == "http" || u.Scheme == "https") && - (u.Path == "/" || u.Path == "") && u.Opaque == "" && - !u.ForceQuery && u.RawQuery == "" && u.Fragment == "") { + if !isValidURLEndpoint((*url.URL)(u)) { err := fmt.Errorf("URL contains unexpected resources, expected URL to be of http(s)://minio.example.com format: %v", u) logger.Fatal(err, "Invalid MINIO_SERVER_URL value is environment variable") } @@ -725,9 +761,9 @@ func serverHandleEnvVars() { domains := env.Get(config.EnvDomain, "") if len(domains) != 0 { - for _, domainName := range strings.Split(domains, config.ValueSeparator) { + for domainName := range strings.SplitSeq(domains, config.ValueSeparator) { if _, ok := dns2.IsDomainName(domainName); !ok { - logger.Fatal(config.ErrInvalidDomainValue(nil).Msg("Unknown value `%s`", domainName), + logger.Fatal(config.ErrInvalidDomainValue(nil).Msgf("Unknown value `%s`", domainName), "Invalid MINIO_DOMAIN value in environment variable") } globalDomainNames = append(globalDomainNames, domainName) @@ -736,7 +772,7 @@ func serverHandleEnvVars() { lcpSuf := lcpSuffix(globalDomainNames) for _, domainName := range globalDomainNames { if domainName == lcpSuf && len(globalDomainNames) > 1 { - logger.Fatal(config.ErrOverlappingDomainValue(nil).Msg("Overlapping domains `%s` not allowed", globalDomainNames), + logger.Fatal(config.ErrOverlappingDomainValue(nil).Msgf("Overlapping domains `%s` not allowed", globalDomainNames), "Invalid MINIO_DOMAIN value in environment variable") } } @@ -749,12 +785,7 @@ func serverHandleEnvVars() { for _, endpoint := range minioEndpoints { if net.ParseIP(endpoint) == nil { // Checking if the IP is a DNS entry. - lookupHost := globalDNSCache.LookupHost - if IsKubernetes() || IsDocker() { - lookupHost = net.DefaultResolver.LookupHost - } - - addrs, err := lookupHost(GlobalContext, endpoint) + addrs, err := globalDNSCache.LookupHost(GlobalContext, endpoint) if err != nil { logger.FatalIf(err, "Unable to initialize MinIO server with [%s] invalid entry found in MINIO_PUBLIC_IPS", endpoint) } @@ -798,174 +829,118 @@ func serverHandleEnvVars() { } } - globalDisableFreezeOnBoot = env.Get("_MINIO_DISABLE_API_FREEZE_ON_BOOT", "") == "true" || serverDebugLog + globalEnableSyncBoot = env.Get("MINIO_SYNC_BOOT", config.EnableOff) == config.EnableOn } -func loadRootCredentials() { +func loadRootCredentials() auth.Credentials { // At this point, either both environment variables // are defined or both are not defined. 
// Check both cases and authenticate them if correctly defined var user, password string - var hasCredentials bool + var legacyCredentials bool //nolint:gocritic if env.IsSet(config.EnvRootUser) && env.IsSet(config.EnvRootPassword) { user = env.Get(config.EnvRootUser, "") password = env.Get(config.EnvRootPassword, "") - hasCredentials = true } else if env.IsSet(config.EnvAccessKey) && env.IsSet(config.EnvSecretKey) { user = env.Get(config.EnvAccessKey, "") password = env.Get(config.EnvSecretKey, "") - hasCredentials = true + legacyCredentials = true } else if globalServerCtxt.RootUser != "" && globalServerCtxt.RootPwd != "" { user, password = globalServerCtxt.RootUser, globalServerCtxt.RootPwd - hasCredentials = true } - if hasCredentials { - cred, err := auth.CreateCredentials(user, password) - if err != nil { + if user == "" || password == "" { + return auth.Credentials{} + } + cred, err := auth.CreateCredentials(user, password) + if err != nil { + if legacyCredentials { logger.Fatal(config.ErrInvalidCredentials(err), "Unable to validate credentials inherited from the shell environment") + } else { + logger.Fatal(config.ErrInvalidRootUserCredentials(err), + "Unable to validate credentials inherited from the shell environment") } - if env.IsSet(config.EnvAccessKey) && env.IsSet(config.EnvSecretKey) { - msg := fmt.Sprintf("WARNING: %s and %s are deprecated.\n"+ - " Please use %s and %s", - config.EnvAccessKey, config.EnvSecretKey, - config.EnvRootUser, config.EnvRootPassword) - logger.Info(color.RedBold(msg)) - } - globalActiveCred = cred - globalCredViaEnv = true - } else { - globalActiveCred = auth.DefaultCredentials } + if env.IsSet(config.EnvAccessKey) && env.IsSet(config.EnvSecretKey) { + msg := fmt.Sprintf("WARNING: %s and %s are deprecated.\n"+ + " Please use %s and %s", + config.EnvAccessKey, config.EnvSecretKey, + config.EnvRootUser, config.EnvRootPassword) + logger.Info(color.RedBold(msg)) + } + globalCredViaEnv = true + return cred } -// Initialize KMS global variable after valiadating and loading the configuration. -// It depends on KMS env variables and global cli flags. -func handleKMSConfig() { - if env.IsSet(kms.EnvKMSSecretKey) && env.IsSet(kms.EnvKESEndpoint) { - logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKMSSecretKey, kms.EnvKESEndpoint)) +// autoGenerateRootCredentials generates root credentials deterministically if +// a KMS is configured, no manual credentials have been specified and if root +// access is disabled. +func autoGenerateRootCredentials() auth.Credentials { + if GlobalKMS == nil { + return globalActiveCred } - if env.IsSet(kms.EnvKMSSecretKey) { - KMS, err := kms.Parse(env.Get(kms.EnvKMSSecretKey, "")) - if err != nil { - logger.Fatal(err, "Unable to parse the KMS secret key inherited from the shell environment") - } - GlobalKMS = KMS + aKey, err := GlobalKMS.MAC(GlobalContext, &kms.MACRequest{Message: []byte("root access key")}) + if IsErrIgnored(err, kes.ErrNotAllowed, kms.ErrNotSupported, errors.ErrUnsupported, kms.ErrPermission) { + // If we don't have permission to compute the HMAC, don't change the cred. 
+ return globalActiveCred + } + if err != nil { + logger.Fatal(err, "Unable to generate root access key using KMS") } - if env.IsSet(kms.EnvKESEndpoint) { - if env.IsSet(kms.EnvKESAPIKey) { - if env.IsSet(kms.EnvKESClientKey) { - logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKESAPIKey, kms.EnvKESClientKey)) - } - if env.IsSet(kms.EnvKESClientCert) { - logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKESAPIKey, kms.EnvKESClientCert)) - } - } - if !env.IsSet(kms.EnvKESKeyName) { - logger.Fatal(errors.New("Invalid KES configuration"), fmt.Sprintf("The mandatory environment variable %q not set", kms.EnvKESKeyName)) - } - var endpoints []string - for _, endpoint := range strings.Split(env.Get(kms.EnvKESEndpoint, ""), ",") { - if strings.TrimSpace(endpoint) == "" { - continue - } - if !ellipses.HasEllipses(endpoint) { - endpoints = append(endpoints, endpoint) - continue - } - patterns, err := ellipses.FindEllipsesPatterns(endpoint) - if err != nil { - logger.Fatal(err, fmt.Sprintf("Invalid KES endpoint %q", endpoint)) - } - for _, lbls := range patterns.Expand() { - endpoints = append(endpoints, strings.Join(lbls, "")) - } - } - rootCAs, err := certs.GetRootCAs(env.Get(kms.EnvKESServerCA, globalCertsCADir.Get())) - if err != nil { - logger.Fatal(err, fmt.Sprintf("Unable to load X.509 root CAs for KES from %q", env.Get(kms.EnvKESServerCA, globalCertsCADir.Get()))) - } + sKey, err := GlobalKMS.MAC(GlobalContext, &kms.MACRequest{Message: []byte("root secret key")}) + if err != nil { + // Here, we must have permission. Otherwise, we would have failed earlier. + logger.Fatal(err, "Unable to generate root secret key using KMS") + } - var kmsConf kms.Config - if env.IsSet(kms.EnvKESAPIKey) { - key, err := kes.ParseAPIKey(env.Get(kms.EnvKESAPIKey, "")) - if err != nil { - logger.Fatal(err, fmt.Sprintf("Failed to parse KES API key from %q", env.Get(kms.EnvKESAPIKey, ""))) - } - kmsConf = kms.Config{ - Endpoints: endpoints, - DefaultKeyID: env.Get(kms.EnvKESKeyName, ""), - APIKey: key, - RootCAs: rootCAs, - } - } else { - loadX509KeyPair := func(certFile, keyFile string) (tls.Certificate, error) { - // Manually load the certificate and private key into memory. - // We need to check whether the private key is encrypted, and - // if so, decrypt it using the user-provided password. 
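autoGenerateRootCredentials above derives a deterministic root access/secret key pair by asking the KMS for two HMACs and feeding them into the credential generators as the randomness source, so every node sharing the same KMS key arrives at the same credentials. A simplified sketch of the derivation, using a plain HMAC in place of the KMS call and hypothetical helper names:

    package main

    import (
        "bytes"
        "crypto/hmac"
        "crypto/sha256"
        "encoding/base64"
        "fmt"
        "io"
    )

    // deriveKey stands in for the KMS MAC: the same secret and message always
    // yield the same bytes, so the resulting credentials are reproducible.
    func deriveKey(secret []byte, message string) []byte {
        m := hmac.New(sha256.New, secret)
        m.Write([]byte(message))
        return m.Sum(nil)
    }

    // credentialFrom consumes deterministic "randomness" and encodes a key of length n.
    func credentialFrom(seed []byte, n int) string {
        buf := make([]byte, n)
        if _, err := io.ReadFull(bytes.NewReader(seed), buf); err != nil {
            panic(err)
        }
        return base64.RawURLEncoding.EncodeToString(buf)[:n]
    }

    func main() {
        secret := []byte("kms-backed-secret")
        access := credentialFrom(deriveKey(secret, "root access key"), 20)
        secretKey := credentialFrom(deriveKey(secret, "root secret key"), 32)
        fmt.Println(access, secretKey)
    }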
- certBytes, err := os.ReadFile(certFile) - if err != nil { - return tls.Certificate{}, fmt.Errorf("Unable to load KES client certificate as specified by the shell environment: %v", err) - } - keyBytes, err := os.ReadFile(keyFile) - if err != nil { - return tls.Certificate{}, fmt.Errorf("Unable to load KES client private key as specified by the shell environment: %v", err) - } - privateKeyPEM, rest := pem.Decode(bytes.TrimSpace(keyBytes)) - if len(rest) != 0 { - return tls.Certificate{}, errors.New("Unable to load KES client private key as specified by the shell environment: private key contains additional data") - } - if x509.IsEncryptedPEMBlock(privateKeyPEM) { - keyBytes, err = x509.DecryptPEMBlock(privateKeyPEM, []byte(env.Get(kms.EnvKESClientPassword, ""))) - if err != nil { - return tls.Certificate{}, fmt.Errorf("Unable to decrypt KES client private key as specified by the shell environment: %v", err) - } - keyBytes = pem.EncodeToMemory(&pem.Block{Type: privateKeyPEM.Type, Bytes: keyBytes}) - } - certificate, err := tls.X509KeyPair(certBytes, keyBytes) - if err != nil { - return tls.Certificate{}, fmt.Errorf("Unable to load KES client certificate as specified by the shell environment: %v", err) - } - return certificate, nil - } + accessKey, err := auth.GenerateAccessKey(20, bytes.NewReader(aKey)) + if err != nil { + logger.Fatal(err, "Unable to generate root access key") + } + secretKey, err := auth.GenerateSecretKey(32, bytes.NewReader(sKey)) + if err != nil { + logger.Fatal(err, "Unable to generate root secret key") + } - reloadCertEvents := make(chan tls.Certificate, 1) - certificate, err := certs.NewCertificate(env.Get(kms.EnvKESClientCert, ""), env.Get(kms.EnvKESClientKey, ""), loadX509KeyPair) - if err != nil { - logger.Fatal(err, "Failed to load KES client certificate") - } - certificate.Watch(context.Background(), 15*time.Minute, syscall.SIGHUP) - certificate.Notify(reloadCertEvents) - - kmsConf = kms.Config{ - Endpoints: endpoints, - DefaultKeyID: env.Get(kms.EnvKESKeyName, ""), - Certificate: certificate, - ReloadCertEvents: reloadCertEvents, - RootCAs: rootCAs, - } - } + logger.Info("Automatically generated root access key and secret key with the KMS") + return auth.Credentials{ + AccessKey: accessKey, + SecretKey: secretKey, + } +} - KMS, err := kms.NewWithConfig(kmsConf) - if err != nil { - logger.Fatal(err, "Unable to initialize a connection to KES as specified by the shell environment") - } - // We check that the default key ID exists or try to create it otherwise. - // This implicitly checks that we can communicate to KES. We don't treat - // a policy error as failure condition since MinIO may not have the permission - // to create keys - just to generate/decrypt data encryption keys. - if err = KMS.CreateKey(context.Background(), env.Get(kms.EnvKESKeyName, "")); err != nil && !errors.Is(err, kes.ErrKeyExists) && !errors.Is(err, kes.ErrNotAllowed) { - logger.Fatal(err, "Unable to initialize a connection to KES as specified by the shell environment") - } - GlobalKMS = KMS +// Initialize KMS global variable after validating and loading the configuration. +// It depends on KMS env variables and global cli flags. 
+func handleKMSConfig() { + present, err := kms.IsPresent() + if err != nil { + logger.Fatal(err, "Invalid KMS configuration specified") + } + if !present { + return + } + + KMS, err := kms.Connect(GlobalContext, &kms.ConnectionOptions{ + CADir: globalCertsCADir.Get(), + }) + if err != nil { + logger.Fatal(err, "Failed to connect to KMS") + } + + if _, err = KMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{}); errors.Is(err, kms.ErrKeyNotFound) { + err = KMS.CreateKey(GlobalContext, &kms.CreateKeyRequest{Name: KMS.DefaultKey}) + } + if err != nil && !errors.Is(err, kms.ErrKeyExists) && !errors.Is(err, kms.ErrPermission) { + logger.Fatal(err, "Failed to connect to KMS") } + GlobalKMS = KMS } func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secureConn bool, err error) { - if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) { + if !isFile(getPublicCertFile()) || !isFile(getPrivateKeyFile()) { return nil, nil, false, nil } @@ -1035,7 +1010,7 @@ func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secu } if err = manager.AddCertificate(certFile, keyFile); err != nil { err = fmt.Errorf("Unable to load TLS certificate '%s,%s': %w", certFile, keyFile, err) - logger.LogIf(GlobalContext, err, logger.ErrorKind) + bootLogIf(GlobalContext, err, logger.ErrorKind) } } secureConn = true @@ -1084,6 +1059,6 @@ func (a bgCtx) Deadline() (deadline time.Time, ok bool) { return time.Time{}, false } -func (a bgCtx) Value(key interface{}) interface{} { +func (a bgCtx) Value(key any) any { return a.parent.Value(key) } diff --git a/cmd/common-main_test.go b/cmd/common-main_test.go index f7516eac0491a..9757267d28624 100644 --- a/cmd/common-main_test.go +++ b/cmd/common-main_test.go @@ -43,9 +43,8 @@ func Test_readFromSecret(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { - tmpfile, err := os.CreateTemp("", "testfile") + tmpfile, err := os.CreateTemp(t.TempDir(), "testfile") if err != nil { t.Error(err) } @@ -155,9 +154,8 @@ MINIO_ROOT_PASSWORD=minio123`, }, } for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { - tmpfile, err := os.CreateTemp("", "testfile") + tmpfile, err := os.CreateTemp(t.TempDir(), "testfile") if err != nil { t.Error(err) } diff --git a/cmd/config-current.go b/cmd/config-current.go index d84d823bf9682..a87e2876a6867 100644 --- a/cmd/config-current.go +++ b/cmd/config-current.go @@ -18,23 +18,19 @@ package cmd import ( - "bytes" "context" "errors" "fmt" + "maps" "strings" "sync" - "github.com/minio/kms-go/kes" - "github.com/minio/minio/internal/auth" "github.com/minio/minio/internal/config/browser" - "github.com/minio/minio/internal/kms" "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/config" "github.com/minio/minio/internal/config/api" "github.com/minio/minio/internal/config/batch" - "github.com/minio/minio/internal/config/cache" "github.com/minio/minio/internal/config/callhome" "github.com/minio/minio/internal/config/compress" "github.com/minio/minio/internal/config/dns" @@ -56,7 +52,7 @@ import ( "github.com/minio/minio/internal/crypto" xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) func initHelp() { @@ -80,16 +76,11 @@ func initHelp() { config.CallhomeSubSys: callhome.DefaultKVS, config.DriveSubSys: drive.DefaultKVS, config.ILMSubSys: ilm.DefaultKVS, - config.CacheSubSys: cache.DefaultKVS, 
config.BatchSubSys: batch.DefaultKVS, config.BrowserSubSys: browser.DefaultKVS, } - for k, v := range notify.DefaultNotificationKVS { - kvs[k] = v - } - for k, v := range lambda.DefaultLambdaKVS { - kvs[k] = v - } + maps.Copy(kvs, notify.DefaultNotificationKVS) + maps.Copy(kvs, lambda.DefaultLambdaKVS) if globalIsErasure { kvs[config.StorageClassSubSys] = storageclass.DefaultKVS kvs[config.HealSubSys] = heal.DefaultKVS @@ -101,7 +92,7 @@ func initHelp() { config.HelpKV{ Key: config.SubnetSubSys, Type: "string", - Description: "register the cluster to MinIO SUBNET", + Description: "register Enterprise license for the cluster", Optional: true, }, config.HelpKV{ @@ -230,14 +221,13 @@ func initHelp() { Description: "persist IAM assets externally to etcd", }, config.HelpKV{ - Key: config.CacheSubSys, - Type: "string", - Description: "enable cache plugin on MinIO for GET/HEAD requests", + Key: config.BrowserSubSys, + Description: "manage Browser HTTP specific features, such as Security headers, etc.", Optional: true, }, config.HelpKV{ - Key: config.BrowserSubSys, - Description: "manage Browser HTTP specific features, such as Security headers, etc.", + Key: config.ILMSubSys, + Description: "manage ILM settings for expiration and transition workers", Optional: true, }, } @@ -286,8 +276,8 @@ func initHelp() { config.SubnetSubSys: subnet.HelpSubnet, config.CallhomeSubSys: callhome.HelpCallhome, config.DriveSubSys: drive.HelpDrive, - config.CacheSubSys: cache.Help, config.BrowserSubSys: browser.Help, + config.ILMSubSys: ilm.Help, } config.RegisterHelpSubSys(helpMap) @@ -362,7 +352,9 @@ func validateSubSysConfig(ctx context.Context, s config.Config, subSys string, o } case config.IdentityOpenIDSubSys: if _, err := openid.LookupConfig(s, - NewHTTPTransport(), xhttp.DrainBody, globalSite.Region); err != nil { + xhttp.WithUserAgent(NewHTTPTransport(), func() string { + return getUserAgent(getMinioMode()) + }), xhttp.DrainBody, globalSite.Region()); err != nil { return err } case config.IdentityLDAPSubSys: @@ -383,7 +375,7 @@ func validateSubSysConfig(ctx context.Context, s config.Config, subSys string, o } case config.IdentityPluginSubSys: if _, err := idplugin.LookupConfig(s[config.IdentityPluginSubSys][config.Default], - NewHTTPTransport(), xhttp.DrainBody, globalSite.Region); err != nil { + NewHTTPTransport(), xhttp.DrainBody, globalSite.Region()); err != nil { return err } case config.SubnetSubSys: @@ -403,10 +395,6 @@ func validateSubSysConfig(ctx context.Context, s config.Config, subSys string, o if _, err := drive.LookupConfig(s[config.DriveSubSys][config.Default]); err != nil { return err } - case config.CacheSubSys: - if _, err := cache.LookupConfig(s[config.CacheSubSys][config.Default], globalRemoteTargetTransport); err != nil { - return err - } case config.PolicyOPASubSys: // In case legacy OPA config is being set, we treat it as if the // AuthZPlugin is being set. 
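The hunk above replaces the hand-written key-by-key copy loops with maps.Copy from Go's standard maps package (Go 1.21+). As a rough standalone sketch of that pattern, using placeholder names rather than MinIO's actual config types:

package main

import (
	"fmt"
	"maps"
)

func main() {
	// Destination map that already holds some defaults.
	kvs := map[string]string{"api": "on"}

	// Additional per-subsystem defaults to merge in (illustrative values only).
	notifyDefaults := map[string]string{"notify_webhook": "off"}
	lambdaDefaults := map[string]string{"lambda_webhook": "off"}

	// maps.Copy copies every key/value pair from src into dst, overwriting
	// existing keys - equivalent to the removed `for k, v := range src { dst[k] = v }` loops.
	maps.Copy(kvs, notifyDefaults)
	maps.Copy(kvs, lambdaDefaults)

	fmt.Println(kvs)
}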
@@ -479,7 +467,7 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) { dnsURL, dnsUser, dnsPass, err := env.LookupEnv(config.EnvDNSWebhook) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err)) } if err == nil && dnsURL != "" { bootstrapTraceMsg("initialize remote bucket DNS store") @@ -487,27 +475,27 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) { dns.Authentication(dnsUser, dnsPass), dns.RootCAs(globalRootCAs)) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err)) } } etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err)) } if etcdCfg.Enabled { bootstrapTraceMsg("initialize etcd store") globalEtcdClient, err = etcd.New(etcdCfg) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err)) } if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil { if globalDNSConfig != nil { // if global DNS is already configured, indicate with a warning, in case // users are confused. - logger.LogIf(ctx, fmt.Errorf("DNS store is already configured with %s, etcd is not used for DNS store", globalDNSConfig)) + configLogIf(ctx, fmt.Errorf("DNS store is already configured with %s, etcd is not used for DNS store", globalDNSConfig)) } else { globalDNSConfig, err = dns.NewCoreDNS(etcdCfg.Config, dns.DomainNames(globalDomainNames), @@ -516,7 +504,7 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) { dns.CoreDNSPath(etcdCfg.CoreDNSPath), ) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w", + configLogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w", globalDomainNames, err)) } } @@ -530,10 +518,11 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) { // but not federation. 
globalBucketFederation = etcdCfg.PathPrefix == "" && etcdCfg.Enabled - globalSite, err = config.LookupSite(s[config.SiteSubSys][config.Default], s[config.RegionSubSys][config.Default]) + siteCfg, err := config.LookupSite(s[config.SiteSubSys][config.Default], s[config.RegionSubSys][config.Default]) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Invalid site configuration: %w", err)) + configLogIf(ctx, fmt.Errorf("Invalid site configuration: %w", err)) } + globalSite.Update(siteCfg) globalAutoEncryption = crypto.LookupAutoEncryption() // Enable auto-encryption if enabled if globalAutoEncryption && GlobalKMS == nil { @@ -545,19 +534,19 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) { bootstrapTraceMsg("initialize the event notification targets") globalNotifyTargetList, err = notify.FetchEnabledTargets(GlobalContext, s, transport) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err)) } bootstrapTraceMsg("initialize the lambda targets") globalLambdaTargetList, err = lambda.FetchEnabledTargets(GlobalContext, s, transport) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize lambda target(s): %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to initialize lambda target(s): %w", err)) } bootstrapTraceMsg("applying the dynamic configuration") // Apply dynamic config values if err := applyDynamicConfig(ctx, objAPI, s); err != nil { - logger.LogIf(ctx, err) + configLogIf(ctx, err) } } @@ -566,21 +555,17 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf return errServerNotInitialized } + var errs []error setDriveCounts := objAPI.SetDriveCounts() switch subSys { case config.APISubSys: apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default]) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err)) + configLogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err)) } - globalAPIConfig.init(apiConfig, setDriveCounts) - autoGenerateRootCredentials() // Generate the KMS root credentials here since we don't know whether API root access is disabled until now. - - // Initialize remote instance transport once. 
- getRemoteInstanceTransportOnce.Do(func() { - getRemoteInstanceTransport = NewHTTPTransportWithTimeout(apiConfig.RemoteTransportDeadline) - }) + globalAPIConfig.init(apiConfig, setDriveCounts, objAPI.Legacy()) + setRemoteInstanceTransport(NewHTTPTransportWithTimeout(apiConfig.RemoteTransportDeadline)) case config.CompressionSubSys: cmpCfg, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default]) if err != nil { @@ -592,95 +577,96 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf case config.HealSubSys: healCfg, err := heal.LookupConfig(s[config.HealSubSys][config.Default]) if err != nil { - return fmt.Errorf("Unable to apply heal config: %w", err) + errs = append(errs, fmt.Errorf("Unable to apply heal config: %w", err)) + } else { + globalHealConfig.Update(healCfg) } - globalHealConfig.Update(healCfg) case config.BatchSubSys: batchCfg, err := batch.LookupConfig(s[config.BatchSubSys][config.Default]) if err != nil { - return fmt.Errorf("Unable to apply batch config: %w", err) + errs = append(errs, fmt.Errorf("Unable to apply batch config: %w", err)) + } else { + globalBatchConfig.Update(batchCfg) } - globalBatchConfig.Update(batchCfg) case config.ScannerSubSys: scannerCfg, err := scanner.LookupConfig(s[config.ScannerSubSys][config.Default]) if err != nil { - return fmt.Errorf("Unable to apply scanner config: %w", err) - } - // update dynamic scanner values. - scannerIdleMode.Store(scannerCfg.IdleMode) - scannerCycle.Store(scannerCfg.Cycle) - scannerExcessObjectVersions.Store(scannerCfg.ExcessVersions) - scannerExcessFolders.Store(scannerCfg.ExcessFolders) - logger.LogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait)) + errs = append(errs, fmt.Errorf("Unable to apply scanner config: %w", err)) + } else { + // update dynamic scanner values. 
+ scannerIdleMode.Store(scannerCfg.IdleMode) + scannerCycle.Store(scannerCfg.Cycle) + scannerExcessObjectVersions.Store(scannerCfg.ExcessVersions) + scannerExcessFolders.Store(scannerCfg.ExcessFolders) + configLogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait)) + } case config.LoggerWebhookSubSys: loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.LoggerWebhookSubSys) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to load logger webhook config: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to load logger webhook config: %w", err)) } userAgent := getUserAgent(getMinioMode()) for n, l := range loggerCfg.HTTP { if l.Enabled { - l.LogOnce = logger.LogOnceConsoleIf + l.LogOnceIf = configLogOnceConsoleIf l.UserAgent = userAgent l.Transport = NewHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey) } loggerCfg.HTTP[n] = l } - if errs := logger.UpdateSystemTargets(ctx, loggerCfg); len(errs) > 0 { - logger.LogIf(ctx, fmt.Errorf("Unable to update logger webhook config: %v", errs)) + if errs := logger.UpdateHTTPWebhooks(ctx, loggerCfg.HTTP); len(errs) > 0 { + configLogIf(ctx, fmt.Errorf("Unable to update logger webhook config: %v", errs)) } case config.AuditWebhookSubSys: loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.AuditWebhookSubSys) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to load audit webhook config: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to load audit webhook config: %w", err)) } userAgent := getUserAgent(getMinioMode()) for n, l := range loggerCfg.AuditWebhook { if l.Enabled { - l.LogOnce = logger.LogOnceConsoleIf + l.LogOnceIf = configLogOnceConsoleIf l.UserAgent = userAgent l.Transport = NewHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey) } loggerCfg.AuditWebhook[n] = l } - if errs := logger.UpdateAuditWebhookTargets(ctx, loggerCfg); len(errs) > 0 { - logger.LogIf(ctx, fmt.Errorf("Unable to update audit webhook targets: %v", errs)) + if errs := logger.UpdateAuditWebhooks(ctx, loggerCfg.AuditWebhook); len(errs) > 0 { + configLogIf(ctx, fmt.Errorf("Unable to update audit webhook targets: %v", errs)) } case config.AuditKafkaSubSys: loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.AuditKafkaSubSys) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to load audit kafka config: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to load audit kafka config: %w", err)) } for n, l := range loggerCfg.AuditKafka { if l.Enabled { if l.TLS.Enable { l.TLS.RootCAs = globalRootCAs } - l.LogOnce = logger.LogOnceIf + l.LogOnce = configLogOnceIf loggerCfg.AuditKafka[n] = l } } if errs := logger.UpdateAuditKafkaTargets(ctx, loggerCfg); len(errs) > 0 { - logger.LogIf(ctx, fmt.Errorf("Unable to update audit kafka targets: %v", errs)) + configLogIf(ctx, fmt.Errorf("Unable to update audit kafka targets: %v", errs)) } case config.StorageClassSubSys: for i, setDriveCount := range setDriveCounts { sc, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err)) break } - // if we validated all setDriveCounts and it was successful - // proceed to store the correct storage class globally. 
- if i == len(setDriveCounts)-1 { + if i == 0 { globalStorageClass.Update(sc) } } case config.SubnetSubSys: - subnetConfig, err := subnet.LookupConfig(s[config.SubnetSubSys][config.Default], globalProxyTransport) + subnetConfig, err := subnet.LookupConfig(s[config.SubnetSubSys][config.Default], globalRemoteTargetTransport) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to parse subnet configuration: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to parse subnet configuration: %w", err)) } else { globalSubnetConfig.Update(subnetConfig, globalIsCICD) globalSubnetConfig.ApplyEnv() // update environment settings for Console UI @@ -688,7 +674,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf case config.CallhomeSubSys: callhomeCfg, err := callhome.LookupConfig(s[config.CallhomeSubSys][config.Default]) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to load callhome config: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to load callhome config: %w", err)) } else { enable := callhomeCfg.Enable && !globalCallhomeConfig.Enabled() globalCallhomeConfig.Update(callhomeCfg) @@ -697,95 +683,44 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf } } case config.DriveSubSys: - if driveConfig, err := drive.LookupConfig(s[config.DriveSubSys][config.Default]); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to load drive config: %w", err)) - } else { - err := globalDriveConfig.Update(driveConfig) - if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to update drive config: %v", err)) - } - } - case config.CacheSubSys: - cacheCfg, err := cache.LookupConfig(s[config.CacheSubSys][config.Default], globalRemoteTargetTransport) + driveConfig, err := drive.LookupConfig(s[config.DriveSubSys][config.Default]) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to load cache config: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to load drive config: %w", err)) } else { - globalCacheConfig.Update(cacheCfg) + if err = globalDriveConfig.Update(driveConfig); err != nil { + configLogIf(ctx, fmt.Errorf("Unable to update drive config: %v", err)) + } } case config.BrowserSubSys: browserCfg, err := browser.LookupConfig(s[config.BrowserSubSys][config.Default]) if err != nil { - return fmt.Errorf("Unable to apply browser config: %w", err) + errs = append(errs, fmt.Errorf("Unable to apply browser config: %w", err)) + } else { + globalBrowserConfig.Update(browserCfg) } - globalBrowserConfig.Update(browserCfg) case config.ILMSubSys: ilmCfg, err := ilm.LookupConfig(s[config.ILMSubSys][config.Default]) if err != nil { - return fmt.Errorf("Unable to apply ilm config: %w", err) - } - if globalTransitionState != nil { - globalTransitionState.UpdateWorkers(ilmCfg.TransitionWorkers) - } - if globalExpiryState != nil { - globalExpiryState.ResizeWorkers(ilmCfg.ExpirationWorkers) + errs = append(errs, fmt.Errorf("Unable to apply ilm config: %w", err)) + } else { + if globalTransitionState != nil { + globalTransitionState.UpdateWorkers(ilmCfg.TransitionWorkers) + } + if globalExpiryState != nil { + globalExpiryState.ResizeWorkers(ilmCfg.ExpirationWorkers) + } + globalILMConfig.update(ilmCfg) } - globalILMConfig.update(ilmCfg) } globalServerConfigMu.Lock() defer globalServerConfigMu.Unlock() if globalServerConfig != nil { globalServerConfig[subSys] = s[subSys] } - return nil -} - -// autoGenerateRootCredentials generates root credentials deterministically if -// a KMS is configured, no manual credentials have been specified and if root -// 
access is disabled. -func autoGenerateRootCredentials() { - if GlobalKMS == nil { - return - } - if globalAPIConfig.permitRootAccess() || !globalActiveCred.Equal(auth.DefaultCredentials) { - return - } - - if manager, ok := GlobalKMS.(kms.KeyManager); ok { - stat, err := GlobalKMS.Stat(GlobalContext) - if err != nil { - logger.LogIf(GlobalContext, err, "Unable to generate root credentials using KMS") - return - } - - aKey, err := manager.HMAC(GlobalContext, stat.DefaultKey, []byte("root access key")) - if errors.Is(err, kes.ErrNotAllowed) { - return // If we don't have permission to compute the HMAC, don't change the cred. - } - if err != nil { - logger.Fatal(err, "Unable to generate root access key using KMS") - } - - sKey, err := manager.HMAC(GlobalContext, stat.DefaultKey, []byte("root secret key")) - if err != nil { - // Here, we must have permission. Otherwise, we would have failed earlier. - logger.Fatal(err, "Unable to generate root secret key using KMS") - } - - accessKey, err := auth.GenerateAccessKey(20, bytes.NewReader(aKey)) - if err != nil { - logger.Fatal(err, "Unable to generate root access key") - } - secretKey, err := auth.GenerateSecretKey(32, bytes.NewReader(sKey)) - if err != nil { - logger.Fatal(err, "Unable to generate root secret key") - } - - logger.Info("Automatically generated root access key and secret key with the KMS") - globalActiveCred = auth.Credentials{ - AccessKey: accessKey, - SecretKey: secretKey, - } + if len(errs) > 0 { + return errors.Join(errs...) } + return nil } // applyDynamicConfig will apply dynamic config values. diff --git a/cmd/config-current_test.go b/cmd/config-current_test.go index ceaae7f03890e..338cebbe8df16 100644 --- a/cmd/config-current_test.go +++ b/cmd/config-current_test.go @@ -26,7 +26,7 @@ import ( ) func TestServerConfig(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() objLayer, fsDir, err := prepareFS(ctx) @@ -39,8 +39,8 @@ func TestServerConfig(t *testing.T) { t.Fatalf("Init Test config failed") } - if globalSite.Region != globalMinioDefaultRegion { - t.Errorf("Expecting region `us-east-1` found %s", globalSite.Region) + if globalSite.Region() != globalMinioDefaultRegion { + t.Errorf("Expecting region `us-east-1` found %s", globalSite.Region()) } // Set new region and verify. 
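In the test hunks above and below, globalSite.Region changes from a struct field to a Region() accessor, matching the globalSite.Update(siteCfg) call introduced in lookupConfigs earlier in this patch. A minimal sketch of such a mutex-guarded config holder is shown here; the type and field names are illustrative assumptions, not the actual MinIO implementation.

package main

import (
	"fmt"
	"sync"
)

// siteConfig is a placeholder for values loaded from the site subsystem.
type siteConfig struct {
	region string
}

// siteHolder guards the current site config so dynamic reloads can swap it
// while readers keep calling Region() concurrently.
type siteHolder struct {
	mu  sync.RWMutex
	cfg siteConfig
}

// Update replaces the held configuration, e.g. after a config reload.
func (s *siteHolder) Update(cfg siteConfig) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.cfg = cfg
}

// Region returns the currently configured region.
func (s *siteHolder) Region() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.cfg.region
}

func main() {
	var site siteHolder
	site.Update(siteConfig{region: "us-east-1"})
	fmt.Println(site.Region())
}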
@@ -52,11 +52,11 @@ func TestServerConfig(t *testing.T) { if err != nil { t.Fatal(err) } - if site.Region != "us-west-1" { - t.Errorf("Expecting region `us-west-1` found %s", globalSite.Region) + if site.Region() != "us-west-1" { + t.Errorf("Expecting region `us-west-1` found %s", globalSite.Region()) } - if err := saveServerConfig(context.Background(), objLayer, globalServerConfig); err != nil { + if err := saveServerConfig(t.Context(), objLayer, globalServerConfig); err != nil { t.Fatalf("Unable to save updated config file %s", err) } diff --git a/cmd/config-migrate.go b/cmd/config-migrate.go index 380a58404ae8a..0cceb1859c2e2 100644 --- a/cmd/config-migrate.go +++ b/cmd/config-migrate.go @@ -33,17 +33,17 @@ import ( "github.com/minio/minio/internal/config/storageclass" "github.com/minio/minio/internal/event/target" "github.com/minio/minio/internal/logger" - xnet "github.com/minio/pkg/v2/net" - "github.com/minio/pkg/v2/quick" + xnet "github.com/minio/pkg/v3/net" + "github.com/minio/pkg/v3/quick" ) // Save config file to corresponding backend -func Save(configFile string, data interface{}) error { +func Save(configFile string, data any) error { return quick.SaveConfig(data, configFile, globalEtcdClient) } // Load config from backend -func Load(configFile string, data interface{}) (quick.Config, error) { +func Load(configFile string, data any) (quick.Config, error) { return quick.LoadConfig(configFile, globalEtcdClient, data) } diff --git a/cmd/config-versions.go b/cmd/config-versions.go index 63012afeee110..020bfa4402c2f 100644 --- a/cmd/config-versions.go +++ b/cmd/config-versions.go @@ -27,7 +27,7 @@ import ( "github.com/minio/minio/internal/config/policy/opa" "github.com/minio/minio/internal/config/storageclass" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/quick" + "github.com/minio/pkg/v3/quick" ) // FileLogger is introduced to workaround the dependency about logrus diff --git a/cmd/config.go b/cmd/config.go index 3a6dd69570443..7311d628fcffb 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -129,7 +129,7 @@ func saveServerConfigHistory(ctx context.Context, objAPI ObjectLayer, kv []byte) return saveConfig(ctx, objAPI, historyFile, kv) } -func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg interface{}) error { +func saveServerConfig(ctx context.Context, objAPI ObjectLayer, cfg any) error { data, err := json.Marshal(cfg) if err != nil { return err diff --git a/cmd/consolelogger.go b/cmd/consolelogger.go index 5bb50c537a33b..18488e192a004 100644 --- a/cmd/consolelogger.go +++ b/cmd/consolelogger.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2021 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. 
// // This file is part of MinIO Object Storage stack // @@ -20,16 +20,17 @@ package cmd import ( "container/ring" "context" + "io" "sync" "sync/atomic" "github.com/minio/madmin-go/v3" + "github.com/minio/madmin-go/v3/logger/log" "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/logger/target/console" - "github.com/minio/minio/internal/logger/target/types" + types "github.com/minio/minio/internal/logger/target/loggertypes" "github.com/minio/minio/internal/pubsub" - "github.com/minio/pkg/v2/logger/message/log" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" ) // number of log messages to buffer @@ -49,10 +50,10 @@ type HTTPConsoleLoggerSys struct { // NewConsoleLogger - creates new HTTPConsoleLoggerSys with all nodes subscribed to // the console logging pub sub system -func NewConsoleLogger(ctx context.Context) *HTTPConsoleLoggerSys { +func NewConsoleLogger(ctx context.Context, w io.Writer) *HTTPConsoleLoggerSys { return &HTTPConsoleLoggerSys{ pubsub: pubsub.New[log.Info, madmin.LogMask](8), - console: console.New(), + console: console.New(w), logBuf: ring.New(defaultLogBufferCount), } } @@ -100,7 +101,7 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan log.Info, doneCh <-chan st lastN = make([]log.Info, last) sys.RLock() - sys.logBuf.Do(func(p interface{}) { + sys.logBuf.Do(func(p any) { if p != nil { lg, ok := p.(log.Info) if ok && lg.SendLog(node, logKind) { @@ -112,7 +113,7 @@ func (sys *HTTPConsoleLoggerSys) Subscribe(subCh chan log.Info, doneCh <-chan st sys.RUnlock() // send last n console log messages in order filtered by node if cnt > 0 { - for i := 0; i < last; i++ { + for i := range last { entry := lastN[(cnt+i)%last] if (entry == log.Info{}) { continue @@ -154,7 +155,7 @@ func (sys *HTTPConsoleLoggerSys) Stats() types.TargetStats { // Content returns the console stdout log func (sys *HTTPConsoleLoggerSys) Content() (logs []log.Entry) { sys.RLock() - sys.logBuf.Do(func(p interface{}) { + sys.logBuf.Do(func(p any) { if p != nil { lg, ok := p.(log.Info) if ok { @@ -166,7 +167,7 @@ func (sys *HTTPConsoleLoggerSys) Content() (logs []log.Entry) { }) sys.RUnlock() - return + return logs } // Cancel - cancels the target @@ -180,7 +181,7 @@ func (sys *HTTPConsoleLoggerSys) Type() types.TargetType { // Send log message 'e' to console and publish to console // log pubsub system -func (sys *HTTPConsoleLoggerSys) Send(ctx context.Context, entry interface{}) error { +func (sys *HTTPConsoleLoggerSys) Send(ctx context.Context, entry any) error { var lg log.Info switch e := entry.(type) { case log.Entry: diff --git a/cmd/crossdomain-xml-handler.go b/cmd/crossdomain-xml-handler.go index 14856e6af88e2..78cd96525a819 100644 --- a/cmd/crossdomain-xml-handler.go +++ b/cmd/crossdomain-xml-handler.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2021 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. // // This file is part of MinIO Object Storage stack // @@ -32,10 +32,14 @@ const crossDomainXMLEntity = "/crossdomain.xml" // policy file that grants access to the source domain, allowing the client to continue the transaction. func setCrossDomainPolicyMiddleware(h http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + cxml := crossDomainXML + if globalServerCtxt.CrossDomainXML != "" { + cxml = globalServerCtxt.CrossDomainXML + } // Look for 'crossdomain.xml' in the incoming request. if r.URL.Path == crossDomainXMLEntity { // Write the standard cross domain policy xml. 
- w.Write([]byte(crossDomainXML)) + w.Write([]byte(cxml)) // Request completed, no need to serve to other handlers. return } diff --git a/cmd/data-scanner-metric.go b/cmd/data-scanner-metric.go index 87c70de84b0d3..db0bd21b93c6f 100644 --- a/cmd/data-scanner-metric.go +++ b/cmd/data-scanner-metric.go @@ -104,6 +104,20 @@ func (p *scannerMetrics) log(s scannerMetric, paths ...string) func(custom map[s } } +// time n scanner actions. +// Use for s < scannerMetricLastRealtime +func (p *scannerMetrics) timeN(s scannerMetric) func(n int) { + startTime := time.Now() + return func(n int) { + duration := time.Since(startTime) + + atomic.AddUint64(&p.operations[s], uint64(n)) + if s < scannerMetricLastRealtime { + p.latency[s].add(duration) + } + } +} + // time a scanner action. // Use for s < scannerMetricLastRealtime func (p *scannerMetrics) time(s scannerMetric) func() { @@ -182,7 +196,7 @@ func (p *scannerMetrics) currentPathUpdater(disk, initial string) (update func(p func (p *scannerMetrics) getCurrentPaths() []string { var res []string prefix := globalLocalNodeName + "/" - p.currentPaths.Range(func(key, value interface{}) bool { + p.currentPaths.Range(func(key, value any) bool { // We are a bit paranoid, but better miss an entry than crash. name, ok := key.(string) if !ok { @@ -205,7 +219,7 @@ func (p *scannerMetrics) getCurrentPaths() []string { // (since this is concurrent it may not be 100% reliable) func (p *scannerMetrics) activeDrives() int { var i int - p.currentPaths.Range(func(k, v interface{}) bool { + p.currentPaths.Range(func(k, v any) bool { i++ return true }) @@ -283,7 +297,7 @@ func (p *scannerMetrics) report() madmin.ScannerMetrics { m.CollectedAt = time.Now() m.ActivePaths = p.getCurrentPaths() m.LifeTimeOps = make(map[string]uint64, scannerMetricLast) - for i := scannerMetric(0); i < scannerMetricLast; i++ { + for i := range scannerMetricLast { if n := atomic.LoadUint64(&p.operations[i]); n > 0 { m.LifeTimeOps[i.String()] = n } @@ -293,7 +307,7 @@ func (p *scannerMetrics) report() madmin.ScannerMetrics { } m.LastMinute.Actions = make(map[string]madmin.TimedAction, scannerMetricLastRealtime) - for i := scannerMetric(0); i < scannerMetricLastRealtime; i++ { + for i := range scannerMetricLastRealtime { lm := p.lastMinute(i) if lm.N > 0 { m.LastMinute.Actions[i.String()] = lm.asTimedAction() diff --git a/cmd/data-scanner.go b/cmd/data-scanner.go index 505f51dc12d01..288f4d7162c4a 100644 --- a/cmd/data-scanner.go +++ b/cmd/data-scanner.go @@ -31,6 +31,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" "github.com/minio/madmin-go/v3" @@ -41,8 +42,7 @@ import ( "github.com/minio/minio/internal/config/heal" "github.com/minio/minio/internal/event" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/console" + "github.com/minio/pkg/v3/console" uatomic "go.uber.org/atomic" ) @@ -52,7 +52,7 @@ const ( dataScannerCompactLeastObject = 500 // Compact when there is less than this many objects in a branch. dataScannerCompactAtChildren = 10000 // Compact when there are this many children in a branch. dataScannerCompactAtFolders = dataScannerCompactAtChildren / 4 // Compact when this many subfolders in a single folder. - dataScannerForceCompactAtFolders = 1_000_000 // Compact when this many subfolders in a single folder (even top level). + dataScannerForceCompactAtFolders = 250_000 // Compact when this many subfolders in a single folder (even top level). 
dataScannerStartDelay = 1 * time.Minute // Time to wait on startup and between cycles. healDeleteDangling = true @@ -63,11 +63,12 @@ var ( globalHealConfig heal.Config // Sleeper values are updated when config is loaded. - scannerSleeper = newDynamicSleeper(2, time.Second, true) // Keep defaults same as config defaults - scannerCycle = uatomic.NewDuration(dataScannerStartDelay) - scannerIdleMode = uatomic.NewInt32(0) // default is throttled when idle - scannerExcessObjectVersions = uatomic.NewInt64(100) - scannerExcessFolders = uatomic.NewInt64(50000) + scannerSleeper = newDynamicSleeper(2, time.Second, true) // Keep defaults same as config defaults + scannerCycle = uatomic.NewDuration(dataScannerStartDelay) + scannerIdleMode = uatomic.NewInt32(0) // default is throttled when idle + scannerExcessObjectVersions = uatomic.NewInt64(100) + scannerExcessObjectVersionsTotalSize = uatomic.NewInt64(1024 * 1024 * 1024 * 1024) // 1 TB + scannerExcessFolders = uatomic.NewInt64(50000) ) // initDataScanner will start the scanner in the background. @@ -77,11 +78,9 @@ func initDataScanner(ctx context.Context, objAPI ObjectLayer) { // Run the data scanner in a loop for { runDataScanner(ctx, objAPI) - duration := time.Duration(r.Float64() * float64(scannerCycle.Load())) - if duration < time.Second { + duration := max(time.Duration(r.Float64()*float64(scannerCycle.Load())), // Make sure to sleep at least a second to avoid high CPU ticks. - duration = time.Second - } + time.Second) time.Sleep(duration) } }() @@ -122,13 +121,13 @@ func readBackgroundHealInfo(ctx context.Context, objAPI ObjectLayer) backgroundH buf, err := readConfig(ctx, objAPI, backgroundHealInfoPath) if err != nil { if !errors.Is(err, errConfigNotFound) { - logger.LogOnceIf(ctx, err, backgroundHealInfoPath) + internalLogOnceIf(ctx, err, backgroundHealInfoPath) } return backgroundHealInfo{} } var info backgroundHealInfo if err = json.Unmarshal(buf, &info); err != nil { - logger.LogOnceIf(ctx, err, backgroundHealInfoPath) + bugLogIf(ctx, err, backgroundHealInfoPath) } return info } @@ -140,13 +139,13 @@ func saveBackgroundHealInfo(ctx context.Context, objAPI ObjectLayer, info backgr b, err := json.Marshal(info) if err != nil { - logger.LogIf(ctx, err) + bugLogIf(ctx, err) return } // Get last healing information err = saveConfig(ctx, objAPI, backgroundHealInfoPath, b) if err != nil { - logger.LogIf(ctx, err) + internalLogIf(ctx, err) } } @@ -167,7 +166,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) { cycleInfo.next = binary.LittleEndian.Uint64(buf[:8]) buf = buf[8:] _, err := cycleInfo.UnmarshalMsg(buf) - logger.LogIf(ctx, err) + bugLogIf(ctx, err) } scannerTimer := time.NewTimer(scannerCycle.Load()) @@ -204,7 +203,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) { results := make(chan DataUsageInfo, 1) go storeDataUsageInBackend(ctx, objAPI, results) err := objAPI.NSScanner(ctx, results, uint32(cycleInfo.current), scanMode) - logger.LogOnceIf(ctx, err, "ns-scanner") + scannerLogIf(ctx, err) res := map[string]string{"cycle": strconv.FormatUint(cycleInfo.current, 10)} if err != nil { res["error"] = err.Error() @@ -224,7 +223,9 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) { binary.LittleEndian.PutUint64(tmp, cycleInfo.next) tmp, _ = cycleInfo.MarshalMsg(tmp) err = saveConfig(ctx, objAPI, dataUsageBloomNamePath, tmp) - logger.LogOnceIf(ctx, err, dataUsageBloomNamePath) + if err != nil { + scannerLogIf(ctx, fmt.Errorf("%w, Object %s", err, dataUsageBloomNamePath)) + } } } } @@ -247,7 +248,8 
@@ type folderScanner struct { healObjectSelect uint32 // Do a heal check on an object once every n cycles. Must divide into healFolderInclude scanMode madmin.HealScanMode - weSleep func() bool + weSleep func() bool + shouldHeal func() bool disks []StorageAPI disksQuorum int @@ -302,11 +304,12 @@ type folderScanner struct { // The returned cache will always be valid, but may not be updated from the existing. // Before each operation sleepDuration is called which can be used to temporarily halt the scanner. // If the supplied context is canceled the function will return at the first chance. -func scanDataFolder(ctx context.Context, disks []StorageAPI, basePath string, cache dataUsageCache, getSize getSizeFn, scanMode madmin.HealScanMode, weSleep func() bool) (dataUsageCache, error) { +func scanDataFolder(ctx context.Context, disks []StorageAPI, drive *xlStorage, cache dataUsageCache, getSize getSizeFn, scanMode madmin.HealScanMode, weSleep func() bool) (dataUsageCache, error) { switch cache.Info.Name { case "", dataUsageRoot: return cache, errors.New("internal error: root scan attempted") } + basePath := drive.drivePath updatePath, closeDisk := globalScannerMetrics.currentPathUpdater(basePath, cache.Info.Name) defer closeDisk() @@ -326,6 +329,26 @@ func scanDataFolder(ctx context.Context, disks []StorageAPI, basePath string, ca disksQuorum: len(disks) / 2, } + var skipHeal atomic.Bool + if !globalIsErasure || cache.Info.SkipHealing { + skipHeal.Store(true) + } + + // Check if we should do healing at all. + s.shouldHeal = func() bool { + if skipHeal.Load() { + return false + } + if s.healObjectSelect == 0 { + return false + } + if di, _ := drive.DiskInfo(ctx, DiskInfoOptions{}); di.Healing { + skipHeal.Store(true) + return false + } + return true + } + // Enable healing in XL mode. if globalIsErasure && !cache.Info.SkipHealing { // Do a heal check on an object once every n cycles. Must divide into healFolderInclude @@ -347,6 +370,7 @@ func scanDataFolder(ctx context.Context, disks []StorageAPI, basePath string, ca // No useful information... return cache, err } + s.newCache.forceCompact(dataScannerCompactAtChildren) s.newCache.Info.LastUpdate = UTCNow() s.newCache.Info.NextCycle = cache.Info.NextCycle return s.newCache, nil @@ -479,14 +503,9 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int replication: replicationCfg, } - item.heal.enabled = thisHash.modAlt(f.oldCache.Info.NextCycle/folder.objectHealProbDiv, f.healObjectSelect/folder.objectHealProbDiv) && globalIsErasure + item.heal.enabled = thisHash.modAlt(f.oldCache.Info.NextCycle/folder.objectHealProbDiv, f.healObjectSelect/folder.objectHealProbDiv) && f.shouldHeal() item.heal.bitrot = f.scanMode == madmin.HealDeepScan - // if the drive belongs to an erasure set - // that is already being healed, skip the - // healing attempt on this drive. - item.heal.enabled = item.heal.enabled && f.healObjectSelect > 0 - sz, err := f.getSize(item) if err != nil && err != errIgnoreFileContrib { wait() // wait to proceed to next entry. @@ -546,7 +565,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int APIName: "Scanner", Bucket: f.root, Object: prefixName, - Tags: map[string]interface{}{ + Tags: map[string]string{ "x-minio-prefixes-total": strconv.Itoa(totalFolders), }, }) @@ -641,6 +660,12 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int into.addChild(h) continue } + // Adjust the probability of healing. 
+ // This first removes lowest x from the mod check and makes it x times more likely. + // So if duudc = 10 and we want heal check every 50 cycles, we check + // if (cycle/10) % (50/10) == 0, which would make heal checks run once every 50 cycles, + // if the objects are pre-selected as 1:10. + folder.objectHealProbDiv = dataUsageUpdateDirCycles } f.updateCurrentPath(folder.name) stopFn := globalScannerMetrics.log(scannerMetricScanFolder, f.root, folder.name) @@ -649,7 +674,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int } // Scan for healing - if f.healObjectSelect == 0 || len(abandonedChildren) == 0 { + if len(abandonedChildren) == 0 || !f.shouldHeal() { // If we are not heal scanning, return now. break } @@ -684,6 +709,9 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int healObjectsPrefix := color.Green("healObjects:") for k := range abandonedChildren { + if !f.shouldHeal() { + break + } bucket, prefix := path2BucketObject(k) stopFn := globalScannerMetrics.time(scannerMetricCheckMissing) f.updateCurrentPath(k) @@ -717,6 +745,10 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int }, // Some disks have data for this. partial: func(entries metaCacheEntries, errs []error) { + if !f.shouldHeal() { + cancel() + return + } entry, ok := entries.resolve(&resolver) if !ok { // check if we can get one entry at least @@ -752,7 +784,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int versionID: "", }, madmin.HealItemObject) if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) { - logger.LogOnceIf(ctx, err, entry.name) + scannerLogIf(ctx, err) } foundObjs = foundObjs || err == nil return @@ -769,7 +801,9 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int }, madmin.HealItemObject) stopFn(int(ver.Size)) if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) { - logger.LogOnceIf(ctx, err, fiv.Name) + if err != nil { + scannerLogIf(ctx, fmt.Errorf("%w, Object %s/%s/%s", err, bucket, fiv.Name, ver.VersionID)) + } } if err == nil { successVersions++ @@ -826,8 +860,8 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int } } } - } + if compact { stop := globalScannerMetrics.log(scannerMetricCompactFolder, folder.name) f.newCache.deleteRecursive(thisHash) @@ -841,7 +875,6 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int } stop(total) } - } // Compact if too many children... 
if !into.Compacted { @@ -914,10 +947,7 @@ func (i *scannerItem) transformMetaDir() { i.objectName = split[len(split)-1] } -var ( - applyActionsLogPrefix = color.Green("applyActions:") - applyVersionActionsLogPrefix = color.Green("applyVersionActions:") -) +var applyActionsLogPrefix = color.Green("applyActions:") func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, oi ObjectInfo) (size int64) { if i.debug { @@ -942,129 +972,8 @@ func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, oi Object return 0 } -func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi ObjectInfo) (action lifecycle.Action, size int64) { - size, err := oi.GetActualSize() - if i.debug { - logger.LogIf(ctx, err) - } - if i.lifeCycle == nil { - return action, size - } - - versionID := oi.VersionID - vcfg, _ := globalBucketVersioningSys.Get(i.bucket) - rCfg, _ := globalBucketObjectLockSys.Get(i.bucket) - replcfg, _ := getReplicationConfig(ctx, i.bucket) - lcEvt := evalActionFromLifecycle(ctx, *i.lifeCycle, rCfg, replcfg, oi) - if i.debug { - if versionID != "" { - console.Debugf(applyActionsLogPrefix+" lifecycle: %q (version-id=%s), Initial scan: %v\n", i.objectPath(), versionID, lcEvt.Action) - } else { - console.Debugf(applyActionsLogPrefix+" lifecycle: %q Initial scan: %v\n", i.objectPath(), lcEvt.Action) - } - } - - switch lcEvt.Action { - // This version doesn't contribute towards sizeS only when it is permanently deleted. - // This can happen when, - // - ExpireObjectAllVersions flag is enabled - // - NoncurrentVersionExpiration is applicable - case lifecycle.DeleteVersionAction, lifecycle.DeleteAllVersionsAction: - size = 0 - case lifecycle.DeleteAction: - // On a non-versioned bucket, DeleteObject removes the only version permanently. - if !vcfg.PrefixEnabled(oi.Name) { - size = 0 - } - } - - applyLifecycleAction(lcEvt, lcEventSrc_Scanner, oi) - return lcEvt.Action, size -} - -// applyNewerNoncurrentVersionLimit removes noncurrent versions older than the most recent NewerNoncurrentVersions configured. -// Note: This function doesn't update sizeSummary since it always removes versions that it doesn't return. 
-func (i *scannerItem) applyNewerNoncurrentVersionLimit(ctx context.Context, _ ObjectLayer, fivs []FileInfo, expState *expiryState) ([]ObjectInfo, error) { - done := globalScannerMetrics.time(scannerMetricApplyNonCurrent) - defer done() - - rcfg, _ := globalBucketObjectLockSys.Get(i.bucket) - vcfg, _ := globalBucketVersioningSys.Get(i.bucket) - - versioned := vcfg != nil && vcfg.Versioned(i.objectPath()) - - objectInfos := make([]ObjectInfo, 0, len(fivs)) - - if i.lifeCycle == nil { - for _, fi := range fivs { - objectInfos = append(objectInfos, fi.ToObjectInfo(i.bucket, i.objectPath(), versioned)) - } - return objectInfos, nil - } - - event := i.lifeCycle.NoncurrentVersionsExpirationLimit(lifecycle.ObjectOpts{Name: i.objectPath()}) - lim := event.NewerNoncurrentVersions - if lim == 0 || len(fivs) <= lim+1 { // fewer than lim _noncurrent_ versions - for _, fi := range fivs { - objectInfos = append(objectInfos, fi.ToObjectInfo(i.bucket, i.objectPath(), versioned)) - } - return objectInfos, nil - } - - overflowVersions := fivs[lim+1:] - // Retain the current version + most recent lim noncurrent versions - for _, fi := range fivs[:lim+1] { - objectInfos = append(objectInfos, fi.ToObjectInfo(i.bucket, i.objectPath(), versioned)) - } - - toDel := make([]ObjectToDelete, 0, len(overflowVersions)) - for _, fi := range overflowVersions { - obj := fi.ToObjectInfo(i.bucket, i.objectPath(), versioned) - // skip versions with object locking enabled - if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) { - if i.debug { - if obj.VersionID != "" { - console.Debugf(applyVersionActionsLogPrefix+" lifecycle: %s v(%s) is locked, not deleting\n", obj.Name, obj.VersionID) - } else { - console.Debugf(applyVersionActionsLogPrefix+" lifecycle: %s is locked, not deleting\n", obj.Name) - } - } - // add this version back to remaining versions for - // subsequent lifecycle policy applications - objectInfos = append(objectInfos, obj) - continue - } - - // NoncurrentDays not passed yet. - if time.Now().UTC().Before(lifecycle.ExpectedExpiryTime(obj.SuccessorModTime, event.NoncurrentDays)) { - // add this version back to remaining versions for - // subsequent lifecycle policy applications - objectInfos = append(objectInfos, obj) - continue - } - - toDel = append(toDel, ObjectToDelete{ - ObjectV: ObjectV{ - ObjectName: obj.Name, - VersionID: obj.VersionID, - }, - }) - } - - expState.enqueueByNewerNoncurrent(i.bucket, toDel, event) - return objectInfos, nil -} - -// applyVersionActions will apply lifecycle checks on all versions of a scanned item. Returns versions that remain -// after applying lifecycle checks configured. -func (i *scannerItem) applyVersionActions(ctx context.Context, o ObjectLayer, fivs []FileInfo, expState *expiryState) ([]ObjectInfo, error) { - objInfos, err := i.applyNewerNoncurrentVersionLimit(ctx, o, fivs, expState) - if err != nil { - return nil, err - } - - // Check if we have many versions after applyNewerNoncurrentVersionLimit. - if len(objInfos) > int(scannerExcessObjectVersions.Load()) { +func (i *scannerItem) alertExcessiveVersions(remainingVersions int, cumulativeSize int64) { + if remainingVersions >= int(scannerExcessObjectVersions.Load()) { // Notify object accessed via a GET request. 
sendEvent(eventArgs{ EventName: event.ObjectManyVersions, @@ -1074,7 +983,7 @@ func (i *scannerItem) applyVersionActions(ctx context.Context, o ObjectLayer, fi }, UserAgent: "Scanner", Host: globalLocalNodeName, - RespElements: map[string]string{"x-minio-versions": strconv.Itoa(len(objInfos))}, + RespElements: map[string]string{"x-minio-versions": strconv.Itoa(remainingVersions)}, }) auditLogInternal(context.Background(), AuditLogOptions{ @@ -1082,56 +991,174 @@ func (i *scannerItem) applyVersionActions(ctx context.Context, o ObjectLayer, fi APIName: "Scanner", Bucket: i.bucket, Object: i.objectPath(), - Tags: map[string]interface{}{ - "x-minio-versions": strconv.Itoa(len(objInfos)), + Tags: map[string]string{ + "x-minio-versions": strconv.Itoa(remainingVersions), }, }) } - return objInfos, nil + // Check if the cumulative size of all versions of this object is high. + if cumulativeSize >= scannerExcessObjectVersionsTotalSize.Load() { + // Notify object accessed via a GET request. + sendEvent(eventArgs{ + EventName: event.ObjectLargeVersions, + BucketName: i.bucket, + Object: ObjectInfo{ + Name: i.objectPath(), + }, + UserAgent: "Scanner", + Host: globalLocalNodeName, + RespElements: map[string]string{ + "x-minio-versions-count": strconv.Itoa(remainingVersions), + "x-minio-versions-size": strconv.FormatInt(cumulativeSize, 10), + }, + }) + + auditLogInternal(context.Background(), AuditLogOptions{ + Event: "scanner:largeversions", + APIName: "Scanner", + Bucket: i.bucket, + Object: i.objectPath(), + Tags: map[string]string{ + "x-minio-versions-count": strconv.Itoa(remainingVersions), + "x-minio-versions-size": strconv.FormatInt(cumulativeSize, 10), + }, + }) + } } +type actionsAccountingFn func(oi ObjectInfo, sz, actualSz int64, sizeS *sizeSummary) + // applyActions will apply lifecycle checks on to a scanned item. // The resulting size on disk will always be returned. // The metadata will be compared to consensus on the object layer before any changes are applied. // If no metadata is supplied, -1 is returned if no action is taken. -func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, oi ObjectInfo, sizeS *sizeSummary) (objDeleted bool, size int64) { - done := globalScannerMetrics.time(scannerMetricILM) - var action lifecycle.Action - action, size = i.applyLifecycle(ctx, o, oi) - done() - - // Note: objDeleted is true if and only if action == - // lifecycle.DeleteAllVersionsAction - if action == lifecycle.DeleteAllVersionsAction { - return true, 0 +func (i *scannerItem) applyActions(ctx context.Context, objAPI ObjectLayer, objInfos []ObjectInfo, lr lock.Retention, sizeS *sizeSummary, fn actionsAccountingFn) { + if len(objInfos) == 0 { + return } - - // For instance, an applied lifecycle means we remove/transitioned an object - // from the current deployment, which means we don't have to call healing - // routine even if we are asked to do via heal flag. 
- if action == lifecycle.NoneAction { + healActions := func(oi ObjectInfo, actualSz int64) int64 { + size := actualSz if i.heal.enabled { done := globalScannerMetrics.time(scannerMetricHealCheck) - size = i.applyHealing(ctx, o, oi) + size = i.applyHealing(ctx, objAPI, oi) done() if healDeleteDangling { done := globalScannerMetrics.time(scannerMetricCleanAbandoned) - err := o.CheckAbandonedParts(ctx, i.bucket, i.objectPath(), madmin.HealOpts{Remove: healDeleteDangling}) + err := objAPI.CheckAbandonedParts(ctx, i.bucket, i.objectPath(), madmin.HealOpts{Remove: healDeleteDangling}) done() if err != nil { - logger.LogOnceIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", i.bucket, i.objectPath(), err), i.objectPath()) + healingLogIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", i.bucket, i.objectPath(), err), i.objectPath()) } } } // replicate only if lifecycle rules are not applied. done := globalScannerMetrics.time(scannerMetricCheckReplication) - i.healReplication(ctx, o, oi.Clone(), sizeS) + i.healReplication(ctx, oi.Clone(), sizeS) done() + return size + } + + vc, err := globalBucketVersioningSys.Get(i.bucket) + if err != nil { + scannerLogOnceIf(ctx, err, i.bucket) + return + } + + // start ILM check timer + done := globalScannerMetrics.timeN(scannerMetricILM) + if i.lifeCycle == nil { // no ILM configured, apply healing and replication checks + var cumulativeSize int64 + for _, oi := range objInfos { + actualSz, err := oi.GetActualSize() + if err != nil { + scannerLogIf(ctx, err) + continue + } + size := healActions(oi, actualSz) + if fn != nil { // call accountingfn + fn(oi, size, actualSz, sizeS) + } + cumulativeSize += size + } + // end ILM check timer + done(len(objInfos)) + i.alertExcessiveVersions(len(objInfos), cumulativeSize) + return + } + objOpts := make([]lifecycle.ObjectOpts, len(objInfos)) + for i, oi := range objInfos { + objOpts[i] = oi.ToLifecycleOpts() + } + evaluator := lifecycle.NewEvaluator(*i.lifeCycle).WithLockRetention(&lr).WithReplicationConfig(i.replication.Config) + events, err := evaluator.Eval(objOpts) + if err != nil { + // This error indicates that the objOpts passed to Eval is invalid. 
+ bugLogIf(ctx, err, i.bucket, i.objectPath()) + done(len(objInfos)) // end ILM check timer + return + } + done(len(objInfos)) // end ILM check timer + + var ( + toDel []ObjectToDelete + noncurrentEvents []lifecycle.Event + cumulativeSize int64 + ) + remainingVersions := len(objInfos) +eventLoop: + for idx, event := range events { + oi := objInfos[idx] + actualSz, err := oi.GetActualSize() + if i.debug { + scannerLogIf(ctx, err) + } + size := actualSz + switch event.Action { + case lifecycle.DeleteAllVersionsAction, lifecycle.DelMarkerDeleteAllVersionsAction: + remainingVersions = 0 + applyExpiryRule(event, lcEventSrc_Scanner, oi) + break eventLoop + + case lifecycle.DeleteAction, lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction: + if !vc.PrefixEnabled(i.objectPath()) && event.Action == lifecycle.DeleteAction { + remainingVersions-- + size = 0 + } + applyExpiryRule(event, lcEventSrc_Scanner, oi) + + case lifecycle.DeleteVersionAction: // noncurrent versions expiration + opts := objOpts[idx] + remainingVersions-- + size = 0 + toDel = append(toDel, ObjectToDelete{ + ObjectV: ObjectV{ + ObjectName: opts.Name, + VersionID: opts.VersionID, + }, + }) + noncurrentEvents = append(noncurrentEvents, event) + + case lifecycle.TransitionAction, lifecycle.TransitionVersionAction: + applyTransitionRule(event, lcEventSrc_Scanner, oi) + + case lifecycle.NoneAction: + size = healActions(oi, actualSz) + } + // NB fn must be called for every object version except if it is + // expired or was a dangling object. + if fn != nil { + fn(oi, size, actualSz, sizeS) + } + cumulativeSize += size } - return false, size + + if len(toDel) > 0 { + globalExpiryState.enqueueNoncurrentVersions(i.bucket, toDel, noncurrentEvents) + } + i.alertExcessiveVersions(remainingVersions, cumulativeSize) } func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, lr lock.Retention, rcfg *replication.Config, obj ObjectInfo) lifecycle.Event { @@ -1140,17 +1167,15 @@ func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, lr loc console.Debugf(applyActionsLogPrefix+" lifecycle: Secondary scan: %v\n", event.Action) } - if event.Action == lifecycle.NoneAction { - return event - } - - if obj.IsLatest && event.Action == lifecycle.DeleteAllVersionsAction { - if lr.LockEnabled && enforceRetentionForDeletion(ctx, obj) { + switch event.Action { + case lifecycle.DeleteAllVersionsAction, lifecycle.DelMarkerDeleteAllVersionsAction: + // Skip if bucket has object locking enabled; To prevent the + // possibility of violating an object retention on one of the + // noncurrent versions of this object. 
+ if lr.LockEnabled { return lifecycle.Event{Action: lifecycle.NoneAction} } - } - switch event.Action { case lifecycle.DeleteVersionAction, lifecycle.DeleteRestoredVersionAction: // Defensive code, should never happen if obj.VersionID == "" { @@ -1175,36 +1200,29 @@ func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, lr loc } func applyTransitionRule(event lifecycle.Event, src lcEventSrc, obj ObjectInfo) bool { - if obj.DeleteMarker { + if obj.DeleteMarker || obj.IsDir { return false } globalTransitionState.queueTransitionTask(obj, event, src) return true } -func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) bool { - var err error - defer func() { - if err != nil { - return - } - // Note: DeleteAllVersions action is not supported for - // transitioned objects - globalScannerMetrics.timeILM(lcEvent.Action)(1) - }() - - if err = expireTransitionedObject(ctx, objLayer, &obj, lcEvent, src); err != nil { +func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) (ok bool) { + timeILM := globalScannerMetrics.timeILM(lcEvent.Action) + if err := expireTransitionedObject(ctx, objLayer, &obj, lcEvent, src); err != nil { if isErrObjectNotFound(err) || isErrVersionNotFound(err) { return false } - logger.LogOnceIf(ctx, err, obj.Name) + ilmLogIf(ctx, fmt.Errorf("expireTransitionedObject(%s, %s): %w", obj.Bucket, obj.Name, err)) return false } + timeILM(1) + // Notification already sent in *expireTransitionedObject*, just return 'true' here. return true } -func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) bool { +func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) (ok bool) { traceFn := globalLifecycleSys.trace(obj) opts := ObjectOptions{ Expiration: ExpirationOptions{Expire: true}, @@ -1219,32 +1237,39 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay if lcEvent.Action.DeleteAll() { opts.DeletePrefix = true + // use prefix delete on exact object (this is an optimization to avoid fan-out calls) + opts.DeletePrefixObject = true } var ( dobj ObjectInfo err error ) + + timeILM := globalScannerMetrics.timeILM(lcEvent.Action) defer func() { - if err != nil { + if !ok { return } if lcEvent.Action != lifecycle.NoneAction { numVersions := uint64(1) - if lcEvent.Action == lifecycle.DeleteAllVersionsAction { + if lcEvent.Action.DeleteAll() { numVersions = uint64(obj.NumVersions) } - globalScannerMetrics.timeILM(lcEvent.Action)(numVersions) + timeILM(numVersions) } }() - dobj, err = objLayer.DeleteObject(ctx, obj.Bucket, obj.Name, opts) + dobj, err = objLayer.DeleteObject(ctx, obj.Bucket, encodeDirObject(obj.Name), opts) if err != nil { if isErrObjectNotFound(err) || isErrVersionNotFound(err) { + traceFn(ILMExpiry, nil, nil) return false } // Assume it is still there. 
- logger.LogOnceIf(ctx, err, "non-transition-expiry") + err := fmt.Errorf("DeleteObject(%s, %s): %w", obj.Bucket, obj.Name, err) + ilmLogOnceIf(ctx, err, "non-transition-expiry"+obj.Name) + traceFn(ILMExpiry, nil, err) return false } if dobj.Name == "" { @@ -1252,6 +1277,8 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay } tags := newLifecycleAuditEvent(src, lcEvent).Tags() + tags["version-id"] = dobj.VersionID + // Send audit for the lifecycle delete operation auditLogLifecycle(ctx, dobj, ILMExpiry, tags, traceFn) @@ -1259,7 +1286,12 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay if obj.DeleteMarker { eventName = event.ObjectRemovedDeleteMarkerCreated } - + switch lcEvent.Action { + case lifecycle.DeleteAllVersionsAction: + eventName = event.ObjectRemovedDeleteAllVersions + case lifecycle.DelMarkerDeleteAllVersionsAction: + eventName = event.ILMDelMarkerExpirationDelete + } // Notify object deleted event. sendEvent(eventArgs{ EventName: eventName, @@ -1273,22 +1305,8 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay } // Apply object, object version, restored object or restored object version action on the given object -func applyExpiryRule(event lifecycle.Event, src lcEventSrc, obj ObjectInfo) bool { +func applyExpiryRule(event lifecycle.Event, src lcEventSrc, obj ObjectInfo) { globalExpiryState.enqueueByDays(obj, event, src) - return true -} - -// Perform actions (removal or transitioning of objects), return true the action is successfully performed -func applyLifecycleAction(event lifecycle.Event, src lcEventSrc, obj ObjectInfo) (success bool) { - switch action := event.Action; action { - case lifecycle.DeleteVersionAction, lifecycle.DeleteAction, - lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction, - lifecycle.DeleteAllVersionsAction: - success = applyExpiryRule(event, src, obj) - case lifecycle.TransitionAction, lifecycle.TransitionVersionAction: - success = applyTransitionRule(event, src, obj) - } - return } // objectPath returns the prefix and object name. @@ -1297,7 +1315,7 @@ func (i *scannerItem) objectPath() string { } // healReplication will heal a scanned item that has failed replication. -func (i *scannerItem) healReplication(ctx context.Context, o ObjectLayer, oi ObjectInfo, sizeS *sizeSummary) { +func (i *scannerItem) healReplication(ctx context.Context, oi ObjectInfo, sizeS *sizeSummary) { if oi.VersionID == "" { return } @@ -1383,48 +1401,7 @@ func (d *dynamicSleeper) Timer(ctx context.Context) func() { t := time.Now() return func() { doneAt := time.Now() - for { - // Grab current values - d.mu.RLock() - minWait, maxWait := d.minSleep, d.maxSleep - factor := d.factor - cycle := d.cycle - d.mu.RUnlock() - elapsed := doneAt.Sub(t) - // Don't sleep for really small amount of time - wantSleep := time.Duration(float64(elapsed) * factor) - if wantSleep <= minWait { - return - } - if maxWait > 0 && wantSleep > maxWait { - wantSleep = maxWait - } - timer := time.NewTimer(wantSleep) - select { - case <-ctx.Done(): - if !timer.Stop() { - <-timer.C - } - if d.isScanner { - globalScannerMetrics.incTime(scannerMetricYield, wantSleep) - } - return - case <-timer.C: - if d.isScanner { - globalScannerMetrics.incTime(scannerMetricYield, wantSleep) - } - return - case <-cycle: - if !timer.Stop() { - // We expired. 
- <-timer.C - if d.isScanner { - globalScannerMetrics.incTime(scannerMetricYield, wantSleep) - } - return - } - } - } + d.Sleep(ctx, doneAt.Sub(t)) } } @@ -1499,7 +1476,7 @@ const ( ILMTransition = " ilm:transition" ) -func auditLogLifecycle(ctx context.Context, oi ObjectInfo, event string, tags map[string]interface{}, traceFn func(event string)) { +func auditLogLifecycle(ctx context.Context, oi ObjectInfo, event string, tags map[string]string, traceFn func(event string, metadata map[string]string, err error)) { var apiName string switch event { case ILMExpiry: @@ -1517,5 +1494,5 @@ func auditLogLifecycle(ctx context.Context, oi ObjectInfo, event string, tags ma VersionID: oi.VersionID, Tags: tags, }) - traceFn(event) + traceFn(event, tags, nil) } diff --git a/cmd/data-scanner_test.go b/cmd/data-scanner_test.go index a55007a67d855..c45ad1e79aa0a 100644 --- a/cmd/data-scanner_test.go +++ b/cmd/data-scanner_test.go @@ -18,58 +18,67 @@ package cmd import ( - "context" "encoding/xml" + "fmt" + "slices" + "strings" "sync" "testing" "time" "github.com/google/uuid" + "github.com/minio/minio/internal/amztime" "github.com/minio/minio/internal/bucket/lifecycle" + objectlock "github.com/minio/minio/internal/bucket/object/lock" + "github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/bucket/versioning" + xhttp "github.com/minio/minio/internal/http" ) func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) { - objAPI, disks, err := prepareErasure(context.Background(), 8) + // Prepare object layer + objAPI, disks, err := prepareErasure(t.Context(), 8) if err != nil { t.Fatalf("Failed to initialize object layer: %v", err) } defer removeRoots(disks) setObjectLayer(objAPI) + + // Prepare bucket metadata globalBucketMetadataSys = NewBucketMetadataSys() globalBucketObjectLockSys = &BucketObjectLockSys{} globalBucketVersioningSys = &BucketVersioningSys{} - es := newExpiryState(context.Background(), objAPI, 0) - workers := []chan expiryOp{make(chan expiryOp)} - es.workers.Store(&workers) - globalExpiryState = es - var wg sync.WaitGroup - wg.Add(1) - expired := make([]ObjectToDelete, 0, 5) - go func() { - defer wg.Done() - workers := globalExpiryState.workers.Load() - for t := range (*workers)[0] { - if t, ok := t.(newerNoncurrentTask); ok { - expired = append(expired, t.versions...) 
- } - } - }() - lc := lifecycle.Lifecycle{ - Rules: []lifecycle.Rule{ - { - ID: "max-versions", - Status: "Enabled", - NoncurrentVersionExpiration: lifecycle.NoncurrentVersionExpiration{ - NewerNoncurrentVersions: 1, - }, - }, - }, - } - lcXML, err := xml.Marshal(lc) + + lcXML := ` + + + max-versions + Enabled + + 2 + + + + delete-all-versions + Enabled + + + del-all + true + + + + 1 + true + + + +` + lc, err := lifecycle.ParseLifecycleConfig(strings.NewReader(lcXML)) if err != nil { - t.Fatalf("Failed to marshal lifecycle config: %v", err) + t.Fatalf("Failed to unmarshal lifecycle config: %v", err) } + vcfg := versioning.Versioning{ Status: "Enabled", } @@ -79,34 +88,46 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) { } bucket := "bucket" - obj := "obj-1" now := time.Now() meta := BucketMetadata{ Name: bucket, Created: now, - LifecycleConfigXML: lcXML, + LifecycleConfigXML: []byte(lcXML), VersioningConfigXML: vcfgXML, VersioningConfigUpdatedAt: now, LifecycleConfigUpdatedAt: now, - lifecycleConfig: &lc, + lifecycleConfig: lc, versioningConfig: &vcfg, } globalBucketMetadataSys.Set(bucket, meta) - item := scannerItem{ - Path: obj, - bucket: bucket, - prefix: "", - objectName: obj, - lifeCycle: &lc, - } + // Prepare lifecycle expiration workers + es := newExpiryState(t.Context(), objAPI, 0) + globalExpiryState = es - modTime := time.Now() + // Prepare object versions + obj := "obj-1" + // Simulate objects uploaded 30 hours ago + modTime := now.Add(-48 * time.Hour) uuids := make([]uuid.UUID, 5) for i := range uuids { uuids[i] = uuid.UUID([16]byte{15: uint8(i + 1)}) } fivs := make([]FileInfo, 5) - for i := 0; i < 5; i++ { + objInfos := make([]ObjectInfo, 5) + objRetentionMeta := make(map[string]string) + objRetentionMeta[strings.ToLower(xhttp.AmzObjectLockMode)] = string(objectlock.RetCompliance) + // Set retain until date 12 hours into the future + objRetentionMeta[strings.ToLower(xhttp.AmzObjectLockRetainUntilDate)] = amztime.ISO8601Format(now.Add(12 * time.Hour)) + /* + objInfos: + version stack for obj-1 + v5 uuid-5 modTime + v4 uuid-4 modTime -1m + v3 uuid-3 modTime -2m + v2 uuid-2 modTime -3m + v1 uuid-1 modTime -4m + */ + for i := range 5 { fivs[i] = FileInfo{ Volume: bucket, Name: obj, @@ -116,28 +137,271 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) { Size: 1 << 10, NumVersions: 5, } + objInfos[i] = fivs[i].ToObjectInfo(bucket, obj, true) } - versioned := vcfg.Status == "Enabled" - wants := make([]ObjectInfo, 2) - for i, fi := range fivs[:2] { - wants[i] = fi.ToObjectInfo(bucket, obj, versioned) + /* + lrObjInfos: objInfos with following modifications + version stack for obj-1 + v2 uuid-2 modTime -3m objRetentionMeta + */ + lrObjInfos := slices.Clone(objInfos) + lrObjInfos[3].UserDefined = objRetentionMeta + var lrWants []ObjectInfo + lrWants = append(lrWants, lrObjInfos[:4]...) + + /* + replObjInfos: objInfos with following modifications + version stack for obj-1 + v1 uuid-1 modTime -4m "VersionPurgeStatus: replication.VersionPurgePending" + */ + replObjInfos := slices.Clone(objInfos) + replObjInfos[4].VersionPurgeStatus = replication.VersionPurgePending + var replWants []ObjectInfo + replWants = append(replWants, replObjInfos[:3]...) 
+ replWants = append(replWants, replObjInfos[4]) + + allVersExpObjInfos := slices.Clone(objInfos) + allVersExpObjInfos[0].UserTags = "del-all=true" + + replCfg := replication.Config{ + Rules: []replication.Rule{ + { + ID: "", + Status: "Enabled", + Priority: 1, + Destination: replication.Destination{ + ARN: "arn:minio:replication:::dest-bucket", + Bucket: "dest-bucket", + }, + }, + }, + } + lr := objectlock.Retention{ + Mode: objectlock.RetCompliance, + Validity: 12 * time.Hour, + LockEnabled: true, } - gots, err := item.applyNewerNoncurrentVersionLimit(context.TODO(), objAPI, fivs, es) + + expiryWorker := func(wg *sync.WaitGroup, readyCh chan<- struct{}, taskCh <-chan expiryOp, gotExpired *[]ObjectToDelete) { + defer wg.Done() + // signal the calling goroutine that the worker is ready tor receive tasks + close(readyCh) + var expired []ObjectToDelete + for t := range taskCh { + switch v := t.(type) { + case noncurrentVersionsTask: + expired = append(expired, v.versions...) + case expiryTask: + expired = append(expired, ObjectToDelete{ + ObjectV: ObjectV{ + ObjectName: v.objInfo.Name, + VersionID: v.objInfo.VersionID, + }, + }) + } + } + if len(expired) > 0 { + *gotExpired = expired + } + } + tests := []struct { + replCfg replicationConfig + lr objectlock.Retention + objInfos []ObjectInfo + wants []ObjectInfo + wantExpired []ObjectToDelete + }{ + { + // With replication configured, version(s) with PENDING purge status + replCfg: replicationConfig{Config: &replCfg}, + objInfos: replObjInfos, + wants: replWants, + wantExpired: []ObjectToDelete{ + {ObjectV: ObjectV{ObjectName: obj, VersionID: objInfos[3].VersionID}}, + }, + }, + { + // With lock retention configured and version(s) with retention metadata + lr: lr, + objInfos: lrObjInfos, + wants: lrWants, + wantExpired: []ObjectToDelete{ + {ObjectV: ObjectV{ObjectName: obj, VersionID: objInfos[4].VersionID}}, + }, + }, + { + // With replication configured, but no versions with PENDING purge status + replCfg: replicationConfig{Config: &replCfg}, + objInfos: objInfos, + wants: objInfos[:3], + wantExpired: []ObjectToDelete{ + {ObjectV: ObjectV{ObjectName: obj, VersionID: objInfos[3].VersionID}}, + {ObjectV: ObjectV{ObjectName: obj, VersionID: objInfos[4].VersionID}}, + }, + }, + { + objInfos: allVersExpObjInfos, + wants: nil, + wantExpired: []ObjectToDelete{{ObjectV: ObjectV{ObjectName: obj, VersionID: allVersExpObjInfos[0].VersionID}}}, + }, + { + // When no versions are present, in practice this could be an object with only free versions + objInfos: nil, + wants: nil, + wantExpired: nil, + }, + } + for i, test := range tests { + t.Run(fmt.Sprintf("TestApplyNewerNoncurrentVersionsLimit-%d", i), func(t *testing.T) { + workers := []chan expiryOp{make(chan expiryOp)} + es.workers.Store(&workers) + workerReady := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(1) + var gotExpired []ObjectToDelete + go expiryWorker(&wg, workerReady, workers[0], &gotExpired) + <-workerReady + + item := scannerItem{ + Path: obj, + bucket: bucket, + prefix: "", + objectName: obj, + lifeCycle: lc, + replication: test.replCfg, + } + + var ( + sizeS sizeSummary + gots []ObjectInfo + ) + item.applyActions(t.Context(), objAPI, test.objInfos, test.lr, &sizeS, func(oi ObjectInfo, sz, _ int64, _ *sizeSummary) { + if sz != 0 { + gots = append(gots, oi) + } + }) + + if len(gots) != len(test.wants) { + t.Fatalf("Expected %d objects but got %d", len(test.wants), len(gots)) + } + if slices.CompareFunc(gots, test.wants, func(g, w ObjectInfo) int { + if g.VersionID == w.VersionID 
{ + return 0 + } + return -1 + }) != 0 { + t.Fatalf("Expected %v but got %v", test.wants, gots) + } + // verify the objects to be deleted + close(workers[0]) + wg.Wait() + if len(gotExpired) != len(test.wantExpired) { + t.Fatalf("Expected expiry of %d objects but got %d", len(test.wantExpired), len(gotExpired)) + } + if slices.CompareFunc(gotExpired, test.wantExpired, func(g, w ObjectToDelete) int { + if g.VersionID == w.VersionID { + return 0 + } + return -1 + }) != 0 { + t.Fatalf("Expected %v but got %v", test.wantExpired, gotExpired) + } + }) + } +} + +func TestEvalActionFromLifecycle(t *testing.T) { + // Tests cover only ExpiredObjectDeleteAllVersions and DelMarkerExpiration actions + numVersions := 4 + obj := ObjectInfo{ + Name: "foo", + ModTime: time.Now().Add(-31 * 24 * time.Hour), + Size: 100 << 20, + VersionID: uuid.New().String(), + IsLatest: true, + NumVersions: numVersions, + } + delMarker := ObjectInfo{ + Name: "foo-deleted", + ModTime: time.Now().Add(-61 * 24 * time.Hour), + Size: 0, + VersionID: uuid.New().String(), + IsLatest: true, + DeleteMarker: true, + NumVersions: numVersions, + } + + deleteAllILM := ` + + + 30 + true + + + Enabled + DeleteAllVersions + + ` + delMarkerILM := ` + + DelMarkerExpiration + + Enabled + + 60 + + + ` + deleteAllLc, err := lifecycle.ParseLifecycleConfig(strings.NewReader(deleteAllILM)) if err != nil { - t.Fatalf("Failed with err: %v", err) + t.Fatalf("Failed to parse deleteAllILM test ILM policy %v", err) } - if len(gots) != len(wants) { - t.Fatalf("Expected %d objects but got %d", len(wants), len(gots)) + delMarkerLc, err := lifecycle.ParseLifecycleConfig(strings.NewReader(delMarkerILM)) + if err != nil { + t.Fatalf("Failed to parse delMarkerILM test ILM policy %v", err) + } + tests := []struct { + ilm lifecycle.Lifecycle + retention *objectlock.Retention + obj ObjectInfo + want lifecycle.Action + }{ + { + // with object locking + ilm: *deleteAllLc, + retention: &objectlock.Retention{LockEnabled: true}, + obj: obj, + want: lifecycle.NoneAction, + }, + { + // without object locking + ilm: *deleteAllLc, + retention: &objectlock.Retention{}, + obj: obj, + want: lifecycle.DeleteAllVersionsAction, + }, + { + // with object locking + ilm: *delMarkerLc, + retention: &objectlock.Retention{LockEnabled: true}, + obj: delMarker, + want: lifecycle.NoneAction, + }, + { + // without object locking + ilm: *delMarkerLc, + retention: &objectlock.Retention{}, + obj: delMarker, + want: lifecycle.DelMarkerDeleteAllVersionsAction, + }, } - // Close expiry state's channel to inspect object versions enqueued for expiration - close(workers[0]) - wg.Wait() - for _, obj := range expired { - switch obj.ObjectV.VersionID { - case uuids[2].String(), uuids[3].String(), uuids[4].String(): - default: - t.Errorf("Unexpected versionID being expired: %#v\n", obj) - } + for i, test := range tests { + t.Run(fmt.Sprintf("TestEvalAction-%d", i), func(t *testing.T) { + gotEvent := evalActionFromLifecycle(t.Context(), test.ilm, *test.retention, nil, test.obj) + if gotEvent.Action != test.want { + t.Fatalf("Expected %v but got %v", test.want, gotEvent.Action) + } + }) } } diff --git a/cmd/data-usage-cache.go b/cmd/data-usage-cache.go index 1ab28fe698e94..5752daae057f7 100644 --- a/cmd/data-usage-cache.go +++ b/cmd/data-usage-cache.go @@ -18,11 +18,11 @@ package cmd import ( - "bytes" "context" "errors" "fmt" "io" + "maps" "math/rand" "net/http" "path" @@ -36,12 +36,12 @@ import ( "github.com/klauspost/compress/zstd" "github.com/minio/madmin-go/v3" 
"github.com/minio/minio/internal/bucket/lifecycle" - "github.com/minio/minio/internal/hash" - "github.com/minio/minio/internal/logger" "github.com/tinylib/msgp/msgp" "github.com/valyala/bytebufferpool" ) +//msgp:clearomitted + //go:generate msgp -file $GOFILE -unexported // dataUsageHash is the hash type used. @@ -60,15 +60,14 @@ type versionsHistogram [dataUsageVersionLen]uint64 type dataUsageEntry struct { Children dataUsageHashMap `msg:"ch"` // These fields do no include any children. - Size int64 `msg:"sz"` - Objects uint64 `msg:"os"` - Versions uint64 `msg:"vs"` // Versions that are not delete markers. - DeleteMarkers uint64 `msg:"dms"` - ObjSizes sizeHistogram `msg:"szs"` - ObjVersions versionsHistogram `msg:"vh"` - ReplicationStats *replicationAllStats `msg:"rs,omitempty"` - AllTierStats *allTierStats `msg:"ats,omitempty"` - Compacted bool `msg:"c"` + Size int64 `msg:"sz"` + Objects uint64 `msg:"os"` + Versions uint64 `msg:"vs"` // Versions that are not delete markers. + DeleteMarkers uint64 `msg:"dms"` + ObjSizes sizeHistogram `msg:"szs"` + ObjVersions versionsHistogram `msg:"vh"` + AllTierStats *allTierStats `msg:"ats,omitempty"` + Compacted bool `msg:"c"` } // allTierStats is a collection of per-tier stats across all configured remote @@ -101,9 +100,7 @@ func (ats *allTierStats) clone() *allTierStats { } dst := *ats dst.Tiers = make(map[string]tierStats, len(ats.Tiers)) - for tier, st := range ats.Tiers { - dst.Tiers[tier] = st - } + maps.Copy(dst.Tiers, ats.Tiers) return &dst } @@ -120,7 +117,6 @@ func (ats *allTierStats) populateStats(stats map[string]madmin.TierStats) { NumObjects: st.NumObjects, } } - return } // tierStats holds per-tier stats of a remote tier. @@ -138,77 +134,6 @@ func (ts tierStats) add(u tierStats) tierStats { } } -//msgp:tuple replicationStatsV1 -type replicationStatsV1 struct { - PendingSize uint64 - ReplicatedSize uint64 - FailedSize uint64 - ReplicaSize uint64 - FailedCount uint64 - PendingCount uint64 - MissedThresholdSize uint64 - AfterThresholdSize uint64 - MissedThresholdCount uint64 - AfterThresholdCount uint64 -} - -func (rsv1 replicationStatsV1) Empty() bool { - return rsv1.ReplicatedSize == 0 && - rsv1.FailedSize == 0 && - rsv1.FailedCount == 0 -} - -//msgp:tuple replicationStats -type replicationStats struct { - PendingSize uint64 - ReplicatedSize uint64 - FailedSize uint64 - FailedCount uint64 - PendingCount uint64 - MissedThresholdSize uint64 - AfterThresholdSize uint64 - MissedThresholdCount uint64 - AfterThresholdCount uint64 - ReplicatedCount uint64 -} - -func (rs replicationStats) Empty() bool { - return rs.ReplicatedSize == 0 && - rs.FailedSize == 0 && - rs.FailedCount == 0 -} - -type replicationAllStats struct { - Targets map[string]replicationStats `msg:"t,omitempty"` - ReplicaSize uint64 `msg:"r,omitempty"` - ReplicaCount uint64 `msg:"rc,omitempty"` -} - -//msgp:tuple replicationAllStatsV1 -type replicationAllStatsV1 struct { - Targets map[string]replicationStats - ReplicaSize uint64 `msg:"ReplicaSize,omitempty"` - ReplicaCount uint64 `msg:"ReplicaCount,omitempty"` -} - -// clone creates a deep-copy clone. -func (r *replicationAllStats) clone() *replicationAllStats { - if r == nil { - return nil - } - - // Shallow copy - dst := *r - - // Copy individual targets. 
- dst.Targets = make(map[string]replicationStats, len(r.Targets)) - for k, v := range r.Targets { - dst.Targets[k] = v - } - - return &dst -} - //msgp:encode ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4 dataUsageEntryV5 dataUsageEntryV6 dataUsageEntryV7 //msgp:marshal ignore dataUsageEntryV2 dataUsageEntryV3 dataUsageEntryV4 dataUsageEntryV5 dataUsageEntryV6 dataUsageEntryV7 @@ -224,62 +149,54 @@ type dataUsageEntryV2 struct { //msgp:tuple dataUsageEntryV3 type dataUsageEntryV3 struct { // These fields do no include any children. - Size int64 - ReplicatedSize uint64 - ReplicationPendingSize uint64 - ReplicationFailedSize uint64 - ReplicaSize uint64 - Objects uint64 - ObjSizes sizeHistogram - Children dataUsageHashMap + Size int64 + Objects uint64 + ObjSizes sizeHistogram + Children dataUsageHashMap } //msgp:tuple dataUsageEntryV4 type dataUsageEntryV4 struct { Children dataUsageHashMap // These fields do no include any children. - Size int64 - Objects uint64 - ObjSizes sizeHistogram - ReplicationStats replicationStatsV1 + Size int64 + Objects uint64 + ObjSizes sizeHistogram } //msgp:tuple dataUsageEntryV5 type dataUsageEntryV5 struct { Children dataUsageHashMap // These fields do no include any children. - Size int64 - Objects uint64 - Versions uint64 // Versions that are not delete markers. - ObjSizes sizeHistogram - ReplicationStats *replicationStatsV1 - Compacted bool + Size int64 + Objects uint64 + Versions uint64 // Versions that are not delete markers. + ObjSizes sizeHistogram + Compacted bool } //msgp:tuple dataUsageEntryV6 type dataUsageEntryV6 struct { Children dataUsageHashMap // These fields do no include any children. - Size int64 - Objects uint64 - Versions uint64 // Versions that are not delete markers. - ObjSizes sizeHistogram - ReplicationStats *replicationAllStatsV1 - Compacted bool + Size int64 + Objects uint64 + Versions uint64 // Versions that are not delete markers. + ObjSizes sizeHistogram + Compacted bool } type dataUsageEntryV7 struct { Children dataUsageHashMap `msg:"ch"` // These fields do no include any children. - Size int64 `msg:"sz"` - Objects uint64 `msg:"os"` - Versions uint64 `msg:"vs"` // Versions that are not delete markers. - DeleteMarkers uint64 `msg:"dms"` - ObjSizes sizeHistogramV1 `msg:"szs"` - ObjVersions versionsHistogram `msg:"vh"` - ReplicationStats *replicationAllStats `msg:"rs,omitempty"` - AllTierStats *allTierStats `msg:"ats,omitempty"` - Compacted bool `msg:"c"` + Size int64 `msg:"sz"` + Objects uint64 `msg:"os"` + Versions uint64 `msg:"vs"` // Versions that are not delete markers. + DeleteMarkers uint64 `msg:"dms"` + ObjSizes sizeHistogramV1 `msg:"szs"` + ObjVersions versionsHistogram `msg:"vh"` + AllTierStats *allTierStats `msg:"ats,omitempty"` + Compacted bool `msg:"c"` } // dataUsageCache contains a cache of data usage entries latest version. 
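The clone helpers in this file (allTierStats.clone, dataUsageEntry.clone, and the removed replicationAllStats.clone) all use the idiom that these hunks also switch to maps.Copy for: copy the struct by value, then give the map field its own backing storage. A minimal, self-contained sketch of that idiom, with hypothetical usageNode/tierUsage types standing in for the real cache entries:

package main

import (
	"fmt"
	"maps"
)

// tierUsage is a hypothetical stand-in for the per-tier counters kept in the
// usage cache; the real entry carries more fields.
type tierUsage struct {
	TotalSize  uint64
	NumObjects int
}

// usageNode mirrors the clone pattern used by the cache entries: value fields
// are copied by the struct assignment, and the map gets a fresh backing store
// via maps.Copy so mutations on the clone never leak into the shared entry.
type usageNode struct {
	Size  int64
	Tiers map[string]tierUsage
}

func (n *usageNode) clone() *usageNode {
	if n == nil {
		return nil
	}
	dst := *n // shallow copy of all value fields
	dst.Tiers = make(map[string]tierUsage, len(n.Tiers))
	maps.Copy(dst.Tiers, n.Tiers) // copy map entries; values are plain structs
	return &dst
}

func main() {
	orig := &usageNode{Size: 42, Tiers: map[string]tierUsage{"WARM": {TotalSize: 1 << 20, NumObjects: 3}}}
	cp := orig.clone()
	cp.Tiers["WARM"] = tierUsage{} // mutate the clone only
	fmt.Println(orig.Tiers["WARM"].TotalSize) // still 1048576; the original is untouched
}

Because the map's value type holds no pointers, copying the entries is already a deep copy; a map of pointer values would still need per-entry duplication.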
@@ -360,29 +277,6 @@ func (e *dataUsageEntry) addSizes(summary sizeSummary) { e.ObjSizes.add(summary.totalSize) e.ObjVersions.add(summary.versions) - if e.ReplicationStats == nil { - e.ReplicationStats = &replicationAllStats{ - Targets: make(map[string]replicationStats), - } - } else if e.ReplicationStats.Targets == nil { - e.ReplicationStats.Targets = make(map[string]replicationStats) - } - e.ReplicationStats.ReplicaSize += uint64(summary.replicaSize) - e.ReplicationStats.ReplicaCount += uint64(summary.replicaCount) - - for arn, st := range summary.replTargetStats { - tgtStat, ok := e.ReplicationStats.Targets[arn] - if !ok { - tgtStat = replicationStats{} - } - tgtStat.PendingSize += uint64(st.pendingSize) - tgtStat.FailedSize += uint64(st.failedSize) - tgtStat.ReplicatedSize += uint64(st.replicatedSize) - tgtStat.ReplicatedCount += uint64(st.replicatedCount) - tgtStat.FailedCount += st.failedCount - tgtStat.PendingCount += st.pendingCount - e.ReplicationStats.Targets[arn] = tgtStat - } if len(summary.tiers) != 0 { if e.AllTierStats == nil { e.AllTierStats = newAllTierStats() @@ -397,26 +291,6 @@ func (e *dataUsageEntry) merge(other dataUsageEntry) { e.Versions += other.Versions e.DeleteMarkers += other.DeleteMarkers e.Size += other.Size - if other.ReplicationStats != nil { - if e.ReplicationStats == nil { - e.ReplicationStats = &replicationAllStats{Targets: make(map[string]replicationStats)} - } else if e.ReplicationStats.Targets == nil { - e.ReplicationStats.Targets = make(map[string]replicationStats) - } - e.ReplicationStats.ReplicaSize += other.ReplicationStats.ReplicaSize - e.ReplicationStats.ReplicaCount += other.ReplicationStats.ReplicaCount - for arn, stat := range other.ReplicationStats.Targets { - st := e.ReplicationStats.Targets[arn] - e.ReplicationStats.Targets[arn] = replicationStats{ - PendingSize: stat.PendingSize + st.PendingSize, - FailedSize: stat.FailedSize + st.FailedSize, - ReplicatedSize: stat.ReplicatedSize + st.ReplicatedSize, - PendingCount: stat.PendingCount + st.PendingCount, - FailedCount: stat.FailedCount + st.FailedCount, - ReplicatedCount: stat.ReplicatedCount + st.ReplicatedCount, - } - } - } for i, v := range other.ObjSizes[:] { e.ObjSizes[i] += v @@ -472,15 +346,10 @@ func (e dataUsageEntry) clone() dataUsageEntry { // We operate on a copy from the receiver. 
if e.Children != nil { ch := make(dataUsageHashMap, len(e.Children)) - for k, v := range e.Children { - ch[k] = v - } + maps.Copy(ch, e.Children) e.Children = ch } - if e.ReplicationStats != nil { - // Clone ReplicationStats - e.ReplicationStats = e.ReplicationStats.clone() - } + if e.AllTierStats != nil { e.AllTierStats = e.AllTierStats.clone() } @@ -523,20 +392,18 @@ func (d *dataUsageCache) searchParent(h dataUsageHash) *dataUsageHash { want := h.Key() if idx := strings.LastIndexByte(want, '/'); idx >= 0 { if v := d.find(want[:idx]); v != nil { - for child := range v.Children { - if child == want { - found := hashPath(want[:idx]) - return &found - } + _, ok := v.Children[want] + if ok { + found := hashPath(want[:idx]) + return &found } } } for k, v := range d.Cache { - for child := range v.Children { - if child == want { - found := dataUsageHash(k) - return &found - } + _, ok := v.Children[want] + if ok { + found := dataUsageHash(k) + return &found } } return nil @@ -621,7 +488,7 @@ func (d *dataUsageCache) copyWithChildren(src *dataUsageCache, hash dataUsageHas d.Cache[hash.Key()] = e for ch := range e.Children { if ch == hash.Key() { - logger.LogIf(GlobalContext, errors.New("dataUsageCache.copyWithChildren: Circular reference")) + scannerLogIf(GlobalContext, errors.New("dataUsageCache.copyWithChildren: Circular reference")) return } d.copyWithChildren(src, dataUsageHash(ch), &hash) @@ -718,6 +585,53 @@ func (d *dataUsageCache) reduceChildrenOf(path dataUsageHash, limit int, compact } } +// forceCompact will force compact the cache of the top entry. +// If the number of children is more than limit*100, it will compact self. +// When above the limit a cleanup will also be performed to remove any possible abandoned entries. +func (d *dataUsageCache) forceCompact(limit int) { + if d == nil || len(d.Cache) <= limit { + return + } + top := hashPath(d.Info.Name).Key() + topE := d.find(top) + if topE == nil { + scannerLogIf(GlobalContext, errors.New("forceCompact: root not found")) + return + } + // If off by 2 orders of magnitude, compact self and log error. + if len(topE.Children) > dataScannerForceCompactAtFolders { + // If we still have too many children, compact self. + scannerLogOnceIf(GlobalContext, fmt.Errorf("forceCompact: %q has %d children. Force compacting. Expect reduced scanner performance", d.Info.Name, len(topE.Children)), d.Info.Name) + d.reduceChildrenOf(hashPath(d.Info.Name), limit, true) + } + if len(d.Cache) <= limit { + return + } + + // Check for abandoned entries. + found := make(map[string]struct{}, len(d.Cache)) + + // Mark all children recursively + var mark func(entry dataUsageEntry) + mark = func(entry dataUsageEntry) { + for k := range entry.Children { + found[k] = struct{}{} + if ch, ok := d.Cache[k]; ok { + mark(ch) + } + } + } + found[top] = struct{}{} + mark(*topE) + + // Delete all entries not found. + for k := range d.Cache { + if _, ok := found[k]; !ok { + delete(d.Cache, k) + } + } +} + // StringAll returns a detailed string representation of all entries in the cache. func (d *dataUsageCache) StringAll() string { // Remove bloom filter from print. 
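The new forceCompact above bounds the cache in two passes: it compacts the root's children once they exceed dataScannerForceCompactAtFolders, and if the cache is still over the limit it mark-and-sweeps every entry no longer reachable from the root. A minimal sketch of that sweep over a flat path-keyed map, using a hypothetical node type in place of dataUsageEntry (with an explicit visited check added, which the real code leaves to its circular-reference logging):

package main

import "fmt"

// node is a hypothetical flat-cache entry: the scanner cache keys entries by
// path hash and records children as a set of keys into the same map.
type node struct {
	children map[string]struct{}
}

// sweepAbandoned mirrors the mark phase in forceCompact: walk the children
// sets starting from root, remember every reachable key, and delete the rest.
// Entries left behind by interrupted scans become unreachable and are dropped.
func sweepAbandoned(cache map[string]node, root string) {
	found := map[string]struct{}{root: {}}
	var mark func(n node)
	mark = func(n node) {
		for k := range n.children {
			if _, seen := found[k]; seen {
				continue // already visited; also guards against cycles
			}
			found[k] = struct{}{}
			if ch, ok := cache[k]; ok {
				mark(ch)
			}
		}
	}
	if r, ok := cache[root]; ok {
		mark(r)
	}
	for k := range cache {
		if _, ok := found[k]; !ok {
			delete(cache, k)
		}
	}
}

func main() {
	cache := map[string]node{
		"bucket":       {children: map[string]struct{}{"bucket/a": {}}},
		"bucket/a":     {},
		"bucket/stale": {}, // not referenced by any parent
	}
	sweepAbandoned(cache, "bucket")
	fmt.Println(len(cache)) // 2: the stale entry is gone
}

Deleting keys while ranging over the map is legal in Go, so the sweep needs no second pass or temporary slice.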
@@ -873,22 +787,6 @@ func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]Bucke ObjectSizesHistogram: flat.ObjSizes.toMap(), ObjectVersionsHistogram: flat.ObjVersions.toMap(), } - if flat.ReplicationStats != nil { - bui.ReplicaSize = flat.ReplicationStats.ReplicaSize - bui.ReplicaCount = flat.ReplicationStats.ReplicaCount - - bui.ReplicationInfo = make(map[string]BucketTargetUsageInfo, len(flat.ReplicationStats.Targets)) - for arn, stat := range flat.ReplicationStats.Targets { - bui.ReplicationInfo[arn] = BucketTargetUsageInfo{ - ReplicationPendingSize: stat.PendingSize, - ReplicatedSize: stat.ReplicatedSize, - ReplicationFailedSize: stat.FailedSize, - ReplicationPendingCount: stat.PendingCount, - ReplicationFailedCount: stat.FailedCount, - ReplicatedCount: stat.ReplicatedCount, - } - } - } dst[bucket.Name] = bui } return dst @@ -989,11 +887,23 @@ func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, ObjectOptions{NoLock: true}) + r, err := store.GetObjectNInfo(ctx, minioMetaBucket, pathJoin(bucketMetaPrefix, name), nil, http.Header{}, ObjectOptions{NoLock: true}) if err != nil { switch err.(type) { case ObjectNotFound, BucketNotFound: - return false, nil + r, err = store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, ObjectOptions{NoLock: true}) + if err != nil { + switch err.(type) { + case ObjectNotFound, BucketNotFound: + return false, nil + case InsufficientReadQuorum, StorageErr: + return true, nil + } + return false, err + } + err = d.deserialize(r) + r.Close() + return err != nil, nil case InsufficientReadQuorum, StorageErr: return true, nil } @@ -1024,7 +934,7 @@ func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string) } if retries == 5 { - logger.LogOnceIf(ctx, fmt.Errorf("maximum retry reached to load the data usage cache `%s`", name), "retry-loading-data-usage-cache") + scannerLogOnceIf(ctx, fmt.Errorf("maximum retry reached to load the data usage cache `%s`", name), "retry-loading-data-usage-cache") } return nil @@ -1054,24 +964,11 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string) } save := func(name string, timeout time.Duration) error { - hr, err := hash.NewReader(ctx, bytes.NewReader(buf.Bytes()), int64(buf.Len()), "", "", int64(buf.Len())) - if err != nil { - return err - } - // Abandon if more than a minute, so we don't hold up scanner. 
ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - _, err = store.PutObject(ctx, - dataUsageBucket, - name, - NewPutObjReader(hr), - ObjectOptions{NoLock: true}) - if isErrBucketNotFound(err) { - return nil - } - return err + return saveConfig(ctx, store, pathJoin(bucketMetaPrefix, name), buf.Bytes()) } defer save(name+".bkp", 5*time.Second) // Keep a backup as well @@ -1178,20 +1075,6 @@ func (d *dataUsageCache) deserialize(r io.Reader) error { ObjSizes: v.ObjSizes, Children: v.Children, } - if v.ReplicatedSize > 0 || v.ReplicaSize > 0 || v.ReplicationFailedSize > 0 || v.ReplicationPendingSize > 0 { - cfg, _ := getReplicationConfig(GlobalContext, d.Info.Name) - if cfg != nil && cfg.RoleArn != "" { - due.ReplicationStats = &replicationAllStats{ - Targets: make(map[string]replicationStats), - } - due.ReplicationStats.ReplicaSize = v.ReplicaSize - due.ReplicationStats.Targets[cfg.RoleArn] = replicationStats{ - ReplicatedSize: v.ReplicatedSize, - FailedSize: v.ReplicationFailedSize, - PendingSize: v.ReplicationPendingSize, - } - } - } due.Compacted = len(due.Children) == 0 && k != d.Info.Name d.Cache[k] = due @@ -1217,36 +1100,10 @@ func (d *dataUsageCache) deserialize(r io.Reader) error { ObjSizes: v.ObjSizes, Children: v.Children, } - empty := replicationStatsV1{} - - if v.ReplicationStats != empty { - cfg, _ := getReplicationConfig(GlobalContext, d.Info.Name) - if cfg != nil && cfg.RoleArn != "" { - due.ReplicationStats = &replicationAllStats{ - Targets: make(map[string]replicationStats), - } - due.ReplicationStats.Targets[cfg.RoleArn] = replicationStats{ - ReplicatedSize: v.ReplicationStats.ReplicatedSize, - FailedSize: v.ReplicationStats.FailedSize, - FailedCount: v.ReplicationStats.FailedCount, - PendingSize: v.ReplicationStats.PendingSize, - PendingCount: v.ReplicationStats.PendingCount, - } - due.ReplicationStats.ReplicaSize = v.ReplicationStats.ReplicaSize - } - } due.Compacted = len(due.Children) == 0 && k != d.Info.Name d.Cache[k] = due } - - // Populate compacted value and remove unneeded replica stats. - for k, e := range d.Cache { - if e.ReplicationStats != nil && len(e.ReplicationStats.Targets) == 0 { - e.ReplicationStats = nil - } - d.Cache[k] = e - } return nil case dataUsageCacheVerV5: // Zstd compressed. @@ -1268,36 +1125,10 @@ func (d *dataUsageCache) deserialize(r io.Reader) error { ObjSizes: v.ObjSizes, Children: v.Children, } - if v.ReplicationStats != nil && !v.ReplicationStats.Empty() { - cfg, _ := getReplicationConfig(GlobalContext, d.Info.Name) - if cfg != nil && cfg.RoleArn != "" { - due.ReplicationStats = &replicationAllStats{ - Targets: make(map[string]replicationStats), - } - d.Info.replication = replicationConfig{Config: cfg} - - due.ReplicationStats.Targets[cfg.RoleArn] = replicationStats{ - ReplicatedSize: v.ReplicationStats.ReplicatedSize, - FailedSize: v.ReplicationStats.FailedSize, - FailedCount: v.ReplicationStats.FailedCount, - PendingSize: v.ReplicationStats.PendingSize, - PendingCount: v.ReplicationStats.PendingCount, - } - due.ReplicationStats.ReplicaSize = v.ReplicationStats.ReplicaSize - } - } due.Compacted = len(due.Children) == 0 && k != d.Info.Name d.Cache[k] = due } - - // Populate compacted value and remove unneeded replica stats. - for k, e := range d.Cache { - if e.ReplicationStats != nil && len(e.ReplicationStats.Targets) == 0 { - e.ReplicationStats = nil - } - d.Cache[k] = e - } return nil case dataUsageCacheVerV6: // Zstd compressed. 
@@ -1313,22 +1144,13 @@ func (d *dataUsageCache) deserialize(r io.Reader) error { d.Info = dold.Info d.Cache = make(map[string]dataUsageEntry, len(dold.Cache)) for k, v := range dold.Cache { - var replicationStats *replicationAllStats - if v.ReplicationStats != nil { - replicationStats = &replicationAllStats{ - Targets: v.ReplicationStats.Targets, - ReplicaSize: v.ReplicationStats.ReplicaSize, - ReplicaCount: v.ReplicationStats.ReplicaCount, - } - } due := dataUsageEntry{ - Children: v.Children, - Size: v.Size, - Objects: v.Objects, - Versions: v.Versions, - ObjSizes: v.ObjSizes, - ReplicationStats: replicationStats, - Compacted: v.Compacted, + Children: v.Children, + Size: v.Size, + Objects: v.Objects, + Versions: v.Versions, + ObjSizes: v.ObjSizes, + Compacted: v.Compacted, } d.Cache[k] = due } @@ -1350,13 +1172,12 @@ func (d *dataUsageCache) deserialize(r io.Reader) error { var szHist sizeHistogram szHist.mergeV1(v.ObjSizes) d.Cache[k] = dataUsageEntry{ - Children: v.Children, - Size: v.Size, - Objects: v.Objects, - Versions: v.Versions, - ObjSizes: szHist, - ReplicationStats: v.ReplicationStats, - Compacted: v.Compacted, + Children: v.Children, + Size: v.Size, + Objects: v.Objects, + Versions: v.Versions, + ObjSizes: szHist, + Compacted: v.Compacted, } } @@ -1400,11 +1221,11 @@ func (z *dataUsageHashMap) DecodeMsg(dc *msgp.Reader) (err error) { zb0002, err = dc.ReadArrayHeader() if err != nil { err = msgp.WrapError(err) - return + return err } if zb0002 == 0 { *z = nil - return + return err } *z = make(dataUsageHashMap, zb0002) for i := uint32(0); i < zb0002; i++ { @@ -1413,12 +1234,12 @@ func (z *dataUsageHashMap) DecodeMsg(dc *msgp.Reader) (err error) { zb0003, err = dc.ReadString() if err != nil { err = msgp.WrapError(err) - return + return err } (*z)[zb0003] = struct{}{} } } - return + return err } // EncodeMsg implements msgp.Encodable @@ -1426,16 +1247,16 @@ func (z dataUsageHashMap) EncodeMsg(en *msgp.Writer) (err error) { err = en.WriteArrayHeader(uint32(len(z))) if err != nil { err = msgp.WrapError(err) - return + return err } for zb0004 := range z { err = en.WriteString(zb0004) if err != nil { err = msgp.WrapError(err, zb0004) - return + return err } } - return + return err } // MarshalMsg implements msgp.Marshaler @@ -1445,7 +1266,7 @@ func (z dataUsageHashMap) MarshalMsg(b []byte) (o []byte, err error) { for zb0004 := range z { o = msgp.AppendString(o, zb0004) } - return + return o, err } // UnmarshalMsg implements msgp.Unmarshaler @@ -1454,7 +1275,7 @@ func (z *dataUsageHashMap) UnmarshalMsg(bts []byte) (o []byte, err error) { zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { err = msgp.WrapError(err) - return + return o, err } if zb0002 == 0 { *z = nil @@ -1467,13 +1288,13 @@ func (z *dataUsageHashMap) UnmarshalMsg(bts []byte) (o []byte, err error) { zb0003, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err) - return + return o, err } (*z)[zb0003] = struct{}{} } } o = bts - return + return o, err } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message @@ -1482,7 +1303,7 @@ func (z dataUsageHashMap) Msgsize() (s int) { for zb0004 := range z { s += msgp.StringPrefixSize + len(zb0004) } - return + return s } //msgp:encode ignore currentScannerCycle diff --git a/cmd/data-usage-cache_gen.go b/cmd/data-usage-cache_gen.go index 0be4fd0fa1a13..df9d34a164a7e 100644 --- a/cmd/data-usage-cache_gen.go +++ b/cmd/data-usage-cache_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by 
github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "time" @@ -36,19 +36,17 @@ func (z *allTierStats) DecodeMsg(dc *msgp.Reader) (err error) { if z.Tiers == nil { z.Tiers = make(map[string]tierStats, zb0002) } else if len(z.Tiers) > 0 { - for key := range z.Tiers { - delete(z.Tiers, key) - } + clear(z.Tiers) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 tierStats za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Tiers") return } + var za0002 tierStats var zb0003 uint32 zb0003, err = dc.ReadMapHeader() if err != nil { @@ -207,14 +205,12 @@ func (z *allTierStats) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.Tiers == nil { z.Tiers = make(map[string]tierStats, zb0002) } else if len(z.Tiers) > 0 { - for key := range z.Tiers { - delete(z.Tiers, key) - } + clear(z.Tiers) } for zb0002 > 0 { - var za0001 string var za0002 tierStats zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Tiers") @@ -415,19 +411,17 @@ func (z *dataUsageCache) DecodeMsg(dc *msgp.Reader) (err error) { if z.Cache == nil { z.Cache = make(map[string]dataUsageEntry, zb0002) } else if len(z.Cache) > 0 { - for key := range z.Cache { - delete(z.Cache, key) - } + clear(z.Cache) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 dataUsageEntry za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Cache") return } + var za0002 dataUsageEntry err = za0002.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Cache", za0001) @@ -543,14 +537,12 @@ func (z *dataUsageCache) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.Cache == nil { z.Cache = make(map[string]dataUsageEntry, zb0002) } else if len(z.Cache) > 0 { - for key := range z.Cache { - delete(z.Cache, key) - } + clear(z.Cache) } for zb0002 > 0 { - var za0001 string var za0002 dataUsageEntry zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") @@ -799,19 +791,17 @@ func (z *dataUsageCacheV2) DecodeMsg(dc *msgp.Reader) (err error) { if z.Cache == nil { z.Cache = make(map[string]dataUsageEntryV2, zb0002) } else if len(z.Cache) > 0 { - for key := range z.Cache { - delete(z.Cache, key) - } + clear(z.Cache) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 dataUsageEntryV2 za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Cache") return } + var za0002 dataUsageEntryV2 err = za0002.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Cache", za0001) @@ -864,14 +854,12 @@ func (z *dataUsageCacheV2) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.Cache == nil { z.Cache = make(map[string]dataUsageEntryV2, zb0002) } else if len(z.Cache) > 0 { - for key := range z.Cache { - delete(z.Cache, key) - } + clear(z.Cache) } for zb0002 > 0 { - var za0001 string var za0002 dataUsageEntryV2 zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") @@ -942,19 +930,17 @@ func (z *dataUsageCacheV3) DecodeMsg(dc *msgp.Reader) (err error) { if z.Cache == nil { z.Cache = make(map[string]dataUsageEntryV3, zb0002) } else if len(z.Cache) > 0 { - for key := range z.Cache { - delete(z.Cache, key) - } + clear(z.Cache) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 dataUsageEntryV3 za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Cache") return } + var za0002 dataUsageEntryV3 err = za0002.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, 
"Cache", za0001) @@ -1007,14 +993,12 @@ func (z *dataUsageCacheV3) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.Cache == nil { z.Cache = make(map[string]dataUsageEntryV3, zb0002) } else if len(z.Cache) > 0 { - for key := range z.Cache { - delete(z.Cache, key) - } + clear(z.Cache) } for zb0002 > 0 { - var za0001 string var za0002 dataUsageEntryV3 zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") @@ -1085,19 +1069,17 @@ func (z *dataUsageCacheV4) DecodeMsg(dc *msgp.Reader) (err error) { if z.Cache == nil { z.Cache = make(map[string]dataUsageEntryV4, zb0002) } else if len(z.Cache) > 0 { - for key := range z.Cache { - delete(z.Cache, key) - } + clear(z.Cache) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 dataUsageEntryV4 za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Cache") return } + var za0002 dataUsageEntryV4 err = za0002.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Cache", za0001) @@ -1150,14 +1132,12 @@ func (z *dataUsageCacheV4) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.Cache == nil { z.Cache = make(map[string]dataUsageEntryV4, zb0002) } else if len(z.Cache) > 0 { - for key := range z.Cache { - delete(z.Cache, key) - } + clear(z.Cache) } for zb0002 > 0 { - var za0001 string var za0002 dataUsageEntryV4 zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") @@ -1228,19 +1208,17 @@ func (z *dataUsageCacheV5) DecodeMsg(dc *msgp.Reader) (err error) { if z.Cache == nil { z.Cache = make(map[string]dataUsageEntryV5, zb0002) } else if len(z.Cache) > 0 { - for key := range z.Cache { - delete(z.Cache, key) - } + clear(z.Cache) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 dataUsageEntryV5 za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Cache") return } + var za0002 dataUsageEntryV5 err = za0002.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Cache", za0001) @@ -1293,14 +1271,12 @@ func (z *dataUsageCacheV5) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.Cache == nil { z.Cache = make(map[string]dataUsageEntryV5, zb0002) } else if len(z.Cache) > 0 { - for key := range z.Cache { - delete(z.Cache, key) - } + clear(z.Cache) } for zb0002 > 0 { - var za0001 string var za0002 dataUsageEntryV5 zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") @@ -1371,19 +1347,17 @@ func (z *dataUsageCacheV6) DecodeMsg(dc *msgp.Reader) (err error) { if z.Cache == nil { z.Cache = make(map[string]dataUsageEntryV6, zb0002) } else if len(z.Cache) > 0 { - for key := range z.Cache { - delete(z.Cache, key) - } + clear(z.Cache) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 dataUsageEntryV6 za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Cache") return } + var za0002 dataUsageEntryV6 err = za0002.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Cache", za0001) @@ -1436,14 +1410,12 @@ func (z *dataUsageCacheV6) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.Cache == nil { z.Cache = make(map[string]dataUsageEntryV6, zb0002) } else if len(z.Cache) > 0 { - for key := range z.Cache { - delete(z.Cache, key) - } + clear(z.Cache) } for zb0002 > 0 { - var za0001 string var za0002 dataUsageEntryV6 zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") @@ -1514,19 +1486,17 @@ func 
(z *dataUsageCacheV7) DecodeMsg(dc *msgp.Reader) (err error) { if z.Cache == nil { z.Cache = make(map[string]dataUsageEntryV7, zb0002) } else if len(z.Cache) > 0 { - for key := range z.Cache { - delete(z.Cache, key) - } + clear(z.Cache) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 dataUsageEntryV7 za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Cache") return } + var za0002 dataUsageEntryV7 err = za0002.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Cache", za0001) @@ -1579,14 +1549,12 @@ func (z *dataUsageCacheV7) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.Cache == nil { z.Cache = make(map[string]dataUsageEntryV7, zb0002) } else if len(z.Cache) > 0 { - for key := range z.Cache { - delete(z.Cache, key) - } + clear(z.Cache) } for zb0002 > 0 { - var za0001 string var za0002 dataUsageEntryV7 zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Cache") @@ -1633,6 +1601,8 @@ func (z *dataUsageEntry) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err) return } + var zb0001Mask uint8 /* 1 bits */ + _ = zb0001Mask for zb0001 > 0 { zb0001-- field, err = dc.ReadMapKeyPtr() @@ -1707,24 +1677,6 @@ func (z *dataUsageEntry) DecodeMsg(dc *msgp.Reader) (err error) { return } } - case "rs": - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - err = msgp.WrapError(err, "ReplicationStats") - return - } - z.ReplicationStats = nil - } else { - if z.ReplicationStats == nil { - z.ReplicationStats = new(replicationAllStats) - } - err = z.ReplicationStats.DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "ReplicationStats") - return - } - } case "ats": if dc.IsNil() { err = dc.ReadNil() @@ -1737,12 +1689,93 @@ func (z *dataUsageEntry) DecodeMsg(dc *msgp.Reader) (err error) { if z.AllTierStats == nil { z.AllTierStats = new(allTierStats) } - err = z.AllTierStats.DecodeMsg(dc) + var zb0004 uint32 + zb0004, err = dc.ReadMapHeader() if err != nil { err = msgp.WrapError(err, "AllTierStats") return } + for zb0004 > 0 { + zb0004-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "AllTierStats") + return + } + switch msgp.UnsafeString(field) { + case "ts": + var zb0005 uint32 + zb0005, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers") + return + } + if z.AllTierStats.Tiers == nil { + z.AllTierStats.Tiers = make(map[string]tierStats, zb0005) + } else if len(z.AllTierStats.Tiers) > 0 { + clear(z.AllTierStats.Tiers) + } + for zb0005 > 0 { + zb0005-- + var za0003 string + za0003, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers") + return + } + var za0004 tierStats + var zb0006 uint32 + zb0006, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003) + return + } + for zb0006 > 0 { + zb0006-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003) + return + } + switch msgp.UnsafeString(field) { + case "ts": + za0004.TotalSize, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003, "TotalSize") + return + } + case "nv": + za0004.NumVersions, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003, "NumVersions") + return + } + case "no": + za0004.NumObjects, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003, "NumObjects") + 
return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003) + return + } + } + } + z.AllTierStats.Tiers[za0003] = za0004 + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "AllTierStats") + return + } + } + } } + zb0001Mask |= 0x1 case "c": z.Compacted, err = dc.ReadBool() if err != nil { @@ -1757,162 +1790,189 @@ func (z *dataUsageEntry) DecodeMsg(dc *msgp.Reader) (err error) { } } } + // Clear omitted fields. + if (zb0001Mask & 0x1) == 0 { + z.AllTierStats = nil + } + return } // EncodeMsg implements msgp.Encodable func (z *dataUsageEntry) EncodeMsg(en *msgp.Writer) (err error) { - // omitempty: check for empty values - zb0001Len := uint32(10) - var zb0001Mask uint16 /* 10 bits */ + // check for omitted fields + zb0001Len := uint32(9) + var zb0001Mask uint16 /* 9 bits */ _ = zb0001Mask - if z.ReplicationStats == nil { - zb0001Len-- - zb0001Mask |= 0x80 - } if z.AllTierStats == nil { zb0001Len-- - zb0001Mask |= 0x100 + zb0001Mask |= 0x80 } // variable map header, size zb0001Len err = en.Append(0x80 | uint8(zb0001Len)) if err != nil { return } - if zb0001Len == 0 { - return - } - // write "ch" - err = en.Append(0xa2, 0x63, 0x68) - if err != nil { - return - } - err = z.Children.EncodeMsg(en) - if err != nil { - err = msgp.WrapError(err, "Children") - return - } - // write "sz" - err = en.Append(0xa2, 0x73, 0x7a) - if err != nil { - return - } - err = en.WriteInt64(z.Size) - if err != nil { - err = msgp.WrapError(err, "Size") - return - } - // write "os" - err = en.Append(0xa2, 0x6f, 0x73) - if err != nil { - return - } - err = en.WriteUint64(z.Objects) - if err != nil { - err = msgp.WrapError(err, "Objects") - return - } - // write "vs" - err = en.Append(0xa2, 0x76, 0x73) - if err != nil { - return - } - err = en.WriteUint64(z.Versions) - if err != nil { - err = msgp.WrapError(err, "Versions") - return - } - // write "dms" - err = en.Append(0xa3, 0x64, 0x6d, 0x73) - if err != nil { - return - } - err = en.WriteUint64(z.DeleteMarkers) - if err != nil { - err = msgp.WrapError(err, "DeleteMarkers") - return - } - // write "szs" - err = en.Append(0xa3, 0x73, 0x7a, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(dataUsageBucketLen)) - if err != nil { - err = msgp.WrapError(err, "ObjSizes") - return - } - for za0001 := range z.ObjSizes { - err = en.WriteUint64(z.ObjSizes[za0001]) + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // write "ch" + err = en.Append(0xa2, 0x63, 0x68) if err != nil { - err = msgp.WrapError(err, "ObjSizes", za0001) return } - } - // write "vh" - err = en.Append(0xa2, 0x76, 0x68) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(dataUsageVersionLen)) - if err != nil { - err = msgp.WrapError(err, "ObjVersions") - return - } - for za0002 := range z.ObjVersions { - err = en.WriteUint64(z.ObjVersions[za0002]) + err = z.Children.EncodeMsg(en) if err != nil { - err = msgp.WrapError(err, "ObjVersions", za0002) + err = msgp.WrapError(err, "Children") return } - } - if (zb0001Mask & 0x80) == 0 { // if not empty - // write "rs" - err = en.Append(0xa2, 0x72, 0x73) + // write "sz" + err = en.Append(0xa2, 0x73, 0x7a) if err != nil { return } - if z.ReplicationStats == nil { - err = en.WriteNil() - if err != nil { - return - } - } else { - err = z.ReplicationStats.EncodeMsg(en) + err = en.WriteInt64(z.Size) + if err != nil { + err = msgp.WrapError(err, "Size") + return + } + // write "os" + err = en.Append(0xa2, 0x6f, 0x73) + if err != nil { + 
return + } + err = en.WriteUint64(z.Objects) + if err != nil { + err = msgp.WrapError(err, "Objects") + return + } + // write "vs" + err = en.Append(0xa2, 0x76, 0x73) + if err != nil { + return + } + err = en.WriteUint64(z.Versions) + if err != nil { + err = msgp.WrapError(err, "Versions") + return + } + // write "dms" + err = en.Append(0xa3, 0x64, 0x6d, 0x73) + if err != nil { + return + } + err = en.WriteUint64(z.DeleteMarkers) + if err != nil { + err = msgp.WrapError(err, "DeleteMarkers") + return + } + // write "szs" + err = en.Append(0xa3, 0x73, 0x7a, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(dataUsageBucketLen)) + if err != nil { + err = msgp.WrapError(err, "ObjSizes") + return + } + for za0001 := range z.ObjSizes { + err = en.WriteUint64(z.ObjSizes[za0001]) if err != nil { - err = msgp.WrapError(err, "ReplicationStats") + err = msgp.WrapError(err, "ObjSizes", za0001) return } } - } - if (zb0001Mask & 0x100) == 0 { // if not empty - // write "ats" - err = en.Append(0xa3, 0x61, 0x74, 0x73) + // write "vh" + err = en.Append(0xa2, 0x76, 0x68) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(dataUsageVersionLen)) if err != nil { + err = msgp.WrapError(err, "ObjVersions") return } - if z.AllTierStats == nil { - err = en.WriteNil() + for za0002 := range z.ObjVersions { + err = en.WriteUint64(z.ObjVersions[za0002]) if err != nil { + err = msgp.WrapError(err, "ObjVersions", za0002) return } - } else { - err = z.AllTierStats.EncodeMsg(en) + } + if (zb0001Mask & 0x80) == 0 { // if not omitted + // write "ats" + err = en.Append(0xa3, 0x61, 0x74, 0x73) if err != nil { - err = msgp.WrapError(err, "AllTierStats") return } + if z.AllTierStats == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + // map header, size 1 + // write "ts" + err = en.Append(0x81, 0xa2, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteMapHeader(uint32(len(z.AllTierStats.Tiers))) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers") + return + } + for za0003, za0004 := range z.AllTierStats.Tiers { + err = en.WriteString(za0003) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers") + return + } + // map header, size 3 + // write "ts" + err = en.Append(0x83, 0xa2, 0x74, 0x73) + if err != nil { + return + } + err = en.WriteUint64(za0004.TotalSize) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003, "TotalSize") + return + } + // write "nv" + err = en.Append(0xa2, 0x6e, 0x76) + if err != nil { + return + } + err = en.WriteInt(za0004.NumVersions) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003, "NumVersions") + return + } + // write "no" + err = en.Append(0xa2, 0x6e, 0x6f) + if err != nil { + return + } + err = en.WriteInt(za0004.NumObjects) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003, "NumObjects") + return + } + } + } + } + // write "c" + err = en.Append(0xa1, 0x63) + if err != nil { + return + } + err = en.WriteBool(z.Compacted) + if err != nil { + err = msgp.WrapError(err, "Compacted") + return } - } - // write "c" - err = en.Append(0xa1, 0x63) - if err != nil { - return - } - err = en.WriteBool(z.Compacted) - if err != nil { - err = msgp.WrapError(err, "Compacted") - return } return } @@ -1920,83 +1980,79 @@ func (z *dataUsageEntry) EncodeMsg(en *msgp.Writer) (err error) { // MarshalMsg implements msgp.Marshaler func (z *dataUsageEntry) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // 
omitempty: check for empty values - zb0001Len := uint32(10) - var zb0001Mask uint16 /* 10 bits */ + // check for omitted fields + zb0001Len := uint32(9) + var zb0001Mask uint16 /* 9 bits */ _ = zb0001Mask - if z.ReplicationStats == nil { - zb0001Len-- - zb0001Mask |= 0x80 - } if z.AllTierStats == nil { zb0001Len-- - zb0001Mask |= 0x100 + zb0001Mask |= 0x80 } // variable map header, size zb0001Len o = append(o, 0x80|uint8(zb0001Len)) - if zb0001Len == 0 { - return - } - // string "ch" - o = append(o, 0xa2, 0x63, 0x68) - o, err = z.Children.MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Children") - return - } - // string "sz" - o = append(o, 0xa2, 0x73, 0x7a) - o = msgp.AppendInt64(o, z.Size) - // string "os" - o = append(o, 0xa2, 0x6f, 0x73) - o = msgp.AppendUint64(o, z.Objects) - // string "vs" - o = append(o, 0xa2, 0x76, 0x73) - o = msgp.AppendUint64(o, z.Versions) - // string "dms" - o = append(o, 0xa3, 0x64, 0x6d, 0x73) - o = msgp.AppendUint64(o, z.DeleteMarkers) - // string "szs" - o = append(o, 0xa3, 0x73, 0x7a, 0x73) - o = msgp.AppendArrayHeader(o, uint32(dataUsageBucketLen)) - for za0001 := range z.ObjSizes { - o = msgp.AppendUint64(o, z.ObjSizes[za0001]) - } - // string "vh" - o = append(o, 0xa2, 0x76, 0x68) - o = msgp.AppendArrayHeader(o, uint32(dataUsageVersionLen)) - for za0002 := range z.ObjVersions { - o = msgp.AppendUint64(o, z.ObjVersions[za0002]) - } - if (zb0001Mask & 0x80) == 0 { // if not empty - // string "rs" - o = append(o, 0xa2, 0x72, 0x73) - if z.ReplicationStats == nil { - o = msgp.AppendNil(o) - } else { - o, err = z.ReplicationStats.MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "ReplicationStats") - return - } - } - } - if (zb0001Mask & 0x100) == 0 { // if not empty - // string "ats" - o = append(o, 0xa3, 0x61, 0x74, 0x73) - if z.AllTierStats == nil { - o = msgp.AppendNil(o) - } else { - o, err = z.AllTierStats.MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "AllTierStats") - return + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // string "ch" + o = append(o, 0xa2, 0x63, 0x68) + o, err = z.Children.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Children") + return + } + // string "sz" + o = append(o, 0xa2, 0x73, 0x7a) + o = msgp.AppendInt64(o, z.Size) + // string "os" + o = append(o, 0xa2, 0x6f, 0x73) + o = msgp.AppendUint64(o, z.Objects) + // string "vs" + o = append(o, 0xa2, 0x76, 0x73) + o = msgp.AppendUint64(o, z.Versions) + // string "dms" + o = append(o, 0xa3, 0x64, 0x6d, 0x73) + o = msgp.AppendUint64(o, z.DeleteMarkers) + // string "szs" + o = append(o, 0xa3, 0x73, 0x7a, 0x73) + o = msgp.AppendArrayHeader(o, uint32(dataUsageBucketLen)) + for za0001 := range z.ObjSizes { + o = msgp.AppendUint64(o, z.ObjSizes[za0001]) + } + // string "vh" + o = append(o, 0xa2, 0x76, 0x68) + o = msgp.AppendArrayHeader(o, uint32(dataUsageVersionLen)) + for za0002 := range z.ObjVersions { + o = msgp.AppendUint64(o, z.ObjVersions[za0002]) + } + if (zb0001Mask & 0x80) == 0 { // if not omitted + // string "ats" + o = append(o, 0xa3, 0x61, 0x74, 0x73) + if z.AllTierStats == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 1 + // string "ts" + o = append(o, 0x81, 0xa2, 0x74, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.AllTierStats.Tiers))) + for za0003, za0004 := range z.AllTierStats.Tiers { + o = msgp.AppendString(o, za0003) + // map header, size 3 + // string "ts" + o = append(o, 0x83, 0xa2, 0x74, 0x73) + o = msgp.AppendUint64(o, za0004.TotalSize) + // string "nv" + o = append(o, 0xa2, 
0x6e, 0x76) + o = msgp.AppendInt(o, za0004.NumVersions) + // string "no" + o = append(o, 0xa2, 0x6e, 0x6f) + o = msgp.AppendInt(o, za0004.NumObjects) + } } } + // string "c" + o = append(o, 0xa1, 0x63) + o = msgp.AppendBool(o, z.Compacted) } - // string "c" - o = append(o, 0xa1, 0x63) - o = msgp.AppendBool(o, z.Compacted) return } @@ -2010,6 +2066,8 @@ func (z *dataUsageEntry) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err) return } + var zb0001Mask uint8 /* 1 bits */ + _ = zb0001Mask for zb0001 > 0 { zb0001-- field, bts, err = msgp.ReadMapKeyZC(bts) @@ -2084,40 +2142,104 @@ func (z *dataUsageEntry) UnmarshalMsg(bts []byte) (o []byte, err error) { return } } - case "rs": + case "ats": if msgp.IsNil(bts) { bts, err = msgp.ReadNilBytes(bts) if err != nil { return } - z.ReplicationStats = nil + z.AllTierStats = nil } else { - if z.ReplicationStats == nil { - z.ReplicationStats = new(replicationAllStats) + if z.AllTierStats == nil { + z.AllTierStats = new(allTierStats) } - bts, err = z.ReplicationStats.UnmarshalMsg(bts) + var zb0004 uint32 + zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) if err != nil { - err = msgp.WrapError(err, "ReplicationStats") + err = msgp.WrapError(err, "AllTierStats") return } - } - case "ats": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.AllTierStats = nil - } else { - if z.AllTierStats == nil { - z.AllTierStats = new(allTierStats) - } - bts, err = z.AllTierStats.UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "AllTierStats") - return + for zb0004 > 0 { + zb0004-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats") + return + } + switch msgp.UnsafeString(field) { + case "ts": + var zb0005 uint32 + zb0005, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers") + return + } + if z.AllTierStats.Tiers == nil { + z.AllTierStats.Tiers = make(map[string]tierStats, zb0005) + } else if len(z.AllTierStats.Tiers) > 0 { + clear(z.AllTierStats.Tiers) + } + for zb0005 > 0 { + var za0004 tierStats + zb0005-- + var za0003 string + za0003, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers") + return + } + var zb0006 uint32 + zb0006, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003) + return + } + for zb0006 > 0 { + zb0006-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003) + return + } + switch msgp.UnsafeString(field) { + case "ts": + za0004.TotalSize, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003, "TotalSize") + return + } + case "nv": + za0004.NumVersions, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003, "NumVersions") + return + } + case "no": + za0004.NumObjects, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003, "NumObjects") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003) + return + } + } + } + z.AllTierStats.Tiers[za0003] = za0004 + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats") + return + } + } } } + zb0001Mask |= 0x1 case "c": z.Compacted, bts, err = 
msgp.ReadBoolBytes(bts) if err != nil { @@ -2132,23 +2254,28 @@ func (z *dataUsageEntry) UnmarshalMsg(bts []byte) (o []byte, err error) { } } } + // Clear omitted fields. + if (zb0001Mask & 0x1) == 0 { + z.AllTierStats = nil + } + o = bts return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *dataUsageEntry) Msgsize() (s int) { - s = 1 + 3 + z.Children.Msgsize() + 3 + msgp.Int64Size + 3 + msgp.Uint64Size + 3 + msgp.Uint64Size + 4 + msgp.Uint64Size + 4 + msgp.ArrayHeaderSize + (dataUsageBucketLen * (msgp.Uint64Size)) + 3 + msgp.ArrayHeaderSize + (dataUsageVersionLen * (msgp.Uint64Size)) + 3 - if z.ReplicationStats == nil { - s += msgp.NilSize - } else { - s += z.ReplicationStats.Msgsize() - } - s += 4 + s = 1 + 3 + z.Children.Msgsize() + 3 + msgp.Int64Size + 3 + msgp.Uint64Size + 3 + msgp.Uint64Size + 4 + msgp.Uint64Size + 4 + msgp.ArrayHeaderSize + (dataUsageBucketLen * (msgp.Uint64Size)) + 3 + msgp.ArrayHeaderSize + (dataUsageVersionLen * (msgp.Uint64Size)) + 4 if z.AllTierStats == nil { s += msgp.NilSize } else { - s += z.AllTierStats.Msgsize() + s += 1 + 3 + msgp.MapHeaderSize + if z.AllTierStats.Tiers != nil { + for za0003, za0004 := range z.AllTierStats.Tiers { + _ = za0004 + s += msgp.StringPrefixSize + len(za0003) + 1 + 3 + msgp.Uint64Size + 3 + msgp.IntSize + 3 + msgp.IntSize + } + } } s += 2 + msgp.BoolSize return @@ -2263,8 +2390,8 @@ func (z *dataUsageEntryV3) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err) return } - if zb0001 != 8 { - err = msgp.ArrayError{Wanted: 8, Got: zb0001} + if zb0001 != 4 { + err = msgp.ArrayError{Wanted: 4, Got: zb0001} return } z.Size, err = dc.ReadInt64() @@ -2272,26 +2399,6 @@ func (z *dataUsageEntryV3) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "Size") return } - z.ReplicatedSize, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "ReplicatedSize") - return - } - z.ReplicationPendingSize, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "ReplicationPendingSize") - return - } - z.ReplicationFailedSize, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "ReplicationFailedSize") - return - } - z.ReplicaSize, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "ReplicaSize") - return - } z.Objects, err = dc.ReadUint64() if err != nil { err = msgp.WrapError(err, "Objects") @@ -2330,8 +2437,8 @@ func (z *dataUsageEntryV3) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err) return } - if zb0001 != 8 { - err = msgp.ArrayError{Wanted: 8, Got: zb0001} + if zb0001 != 4 { + err = msgp.ArrayError{Wanted: 4, Got: zb0001} return } z.Size, bts, err = msgp.ReadInt64Bytes(bts) @@ -2339,26 +2446,6 @@ func (z *dataUsageEntryV3) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "Size") return } - z.ReplicatedSize, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ReplicatedSize") - return - } - z.ReplicationPendingSize, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ReplicationPendingSize") - return - } - z.ReplicationFailedSize, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ReplicationFailedSize") - return - } - z.ReplicaSize, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ReplicaSize") - return - } z.Objects, bts, err = msgp.ReadUint64Bytes(bts) if err != nil { err = msgp.WrapError(err, "Objects") @@ 
-2392,7 +2479,7 @@ func (z *dataUsageEntryV3) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *dataUsageEntryV3) Msgsize() (s int) { - s = 1 + msgp.Int64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.ArrayHeaderSize + (dataUsageBucketLen * (msgp.Uint64Size)) + z.Children.Msgsize() + s = 1 + msgp.Int64Size + msgp.Uint64Size + msgp.ArrayHeaderSize + (dataUsageBucketLen * (msgp.Uint64Size)) + z.Children.Msgsize() return } @@ -2404,8 +2491,8 @@ func (z *dataUsageEntryV4) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err) return } - if zb0001 != 5 { - err = msgp.ArrayError{Wanted: 5, Got: zb0001} + if zb0001 != 4 { + err = msgp.ArrayError{Wanted: 4, Got: zb0001} return } err = z.Children.DecodeMsg(dc) @@ -2440,11 +2527,6 @@ func (z *dataUsageEntryV4) DecodeMsg(dc *msgp.Reader) (err error) { return } } - err = z.ReplicationStats.DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "ReplicationStats") - return - } return } @@ -2456,8 +2538,8 @@ func (z *dataUsageEntryV4) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err) return } - if zb0001 != 5 { - err = msgp.ArrayError{Wanted: 5, Got: zb0001} + if zb0001 != 4 { + err = msgp.ArrayError{Wanted: 4, Got: zb0001} return } bts, err = z.Children.UnmarshalMsg(bts) @@ -2492,18 +2574,13 @@ func (z *dataUsageEntryV4) UnmarshalMsg(bts []byte) (o []byte, err error) { return } } - bts, err = z.ReplicationStats.UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "ReplicationStats") - return - } o = bts return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *dataUsageEntryV4) Msgsize() (s int) { - s = 1 + z.Children.Msgsize() + msgp.Int64Size + msgp.Uint64Size + msgp.ArrayHeaderSize + (dataUsageBucketLen * (msgp.Uint64Size)) + z.ReplicationStats.Msgsize() + s = 1 + z.Children.Msgsize() + msgp.Int64Size + msgp.Uint64Size + msgp.ArrayHeaderSize + (dataUsageBucketLen * (msgp.Uint64Size)) return } @@ -2515,8 +2592,8 @@ func (z *dataUsageEntryV5) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err) return } - if zb0001 != 7 { - err = msgp.ArrayError{Wanted: 7, Got: zb0001} + if zb0001 != 6 { + err = msgp.ArrayError{Wanted: 6, Got: zb0001} return } err = z.Children.DecodeMsg(dc) @@ -2556,23 +2633,6 @@ func (z *dataUsageEntryV5) DecodeMsg(dc *msgp.Reader) (err error) { return } } - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - err = msgp.WrapError(err, "ReplicationStats") - return - } - z.ReplicationStats = nil - } else { - if z.ReplicationStats == nil { - z.ReplicationStats = new(replicationStatsV1) - } - err = z.ReplicationStats.DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "ReplicationStats") - return - } - } z.Compacted, err = dc.ReadBool() if err != nil { err = msgp.WrapError(err, "Compacted") @@ -2589,8 +2649,8 @@ func (z *dataUsageEntryV5) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err) return } - if zb0001 != 7 { - err = msgp.ArrayError{Wanted: 7, Got: zb0001} + if zb0001 != 6 { + err = msgp.ArrayError{Wanted: 6, Got: zb0001} return } bts, err = z.Children.UnmarshalMsg(bts) @@ -2630,22 +2690,6 @@ func (z *dataUsageEntryV5) UnmarshalMsg(bts []byte) (o []byte, err error) { return } } - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.ReplicationStats = nil - } else { - if 
z.ReplicationStats == nil { - z.ReplicationStats = new(replicationStatsV1) - } - bts, err = z.ReplicationStats.UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "ReplicationStats") - return - } - } z.Compacted, bts, err = msgp.ReadBoolBytes(bts) if err != nil { err = msgp.WrapError(err, "Compacted") @@ -2657,13 +2701,7 @@ func (z *dataUsageEntryV5) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *dataUsageEntryV5) Msgsize() (s int) { - s = 1 + z.Children.Msgsize() + msgp.Int64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.ArrayHeaderSize + (dataUsageBucketLen * (msgp.Uint64Size)) - if z.ReplicationStats == nil { - s += msgp.NilSize - } else { - s += z.ReplicationStats.Msgsize() - } - s += msgp.BoolSize + s = 1 + z.Children.Msgsize() + msgp.Int64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.ArrayHeaderSize + (dataUsageBucketLen * (msgp.Uint64Size)) + msgp.BoolSize return } @@ -2675,8 +2713,8 @@ func (z *dataUsageEntryV6) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err) return } - if zb0001 != 7 { - err = msgp.ArrayError{Wanted: 7, Got: zb0001} + if zb0001 != 6 { + err = msgp.ArrayError{Wanted: 6, Got: zb0001} return } err = z.Children.DecodeMsg(dc) @@ -2716,23 +2754,6 @@ func (z *dataUsageEntryV6) DecodeMsg(dc *msgp.Reader) (err error) { return } } - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - err = msgp.WrapError(err, "ReplicationStats") - return - } - z.ReplicationStats = nil - } else { - if z.ReplicationStats == nil { - z.ReplicationStats = new(replicationAllStatsV1) - } - err = z.ReplicationStats.DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "ReplicationStats") - return - } - } z.Compacted, err = dc.ReadBool() if err != nil { err = msgp.WrapError(err, "Compacted") @@ -2749,8 +2770,8 @@ func (z *dataUsageEntryV6) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err) return } - if zb0001 != 7 { - err = msgp.ArrayError{Wanted: 7, Got: zb0001} + if zb0001 != 6 { + err = msgp.ArrayError{Wanted: 6, Got: zb0001} return } bts, err = z.Children.UnmarshalMsg(bts) @@ -2790,22 +2811,6 @@ func (z *dataUsageEntryV6) UnmarshalMsg(bts []byte) (o []byte, err error) { return } } - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.ReplicationStats = nil - } else { - if z.ReplicationStats == nil { - z.ReplicationStats = new(replicationAllStatsV1) - } - bts, err = z.ReplicationStats.UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "ReplicationStats") - return - } - } z.Compacted, bts, err = msgp.ReadBoolBytes(bts) if err != nil { err = msgp.WrapError(err, "Compacted") @@ -2817,13 +2822,7 @@ func (z *dataUsageEntryV6) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *dataUsageEntryV6) Msgsize() (s int) { - s = 1 + z.Children.Msgsize() + msgp.Int64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.ArrayHeaderSize + (dataUsageBucketLen * (msgp.Uint64Size)) - if z.ReplicationStats == nil { - s += msgp.NilSize - } else { - s += z.ReplicationStats.Msgsize() - } - s += msgp.BoolSize + s = 1 + z.Children.Msgsize() + msgp.Int64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.ArrayHeaderSize + (dataUsageBucketLen * (msgp.Uint64Size)) + msgp.BoolSize return } @@ -2837,6 +2836,8 @@ func (z *dataUsageEntryV7) DecodeMsg(dc *msgp.Reader) (err error) { err = 
msgp.WrapError(err) return } + var zb0001Mask uint8 /* 1 bits */ + _ = zb0001Mask for zb0001 > 0 { zb0001-- field, err = dc.ReadMapKeyPtr() @@ -2911,24 +2912,6 @@ func (z *dataUsageEntryV7) DecodeMsg(dc *msgp.Reader) (err error) { return } } - case "rs": - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - err = msgp.WrapError(err, "ReplicationStats") - return - } - z.ReplicationStats = nil - } else { - if z.ReplicationStats == nil { - z.ReplicationStats = new(replicationAllStats) - } - err = z.ReplicationStats.DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "ReplicationStats") - return - } - } case "ats": if dc.IsNil() { err = dc.ReadNil() @@ -2941,12 +2924,93 @@ func (z *dataUsageEntryV7) DecodeMsg(dc *msgp.Reader) (err error) { if z.AllTierStats == nil { z.AllTierStats = new(allTierStats) } - err = z.AllTierStats.DecodeMsg(dc) + var zb0004 uint32 + zb0004, err = dc.ReadMapHeader() if err != nil { err = msgp.WrapError(err, "AllTierStats") return } + for zb0004 > 0 { + zb0004-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "AllTierStats") + return + } + switch msgp.UnsafeString(field) { + case "ts": + var zb0005 uint32 + zb0005, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers") + return + } + if z.AllTierStats.Tiers == nil { + z.AllTierStats.Tiers = make(map[string]tierStats, zb0005) + } else if len(z.AllTierStats.Tiers) > 0 { + clear(z.AllTierStats.Tiers) + } + for zb0005 > 0 { + zb0005-- + var za0003 string + za0003, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers") + return + } + var za0004 tierStats + var zb0006 uint32 + zb0006, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003) + return + } + for zb0006 > 0 { + zb0006-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003) + return + } + switch msgp.UnsafeString(field) { + case "ts": + za0004.TotalSize, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003, "TotalSize") + return + } + case "nv": + za0004.NumVersions, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003, "NumVersions") + return + } + case "no": + za0004.NumObjects, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003, "NumObjects") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003) + return + } + } + } + z.AllTierStats.Tiers[za0003] = za0004 + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "AllTierStats") + return + } + } + } } + zb0001Mask |= 0x1 case "c": z.Compacted, err = dc.ReadBool() if err != nil { @@ -2961,6 +3025,11 @@ func (z *dataUsageEntryV7) DecodeMsg(dc *msgp.Reader) (err error) { } } } + // Clear omitted fields. 
+ if (zb0001Mask & 0x1) == 0 { + z.AllTierStats = nil + } + return } @@ -2974,6 +3043,8 @@ func (z *dataUsageEntryV7) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err) return } + var zb0001Mask uint8 /* 1 bits */ + _ = zb0001Mask for zb0001 > 0 { zb0001-- field, bts, err = msgp.ReadMapKeyZC(bts) @@ -3048,23 +3119,6 @@ func (z *dataUsageEntryV7) UnmarshalMsg(bts []byte) (o []byte, err error) { return } } - case "rs": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.ReplicationStats = nil - } else { - if z.ReplicationStats == nil { - z.ReplicationStats = new(replicationAllStats) - } - bts, err = z.ReplicationStats.UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "ReplicationStats") - return - } - } case "ats": if msgp.IsNil(bts) { bts, err = msgp.ReadNilBytes(bts) @@ -3076,12 +3130,93 @@ func (z *dataUsageEntryV7) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.AllTierStats == nil { z.AllTierStats = new(allTierStats) } - bts, err = z.AllTierStats.UnmarshalMsg(bts) + var zb0004 uint32 + zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "AllTierStats") return } + for zb0004 > 0 { + zb0004-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats") + return + } + switch msgp.UnsafeString(field) { + case "ts": + var zb0005 uint32 + zb0005, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers") + return + } + if z.AllTierStats.Tiers == nil { + z.AllTierStats.Tiers = make(map[string]tierStats, zb0005) + } else if len(z.AllTierStats.Tiers) > 0 { + clear(z.AllTierStats.Tiers) + } + for zb0005 > 0 { + var za0004 tierStats + zb0005-- + var za0003 string + za0003, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers") + return + } + var zb0006 uint32 + zb0006, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003) + return + } + for zb0006 > 0 { + zb0006-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003) + return + } + switch msgp.UnsafeString(field) { + case "ts": + za0004.TotalSize, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003, "TotalSize") + return + } + case "nv": + za0004.NumVersions, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003, "NumVersions") + return + } + case "no": + za0004.NumObjects, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003, "NumObjects") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats", "Tiers", za0003) + return + } + } + } + z.AllTierStats.Tiers[za0003] = za0004 + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "AllTierStats") + return + } + } + } } + zb0001Mask |= 0x1 case "c": z.Compacted, bts, err = msgp.ReadBoolBytes(bts) if err != nil { @@ -3096,23 +3231,28 @@ func (z *dataUsageEntryV7) UnmarshalMsg(bts []byte) (o []byte, err error) { } } } + // Clear omitted fields. 
+ if (zb0001Mask & 0x1) == 0 { + z.AllTierStats = nil + } + o = bts return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *dataUsageEntryV7) Msgsize() (s int) { - s = 1 + 3 + z.Children.Msgsize() + 3 + msgp.Int64Size + 3 + msgp.Uint64Size + 3 + msgp.Uint64Size + 4 + msgp.Uint64Size + 4 + msgp.ArrayHeaderSize + (dataUsageBucketLenV1 * (msgp.Uint64Size)) + 3 + msgp.ArrayHeaderSize + (dataUsageVersionLen * (msgp.Uint64Size)) + 3 - if z.ReplicationStats == nil { - s += msgp.NilSize - } else { - s += z.ReplicationStats.Msgsize() - } - s += 4 + s = 1 + 3 + z.Children.Msgsize() + 3 + msgp.Int64Size + 3 + msgp.Uint64Size + 3 + msgp.Uint64Size + 4 + msgp.Uint64Size + 4 + msgp.ArrayHeaderSize + (dataUsageBucketLenV1 * (msgp.Uint64Size)) + 3 + msgp.ArrayHeaderSize + (dataUsageVersionLen * (msgp.Uint64Size)) + 4 if z.AllTierStats == nil { s += msgp.NilSize } else { - s += z.AllTierStats.Msgsize() + s += 1 + 3 + msgp.MapHeaderSize + if z.AllTierStats.Tiers != nil { + for za0003, za0004 := range z.AllTierStats.Tiers { + _ = za0004 + s += msgp.StringPrefixSize + len(za0003) + 1 + 3 + msgp.Uint64Size + 3 + msgp.IntSize + 3 + msgp.IntSize + } + } } s += 2 + msgp.BoolSize return @@ -3170,897 +3310,6 @@ func (z dataUsageHash) Msgsize() (s int) { return } -// DecodeMsg implements msgp.Decodable -func (z *replicationAllStats) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "t": - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "Targets") - return - } - if z.Targets == nil { - z.Targets = make(map[string]replicationStats, zb0002) - } else if len(z.Targets) > 0 { - for key := range z.Targets { - delete(z.Targets, key) - } - } - for zb0002 > 0 { - zb0002-- - var za0001 string - var za0002 replicationStats - za0001, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Targets") - return - } - err = za0002.DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "Targets", za0001) - return - } - z.Targets[za0001] = za0002 - } - case "r": - z.ReplicaSize, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "ReplicaSize") - return - } - case "rc": - z.ReplicaCount, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "ReplicaCount") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *replicationAllStats) EncodeMsg(en *msgp.Writer) (err error) { - // omitempty: check for empty values - zb0001Len := uint32(3) - var zb0001Mask uint8 /* 3 bits */ - _ = zb0001Mask - if z.Targets == nil { - zb0001Len-- - zb0001Mask |= 0x1 - } - if z.ReplicaSize == 0 { - zb0001Len-- - zb0001Mask |= 0x2 - } - if z.ReplicaCount == 0 { - zb0001Len-- - zb0001Mask |= 0x4 - } - // variable map header, size zb0001Len - err = en.Append(0x80 | uint8(zb0001Len)) - if err != nil { - return - } - if zb0001Len == 0 { - return - } - if (zb0001Mask & 0x1) == 0 { // if not empty - // write "t" - err = en.Append(0xa1, 0x74) - if err != nil { - return - } - err = en.WriteMapHeader(uint32(len(z.Targets))) - if err != nil { - err = msgp.WrapError(err, "Targets") - return - } - for 
za0001, za0002 := range z.Targets { - err = en.WriteString(za0001) - if err != nil { - err = msgp.WrapError(err, "Targets") - return - } - err = za0002.EncodeMsg(en) - if err != nil { - err = msgp.WrapError(err, "Targets", za0001) - return - } - } - } - if (zb0001Mask & 0x2) == 0 { // if not empty - // write "r" - err = en.Append(0xa1, 0x72) - if err != nil { - return - } - err = en.WriteUint64(z.ReplicaSize) - if err != nil { - err = msgp.WrapError(err, "ReplicaSize") - return - } - } - if (zb0001Mask & 0x4) == 0 { // if not empty - // write "rc" - err = en.Append(0xa2, 0x72, 0x63) - if err != nil { - return - } - err = en.WriteUint64(z.ReplicaCount) - if err != nil { - err = msgp.WrapError(err, "ReplicaCount") - return - } - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *replicationAllStats) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // omitempty: check for empty values - zb0001Len := uint32(3) - var zb0001Mask uint8 /* 3 bits */ - _ = zb0001Mask - if z.Targets == nil { - zb0001Len-- - zb0001Mask |= 0x1 - } - if z.ReplicaSize == 0 { - zb0001Len-- - zb0001Mask |= 0x2 - } - if z.ReplicaCount == 0 { - zb0001Len-- - zb0001Mask |= 0x4 - } - // variable map header, size zb0001Len - o = append(o, 0x80|uint8(zb0001Len)) - if zb0001Len == 0 { - return - } - if (zb0001Mask & 0x1) == 0 { // if not empty - // string "t" - o = append(o, 0xa1, 0x74) - o = msgp.AppendMapHeader(o, uint32(len(z.Targets))) - for za0001, za0002 := range z.Targets { - o = msgp.AppendString(o, za0001) - o, err = za0002.MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Targets", za0001) - return - } - } - } - if (zb0001Mask & 0x2) == 0 { // if not empty - // string "r" - o = append(o, 0xa1, 0x72) - o = msgp.AppendUint64(o, z.ReplicaSize) - } - if (zb0001Mask & 0x4) == 0 { // if not empty - // string "rc" - o = append(o, 0xa2, 0x72, 0x63) - o = msgp.AppendUint64(o, z.ReplicaCount) - } - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *replicationAllStats) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "t": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Targets") - return - } - if z.Targets == nil { - z.Targets = make(map[string]replicationStats, zb0002) - } else if len(z.Targets) > 0 { - for key := range z.Targets { - delete(z.Targets, key) - } - } - for zb0002 > 0 { - var za0001 string - var za0002 replicationStats - zb0002-- - za0001, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Targets") - return - } - bts, err = za0002.UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "Targets", za0001) - return - } - z.Targets[za0001] = za0002 - } - case "r": - z.ReplicaSize, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ReplicaSize") - return - } - case "rc": - z.ReplicaCount, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ReplicaCount") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of 
the number of bytes occupied by the serialized message -func (z *replicationAllStats) Msgsize() (s int) { - s = 1 + 2 + msgp.MapHeaderSize - if z.Targets != nil { - for za0001, za0002 := range z.Targets { - _ = za0002 - s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize() - } - } - s += 2 + msgp.Uint64Size + 3 + msgp.Uint64Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *replicationAllStatsV1) DecodeMsg(dc *msgp.Reader) (err error) { - var zb0001 uint32 - zb0001, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - if zb0001 != 3 { - err = msgp.ArrayError{Wanted: 3, Got: zb0001} - return - } - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "Targets") - return - } - if z.Targets == nil { - z.Targets = make(map[string]replicationStats, zb0002) - } else if len(z.Targets) > 0 { - for key := range z.Targets { - delete(z.Targets, key) - } - } - for zb0002 > 0 { - zb0002-- - var za0001 string - var za0002 replicationStats - za0001, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Targets") - return - } - err = za0002.DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "Targets", za0001) - return - } - z.Targets[za0001] = za0002 - } - z.ReplicaSize, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "ReplicaSize") - return - } - z.ReplicaCount, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "ReplicaCount") - return - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *replicationAllStatsV1) EncodeMsg(en *msgp.Writer) (err error) { - // array header, size 3 - err = en.Append(0x93) - if err != nil { - return - } - err = en.WriteMapHeader(uint32(len(z.Targets))) - if err != nil { - err = msgp.WrapError(err, "Targets") - return - } - for za0001, za0002 := range z.Targets { - err = en.WriteString(za0001) - if err != nil { - err = msgp.WrapError(err, "Targets") - return - } - err = za0002.EncodeMsg(en) - if err != nil { - err = msgp.WrapError(err, "Targets", za0001) - return - } - } - err = en.WriteUint64(z.ReplicaSize) - if err != nil { - err = msgp.WrapError(err, "ReplicaSize") - return - } - err = en.WriteUint64(z.ReplicaCount) - if err != nil { - err = msgp.WrapError(err, "ReplicaCount") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *replicationAllStatsV1) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // array header, size 3 - o = append(o, 0x93) - o = msgp.AppendMapHeader(o, uint32(len(z.Targets))) - for za0001, za0002 := range z.Targets { - o = msgp.AppendString(o, za0001) - o, err = za0002.MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Targets", za0001) - return - } - } - o = msgp.AppendUint64(o, z.ReplicaSize) - o = msgp.AppendUint64(o, z.ReplicaCount) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *replicationAllStatsV1) UnmarshalMsg(bts []byte) (o []byte, err error) { - var zb0001 uint32 - zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - if zb0001 != 3 { - err = msgp.ArrayError{Wanted: 3, Got: zb0001} - return - } - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Targets") - return - } - if z.Targets == nil { - z.Targets = make(map[string]replicationStats, zb0002) - } else if len(z.Targets) > 0 { - for key := range z.Targets { - delete(z.Targets, key) - } - } - for zb0002 > 0 
{ - var za0001 string - var za0002 replicationStats - zb0002-- - za0001, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Targets") - return - } - bts, err = za0002.UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "Targets", za0001) - return - } - z.Targets[za0001] = za0002 - } - z.ReplicaSize, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ReplicaSize") - return - } - z.ReplicaCount, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ReplicaCount") - return - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *replicationAllStatsV1) Msgsize() (s int) { - s = 1 + msgp.MapHeaderSize - if z.Targets != nil { - for za0001, za0002 := range z.Targets { - _ = za0002 - s += msgp.StringPrefixSize + len(za0001) + za0002.Msgsize() - } - } - s += msgp.Uint64Size + msgp.Uint64Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *replicationStats) DecodeMsg(dc *msgp.Reader) (err error) { - var zb0001 uint32 - zb0001, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - if zb0001 != 10 { - err = msgp.ArrayError{Wanted: 10, Got: zb0001} - return - } - z.PendingSize, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "PendingSize") - return - } - z.ReplicatedSize, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "ReplicatedSize") - return - } - z.FailedSize, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "FailedSize") - return - } - z.FailedCount, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "FailedCount") - return - } - z.PendingCount, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "PendingCount") - return - } - z.MissedThresholdSize, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "MissedThresholdSize") - return - } - z.AfterThresholdSize, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "AfterThresholdSize") - return - } - z.MissedThresholdCount, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "MissedThresholdCount") - return - } - z.AfterThresholdCount, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "AfterThresholdCount") - return - } - z.ReplicatedCount, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "ReplicatedCount") - return - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *replicationStats) EncodeMsg(en *msgp.Writer) (err error) { - // array header, size 10 - err = en.Append(0x9a) - if err != nil { - return - } - err = en.WriteUint64(z.PendingSize) - if err != nil { - err = msgp.WrapError(err, "PendingSize") - return - } - err = en.WriteUint64(z.ReplicatedSize) - if err != nil { - err = msgp.WrapError(err, "ReplicatedSize") - return - } - err = en.WriteUint64(z.FailedSize) - if err != nil { - err = msgp.WrapError(err, "FailedSize") - return - } - err = en.WriteUint64(z.FailedCount) - if err != nil { - err = msgp.WrapError(err, "FailedCount") - return - } - err = en.WriteUint64(z.PendingCount) - if err != nil { - err = msgp.WrapError(err, "PendingCount") - return - } - err = en.WriteUint64(z.MissedThresholdSize) - if err != nil { - err = msgp.WrapError(err, "MissedThresholdSize") - return - } - err = en.WriteUint64(z.AfterThresholdSize) - if err != nil { - err = msgp.WrapError(err, "AfterThresholdSize") - return - } - err = 
en.WriteUint64(z.MissedThresholdCount) - if err != nil { - err = msgp.WrapError(err, "MissedThresholdCount") - return - } - err = en.WriteUint64(z.AfterThresholdCount) - if err != nil { - err = msgp.WrapError(err, "AfterThresholdCount") - return - } - err = en.WriteUint64(z.ReplicatedCount) - if err != nil { - err = msgp.WrapError(err, "ReplicatedCount") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *replicationStats) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // array header, size 10 - o = append(o, 0x9a) - o = msgp.AppendUint64(o, z.PendingSize) - o = msgp.AppendUint64(o, z.ReplicatedSize) - o = msgp.AppendUint64(o, z.FailedSize) - o = msgp.AppendUint64(o, z.FailedCount) - o = msgp.AppendUint64(o, z.PendingCount) - o = msgp.AppendUint64(o, z.MissedThresholdSize) - o = msgp.AppendUint64(o, z.AfterThresholdSize) - o = msgp.AppendUint64(o, z.MissedThresholdCount) - o = msgp.AppendUint64(o, z.AfterThresholdCount) - o = msgp.AppendUint64(o, z.ReplicatedCount) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *replicationStats) UnmarshalMsg(bts []byte) (o []byte, err error) { - var zb0001 uint32 - zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - if zb0001 != 10 { - err = msgp.ArrayError{Wanted: 10, Got: zb0001} - return - } - z.PendingSize, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "PendingSize") - return - } - z.ReplicatedSize, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ReplicatedSize") - return - } - z.FailedSize, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "FailedSize") - return - } - z.FailedCount, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "FailedCount") - return - } - z.PendingCount, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "PendingCount") - return - } - z.MissedThresholdSize, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "MissedThresholdSize") - return - } - z.AfterThresholdSize, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "AfterThresholdSize") - return - } - z.MissedThresholdCount, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "MissedThresholdCount") - return - } - z.AfterThresholdCount, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "AfterThresholdCount") - return - } - z.ReplicatedCount, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ReplicatedCount") - return - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *replicationStats) Msgsize() (s int) { - s = 1 + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size - return -} - -// DecodeMsg implements msgp.Decodable -func (z *replicationStatsV1) DecodeMsg(dc *msgp.Reader) (err error) { - var zb0001 uint32 - zb0001, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - if zb0001 != 10 { - err = msgp.ArrayError{Wanted: 10, Got: zb0001} - return - } - z.PendingSize, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "PendingSize") - return - } - 
z.ReplicatedSize, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "ReplicatedSize") - return - } - z.FailedSize, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "FailedSize") - return - } - z.ReplicaSize, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "ReplicaSize") - return - } - z.FailedCount, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "FailedCount") - return - } - z.PendingCount, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "PendingCount") - return - } - z.MissedThresholdSize, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "MissedThresholdSize") - return - } - z.AfterThresholdSize, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "AfterThresholdSize") - return - } - z.MissedThresholdCount, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "MissedThresholdCount") - return - } - z.AfterThresholdCount, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "AfterThresholdCount") - return - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *replicationStatsV1) EncodeMsg(en *msgp.Writer) (err error) { - // array header, size 10 - err = en.Append(0x9a) - if err != nil { - return - } - err = en.WriteUint64(z.PendingSize) - if err != nil { - err = msgp.WrapError(err, "PendingSize") - return - } - err = en.WriteUint64(z.ReplicatedSize) - if err != nil { - err = msgp.WrapError(err, "ReplicatedSize") - return - } - err = en.WriteUint64(z.FailedSize) - if err != nil { - err = msgp.WrapError(err, "FailedSize") - return - } - err = en.WriteUint64(z.ReplicaSize) - if err != nil { - err = msgp.WrapError(err, "ReplicaSize") - return - } - err = en.WriteUint64(z.FailedCount) - if err != nil { - err = msgp.WrapError(err, "FailedCount") - return - } - err = en.WriteUint64(z.PendingCount) - if err != nil { - err = msgp.WrapError(err, "PendingCount") - return - } - err = en.WriteUint64(z.MissedThresholdSize) - if err != nil { - err = msgp.WrapError(err, "MissedThresholdSize") - return - } - err = en.WriteUint64(z.AfterThresholdSize) - if err != nil { - err = msgp.WrapError(err, "AfterThresholdSize") - return - } - err = en.WriteUint64(z.MissedThresholdCount) - if err != nil { - err = msgp.WrapError(err, "MissedThresholdCount") - return - } - err = en.WriteUint64(z.AfterThresholdCount) - if err != nil { - err = msgp.WrapError(err, "AfterThresholdCount") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *replicationStatsV1) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // array header, size 10 - o = append(o, 0x9a) - o = msgp.AppendUint64(o, z.PendingSize) - o = msgp.AppendUint64(o, z.ReplicatedSize) - o = msgp.AppendUint64(o, z.FailedSize) - o = msgp.AppendUint64(o, z.ReplicaSize) - o = msgp.AppendUint64(o, z.FailedCount) - o = msgp.AppendUint64(o, z.PendingCount) - o = msgp.AppendUint64(o, z.MissedThresholdSize) - o = msgp.AppendUint64(o, z.AfterThresholdSize) - o = msgp.AppendUint64(o, z.MissedThresholdCount) - o = msgp.AppendUint64(o, z.AfterThresholdCount) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *replicationStatsV1) UnmarshalMsg(bts []byte) (o []byte, err error) { - var zb0001 uint32 - zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - if zb0001 != 10 { - err = msgp.ArrayError{Wanted: 10, Got: zb0001} - return - } - z.PendingSize, bts, err = 
msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "PendingSize") - return - } - z.ReplicatedSize, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ReplicatedSize") - return - } - z.FailedSize, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "FailedSize") - return - } - z.ReplicaSize, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "ReplicaSize") - return - } - z.FailedCount, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "FailedCount") - return - } - z.PendingCount, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "PendingCount") - return - } - z.MissedThresholdSize, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "MissedThresholdSize") - return - } - z.AfterThresholdSize, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "AfterThresholdSize") - return - } - z.MissedThresholdCount, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "MissedThresholdCount") - return - } - z.AfterThresholdCount, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "AfterThresholdCount") - return - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *replicationStatsV1) Msgsize() (s int) { - s = 1 + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size + msgp.Uint64Size - return -} - // DecodeMsg implements msgp.Decodable func (z *sizeHistogram) DecodeMsg(dc *msgp.Reader) (err error) { var zb0001 uint32 diff --git a/cmd/data-usage-cache_gen_test.go b/cmd/data-usage-cache_gen_test.go index 9b726e0775e3a..b19ca6bb8b757 100644 --- a/cmd/data-usage-cache_gen_test.go +++ b/cmd/data-usage-cache_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package cmd + import ( "bytes" "testing" @@ -519,458 +519,6 @@ func BenchmarkDecodedataUsageEntry(b *testing.B) { } } -func TestMarshalUnmarshalreplicationAllStats(t *testing.T) { - v := replicationAllStats{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgreplicationAllStats(b *testing.B) { - v := replicationAllStats{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgreplicationAllStats(b *testing.B) { - v := replicationAllStats{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalreplicationAllStats(b *testing.B) { - v := replicationAllStats{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodereplicationAllStats(t *testing.T) { - v := replicationAllStats{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodereplicationAllStats Msgsize() is inaccurate") - } - - vn := replicationAllStats{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodereplicationAllStats(b *testing.B) { - v := replicationAllStats{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodereplicationAllStats(b *testing.B) { - v := replicationAllStats{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalreplicationAllStatsV1(t *testing.T) { - v := replicationAllStatsV1{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgreplicationAllStatsV1(b *testing.B) { - v := replicationAllStatsV1{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgreplicationAllStatsV1(b *testing.B) { - v := replicationAllStatsV1{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func 
BenchmarkUnmarshalreplicationAllStatsV1(b *testing.B) { - v := replicationAllStatsV1{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodereplicationAllStatsV1(t *testing.T) { - v := replicationAllStatsV1{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodereplicationAllStatsV1 Msgsize() is inaccurate") - } - - vn := replicationAllStatsV1{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodereplicationAllStatsV1(b *testing.B) { - v := replicationAllStatsV1{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodereplicationAllStatsV1(b *testing.B) { - v := replicationAllStatsV1{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalreplicationStats(t *testing.T) { - v := replicationStats{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgreplicationStats(b *testing.B) { - v := replicationStats{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgreplicationStats(b *testing.B) { - v := replicationStats{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalreplicationStats(b *testing.B) { - v := replicationStats{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodereplicationStats(t *testing.T) { - v := replicationStats{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodereplicationStats Msgsize() is inaccurate") - } - - vn := replicationStats{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodereplicationStats(b *testing.B) { - v := replicationStats{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodereplicationStats(b *testing.B) { - v := 
replicationStats{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalreplicationStatsV1(t *testing.T) { - v := replicationStatsV1{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgreplicationStatsV1(b *testing.B) { - v := replicationStatsV1{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgreplicationStatsV1(b *testing.B) { - v := replicationStatsV1{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalreplicationStatsV1(b *testing.B) { - v := replicationStatsV1{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodereplicationStatsV1(t *testing.T) { - v := replicationStatsV1{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodereplicationStatsV1 Msgsize() is inaccurate") - } - - vn := replicationStatsV1{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodereplicationStatsV1(b *testing.B) { - v := replicationStatsV1{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodereplicationStatsV1(b *testing.B) { - v := replicationStatsV1{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - func TestMarshalUnmarshalsizeHistogram(t *testing.T) { v := sizeHistogram{} bts, err := v.MarshalMsg(nil) diff --git a/cmd/data-usage-utils.go b/cmd/data-usage-utils.go index 6cc21c4c3701c..1f6b3f11e2487 100644 --- a/cmd/data-usage-utils.go +++ b/cmd/data-usage-utils.go @@ -140,7 +140,7 @@ func (dui DataUsageInfo) tierStats() []madmin.TierInfo { return infos } -func (dui DataUsageInfo) tierMetrics() (metrics []Metric) { +func (dui DataUsageInfo) tierMetrics() (metrics []MetricV2) { if dui.TierStats == nil { return nil } @@ -148,17 +148,17 @@ func (dui DataUsageInfo) tierMetrics() (metrics []Metric) { // minio_cluster_ilm_transitioned_objects{tier="S3TIER-1"}=1 // minio_cluster_ilm_transitioned_versions{tier="S3TIER-1"}=3 for tier, st := range dui.TierStats.Tiers { - metrics = append(metrics, Metric{ + metrics = 
append(metrics, MetricV2{ Description: getClusterTransitionedBytesMD(), Value: float64(st.TotalSize), VariableLabels: map[string]string{"tier": tier}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterTransitionedObjectsMD(), Value: float64(st.NumObjects), VariableLabels: map[string]string{"tier": tier}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterTransitionedVersionsMD(), Value: float64(st.NumVersions), VariableLabels: map[string]string{"tier": tier}, diff --git a/cmd/data-usage.go b/cmd/data-usage.go index 13acb5af93986..51227e106defb 100644 --- a/cmd/data-usage.go +++ b/cmd/data-usage.go @@ -25,7 +25,6 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/minio/minio/internal/cachevalue" - "github.com/minio/minio/internal/logger" ) const ( @@ -49,7 +48,7 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan json := jsoniter.ConfigCompatibleWithStandardLibrary dataUsageJSON, err := json.Marshal(dataUsageInfo) if err != nil { - logger.LogIf(ctx, err) + scannerLogIf(ctx, err) continue } if attempts > 10 { @@ -57,7 +56,7 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan attempts = 1 } if err = saveConfig(ctx, objAPI, dataUsageObjNamePath, dataUsageJSON); err != nil { - logger.LogOnceIf(ctx, err, dataUsageObjNamePath) + scannerLogOnceIf(ctx, err, dataUsageObjNamePath) } attempts++ } @@ -80,12 +79,12 @@ func loadPrefixUsageFromBackend(ctx context.Context, objAPI ObjectLayer, bucket prefixUsageCache.InitOnce(30*time.Second, // No need to fail upon Update() error, fallback to old value. cachevalue.Opts{ReturnLastGood: true, NoWait: true}, - func() (map[string]uint64, error) { + func(ctx context.Context) (map[string]uint64, error) { m := make(map[string]uint64) for _, pool := range z.serverPools { for _, er := range pool.sets { // Load bucket usage prefixes - ctx, done := context.WithTimeout(context.Background(), 2*time.Second) + ctx, done := context.WithTimeout(ctx, 2*time.Second) ok := cache.load(ctx, er, bucket+slashSeparator+dataUsageCacheName) == nil done() if ok { @@ -108,7 +107,7 @@ func loadPrefixUsageFromBackend(ctx context.Context, objAPI ObjectLayer, bucket }, ) - return prefixUsageCache.Get() + return prefixUsageCache.GetWithCtx(ctx) } func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsageInfo, error) { diff --git a/cmd/data-usage_test.go b/cmd/data-usage_test.go index b8b5a9a7ce55a..51a46e65d7a88 100644 --- a/cmd/data-usage_test.go +++ b/cmd/data-usage_test.go @@ -26,6 +26,9 @@ import ( "path" "path/filepath" "testing" + "time" + + "github.com/minio/minio/internal/cachevalue" ) type usageTestFile struct { @@ -53,18 +56,21 @@ func TestDataUsageUpdate(t *testing.T) { var s os.FileInfo s, err = os.Stat(item.Path) if err != nil { - return + return sizeS, err } sizeS.totalSize = s.Size() sizeS.versions++ return sizeS, nil } - return + return sizeS, err } - + xls := xlStorage{drivePath: base, diskInfoCache: cachevalue.New[DiskInfo]()} + xls.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{}, func(ctx context.Context) (DiskInfo, error) { + return DiskInfo{Total: 1 << 40, Free: 1 << 40}, nil + }) weSleep := func() bool { return false } - got, err := scanDataFolder(context.Background(), nil, base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize, 0, weSleep) + got, err := scanDataFolder(t.Context(), nil, &xls, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, 
getSize, 0, weSleep) if err != nil { t.Fatal(err) } @@ -173,8 +179,8 @@ func TestDataUsageUpdate(t *testing.T) { t.Fatal(err) } // Changed dir must be picked up in this many cycles. - for i := 0; i < dataUsageUpdateDirCycles; i++ { - got, err = scanDataFolder(context.Background(), nil, base, got, getSize, 0, weSleep) + for range dataUsageUpdateDirCycles { + got, err = scanDataFolder(t.Context(), nil, &xls, got, getSize, 0, weSleep) got.Info.NextCycle++ if err != nil { t.Fatal(err) @@ -273,18 +279,22 @@ func TestDataUsageUpdatePrefix(t *testing.T) { var s os.FileInfo s, err = os.Stat(item.Path) if err != nil { - return + return sizeS, err } sizeS.totalSize = s.Size() sizeS.versions++ - return + return sizeS, err } - return + return sizeS, err } weSleep := func() bool { return false } + xls := xlStorage{drivePath: base, diskInfoCache: cachevalue.New[DiskInfo]()} + xls.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{}, func(ctx context.Context) (DiskInfo, error) { + return DiskInfo{Total: 1 << 40, Free: 1 << 40}, nil + }) - got, err := scanDataFolder(context.Background(), nil, base, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize, 0, weSleep) + got, err := scanDataFolder(t.Context(), nil, &xls, dataUsageCache{Info: dataUsageCacheInfo{Name: "bucket"}}, getSize, 0, weSleep) if err != nil { t.Fatal(err) } @@ -364,6 +374,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) { } if e == nil { t.Fatal("got nil result") + return } if e.Size != int64(w.size) { t.Error("got size", e.Size, "want", w.size) @@ -417,8 +428,8 @@ func TestDataUsageUpdatePrefix(t *testing.T) { t.Fatal(err) } // Changed dir must be picked up in this many cycles. - for i := 0; i < dataUsageUpdateDirCycles; i++ { - got, err = scanDataFolder(context.Background(), nil, base, got, getSize, 0, weSleep) + for range dataUsageUpdateDirCycles { + got, err = scanDataFolder(t.Context(), nil, &xls, got, getSize, 0, weSleep) got.Info.NextCycle++ if err != nil { t.Fatal(err) @@ -515,13 +526,13 @@ func createUsageTestFiles(t *testing.T, base, bucket string, files []usageTestFi // generateUsageTestFiles create nFolders * nFiles files of size bytes each. 
func generateUsageTestFiles(t *testing.T, base, bucket string, nFolders, nFiles, size int) { pl := make([]byte, size) - for i := 0; i < nFolders; i++ { + for i := range nFolders { name := filepath.Join(base, bucket, fmt.Sprint(i), "0.txt") err := os.MkdirAll(filepath.Dir(name), os.ModePerm) if err != nil { t.Fatal(err) } - for j := 0; j < nFiles; j++ { + for j := range nFiles { name := filepath.Join(base, bucket, fmt.Sprint(i), fmt.Sprint(j)+".txt") err = os.WriteFile(name, pl, os.ModePerm) if err != nil { @@ -558,31 +569,24 @@ func TestDataUsageCacheSerialize(t *testing.T) { var s os.FileInfo s, err = os.Stat(item.Path) if err != nil { - return + return sizeS, err } sizeS.versions++ sizeS.totalSize = s.Size() - return + return sizeS, err } - return + return sizeS, err } + xls := xlStorage{drivePath: base, diskInfoCache: cachevalue.New[DiskInfo]()} + xls.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{}, func(ctx context.Context) (DiskInfo, error) { + return DiskInfo{Total: 1 << 40, Free: 1 << 40}, nil + }) weSleep := func() bool { return false } - want, err := scanDataFolder(context.Background(), nil, base, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize, 0, weSleep) + want, err := scanDataFolder(t.Context(), nil, &xls, dataUsageCache{Info: dataUsageCacheInfo{Name: bucket}}, getSize, 0, weSleep) if err != nil { t.Fatal(err) } e := want.find("abucket/dir2") - e.ReplicationStats = &replicationAllStats{ - Targets: map[string]replicationStats{ - "arn": { - PendingSize: 1, - ReplicatedSize: 2, - FailedSize: 3, - FailedCount: 5, - PendingCount: 6, - }, - }, - } want.replace("abucket/dir2", "", *e) var buf bytes.Buffer err = want.serializeTo(&buf) @@ -614,7 +618,7 @@ func TestDataUsageCacheSerialize(t *testing.T) { } // equalAsJSON returns whether the values are equal when encoded as JSON. -func equalAsJSON(a, b interface{}) bool { +func equalAsJSON(a, b any) bool { aj, err := json.Marshal(a) if err != nil { panic(err) diff --git a/cmd/dummy-data-generator_test.go b/cmd/dummy-data-generator_test.go index b6e36f2c298f0..07400b30ab8b9 100644 --- a/cmd/dummy-data-generator_test.go +++ b/cmd/dummy-data-generator_test.go @@ -61,10 +61,9 @@ func NewDummyDataGen(totalLength, skipOffset int64) io.ReadSeeker { } skipOffset %= int64(len(alphabets)) - as := make([]byte, 2*len(alphabets)) - copy(as, alphabets) - copy(as[len(alphabets):], alphabets) - b := as[skipOffset : skipOffset+int64(len(alphabets))] + const multiply = 100 + as := bytes.Repeat(alphabets, multiply) + b := as[skipOffset : skipOffset+int64(len(alphabets)*(multiply-1))] return &DummyDataGen{ length: totalLength, b: b, @@ -88,7 +87,7 @@ func (d *DummyDataGen) Read(b []byte) (n int, err error) { } err = io.EOF } - return + return n, err } func (d *DummyDataGen) Seek(offset int64, whence int) (int64, error) { @@ -166,7 +165,7 @@ func TestCmpReaders(t *testing.T) { r1 := bytes.NewReader([]byte("abc")) r2 := bytes.NewReader([]byte("abc")) ok, msg := cmpReaders(r1, r2) - if !(ok && msg == "") { + if !ok || msg != "" { t.Fatalf("unexpected") } } diff --git a/cmd/dummy-handlers.go b/cmd/dummy-handlers.go index f92c667439b91..685b792564a55 100644 --- a/cmd/dummy-handlers.go +++ b/cmd/dummy-handlers.go @@ -22,7 +22,7 @@ import ( "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) // Data types used for returning dummy tagging XML. 
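The dummy-handlers.go hunks that follow all share one shape: resolve the bucket from the route, authorize the caller against a dedicated policy action (this diff switches GetBucketCors to policy.GetBucketCorsAction and adds PutBucketCors/DeleteBucketCors as not-implemented stubs), check the bucket exists, then answer with a static or NotImplemented response. A minimal standalone sketch of that shape using only the standard library; the route, action name, and authorize callback are illustrative stand-ins, not MinIO's internal helpers such as checkRequestAuthType:

package main

import (
	"encoding/xml"
	"net/http"
)

// apiError mirrors the S3-style XML error body these dummy handlers return.
type apiError struct {
	XMLName xml.Name `xml:"Error"`
	Code    string   `xml:"Code"`
	Message string   `xml:"Message"`
}

func writeXMLError(w http.ResponseWriter, status int, e apiError) {
	w.Header().Set("Content-Type", "application/xml")
	w.WriteHeader(status)
	xml.NewEncoder(w).Encode(e) // best-effort error body
}

// putBucketCorsStub sketches the dummy-handler pattern: resolve the bucket,
// authorize against a dedicated action, then answer NotImplemented.
func putBucketCorsStub(authorize func(action, bucket string) bool) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		bucket := r.PathValue("bucket") // Go 1.22+ ServeMux path value
		if !authorize("s3:PutBucketCORS", bucket) {
			writeXMLError(w, http.StatusForbidden, apiError{Code: "AccessDenied", Message: "Access Denied."})
			return
		}
		writeXMLError(w, http.StatusNotImplemented, apiError{Code: "NotImplemented", Message: "This API is not implemented."})
	}
}

func main() {
	allowAll := func(action, bucket string) bool { return bucket != "" }
	mux := http.NewServeMux()
	mux.Handle("PUT /{bucket}", putBucketCorsStub(allowAll))
	_ = http.ListenAndServe("127.0.0.1:8080", mux)
}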
@@ -44,7 +44,7 @@ func (api objectAPIHandlers) GetBucketWebsiteHandler(w http.ResponseWriter, r *h return } - // Allow getBucketCors if policy action is set, since this is a dummy call + // Allow GetBucketWebsite if policy action is set, since this is a dummy call // we are simply re-purposing the bucketPolicyAction. if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL) @@ -76,7 +76,7 @@ func (api objectAPIHandlers) GetBucketAccelerateHandler(w http.ResponseWriter, r return } - // Allow getBucketCors if policy action is set, since this is a dummy call + // Allow GetBucketAccelerate if policy action is set, since this is a dummy call // we are simply re-purposing the bucketPolicyAction. if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL) @@ -109,7 +109,7 @@ func (api objectAPIHandlers) GetBucketRequestPaymentHandler(w http.ResponseWrite return } - // Allow getBucketCors if policy action is set, since this is a dummy call + // Allow GetBucketRequestPaymentHandler if policy action is set, since this is a dummy call // we are simply re-purposing the bucketPolicyAction. if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL) @@ -143,7 +143,7 @@ func (api objectAPIHandlers) GetBucketLoggingHandler(w http.ResponseWriter, r *h return } - // Allow getBucketCors if policy action is set, since this is a dummy call + // Allow GetBucketLoggingHandler if policy action is set, since this is a dummy call // we are simply re-purposing the bucketPolicyAction. if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL) @@ -181,9 +181,7 @@ func (api objectAPIHandlers) GetBucketCorsHandler(w http.ResponseWriter, r *http return } - // Allow getBucketCors if policy action is set, since this is a dummy call - // we are simply re-purposing the bucketPolicyAction. - if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone { + if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketCorsAction, bucket, ""); s3Error != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL) return } @@ -197,3 +195,63 @@ func (api objectAPIHandlers) GetBucketCorsHandler(w http.ResponseWriter, r *http writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchCORSConfiguration), r.URL) } + +// PutBucketCorsHandler - PUT bucket cors, a dummy api +func (api objectAPIHandlers) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) { + ctx := newContext(r, w, "PutBucketCors") + + defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) + + vars := mux.Vars(r) + bucket := vars["bucket"] + + objAPI := api.ObjectAPI() + if objAPI == nil { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) + return + } + + if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketCorsAction, bucket, ""); s3Error != ErrNone { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL) + return + } + + // Validate if bucket exists, before proceeding further... 
+ _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) + return + } + + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) +} + +// DeleteBucketCorsHandler - DELETE bucket cors, a dummy api +func (api objectAPIHandlers) DeleteBucketCorsHandler(w http.ResponseWriter, r *http.Request) { + ctx := newContext(r, w, "DeleteBucketCors") + + defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) + + vars := mux.Vars(r) + bucket := vars["bucket"] + + objAPI := api.ObjectAPI() + if objAPI == nil { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) + return + } + + if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketCorsAction, bucket, ""); s3Error != ErrNone { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL) + return + } + + // Validate if bucket exists, before proceeding further... + _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) + return + } + + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) +} diff --git a/cmd/dynamic-timeouts.go b/cmd/dynamic-timeouts.go index 23d3b1266c490..9c4f297bbe3a6 100644 --- a/cmd/dynamic-timeouts.go +++ b/cmd/dynamic-timeouts.go @@ -98,7 +98,6 @@ func (dt *dynamicTimeout) logEntry(duration time.Duration) { // We leak entries while we copy if entries == dynamicTimeoutLogSize { - // Make copy on stack in order to call adjust() logCopy := [dynamicTimeoutLogSize]time.Duration{} copy(logCopy[:], dt.log[:]) @@ -117,12 +116,12 @@ func (dt *dynamicTimeout) logEntry(duration time.Duration) { // adjust changes the value of the dynamic timeout based on the // previous results func (dt *dynamicTimeout) adjust(entries [dynamicTimeoutLogSize]time.Duration) { - failures, max := 0, time.Duration(0) + failures, maxDur := 0, time.Duration(0) for _, dur := range entries[:] { if dur == maxDuration { failures++ - } else if dur > max { - max = dur + } else if dur > maxDur { + maxDur = dur } } @@ -130,12 +129,9 @@ func (dt *dynamicTimeout) adjust(entries [dynamicTimeoutLogSize]time.Duration) { if failPct > dynamicTimeoutIncreaseThresholdPct { // We are hitting the timeout too often, so increase the timeout by 25% - timeout := atomic.LoadInt64(&dt.timeout) * 125 / 100 - - // Set upper cap. - if timeout > int64(maxDynamicTimeout) { - timeout = int64(maxDynamicTimeout) - } + timeout := min( + // Set upper cap. + atomic.LoadInt64(&dt.timeout)*125/100, int64(maxDynamicTimeout)) // Safety, shouldn't happen if timeout < dt.minimum { timeout = dt.minimum @@ -144,12 +140,12 @@ func (dt *dynamicTimeout) adjust(entries [dynamicTimeoutLogSize]time.Duration) { } else if failPct < dynamicTimeoutDecreaseThresholdPct { // We are hitting the timeout relatively few times, // so decrease the timeout towards 25 % of maximum time spent. - max = max * 125 / 100 + maxDur = maxDur * 125 / 100 timeout := atomic.LoadInt64(&dt.timeout) - if max < time.Duration(timeout) { + if maxDur < time.Duration(timeout) { // Move 50% toward the max. 
- timeout = (int64(max) + timeout) / 2 + timeout = (int64(maxDur) + timeout) / 2 } if timeout < dt.minimum { timeout = dt.minimum diff --git a/cmd/dynamic-timeouts_test.go b/cmd/dynamic-timeouts_test.go index 42f66c998d139..b353b983bc94c 100644 --- a/cmd/dynamic-timeouts_test.go +++ b/cmd/dynamic-timeouts_test.go @@ -30,7 +30,7 @@ func TestDynamicTimeoutSingleIncrease(t *testing.T) { initial := timeout.Timeout() - for i := 0; i < dynamicTimeoutLogSize; i++ { + for range dynamicTimeoutLogSize { timeout.LogFailure() } @@ -46,13 +46,13 @@ func TestDynamicTimeoutDualIncrease(t *testing.T) { initial := timeout.Timeout() - for i := 0; i < dynamicTimeoutLogSize; i++ { + for range dynamicTimeoutLogSize { timeout.LogFailure() } adjusted := timeout.Timeout() - for i := 0; i < dynamicTimeoutLogSize; i++ { + for range dynamicTimeoutLogSize { timeout.LogFailure() } @@ -68,7 +68,7 @@ func TestDynamicTimeoutSingleDecrease(t *testing.T) { initial := timeout.Timeout() - for i := 0; i < dynamicTimeoutLogSize; i++ { + for range dynamicTimeoutLogSize { timeout.LogSuccess(20 * time.Second) } @@ -84,13 +84,13 @@ func TestDynamicTimeoutDualDecrease(t *testing.T) { initial := timeout.Timeout() - for i := 0; i < dynamicTimeoutLogSize; i++ { + for range dynamicTimeoutLogSize { timeout.LogSuccess(20 * time.Second) } adjusted := timeout.Timeout() - for i := 0; i < dynamicTimeoutLogSize; i++ { + for range dynamicTimeoutLogSize { timeout.LogSuccess(20 * time.Second) } @@ -107,8 +107,8 @@ func TestDynamicTimeoutManyDecreases(t *testing.T) { initial := timeout.Timeout() const successTimeout = 20 * time.Second - for l := 0; l < 100; l++ { - for i := 0; i < dynamicTimeoutLogSize; i++ { + for range 100 { + for range dynamicTimeoutLogSize { timeout.LogSuccess(successTimeout) } } @@ -129,8 +129,8 @@ func TestDynamicTimeoutConcurrent(t *testing.T) { rng := rand.New(rand.NewSource(int64(i))) go func() { defer wg.Done() - for i := 0; i < 100; i++ { - for j := 0; j < 100; j++ { + for range 100 { + for range 100 { timeout.LogSuccess(time.Duration(float64(time.Second) * rng.Float64())) } to := timeout.Timeout() @@ -150,8 +150,8 @@ func TestDynamicTimeoutHitMinimum(t *testing.T) { initial := timeout.Timeout() const successTimeout = 20 * time.Second - for l := 0; l < 100; l++ { - for i := 0; i < dynamicTimeoutLogSize; i++ { + for range 100 { + for range dynamicTimeoutLogSize { timeout.LogSuccess(successTimeout) } } @@ -166,14 +166,9 @@ func TestDynamicTimeoutHitMinimum(t *testing.T) { func testDynamicTimeoutAdjust(t *testing.T, timeout *dynamicTimeout, f func() float64) { const successTimeout = 20 * time.Second - for i := 0; i < dynamicTimeoutLogSize; i++ { - + for range dynamicTimeoutLogSize { rnd := f() - duration := time.Duration(float64(successTimeout) * rnd) - - if duration < 100*time.Millisecond { - duration = 100 * time.Millisecond - } + duration := max(time.Duration(float64(successTimeout)*rnd), 100*time.Millisecond) if duration >= time.Minute { timeout.LogFailure() } else { @@ -189,7 +184,7 @@ func TestDynamicTimeoutAdjustExponential(t *testing.T) { initial := timeout.Timeout() - for try := 0; try < 10; try++ { + for range 10 { testDynamicTimeoutAdjust(t, timeout, rand.ExpFloat64) } @@ -206,7 +201,7 @@ func TestDynamicTimeoutAdjustNormalized(t *testing.T) { initial := timeout.Timeout() - for try := 0; try < 10; try++ { + for range 10 { testDynamicTimeoutAdjust(t, timeout, func() float64 { return 1.0 + rand.NormFloat64() }) diff --git a/cmd/encryption-v1.go b/cmd/encryption-v1.go index c112724c8f1fc..c3da051a8f974 100644 --- 
a/cmd/encryption-v1.go +++ b/cmd/encryption-v1.go @@ -29,6 +29,7 @@ import ( "errors" "fmt" "io" + "maps" "net/http" "path" "strconv" @@ -37,7 +38,6 @@ import ( "github.com/minio/kms-go/kes" "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/etag" - "github.com/minio/minio/internal/fips" "github.com/minio/minio/internal/hash" "github.com/minio/minio/internal/hash/sha256" xhttp "github.com/minio/minio/internal/http" @@ -109,8 +109,8 @@ func kmsKeyIDFromMetadata(metadata map[string]string) string { // be AWS S3 compliant. // // DecryptETags uses a KMS bulk decryption API, if available, which -// is more efficient than decrypting ETags sequentually. -func DecryptETags(ctx context.Context, k kms.KMS, objects []ObjectInfo) error { +// is more efficient than decrypting ETags sequentially. +func DecryptETags(ctx context.Context, k *kms.KMS, objects []ObjectInfo) error { const BatchSize = 250 // We process the objects in batches - 250 is a reasonable default. var ( metadata = make([]map[string]string, 0, BatchSize) @@ -118,10 +118,7 @@ func DecryptETags(ctx context.Context, k kms.KMS, objects []ObjectInfo) error { names = make([]string, 0, BatchSize) ) for len(objects) > 0 { - N := BatchSize - if len(objects) < BatchSize { - N = len(objects) - } + N := min(len(objects), BatchSize) batch := objects[:N] // We have to decrypt only ETags of SSE-S3 single-part @@ -134,11 +131,16 @@ func DecryptETags(ctx context.Context, k kms.KMS, objects []ObjectInfo) error { SSES3SinglePartObjects := make(map[int]bool) for i, object := range batch { if kind, ok := crypto.IsEncrypted(object.UserDefined); ok && kind == crypto.S3 && !crypto.IsMultiPart(object.UserDefined) { - SSES3SinglePartObjects[i] = true - - metadata = append(metadata, object.UserDefined) - buckets = append(buckets, object.Bucket) - names = append(names, object.Name) + ETag, err := etag.Parse(object.ETag) + if err != nil { + continue + } + if ETag.IsEncrypted() { + SSES3SinglePartObjects[i] = true + metadata = append(metadata, object.UserDefined) + buckets = append(buckets, object.Bucket) + names = append(names, object.Name) + } } } @@ -190,7 +192,7 @@ func DecryptETags(ctx context.Context, k kms.KMS, objects []ObjectInfo) error { if err != nil { return err } - if SSES3SinglePartObjects[i] && ETag.IsEncrypted() { + if SSES3SinglePartObjects[i] { ETag, err = etag.Decrypt(keys[0][:], ETag) if err != nil { return err @@ -267,7 +269,11 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt if err != nil { return err } - oldKey, err := GlobalKMS.DecryptKey(keyID, kmsKey, kms.Context{bucket: path.Join(bucket, object)}) + oldKey, err := GlobalKMS.Decrypt(ctx, &kms.DecryptRequest{ + Name: keyID, + Ciphertext: kmsKey, + AssociatedData: kms.Context{bucket: path.Join(bucket, object)}, + }) if err != nil { return err } @@ -276,7 +282,10 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt return err } - newKey, err := GlobalKMS.GenerateKey(ctx, "", kms.Context{bucket: path.Join(bucket, object)}) + newKey, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{ + Name: GlobalKMS.DefaultKey, + AssociatedData: kms.Context{bucket: path.Join(bucket, object)}, + }) if err != nil { return err } @@ -306,13 +315,14 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt // of the client provided context and add the bucket // key, if not present. 
kmsCtx := kms.Context{} - for k, v := range cryptoCtx { - kmsCtx[k] = v - } + maps.Copy(kmsCtx, cryptoCtx) if _, ok := kmsCtx[bucket]; !ok { kmsCtx[bucket] = path.Join(bucket, object) } - newKey, err := GlobalKMS.GenerateKey(ctx, newKeyID, kmsCtx) + newKey, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{ + Name: newKeyID, + AssociatedData: kmsCtx, + }) if err != nil { return err } @@ -332,8 +342,8 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt return errInvalidSSEParameters // AWS returns special error for equal but invalid keys. } return crypto.ErrInvalidCustomerKey // To provide strict AWS S3 compatibility we return: access denied. - } + if subtle.ConstantTimeCompare(oldKey, newKey) == 1 && sealedKey.Algorithm == crypto.SealAlgorithm { return nil // don't rotate on equal keys if seal algorithm is latest } @@ -352,7 +362,9 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key if GlobalKMS == nil { return crypto.ObjectKey{}, errKMSNotConfigured } - key, err := GlobalKMS.GenerateKey(ctx, "", kms.Context{bucket: path.Join(bucket, object)}) + key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{ + AssociatedData: kms.Context{bucket: path.Join(bucket, object)}, + }) if err != nil { return crypto.ObjectKey{}, err } @@ -373,13 +385,14 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key // of the client provided context and add the bucket // key, if not present. kmsCtx := kms.Context{} - for k, v := range cryptoCtx { - kmsCtx[k] = v - } + maps.Copy(kmsCtx, cryptoCtx) if _, ok := kmsCtx[bucket]; !ok { kmsCtx[bucket] = path.Join(bucket, object) } - key, err := GlobalKMS.GenerateKey(ctx, keyID, kmsCtx) + key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{ + Name: keyID, + AssociatedData: kmsCtx, + }) if err != nil { if errors.Is(err, kes.ErrKeyNotFound) { return crypto.ObjectKey{}, errKMSKeyNotFound @@ -407,7 +420,7 @@ func newEncryptReader(ctx context.Context, content io.Reader, kind crypto.Type, return nil, crypto.ObjectKey{}, err } - reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20, CipherSuites: fips.DARECiphers()}) + reader, err := sio.EncryptReader(content, sio.Config{Key: objectEncryptionKey[:], MinVersion: sio.Version20}) if err != nil { return nil, crypto.ObjectKey{}, crypto.ErrInvalidCustomerKey } @@ -437,7 +450,7 @@ func setEncryptionMetadata(r *http.Request, bucket, object string, metadata map[ } } _, err = newEncryptMetadata(r.Context(), kind, keyID, key, bucket, object, metadata, kmsCtx) - return + return err } // EncryptRequest takes the client provided content and encrypts the data @@ -475,11 +488,10 @@ func EncryptRequest(content io.Reader, r *http.Request, bucket, object string, m func decryptObjectMeta(key []byte, bucket, object string, metadata map[string]string) ([]byte, error) { switch kind, _ := crypto.IsEncrypted(metadata); kind { case crypto.S3: - KMS := GlobalKMS - if KMS == nil { + if GlobalKMS == nil { return nil, errKMSNotConfigured } - objectKey, err := crypto.S3.UnsealObjectKey(KMS, metadata, bucket, object) + objectKey, err := crypto.S3.UnsealObjectKey(GlobalKMS, metadata, bucket, object) if err != nil { return nil, err } @@ -551,7 +563,6 @@ func newDecryptReaderWithObjectKey(client io.Reader, objectEncryptionKey []byte, reader, err := sio.DecryptReader(client, sio.Config{ Key: objectEncryptionKey, SequenceNumber: seqNumber, - CipherSuites: fips.DARECiphers(), }) if err != nil { 
return nil, crypto.ErrInvalidCustomerKey @@ -582,28 +593,44 @@ func DecryptBlocksRequestR(inputReader io.Reader, h http.Header, seqNumber uint3 partEncRelOffset := int64(seqNumber) * (SSEDAREPackageBlockSize + SSEDAREPackageMetaSize) w := &DecryptBlocksReader{ - reader: inputReader, - startSeqNum: seqNumber, - partDecRelOffset: partDecRelOffset, - partEncRelOffset: partEncRelOffset, - parts: oi.Parts, - partIndex: partStart, - header: h, - bucket: bucket, - object: object, - customerKeyHeader: h.Get(xhttp.AmzServerSideEncryptionCustomerKey), - copySource: copySource, - metadata: cloneMSS(oi.UserDefined), + reader: inputReader, + startSeqNum: seqNumber, + partDecRelOffset: partDecRelOffset, + partEncRelOffset: partEncRelOffset, + parts: oi.Parts, + partIndex: partStart, + } + + // In case of SSE-C, we have to decrypt the OEK using the client-provided key. + // In case of a SSE-C server-side copy, the client might provide two keys, + // one for the source and one for the target. This reader is the source. + var ssecClientKey []byte + if crypto.SSEC.IsEncrypted(oi.UserDefined) { + if copySource && crypto.SSECopy.IsRequested(h) { + key, err := crypto.SSECopy.ParseHTTP(h) + if err != nil { + return nil, err + } + ssecClientKey = key[:] + } else { + key, err := crypto.SSEC.ParseHTTP(h) + if err != nil { + return nil, err + } + ssecClientKey = key[:] + } } - if w.copySource { - w.customerKeyHeader = h.Get(xhttp.AmzServerSideEncryptionCopyCustomerKey) + // Decrypt the OEK once and reuse it for all subsequent parts. + objectEncryptionKey, err := decryptObjectMeta(ssecClientKey, bucket, object, oi.UserDefined) + if err != nil { + return nil, err } + w.objectEncryptionKey = objectEncryptionKey if err := w.buildDecrypter(w.parts[w.partIndex].Number); err != nil { return nil, err } - return w, nil } @@ -619,48 +646,17 @@ type DecryptBlocksReader struct { // Current part index partIndex int // Parts information - parts []ObjectPartInfo - header http.Header - bucket, object string - metadata map[string]string + parts []ObjectPartInfo + objectEncryptionKey []byte partDecRelOffset, partEncRelOffset int64 - - copySource bool - // Customer Key - customerKeyHeader string } func (d *DecryptBlocksReader) buildDecrypter(partID int) error { - m := cloneMSS(d.metadata) - // Initialize the first decrypter; new decrypters will be - // initialized in Read() operation as needed. - var key []byte - var err error - if d.copySource { - if crypto.SSEC.IsEncrypted(d.metadata) { - d.header.Set(xhttp.AmzServerSideEncryptionCopyCustomerKey, d.customerKeyHeader) - key, err = ParseSSECopyCustomerRequest(d.header, d.metadata) - } - } else { - if crypto.SSEC.IsEncrypted(d.metadata) { - d.header.Set(xhttp.AmzServerSideEncryptionCustomerKey, d.customerKeyHeader) - key, err = ParseSSECustomerHeader(d.header) - } - } - if err != nil { - return err - } - - objectEncryptionKey, err := decryptObjectMeta(key, d.bucket, d.object, m) - if err != nil { - return err - } - var partIDbin [4]byte binary.LittleEndian.PutUint32(partIDbin[:], uint32(partID)) // marshal part ID - mac := hmac.New(sha256.New, objectEncryptionKey) // derive part encryption key from part ID and object key + mac := hmac.New(sha256.New, d.objectEncryptionKey) // derive part encryption key from part ID and object key mac.Write(partIDbin[:]) partEncryptionKey := mac.Sum(nil) @@ -724,8 +720,9 @@ func (d *DecryptBlocksReader) Read(p []byte) (int, error) { // but has an invalid size. 
func (o ObjectInfo) DecryptedSize() (int64, error) { if _, ok := crypto.IsEncrypted(o.UserDefined); !ok { - return 0, errors.New("Cannot compute decrypted size of an unencrypted object") + return -1, errors.New("Cannot compute decrypted size of an unencrypted object") } + if !o.isMultipart() { size, err := sio.DecryptedSize(uint64(o.Size)) if err != nil { @@ -738,7 +735,7 @@ func (o ObjectInfo) DecryptedSize() (int64, error) { for _, part := range o.Parts { partSize, err := sio.DecryptedSize(uint64(part.Size)) if err != nil { - return 0, errObjectTampered + return -1, errObjectTampered } size += int64(partSize) } @@ -858,7 +855,7 @@ func tryDecryptETag(key []byte, encryptedETag string, sses3 bool) string { func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, skipLen int64, seqNumber uint32, partStart int, err error) { if _, ok := crypto.IsEncrypted(o.UserDefined); !ok { err = errors.New("Object is not encrypted") - return + return encOff, encLength, skipLen, seqNumber, partStart, err } if rs == nil { @@ -876,7 +873,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk partSize, err = sio.DecryptedSize(uint64(part.Size)) if err != nil { err = errObjectTampered - return + return encOff, encLength, skipLen, seqNumber, partStart, err } sizes[i] = int64(partSize) decObjSize += int64(partSize) @@ -886,7 +883,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk partSize, err = sio.DecryptedSize(uint64(o.Size)) if err != nil { err = errObjectTampered - return + return encOff, encLength, skipLen, seqNumber, partStart, err } sizes = []int64{int64(partSize)} decObjSize = sizes[0] @@ -895,7 +892,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk var off, length int64 off, length, err = rs.GetOffsetLength(decObjSize) if err != nil { - return + return encOff, encLength, skipLen, seqNumber, partStart, err } // At this point, we have: @@ -1010,8 +1007,10 @@ func DecryptObjectInfo(info *ObjectInfo, r *http.Request) (encrypted bool, err e if encrypted { if crypto.SSEC.IsEncrypted(info.UserDefined) { - if !(crypto.SSEC.IsRequested(headers) || crypto.SSECopy.IsRequested(headers)) { - return encrypted, errEncryptedObject + if !crypto.SSEC.IsRequested(headers) && !crypto.SSECopy.IsRequested(headers) { + if r.Header.Get(xhttp.MinIOSourceReplicationRequest) != "true" { + return encrypted, errEncryptedObject + } } } @@ -1055,7 +1054,7 @@ func metadataEncrypter(key crypto.ObjectKey) objectMetaEncryptFn { var buffer bytes.Buffer mac := hmac.New(sha256.New, key[:]) mac.Write([]byte(baseKey)) - if _, err := sio.Encrypt(&buffer, bytes.NewReader(data), sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()}); err != nil { + if _, err := sio.Encrypt(&buffer, bytes.NewReader(data), sio.Config{Key: mac.Sum(nil)}); err != nil { logger.CriticalIf(context.Background(), errors.New("unable to encrypt using object key")) } return buffer.Bytes() @@ -1063,33 +1062,47 @@ func metadataEncrypter(key crypto.ObjectKey) objectMetaEncryptFn { } // metadataDecrypter reverses metadataEncrypter. 
-func (o *ObjectInfo) metadataDecrypter() objectMetaDecryptFn { +func (o *ObjectInfo) metadataDecrypter(h http.Header) objectMetaDecryptFn { return func(baseKey string, input []byte) ([]byte, error) { if len(input) == 0 { return input, nil } - - key, err := decryptObjectMeta(nil, o.Bucket, o.Name, o.UserDefined) + var key []byte + if crypto.SSECopy.IsRequested(h) { + sseCopyKey, err := crypto.SSECopy.ParseHTTP(h) + if err != nil { + return nil, err + } + key = sseCopyKey[:] + } else { + if k, err := crypto.SSEC.ParseHTTP(h); err == nil { + key = k[:] + } + } + key, err := decryptObjectMeta(key, o.Bucket, o.Name, o.UserDefined) if err != nil { return nil, err } mac := hmac.New(sha256.New, key) mac.Write([]byte(baseKey)) - return sio.DecryptBuffer(nil, input, sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()}) + return sio.DecryptBuffer(nil, input, sio.Config{Key: mac.Sum(nil)}) } } -// decryptChecksums will attempt to decode checksums and return it/them if set. +// decryptPartsChecksums will attempt to decrypt and decode part checksums, and save +// only the decrypted part checksum values on ObjectInfo directly. // if part > 0, and we have the checksum for the part that will be returned. -func (o *ObjectInfo) decryptPartsChecksums() { +func (o *ObjectInfo) decryptPartsChecksums(h http.Header) { data := o.Checksum if len(data) == 0 { return } if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted { - decrypted, err := o.metadataDecrypter()("object-checksum", data) + decrypted, err := o.metadataDecrypter(h)("object-checksum", data) if err != nil { - logger.LogIf(GlobalContext, err) + if !errors.Is(err, crypto.ErrSecretKeyMismatch) { + encLogIf(GlobalContext, err) + } return } data = decrypted @@ -1100,7 +1113,23 @@ func (o *ObjectInfo) decryptPartsChecksums() { o.Parts[i].Checksums = cs[i] } } - return +} + +// decryptChecksum will attempt to decrypt the ObjectInfo.Checksum, returns the decrypted value +// An error is only returned if it was encrypted and the decryption failed. +func (o *ObjectInfo) decryptChecksum(h http.Header) ([]byte, error) { + data := o.Checksum + if len(data) == 0 { + return data, nil + } + if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted { + decrypted, err := o.metadataDecrypter(h)("object-checksum", data) + if err != nil { + return nil, err + } + data = decrypted + } + return data, nil } // metadataEncryptFn provides an encryption function for metadata. @@ -1143,16 +1172,27 @@ func (o *ObjectInfo) metadataEncryptFn(headers http.Header) (objectMetaEncryptFn // decryptChecksums will attempt to decode checksums and return it/them if set. // if part > 0, and we have the checksum for the part that will be returned. -func (o *ObjectInfo) decryptChecksums(part int) map[string]string { +// Returns whether the checksum (main part 0) is a multipart checksum. 
+func (o *ObjectInfo) decryptChecksums(part int, h http.Header) (cs map[string]string, isMP bool) { data := o.Checksum if len(data) == 0 { - return nil + return nil, false + } + if part > 0 && !crypto.SSEC.IsEncrypted(o.UserDefined) { + // already decrypted in ToObjectInfo for multipart objects + for _, pi := range o.Parts { + if pi.Number == part { + return pi.Checksums, true + } + } } if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted { - decrypted, err := o.metadataDecrypter()("object-checksum", data) + decrypted, err := o.metadataDecrypter(h)("object-checksum", data) if err != nil { - logger.LogIf(GlobalContext, err) - return nil + if err != crypto.ErrSecretKeyMismatch { + encLogIf(GlobalContext, err) + } + return nil, part > 0 } data = decrypted } diff --git a/cmd/encryption-v1_test.go b/cmd/encryption-v1_test.go index 7001f4c0ebc66..95f78ef899fa1 100644 --- a/cmd/encryption-v1_test.go +++ b/cmd/encryption-v1_test.go @@ -362,7 +362,6 @@ func TestGetDecryptedRange(t *testing.T) { t.Errorf("Case %d: test failed: %d %d %d %d %d", i, o, l, skip, sn, ps) } } - } // Multipart object tests @@ -385,7 +384,7 @@ func TestGetDecryptedRange(t *testing.T) { // Simple useful utilities repeat = func(k int64, n int) []int64 { a := []int64{} - for i := 0; i < n; i++ { + for range n { a = append(a, k) } return a @@ -472,10 +471,7 @@ func TestGetDecryptedRange(t *testing.T) { // round up the lbPartOffset // to the end of the // corresponding DARE package - lbPkgEndOffset := lbPartOffset - (lbPartOffset % pkgSz) + pkgSz - if lbPkgEndOffset > v { - lbPkgEndOffset = v - } + lbPkgEndOffset := min(lbPartOffset-(lbPartOffset%pkgSz)+pkgSz, v) bytesToDrop := v - lbPkgEndOffset // Last segment to update `l` @@ -487,7 +483,7 @@ func TestGetDecryptedRange(t *testing.T) { cumulativeSum += v cumulativeEncSum += getEncSize(v) } - return + return o, l, skip, sn, ps } for i, test := range testMPs { @@ -538,7 +534,6 @@ func TestGetDecryptedRange(t *testing.T) { i, o, l, skip, sn, ps, oRef, lRef, skipRef, snRef, psRef) } } - } } diff --git a/cmd/endpoint-ellipses.go b/cmd/endpoint-ellipses.go index 3b812085bdc38..f1924945765a5 100644 --- a/cmd/endpoint-ellipses.go +++ b/cmd/endpoint-ellipses.go @@ -22,15 +22,14 @@ import ( "fmt" "net/url" "runtime" - "sort" - "strconv" + "slices" "strings" "github.com/cespare/xxhash/v2" "github.com/minio/minio-go/v7/pkg/set" "github.com/minio/minio/internal/config" - "github.com/minio/pkg/v2/ellipses" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/ellipses" + "github.com/minio/pkg/v3/env" ) // This file implements and supports ellipses pattern for @@ -123,9 +122,7 @@ func possibleSetCountsWithSymmetry(setCounts []uint64, argPatterns []ellipses.Ar // eyes that we prefer a sorted setCount slice for the // subsequent function to figure out the right common // divisor, it avoids loops. 
- sort.Slice(setCounts, func(i, j int) bool { - return setCounts[i] < setCounts[j] - }) + slices.Sort(setCounts) return setCounts } @@ -134,7 +131,7 @@ func possibleSetCountsWithSymmetry(setCounts []uint64, argPatterns []ellipses.Ar // on each index, this function also determines the final set size // The final set size has the affinity towards choosing smaller // indexes (total sets) -func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint64, argPatterns []ellipses.ArgPattern) (setIndexes [][]uint64, err error) { +func getSetIndexes(args []string, totalSizes []uint64, setDriveCount uint64, argPatterns []ellipses.ArgPattern) (setIndexes [][]uint64, err error) { if len(totalSizes) == 0 || len(args) == 0 { return nil, errInvalidArgument } @@ -142,7 +139,7 @@ func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint6 setIndexes = make([][]uint64, len(totalSizes)) for _, totalSize := range totalSizes { // Check if totalSize has minimum range upto setSize - if totalSize < setSizes[0] || totalSize < customSetDriveCount { + if totalSize < setSizes[0] || totalSize < setDriveCount { msg := fmt.Sprintf("Incorrect number of endpoints provided %s", args) return nil, config.ErrInvalidNumberOfErasureEndpoints(nil).Msg(msg) } @@ -167,11 +164,11 @@ func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint6 var setSize uint64 // Custom set drive count allows to override automatic distribution. // only meant if you want to further optimize drive distribution. - if customSetDriveCount > 0 { + if setDriveCount > 0 { msg := fmt.Sprintf("Invalid set drive count. Acceptable values for %d number drives are %d", commonSize, setCounts) var found bool for _, ss := range setCounts { - if ss == customSetDriveCount { + if ss == setDriveCount { found = true } } @@ -180,8 +177,7 @@ func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint6 } // No automatic symmetry calculation expected, user is on their own - setSize = customSetDriveCount - globalCustomErasureDriveCount = true + setSize = setDriveCount } else { // Returns possible set counts with symmetry. setCounts = possibleSetCountsWithSymmetry(setCounts, argPatterns) @@ -256,7 +252,7 @@ func getTotalSizes(argPatterns []ellipses.ArgPattern) []uint64 { // Parses all arguments and returns an endpointSet which is a collection // of endpoints following the ellipses pattern, this is what is used // by the object layer for initializing itself. -func parseEndpointSet(customSetDriveCount uint64, args ...string) (ep endpointSet, err error) { +func parseEndpointSet(setDriveCount uint64, args ...string) (ep endpointSet, err error) { argPatterns := make([]ellipses.ArgPattern, len(args)) for i, arg := range args { patterns, perr := ellipses.FindEllipsesPatterns(arg) @@ -266,7 +262,7 @@ func parseEndpointSet(customSetDriveCount uint64, args ...string) (ep endpointSe argPatterns[i] = patterns } - ep.setIndexes, err = getSetIndexes(args, getTotalSizes(argPatterns), customSetDriveCount, argPatterns) + ep.setIndexes, err = getSetIndexes(args, getTotalSizes(argPatterns), setDriveCount, argPatterns) if err != nil { return endpointSet{}, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error()) } @@ -281,23 +277,14 @@ func parseEndpointSet(customSetDriveCount uint64, args ...string) (ep endpointSe // specific set size. // For example: {1...64} is divided into 4 sets each of size 16. // This applies to even distributed setup syntax as well. 
-func GetAllSets(args ...string) ([][]string, error) { - var customSetDriveCount uint64 - if v := env.Get(EnvErasureSetDriveCount, ""); v != "" { - driveCount, err := strconv.Atoi(v) - if err != nil { - return nil, config.ErrInvalidErasureSetSize(err) - } - customSetDriveCount = uint64(driveCount) - } - +func GetAllSets(setDriveCount uint64, args ...string) ([][]string, error) { var setArgs [][]string if !ellipses.HasEllipses(args...) { var setIndexes [][]uint64 // Check if we have more one args. if len(args) > 1 { var err error - setIndexes, err = getSetIndexes(args, []uint64{uint64(len(args))}, customSetDriveCount, nil) + setIndexes, err = getSetIndexes(args, []uint64{uint64(len(args))}, setDriveCount, nil) if err != nil { return nil, err } @@ -311,7 +298,7 @@ func GetAllSets(args ...string) ([][]string, error) { } setArgs = s.Get() } else { - s, err := parseEndpointSet(customSetDriveCount, args...) + s, err := parseEndpointSet(setDriveCount, args...) if err != nil { return nil, err } @@ -322,7 +309,7 @@ func GetAllSets(args ...string) ([][]string, error) { for _, sargs := range setArgs { for _, arg := range sargs { if uniqueArgs.Contains(arg) { - return nil, config.ErrInvalidErasureEndpoints(nil).Msg(fmt.Sprintf("Input args (%s) has duplicate ellipses", args)) + return nil, config.ErrInvalidErasureEndpoints(nil).Msgf("Input args (%s) has duplicate ellipses", args) } uniqueArgs.Add(arg) } @@ -336,8 +323,6 @@ const ( EnvErasureSetDriveCount = "MINIO_ERASURE_SET_DRIVE_COUNT" ) -var globalCustomErasureDriveCount = false - type node struct { nodeName string disks []string @@ -366,8 +351,13 @@ func (el *endpointsList) add(arg string) error { return nil } +type poolArgs struct { + args []string + setDriveCount uint64 +} + // buildDisksLayoutFromConfFile supports with and without ellipses transparently. -func buildDisksLayoutFromConfFile(pools [][]string) (layout disksLayout, err error) { +func buildDisksLayoutFromConfFile(pools []poolArgs) (layout disksLayout, err error) { if len(pools) == 0 { return layout, errInvalidArgument } @@ -375,7 +365,7 @@ func buildDisksLayoutFromConfFile(pools [][]string) (layout disksLayout, err err for _, list := range pools { var endpointsList endpointsList - for _, arg := range list { + for _, arg := range list.args { switch { case ellipses.HasList(arg): patterns, err := ellipses.FindListPatterns(arg) @@ -436,7 +426,7 @@ func buildDisksLayoutFromConfFile(pools [][]string) (layout disksLayout, err err } } - setArgs, err := GetAllSets(eps...) + setArgs, err := GetAllSets(list.setDriveCount, eps...) if err != nil { return layout, err } @@ -453,7 +443,7 @@ func buildDisksLayoutFromConfFile(pools [][]string) (layout disksLayout, err err layout: setArgs, }) } - return + return layout, err } // mergeDisksLayoutFromArgs supports with and without ellipses transparently. @@ -469,17 +459,23 @@ func mergeDisksLayoutFromArgs(args []string, ctxt *serverCtxt) (err error) { var setArgs [][]string + v, err := env.GetInt(EnvErasureSetDriveCount, 0) + if err != nil { + return err + } + setDriveCount := uint64(v) + // None of the args have ellipses use the old style. if ok { - setArgs, err = GetAllSets(args...) + setArgs, err = GetAllSets(setDriveCount, args...) 
if err != nil { return err } ctxt.Layout = disksLayout{ legacy: true, - pools: []poolDisksLayout{{layout: setArgs}}, + pools: []poolDisksLayout{{layout: setArgs, cmdline: strings.Join(args, " ")}}, } - return + return err } for _, arg := range args { @@ -487,13 +483,13 @@ func mergeDisksLayoutFromArgs(args []string, ctxt *serverCtxt) (err error) { // TODO: support SNSD deployments to be decommissioned in future return fmt.Errorf("all args must have ellipses for pool expansion (%w) args: %s", errInvalidArgument, args) } - setArgs, err = GetAllSets(arg) + setArgs, err = GetAllSets(setDriveCount, arg) if err != nil { return err } ctxt.Layout.pools = append(ctxt.Layout.pools, poolDisksLayout{cmdline: arg, layout: setArgs}) } - return + return err } // CreateServerEndpoints - validates and creates new endpoints from input args, supports diff --git a/cmd/endpoint-ellipses_test.go b/cmd/endpoint-ellipses_test.go index 6714ea923d2f0..6caaebbb663fa 100644 --- a/cmd/endpoint-ellipses_test.go +++ b/cmd/endpoint-ellipses_test.go @@ -22,7 +22,7 @@ import ( "reflect" "testing" - "github.com/minio/pkg/v2/ellipses" + "github.com/minio/pkg/v3/ellipses" ) // Tests create endpoints with ellipses and without. @@ -55,7 +55,6 @@ func TestCreateServerEndpoints(t *testing.T) { } for i, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { srvCtxt := serverCtxt{} err := mergeDisksLayoutFromArgs(testCase.args, &srvCtxt) @@ -85,7 +84,6 @@ func TestGetDivisibleSize(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { gotGCD := getDivisibleSize(testCase.totalSizes) if testCase.result != gotGCD { @@ -172,7 +170,6 @@ func TestGetSetIndexesEnvOverride(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { argPatterns := make([]ellipses.ArgPattern, len(testCase.args)) for i, arg := range testCase.args { @@ -294,7 +291,6 @@ func TestGetSetIndexes(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { argPatterns := make([]ellipses.ArgPattern, len(testCase.args)) for i, arg := range testCase.args { @@ -637,7 +633,6 @@ func TestParseEndpointSet(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { gotEs, err := parseEndpointSet(0, testCase.arg) if err != nil && testCase.success { diff --git a/cmd/endpoint.go b/cmd/endpoint.go index 7c69ba37dcd75..dcfe2e61b5e9c 100644 --- a/cmd/endpoint.go +++ b/cmd/endpoint.go @@ -26,6 +26,7 @@ import ( "path/filepath" "reflect" "runtime" + "slices" "sort" "strconv" "strings" @@ -36,9 +37,8 @@ import ( "github.com/minio/minio/internal/config" "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/mountinfo" - "github.com/minio/pkg/v2/env" - xnet "github.com/minio/pkg/v2/net" - "golang.org/x/exp/slices" + "github.com/minio/pkg/v3/env" + xnet "github.com/minio/pkg/v3/net" ) // EndpointType - enum for endpoint type. @@ -138,6 +138,17 @@ func (endpoint *Endpoint) SetDiskIndex(i int) { endpoint.DiskIdx = i } +func isValidURLEndpoint(u *url.URL) bool { + // URL style of endpoint. + // Valid URL style endpoint is + // - Scheme field must contain "http" or "https" + // - All field should be empty except Host and Path. 
+ isURLOk := (u.Scheme == "http" || u.Scheme == "https") && + u.User == nil && u.Opaque == "" && !u.ForceQuery && + u.RawQuery == "" && u.Fragment == "" + return isURLOk +} + // NewEndpoint - returns new endpoint based on given arguments. func NewEndpoint(arg string) (ep Endpoint, e error) { // isEmptyPath - check whether given path is not empty. @@ -157,8 +168,7 @@ func NewEndpoint(arg string) (ep Endpoint, e error) { // Valid URL style endpoint is // - Scheme field must contain "http" or "https" // - All field should be empty except Host and Path. - if !((u.Scheme == "http" || u.Scheme == "https") && - u.User == nil && u.Opaque == "" && !u.ForceQuery && u.RawQuery == "" && u.Fragment == "") { + if !isValidURLEndpoint(u) { return ep, fmt.Errorf("invalid URL endpoint format") } @@ -213,7 +223,6 @@ func NewEndpoint(arg string) (ep Endpoint, e error) { u.Path = u.Path[1:] } } - } else { // Only check if the arg is an ip address and ask for scheme since its absent. // localhost, example.com, any FQDN cannot be disambiguated from a regular file path such as @@ -253,6 +262,14 @@ type PoolEndpoints struct { // EndpointServerPools - list of list of endpoints type EndpointServerPools []PoolEndpoints +// ESCount returns the total number of erasure sets in this cluster +func (l EndpointServerPools) ESCount() (count int) { + for _, p := range l { + count += p.SetCount + } + return count +} + // GetNodes returns a sorted list of nodes in this cluster func (l EndpointServerPools) GetNodes() (nodes []Node) { nodesMap := make(map[string]Node) @@ -280,7 +297,7 @@ func (l EndpointServerPools) GetNodes() (nodes []Node) { sort.Slice(nodes, func(i, j int) bool { return nodes[i].Host < nodes[j].Host }) - return + return nodes } // GetPoolIdx return pool index @@ -514,7 +531,7 @@ func (l EndpointServerPools) hostsSorted() []*xnet.Host { } host, err := xnet.ParseHost(hostStr) if err != nil { - logger.LogIf(GlobalContext, err) + internalLogIf(GlobalContext, err) continue } hosts[i] = host @@ -571,7 +588,7 @@ func (endpoints Endpoints) GetAllStrings() (all []string) { for _, e := range endpoints { all = append(all, e.String()) } - return + return all } func hostResolveToLocalhost(endpoint Endpoint) bool { @@ -590,8 +607,6 @@ func hostResolveToLocalhost(endpoint Endpoint) bool { // UpdateIsLocal - resolves the host and discovers the local host. func (endpoints Endpoints) UpdateIsLocal() error { - orchestrated := IsDocker() || IsKubernetes() - var epsResolved int var foundLocal bool resolvedList := make([]bool, len(endpoints)) @@ -599,11 +614,8 @@ func (endpoints Endpoints) UpdateIsLocal() error { startTime := time.Now() keepAliveTicker := time.NewTicker(500 * time.Millisecond) defer keepAliveTicker.Stop() - for { + for !foundLocal && (epsResolved != len(endpoints)) { // Break if the local endpoint is found already Or all the endpoints are resolved. - if foundLocal || (epsResolved == len(endpoints)) { - break - } // Retry infinitely on Kubernetes and Docker swarm. 
// This is needed as the remote hosts are sometime @@ -647,7 +659,7 @@ func (endpoints Endpoints) UpdateIsLocal() error { )) ctx := logger.SetReqInfo(GlobalContext, reqInfo) - logger.LogOnceIf(ctx, fmt.Errorf("%s resolves to localhost in a containerized deployment, waiting for it to resolve to a valid IP", + bootLogOnceIf(ctx, fmt.Errorf("%s resolves to localhost in a containerized deployment, waiting for it to resolve to a valid IP", endpoints[i].Hostname()), endpoints[i].Hostname(), logger.ErrorKind) } @@ -677,7 +689,7 @@ func (endpoints Endpoints) UpdateIsLocal() error { )) ctx := logger.SetReqInfo(GlobalContext, reqInfo) - logger.LogOnceIf(ctx, err, endpoints[i].Hostname(), logger.ErrorKind) + bootLogOnceIf(ctx, err, endpoints[i].Hostname(), logger.ErrorKind) } } else { resolvedList[i] = true @@ -775,8 +787,6 @@ type PoolEndpointList []Endpoints // UpdateIsLocal - resolves all hosts and discovers which are local func (p PoolEndpointList) UpdateIsLocal() error { - orchestrated := IsDocker() || IsKubernetes() - var epsResolved int var epCount int @@ -791,11 +801,8 @@ func (p PoolEndpointList) UpdateIsLocal() error { startTime := time.Now() keepAliveTicker := time.NewTicker(1 * time.Second) defer keepAliveTicker.Stop() - for { + for !foundLocal && (epsResolved != epCount) { // Break if the local endpoint is found already Or all the endpoints are resolved. - if foundLocal || (epsResolved == epCount) { - break - } // Retry infinitely on Kubernetes and Docker swarm. // This is needed as the remote hosts are sometime @@ -811,7 +818,7 @@ func (p PoolEndpointList) UpdateIsLocal() error { continue } - if endpoint.Host == "" { + if endpoint.Host == "" || (orchestrated && env.Get("_MINIO_SERVER_LOCAL", "") == endpoint.Host) { if !foundLocal { foundLocal = true } @@ -841,7 +848,7 @@ func (p PoolEndpointList) UpdateIsLocal() error { )) ctx := logger.SetReqInfo(GlobalContext, reqInfo) - logger.LogOnceIf(ctx, fmt.Errorf("%s resolves to localhost in a containerized deployment, waiting for it to resolve to a valid IP", + bootLogOnceIf(ctx, fmt.Errorf("%s resolves to localhost in a containerized deployment, waiting for it to resolve to a valid IP", endpoint.Hostname()), endpoint.Hostname(), logger.ErrorKind) } continue @@ -870,7 +877,7 @@ func (p PoolEndpointList) UpdateIsLocal() error { )) ctx := logger.SetReqInfo(GlobalContext, reqInfo) - logger.LogOnceIf(ctx, fmt.Errorf("Unable to resolve DNS for %s: %w", endpoint, err), endpoint.Hostname(), logger.ErrorKind) + bootLogOnceIf(ctx, fmt.Errorf("Unable to resolve DNS for %s: %w", endpoint, err), endpoint.Hostname(), logger.ErrorKind) } } else { resolvedList[endpoint] = true @@ -1034,7 +1041,6 @@ func CreatePoolEndpoints(serverAddr string, poolsLayout ...poolDisksLayout) ([]E } } - orchestrated := IsKubernetes() || IsDocker() reverseProxy := (env.Get("_MINIO_REVERSE_PROXY", "") != "") && ((env.Get("MINIO_CI_CD", "") != "") || (env.Get("CI", "") != "")) // If not orchestrated // and not setup in reverse proxy @@ -1191,7 +1197,7 @@ func GetProxyEndpointLocalIndex(proxyEps []ProxyEndpoint) int { } // GetProxyEndpoints - get all endpoints that can be used to proxy list request. 
-func GetProxyEndpoints(endpointServerPools EndpointServerPools) []ProxyEndpoint { +func GetProxyEndpoints(endpointServerPools EndpointServerPools, transport http.RoundTripper) []ProxyEndpoint { var proxyEps []ProxyEndpoint proxyEpSet := set.NewStringSet() @@ -1210,7 +1216,7 @@ func GetProxyEndpoints(endpointServerPools EndpointServerPools) []ProxyEndpoint proxyEps = append(proxyEps, ProxyEndpoint{ Endpoint: endpoint, - Transport: globalProxyTransport, + Transport: transport, }) } } diff --git a/cmd/endpoint_test.go b/cmd/endpoint_test.go index 5fd31ed8b9ae5..1eb034ed9c10b 100644 --- a/cmd/endpoint_test.go +++ b/cmd/endpoint_test.go @@ -312,7 +312,6 @@ func TestCreateEndpoints(t *testing.T) { } for i, testCase := range testCases { - i := i testCase := testCase t.Run("", func(t *testing.T) { var srvCtxt serverCtxt diff --git a/cmd/erasure-coding.go b/cmd/erasure-coding.go index fb9b326af3c75..3294458e6165c 100644 --- a/cmd/erasure-coding.go +++ b/cmd/erasure-coding.go @@ -69,7 +69,7 @@ func NewErasure(ctx context.Context, dataBlocks, parityBlocks int, blockSize int }) return enc } - return + return e, err } // EncodeData encodes the given data and returns the erasure-coded data. @@ -136,10 +136,7 @@ func (e *Erasure) ShardFileOffset(startOffset, length, totalLength int64) int64 shardSize := e.ShardSize() shardFileSize := e.ShardFileSize(totalLength) endShard := (startOffset + length) / e.blockSize - tillOffset := endShard*shardSize + shardSize - if tillOffset > shardFileSize { - tillOffset = shardFileSize - } + tillOffset := min(endShard*shardSize+shardSize, shardFileSize) return tillOffset } @@ -201,7 +198,6 @@ func erasureSelfTest() { ok = false continue } - } } if !ok { diff --git a/cmd/erasure-common.go b/cmd/erasure-common.go index e47aa8746c49d..7146766ac2e74 100644 --- a/cmd/erasure-common.go +++ b/cmd/erasure-common.go @@ -19,14 +19,9 @@ package cmd import ( "context" - "fmt" - "io" "math/rand" "sync" "time" - - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/sync/errgroup" ) func (er erasureObjects) getOnlineDisks() (newDisks []StorageAPI) { @@ -35,7 +30,6 @@ func (er erasureObjects) getOnlineDisks() (newDisks []StorageAPI) { var mu sync.Mutex r := rand.New(rand.NewSource(time.Now().UnixNano())) for _, i := range r.Perm(len(disks)) { - i := i wg.Add(1) go func() { defer wg.Done() @@ -88,87 +82,3 @@ func (er erasureObjects) getLocalDisks() (newDisks []StorageAPI) { } return newDisks } - -// readMultipleFiles Reads raw data from all specified files from all disks. -func readMultipleFiles(ctx context.Context, disks []StorageAPI, req ReadMultipleReq, readQuorum int) ([]ReadMultipleResp, error) { - resps := make([]chan ReadMultipleResp, len(disks)) - for i := range resps { - resps[i] = make(chan ReadMultipleResp, len(req.Files)) - } - g := errgroup.WithNErrs(len(disks)) - // Read files in parallel across disks. - for index := range disks { - index := index - g.Go(func() (err error) { - if disks[index] == nil { - return errDiskNotFound - } - return disks[index].ReadMultiple(ctx, req, resps[index]) - }, index) - } - - dataArray := make([]ReadMultipleResp, 0, len(req.Files)) - // Merge results. They should come in order from each. 
- for _, wantFile := range req.Files { - quorum := 0 - toAdd := ReadMultipleResp{ - Bucket: req.Bucket, - Prefix: req.Prefix, - File: wantFile, - } - for i := range resps { - if disks[i] == nil { - continue - } - select { - case <-ctx.Done(): - case gotFile, ok := <-resps[i]: - if !ok { - continue - } - if gotFile.Error != "" || !gotFile.Exists { - continue - } - if gotFile.File != wantFile || gotFile.Bucket != req.Bucket || gotFile.Prefix != req.Prefix { - continue - } - quorum++ - if toAdd.Modtime.After(gotFile.Modtime) || len(gotFile.Data) < len(toAdd.Data) { - // Pick latest, or largest to avoid possible truncated entries. - continue - } - toAdd = gotFile - } - } - if quorum < readQuorum { - toAdd.Exists = false - toAdd.Error = errErasureReadQuorum.Error() - toAdd.Data = nil - } - dataArray = append(dataArray, toAdd) - } - - ignoredErrs := []error{ - errFileNotFound, - errVolumeNotFound, - errFileVersionNotFound, - io.ErrUnexpectedEOF, // some times we would read without locks, ignore these errors - io.EOF, // some times we would read without locks, ignore these errors - } - ignoredErrs = append(ignoredErrs, objectOpIgnoredErrs...) - - errs := g.Wait() - for index, err := range errs { - if err == nil { - continue - } - if !IsErr(err, ignoredErrs...) { - logger.LogOnceIf(ctx, fmt.Errorf("Drive %s, path (%s/%s) returned an error (%w)", - disks[index], req.Bucket, req.Prefix, err), - disks[index].String()) - } - } - - // Return all the metadata. - return dataArray, nil -} diff --git a/cmd/erasure-decode.go b/cmd/erasure-decode.go index 86a2ff1b5a935..f0cc90ab09fda 100644 --- a/cmd/erasure-decode.go +++ b/cmd/erasure-decode.go @@ -38,6 +38,7 @@ type parallelReader struct { shardFileSize int64 buf [][]byte readerToBuf []int + stashBuffer []byte } // newParallelReader returns parallelReader. @@ -46,6 +47,21 @@ func newParallelReader(readers []io.ReaderAt, e Erasure, offset, totalLength int for i := range r2b { r2b[i] = i } + bufs := make([][]byte, len(readers)) + shardSize := int(e.ShardSize()) + var b []byte + + // We should always have enough capacity, but older objects may be bigger + // we do not need stashbuffer for them. + if globalBytePoolCap.Load().WidthCap() >= len(readers)*shardSize { + // Fill buffers + b = globalBytePoolCap.Load().Get() + // Seed the buffers. + for i := range bufs { + bufs[i] = b[i*shardSize : (i+1)*shardSize] + } + } + return ¶llelReader{ readers: readers, orgReaders: readers, @@ -55,6 +71,15 @@ func newParallelReader(readers []io.ReaderAt, e Erasure, offset, totalLength int shardFileSize: e.ShardFileSize(totalLength), buf: make([][]byte, len(readers)), readerToBuf: r2b, + stashBuffer: b, + } +} + +// Done will release any resources used by the parallelReader. +func (p *parallelReader) Done() { + if p.stashBuffer != nil { + globalBytePoolCap.Load().Put(p.stashBuffer) + p.stashBuffer = nil } } @@ -177,6 +202,9 @@ func (p *parallelReader) Read(dst [][]byte) ([][]byte, error) { // This will be communicated upstream. p.orgReaders[bufIdx] = nil + if br, ok := p.readers[i].(io.Closer); ok { + br.Close() + } p.readers[i] = nil // Since ReadAt returned error, trigger another read. 
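Note on the stashBuffer change above: newParallelReader now takes a single slab from the capacity-checked byte pool, slices it into one buffer per shard reader, and the new Done() method returns that slab once Decode or Heal finishes. A minimal sketch of the slab-and-slice pattern, using a hypothetical sync.Pool in place of globalBytePoolCap (the fallback behaviour here is illustrative; the server simply skips the stash when the pool's slabs are too small):

package main

import (
	"fmt"
	"sync"
)

// slabPool is a stand-in for the server's capacity-checked byte pool.
var slabPool = sync.Pool{
	New: func() any { return make([]byte, 64*1024) },
}

// shardBuffers carves one pooled slab into n shard-sized buffers.
// If the slab is too small (older objects may have larger shards), it
// falls back to plain allocations and returns a nil slab.
func shardBuffers(n, shardSize int) (bufs [][]byte, slab []byte) {
	bufs = make([][]byte, n)
	s := slabPool.Get().([]byte)
	if cap(s) < n*shardSize {
		slabPool.Put(s)
		for i := range bufs {
			bufs[i] = make([]byte, shardSize)
		}
		return bufs, nil
	}
	s = s[:n*shardSize]
	for i := range bufs {
		bufs[i] = s[i*shardSize : (i+1)*shardSize]
	}
	return bufs, s
}

// release mirrors parallelReader.Done: hand the slab back exactly once.
func release(slab []byte) {
	if slab != nil {
		slabPool.Put(slab)
	}
}

func main() {
	bufs, slab := shardBuffers(4, 1024)
	defer release(slab)
	fmt.Println(len(bufs), len(bufs[0])) // 4 1024
}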
@@ -224,6 +252,7 @@ func (e Erasure) Decode(ctx context.Context, writer io.Writer, readers []io.Read if len(prefer) == len(readers) { reader.preferReaders(prefer) } + defer reader.Done() startBlock := offset / e.blockSize endBlock := (offset + length) / e.blockSize @@ -294,6 +323,7 @@ func (e Erasure) Heal(ctx context.Context, writers []io.Writer, readers []io.Rea if len(readers) == len(prefer) { reader.preferReaders(prefer) } + defer reader.Done() startBlock := int64(0) endBlock := totalLength / e.blockSize @@ -319,7 +349,7 @@ func (e Erasure) Heal(ctx context.Context, writers []io.Writer, readers []io.Rea return err } - w := parallelWriter{ + w := multiWriter{ writers: writers, writeQuorum: 1, errs: make([]error, len(writers)), diff --git a/cmd/erasure-decode_test.go b/cmd/erasure-decode_test.go index d1b033127b22a..229047e16a750 100644 --- a/cmd/erasure-decode_test.go +++ b/cmd/erasure-decode_test.go @@ -89,7 +89,7 @@ func TestErasureDecode(t *testing.T) { if err != nil { t.Fatalf("Test %d: failed to create test setup: %v", i, err) } - erasure, err := NewErasure(context.Background(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize) + erasure, err := NewErasure(t.Context(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize) if err != nil { t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err) } @@ -108,7 +108,7 @@ func TestErasureDecode(t *testing.T) { for i, disk := range disks { writers[i] = newBitrotWriter(disk, "", "testbucket", "object", erasure.ShardFileSize(test.data), writeAlgorithm, erasure.ShardSize()) } - n, err := erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1) + n, err := erasure.Encode(t.Context(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1) closeBitrotWriters(writers) if err != nil { t.Fatalf("Test %d: failed to create erasure test file: %v", i, err) @@ -134,7 +134,7 @@ func TestErasureDecode(t *testing.T) { } writer := bytes.NewBuffer(nil) - _, err = erasure.Decode(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data, nil) + _, err = erasure.Decode(t.Context(), writer, bitrotReaders, test.offset, test.length, test.data, nil) closeBitrotReaders(bitrotReaders) if err != nil && !test.shouldFail { t.Errorf("Test %d: should pass but failed with: %v", i, err) @@ -177,7 +177,7 @@ func TestErasureDecode(t *testing.T) { bitrotReaders[0] = nil } writer.Reset() - _, err = erasure.Decode(context.Background(), writer, bitrotReaders, test.offset, test.length, test.data, nil) + _, err = erasure.Decode(t.Context(), writer, bitrotReaders, test.offset, test.length, test.data, nil) closeBitrotReaders(bitrotReaders) if err != nil && !test.shouldFailQuorum { t.Errorf("Test %d: should pass but failed with: %v", i, err) @@ -211,7 +211,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) { return } disks := setup.disks - erasure, err := NewErasure(context.Background(), dataBlocks, parityBlocks, blockSize) + erasure, err := NewErasure(t.Context(), dataBlocks, parityBlocks, blockSize) if err != nil { t.Fatalf("failed to create ErasureStorage: %v", err) } @@ -236,7 +236,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) { // Create a test file to read from. 
buffer := make([]byte, blockSize, 2*blockSize) - n, err := erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1) + n, err := erasure.Encode(t.Context(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1) closeBitrotWriters(writers) if err != nil { t.Fatal(err) @@ -251,7 +251,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) { buf := &bytes.Buffer{} // Verify erasure.Decode() for random offsets and lengths. - for i := 0; i < iterations; i++ { + for range iterations { offset := r.Int63n(length) readLen := r.Int63n(length - offset) @@ -266,7 +266,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) { tillOffset := erasure.ShardFileOffset(offset, readLen, length) bitrotReaders[index] = newStreamingBitrotReader(disk, nil, "testbucket", "object", tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize()) } - _, err = erasure.Decode(context.Background(), buf, bitrotReaders, offset, readLen, length, nil) + _, err = erasure.Decode(t.Context(), buf, bitrotReaders, offset, readLen, length, nil) closeBitrotReaders(bitrotReaders) if err != nil { t.Fatal(err, offset, readLen) @@ -308,17 +308,16 @@ func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64, b.Fatalf("failed to create erasure test file: %v", err) } - for i := 0; i < dataDown; i++ { + for i := range dataDown { writers[i] = nil } for i := data; i < data+parityDown; i++ { writers[i] = nil } - b.ResetTimer() b.SetBytes(size) b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { bitrotReaders := make([]io.ReaderAt, len(disks)) for index, disk := range disks { if writers[index] == nil { diff --git a/cmd/erasure-encode.go b/cmd/erasure-encode.go index 56f5869b000e2..215ac172e3c4b 100644 --- a/cmd/erasure-encode.go +++ b/cmd/erasure-encode.go @@ -21,44 +21,36 @@ import ( "context" "fmt" "io" - "sync" ) -// Writes in parallel to writers -type parallelWriter struct { +// Writes to multiple writers +type multiWriter struct { writers []io.Writer writeQuorum int errs []error } -// Write writes data to writers in parallel. -func (p *parallelWriter) Write(ctx context.Context, blocks [][]byte) error { - var wg sync.WaitGroup - +// Write writes data to writers. +func (p *multiWriter) Write(ctx context.Context, blocks [][]byte) error { for i := range p.writers { - if p.writers[i] == nil { - p.errs[i] = errDiskNotFound + if p.errs[i] != nil { continue } - if p.errs[i] != nil { + if p.writers[i] == nil { + p.errs[i] = errDiskNotFound continue } - wg.Add(1) - go func(i int) { - defer wg.Done() - var n int - n, p.errs[i] = p.writers[i].Write(blocks[i]) - if p.errs[i] == nil { - if n != len(blocks[i]) { - p.errs[i] = io.ErrShortWrite - p.writers[i] = nil - } - } else { + var n int + n, p.errs[i] = p.writers[i].Write(blocks[i]) + if p.errs[i] == nil { + if n != len(blocks[i]) { + p.errs[i] = io.ErrShortWrite p.writers[i] = nil } - }(i) + } else { + p.writers[i] = nil + } } - wg.Wait() // If nilCount >= p.writeQuorum, we return nil. This is because HealFile() uses // CreateFile with p.writeQuorum=1 to accommodate healing of single disk. @@ -75,7 +67,7 @@ func (p *parallelWriter) Write(ctx context.Context, blocks [][]byte) error { // Encode reads from the reader, erasure-encodes the data and writes to the writers. 
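The parallelWriter-to-multiWriter change above drops the per-writer goroutines: shards are now written one writer at a time, a failed or short write marks that writer nil, and quorum is still enforced from the collected errors. A simplified sketch of that sequential fan-out plus quorum check (helper names here are illustrative, not the server's, and the special handling of offline disks during healing is omitted):

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

var errWriteQuorum = errors.New("write quorum not reached")

// writeBlocks writes blocks[i] to writers[i] in order, drops writers that
// fail or write short, and errors out once fewer than quorum succeed.
func writeBlocks(writers []io.Writer, blocks [][]byte, quorum int) error {
	ok := 0
	for i, w := range writers {
		if w == nil {
			continue // offline writer
		}
		n, err := w.Write(blocks[i])
		if err != nil || n != len(blocks[i]) {
			writers[i] = nil // do not use this writer again
			continue
		}
		ok++
	}
	if ok < quorum {
		return errWriteQuorum
	}
	return nil
}

func main() {
	bufs := []*bytes.Buffer{{}, {}, nil, {}} // one writer offline
	writers := make([]io.Writer, len(bufs))
	for i, b := range bufs {
		if b != nil {
			writers[i] = b
		}
	}
	blocks := [][]byte{[]byte("a"), []byte("b"), []byte("c"), []byte("d")}
	fmt.Println(writeBlocks(writers, blocks, 3)) // <nil>: 3 of 4 writes succeeded
}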
func (e *Erasure) Encode(ctx context.Context, src io.Reader, writers []io.Writer, buf []byte, quorum int) (total int64, err error) { - writer := ¶llelWriter{ + writer := &multiWriter{ writers: writers, writeQuorum: quorum, errs: make([]error, len(writers)), diff --git a/cmd/erasure-encode_test.go b/cmd/erasure-encode_test.go index c301afcce50a9..54a37b71abd6a 100644 --- a/cmd/erasure-encode_test.go +++ b/cmd/erasure-encode_test.go @@ -88,7 +88,7 @@ func TestErasureEncode(t *testing.T) { t.Fatalf("Test %d: failed to create test setup: %v", i, err) } disks := setup.disks - erasure, err := NewErasure(context.Background(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize) + erasure, err := NewErasure(t.Context(), test.dataBlocks, test.onDisks-test.dataBlocks, test.blocksize) if err != nil { t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err) } @@ -105,7 +105,7 @@ func TestErasureEncode(t *testing.T) { } writers[i] = newBitrotWriter(disk, "", "testbucket", "object", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize()) } - n, err := erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1) + n, err := erasure.Encode(t.Context(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1) closeBitrotWriters(writers) if err != nil && !test.shouldFail { t.Errorf("Test %d: should pass but failed with: %v", i, err) @@ -140,7 +140,7 @@ func TestErasureEncode(t *testing.T) { if test.offDisks > 0 { writers[0] = nil } - n, err = erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1) + n, err = erasure.Encode(t.Context(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1) closeBitrotWriters(writers) if err != nil && !test.shouldFailQuorum { t.Errorf("Test %d: should pass but failed with: %v", i, err) @@ -172,17 +172,16 @@ func benchmarkErasureEncode(data, parity, dataDown, parityDown int, size int64, buffer := make([]byte, blockSizeV2, 2*blockSizeV2) content := make([]byte, size) - for i := 0; i < dataDown; i++ { + for i := range dataDown { disks[i] = OfflineDisk } for i := data; i < data+parityDown; i++ { disks[i] = OfflineDisk } - b.ResetTimer() b.SetBytes(size) b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { writers := make([]io.Writer, len(disks)) for i, disk := range disks { if disk == OfflineDisk { diff --git a/cmd/erasure-heal_test.go b/cmd/erasure-heal_test.go index 70f648f136fd7..cb6b25fd04384 100644 --- a/cmd/erasure-heal_test.go +++ b/cmd/erasure-heal_test.go @@ -19,7 +19,6 @@ package cmd import ( "bytes" - "context" "crypto/rand" "io" "os" @@ -75,7 +74,7 @@ func TestErasureHeal(t *testing.T) { t.Fatalf("Test %d: failed to setup Erasure environment: %v", i, err) } disks := setup.disks - erasure, err := NewErasure(context.Background(), test.dataBlocks, test.disks-test.dataBlocks, test.blocksize) + erasure, err := NewErasure(t.Context(), test.dataBlocks, test.disks-test.dataBlocks, test.blocksize) if err != nil { t.Fatalf("Test %d: failed to create ErasureStorage: %v", i, err) } @@ -88,7 +87,7 @@ func TestErasureHeal(t *testing.T) { for i, disk := range disks { writers[i] = newBitrotWriter(disk, "", "testbucket", "testobject", erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize()) } - _, err = erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1) + _, err = erasure.Encode(t.Context(), bytes.NewReader(data), 
writers, buffer, erasure.dataBlocks+1) closeBitrotWriters(writers) if err != nil { t.Fatalf("Test %d: failed to create random test data: %v", i, err) @@ -103,7 +102,7 @@ func TestErasureHeal(t *testing.T) { // setup stale disks for the test case staleDisks := make([]StorageAPI, len(disks)) copy(staleDisks, disks) - for j := 0; j < len(staleDisks); j++ { + for j := range staleDisks { if j < test.offDisks { readers[j] = nil } else { @@ -132,7 +131,7 @@ func TestErasureHeal(t *testing.T) { } // test case setup is complete - now call Heal() - err = erasure.Heal(context.Background(), staleWriters, readers, test.size, nil) + err = erasure.Heal(t.Context(), staleWriters, readers, test.size, nil) closeBitrotReaders(readers) closeBitrotWriters(staleWriters) if err != nil && !test.shouldFail { diff --git a/cmd/erasure-healing-common.go b/cmd/erasure-healing-common.go index aee231ef8379d..0bde3107d9058 100644 --- a/cmd/erasure-healing-common.go +++ b/cmd/erasure-healing-common.go @@ -20,6 +20,7 @@ package cmd import ( "bytes" "context" + "slices" "time" "github.com/minio/madmin-go/v3" @@ -253,19 +254,53 @@ func listOnlineDisks(disks []StorageAPI, partsMetadata []FileInfo, errs []error, return onlineDisks, modTime, "" } -// disksWithAllParts - This function needs to be called with -// []StorageAPI returned by listOnlineDisks. Returns, -// -// - disks which have all parts specified in the latest xl.meta. -// -// - slice of errors about the state of data files on disk - can have -// a not-found error or a hash-mismatch error. -func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetadata []FileInfo, - errs []error, latestMeta FileInfo, bucket, object string, - scanMode madmin.HealScanMode) ([]StorageAPI, []error, time.Time, -) { - availableDisks := make([]StorageAPI, len(onlineDisks)) - dataErrs := make([]error, len(onlineDisks)) +// Convert verify or check parts returned error to integer representation +func convPartErrToInt(err error) int { + err = unwrapAll(err) + switch err { + case nil: + return checkPartSuccess + case errFileNotFound, errFileVersionNotFound: + return checkPartFileNotFound + case errFileCorrupt: + return checkPartFileCorrupt + case errVolumeNotFound: + return checkPartVolumeNotFound + case errDiskNotFound: + return checkPartDiskNotFound + default: + return checkPartUnknown + } +} + +func partNeedsHealing(partErrs []int) bool { + return slices.IndexFunc(partErrs, func(i int) bool { return i != checkPartSuccess && i != checkPartUnknown }) > -1 +} + +func countPartNotSuccess(partErrs []int) (c int) { + for _, pe := range partErrs { + if pe != checkPartSuccess { + c++ + } + } + return c +} + +// checkObjectWithAllParts sets partsMetadata and onlineDisks when xl.meta is inexistant/corrupted or outdated +// it also checks if the status of each part (corrupted, missing, ok) in each drive +func checkObjectWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetadata []FileInfo, + errs []error, latestMeta FileInfo, filterByETag bool, bucket, object string, + scanMode madmin.HealScanMode, +) (dataErrsByDisk map[int][]int, dataErrsByPart map[int][]int) { + dataErrsByDisk = make(map[int][]int, len(onlineDisks)) + for i := range onlineDisks { + dataErrsByDisk[i] = make([]int, len(latestMeta.Parts)) + } + + dataErrsByPart = make(map[int][]int, len(latestMeta.Parts)) + for i := range latestMeta.Parts { + dataErrsByPart[i] = make([]int, len(onlineDisks)) + } inconsistent := 0 for i, meta := range partsMetadata { @@ -288,34 +323,40 @@ func disksWithAllParts(ctx 
context.Context, onlineDisks []StorageAPI, partsMetad } } - erasureDistributionReliable := true - if inconsistent > len(partsMetadata)/2 { - // If there are too many inconsistent files, then we can't trust erasure.Distribution (most likely - // because of bugs found in CopyObject/PutObjectTags) https://github.com/minio/minio/pull/10772 - erasureDistributionReliable = false - } + erasureDistributionReliable := inconsistent <= len(partsMetadata)/2 - for i, onlineDisk := range onlineDisks { + metaErrs := make([]error, len(errs)) + + for i := range onlineDisks { if errs[i] != nil { - dataErrs[i] = errs[i] + metaErrs[i] = errs[i] continue } - if onlineDisk == OfflineDisk { - dataErrs[i] = errDiskNotFound + if onlineDisks[i] == OfflineDisk { + metaErrs[i] = errDiskNotFound continue } meta := partsMetadata[i] - if !meta.ModTime.Equal(latestMeta.ModTime) || meta.DataDir != latestMeta.DataDir { - dataErrs[i] = errFileCorrupt + corrupted := false + if filterByETag { + corrupted = meta.Metadata["etag"] != latestMeta.Metadata["etag"] + } else { + corrupted = !meta.ModTime.Equal(latestMeta.ModTime) || meta.DataDir != latestMeta.DataDir + } + + if corrupted { + metaErrs[i] = errFileCorrupt partsMetadata[i] = FileInfo{} + onlineDisks[i] = nil continue } if erasureDistributionReliable { if !meta.IsValid() { partsMetadata[i] = FileInfo{} - dataErrs[i] = errFileCorrupt + metaErrs[i] = errFileCorrupt + onlineDisks[i] = nil continue } @@ -325,53 +366,75 @@ func disksWithAllParts(ctx context.Context, onlineDisks []StorageAPI, partsMetad // attempt a fix if possible, assuming other entries // might have the right erasure distribution. partsMetadata[i] = FileInfo{} - dataErrs[i] = errFileCorrupt + metaErrs[i] = errFileCorrupt + onlineDisks[i] = nil continue } } } + } + + // Copy meta errors to part errors + for i, err := range metaErrs { + if err != nil { + partErr := convPartErrToInt(err) + for p := range latestMeta.Parts { + dataErrsByPart[p][i] = partErr + } + } + } + + for i, onlineDisk := range onlineDisks { + if metaErrs[i] != nil { + continue + } + + meta := partsMetadata[i] + if meta.Deleted || meta.IsRemote() { + continue + } // Always check data, if we got it. if (len(meta.Data) > 0 || meta.Size == 0) && len(meta.Parts) > 0 { checksumInfo := meta.Erasure.GetChecksumInfo(meta.Parts[0].Number) - dataErrs[i] = bitrotVerify(bytes.NewReader(meta.Data), + verifyErr := bitrotVerify(bytes.NewReader(meta.Data), int64(len(meta.Data)), meta.Erasure.ShardFileSize(meta.Size), checksumInfo.Algorithm, checksumInfo.Hash, meta.Erasure.ShardSize()) - if dataErrs[i] == nil { - // All parts verified, mark it as all data available. - availableDisks[i] = onlineDisk - } else { - // upon errors just make that disk's fileinfo invalid - partsMetadata[i] = FileInfo{} - } + dataErrsByPart[0][i] = convPartErrToInt(verifyErr) continue } - meta.DataDir = latestMeta.DataDir + var ( + verifyErr error + verifyResp *CheckPartsResp + ) + switch scanMode { case madmin.HealDeepScan: // disk has a valid xl.meta but may not have all the // parts. This is considered an outdated disk, since // it needs healing too. 
- if !meta.Deleted && !meta.IsRemote() { - dataErrs[i] = onlineDisk.VerifyFile(ctx, bucket, object, meta) - } - case madmin.HealNormalScan: - if !meta.Deleted && !meta.IsRemote() { - dataErrs[i] = onlineDisk.CheckParts(ctx, bucket, object, meta) - } + verifyResp, verifyErr = onlineDisk.VerifyFile(ctx, bucket, object, meta) + default: + verifyResp, verifyErr = onlineDisk.CheckParts(ctx, bucket, object, meta) } - if dataErrs[i] == nil { - // All parts verified, mark it as all data available. - availableDisks[i] = onlineDisk - } else { - // upon errors just make that disk's fileinfo invalid - partsMetadata[i] = FileInfo{} + for p := range latestMeta.Parts { + if verifyErr != nil { + dataErrsByPart[p][i] = convPartErrToInt(verifyErr) + } else { + dataErrsByPart[p][i] = verifyResp.Results[p] + } } } - return availableDisks, dataErrs, timeSentinel + // Build dataErrs by disk from dataErrs by part + for part, disks := range dataErrsByPart { + for disk := range disks { + dataErrsByDisk[disk][part] = dataErrsByPart[part][disk] + } + } + return dataErrsByDisk, dataErrsByPart } diff --git a/cmd/erasure-healing-common_test.go b/cmd/erasure-healing-common_test.go index abcb9ff3db0ac..04909b2dfc378 100644 --- a/cmd/erasure-healing-common_test.go +++ b/cmd/erasure-healing-common_test.go @@ -23,6 +23,7 @@ import ( "fmt" "os" "path/filepath" + "runtime" "testing" "time" @@ -147,7 +148,11 @@ func TestCommonTime(t *testing.T) { // TestListOnlineDisks - checks if listOnlineDisks and outDatedDisks // are consistent with each other. func TestListOnlineDisks(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + if runtime.GOOS == globalWindowsOSName { + t.Skip() + } + + ctx, cancel := context.WithCancel(t.Context()) defer cancel() obj, disks, err := prepareErasure16(ctx) @@ -155,7 +160,7 @@ func TestListOnlineDisks(t *testing.T) { t.Fatalf("Prepare Erasure backend failed - %v", err) } setObjectLayer(obj) - defer obj.Shutdown(context.Background()) + defer obj.Shutdown(t.Context()) defer removeRoots(disks) type tamperKind int @@ -170,7 +175,7 @@ func TestListOnlineDisks(t *testing.T) { fourNanoSecs := time.Unix(4, 0).UTC() modTimesThreeNone := make([]time.Time, 16) modTimesThreeFour := make([]time.Time, 16) - for i := 0; i < 16; i++ { + for i := range 16 { // Have 13 good xl.meta, 12 for default parity count = 4 (EC:4) and one // to be tampered with. if i > 12 { @@ -230,7 +235,7 @@ func TestListOnlineDisks(t *testing.T) { } object := "object" - data := bytes.Repeat([]byte("a"), smallFileThreshold*16) + data := bytes.Repeat([]byte("a"), smallFileThreshold*32) z := obj.(*erasureServerPools) erasureDisks, err := z.GetDisks(0, 0) @@ -239,7 +244,6 @@ func TestListOnlineDisks(t *testing.T) { } for i, test := range testCases { - test := test t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) { _, err = obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{}) if err != nil { @@ -271,7 +275,7 @@ func TestListOnlineDisks(t *testing.T) { // and check if that disk // appears in outDatedDisks. 
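For reference on the data structures introduced above: checkObjectWithAllParts reports part verification results as integer codes in two mirrored maps, dataErrsByPart[part][disk] and dataErrsByDisk[disk][part], and predicates such as partNeedsHealing scan one row of that matrix. A self-contained sketch of the transpose and the predicate; the code values below are illustrative stand-ins for the checkPart* constants:

package main

import (
	"fmt"
	"slices"
)

// Illustrative stand-ins for the checkPart* result codes.
const (
	partOK = iota
	partUnknown
	partMissing
	partCorrupt
)

// needsHealing reports whether any part result for a drive is an
// actionable failure; unknown results are left alone.
func needsHealing(partResults []int) bool {
	return slices.ContainsFunc(partResults, func(r int) bool {
		return r != partOK && r != partUnknown
	})
}

func main() {
	const disks = 4
	// byPart[part][disk]: the second part is missing on disk 2 only.
	byPart := map[int][]int{
		0: {partOK, partOK, partOK, partOK},
		1: {partOK, partOK, partMissing, partOK},
	}

	// Transpose into byDisk[disk][part], as checkObjectWithAllParts does.
	byDisk := make(map[int][]int, disks)
	for d := range disks {
		byDisk[d] = make([]int, len(byPart))
		for p := range byPart {
			byDisk[d][p] = byPart[p][d]
		}
	}

	for d := range disks {
		fmt.Printf("disk %d needs healing: %v\n", d, needsHealing(byDisk[d]))
	}
}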
tamperedIndex = index - dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{ + dErr := erasureDisks[index].Delete(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{ Recursive: false, Immediate: false, }) @@ -299,7 +303,6 @@ func TestListOnlineDisks(t *testing.T) { f.Close() break } - } rQuorum := len(errs) - z.serverPools[0].sets[0].defaultParityCount @@ -308,12 +311,11 @@ func TestListOnlineDisks(t *testing.T) { t.Fatalf("Expected modTime to be equal to %v but was found to be %v", test.expectedTime, modTime) } - availableDisks, newErrs, _ := disksWithAllParts(ctx, onlineDisks, partsMetadata, - test.errs, fi, bucket, object, madmin.HealDeepScan) - test.errs = newErrs + _, _ = checkObjectWithAllParts(ctx, onlineDisks, partsMetadata, + test.errs, fi, false, bucket, object, madmin.HealDeepScan) if test._tamperBackend != noTamper { - if tamperedIndex != -1 && availableDisks[tamperedIndex] != nil { + if tamperedIndex != -1 && onlineDisks[tamperedIndex] != nil { t.Fatalf("Drive (%v) with part.1 missing is not a drive with available data", erasureDisks[tamperedIndex]) } @@ -325,7 +327,7 @@ func TestListOnlineDisks(t *testing.T) { // TestListOnlineDisksSmallObjects - checks if listOnlineDisks and outDatedDisks // are consistent with each other. func TestListOnlineDisksSmallObjects(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() obj, disks, err := prepareErasure16(ctx) @@ -333,7 +335,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) { t.Fatalf("Prepare Erasure backend failed - %v", err) } setObjectLayer(obj) - defer obj.Shutdown(context.Background()) + defer obj.Shutdown(t.Context()) defer removeRoots(disks) type tamperKind int @@ -347,7 +349,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) { fourNanoSecs := time.Unix(4, 0).UTC() modTimesThreeNone := make([]time.Time, 16) modTimesThreeFour := make([]time.Time, 16) - for i := 0; i < 16; i++ { + for i := range 16 { // Have 13 good xl.meta, 12 for default parity count = 4 (EC:4) and one // to be tampered with. if i > 12 { @@ -416,7 +418,6 @@ func TestListOnlineDisksSmallObjects(t *testing.T) { } for i, test := range testCases { - test := test t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) { _, err := obj.PutObject(ctx, bucket, object, mustGetPutObjReader(t, bytes.NewReader(data), int64(len(data)), "", ""), ObjectOptions{}) @@ -453,7 +454,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) { // and check if that disk // appears in outDatedDisks. 
tamperedIndex = index - dErr := erasureDisks[index].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{ + dErr := erasureDisks[index].Delete(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{ Recursive: false, Immediate: false, }) @@ -481,7 +482,6 @@ func TestListOnlineDisksSmallObjects(t *testing.T) { f.Close() break } - } rQuorum := len(errs) - z.serverPools[0].sets[0].defaultParityCount @@ -491,12 +491,11 @@ func TestListOnlineDisksSmallObjects(t *testing.T) { test.expectedTime, modTime) } - availableDisks, newErrs, _ := disksWithAllParts(ctx, onlineDisks, partsMetadata, - test.errs, fi, bucket, object, madmin.HealDeepScan) - test.errs = newErrs + _, _ = checkObjectWithAllParts(ctx, onlineDisks, partsMetadata, + test.errs, fi, false, bucket, object, madmin.HealDeepScan) if test._tamperBackend != noTamper { - if tamperedIndex != -1 && availableDisks[tamperedIndex] != nil { + if tamperedIndex != -1 && onlineDisks[tamperedIndex] != nil { t.Fatalf("Drive (%v) with part.1 missing is not a drive with available data", erasureDisks[tamperedIndex]) } @@ -506,14 +505,14 @@ func TestListOnlineDisksSmallObjects(t *testing.T) { } func TestDisksWithAllParts(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() obj, disks, err := prepareErasure16(ctx) if err != nil { t.Fatalf("Prepare Erasure backend failed - %v", err) } setObjectLayer(obj) - defer obj.Shutdown(context.Background()) + defer obj.Shutdown(t.Context()) defer removeRoots(disks) bucket := "bucket" @@ -542,6 +541,7 @@ func TestDisksWithAllParts(t *testing.T) { // Test 1: Test that all disks are returned without any failures with // unmodified meta data + erasureDisks = s.getDisks() partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true) if err != nil { t.Fatalf("Failed to read xl meta data %v", err) @@ -554,16 +554,12 @@ func TestDisksWithAllParts(t *testing.T) { erasureDisks, _, _ = listOnlineDisks(erasureDisks, partsMetadata, errs, readQuorum) - filteredDisks, errs, _ := disksWithAllParts(ctx, erasureDisks, partsMetadata, - errs, fi, bucket, object, madmin.HealDeepScan) + dataErrsPerDisk, _ := checkObjectWithAllParts(ctx, erasureDisks, partsMetadata, + errs, fi, false, bucket, object, madmin.HealDeepScan) - if len(filteredDisks) != len(erasureDisks) { - t.Errorf("Unexpected number of drives: %d", len(filteredDisks)) - } - - for diskIndex, disk := range filteredDisks { - if errs[diskIndex] != nil { - t.Errorf("Unexpected error %s", errs[diskIndex]) + for diskIndex, disk := range erasureDisks { + if partNeedsHealing(dataErrsPerDisk[diskIndex]) { + t.Errorf("Unexpected error: %v", dataErrsPerDisk[diskIndex]) } if disk == nil { @@ -572,17 +568,15 @@ func TestDisksWithAllParts(t *testing.T) { } // Test 2: Not synchronized modtime + erasureDisks = s.getDisks() partsMetadataBackup := partsMetadata[0] partsMetadata[0].ModTime = partsMetadata[0].ModTime.Add(-1 * time.Hour) errs = make([]error, len(erasureDisks)) - filteredDisks, _, _ = disksWithAllParts(ctx, erasureDisks, partsMetadata, - errs, fi, bucket, object, madmin.HealDeepScan) + _, _ = checkObjectWithAllParts(ctx, erasureDisks, partsMetadata, + errs, fi, false, bucket, object, madmin.HealDeepScan) - if len(filteredDisks) != len(erasureDisks) { - t.Errorf("Unexpected number of drives: %d", len(filteredDisks)) - } - for diskIndex, disk := range filteredDisks { + for diskIndex, disk := range erasureDisks { if 
diskIndex == 0 && disk != nil { t.Errorf("Drive not filtered as expected, drive: %d", diskIndex) } @@ -593,17 +587,15 @@ func TestDisksWithAllParts(t *testing.T) { partsMetadata[0] = partsMetadataBackup // Revert before going to the next test // Test 3: Not synchronized DataDir + erasureDisks = s.getDisks() partsMetadataBackup = partsMetadata[1] partsMetadata[1].DataDir = "foo-random" errs = make([]error, len(erasureDisks)) - filteredDisks, _, _ = disksWithAllParts(ctx, erasureDisks, partsMetadata, - errs, fi, bucket, object, madmin.HealDeepScan) + _, _ = checkObjectWithAllParts(ctx, erasureDisks, partsMetadata, + errs, fi, false, bucket, object, madmin.HealDeepScan) - if len(filteredDisks) != len(erasureDisks) { - t.Errorf("Unexpected number of drives: %d", len(filteredDisks)) - } - for diskIndex, disk := range filteredDisks { + for diskIndex, disk := range erasureDisks { if diskIndex == 1 && disk != nil { t.Errorf("Drive not filtered as expected, drive: %d", diskIndex) } @@ -614,6 +606,7 @@ func TestDisksWithAllParts(t *testing.T) { partsMetadata[1] = partsMetadataBackup // Revert before going to the next test // Test 4: key = disk index, value = part name with hash mismatch + erasureDisks = s.getDisks() diskFailures := make(map[int]string) diskFailures[0] = "part.1" diskFailures[3] = "part.1" @@ -634,29 +627,18 @@ func TestDisksWithAllParts(t *testing.T) { } errs = make([]error, len(erasureDisks)) - filteredDisks, errs, _ = disksWithAllParts(ctx, erasureDisks, partsMetadata, - errs, fi, bucket, object, madmin.HealDeepScan) - - if len(filteredDisks) != len(erasureDisks) { - t.Errorf("Unexpected number of drives: %d", len(filteredDisks)) - } + dataErrsPerDisk, _ = checkObjectWithAllParts(ctx, erasureDisks, partsMetadata, + errs, fi, false, bucket, object, madmin.HealDeepScan) - for diskIndex, disk := range filteredDisks { + for diskIndex := range erasureDisks { if _, ok := diskFailures[diskIndex]; ok { - if disk != nil { - t.Errorf("Drive not filtered as expected, drive: %d", diskIndex) - } - if errs[diskIndex] == nil { - t.Errorf("Expected error not received, driveIndex: %d", diskIndex) + if !partNeedsHealing(dataErrsPerDisk[diskIndex]) { + t.Errorf("Disk expected to be healed, driveIndex: %d", diskIndex) } } else { - if disk == nil { - t.Errorf("Drive erroneously filtered, driveIndex: %d", diskIndex) - } - if errs[diskIndex] != nil { - t.Errorf("Unexpected error, %s, driveIndex: %d", errs[diskIndex], diskIndex) + if partNeedsHealing(dataErrsPerDisk[diskIndex]) { + t.Errorf("Disk not expected to be healed, driveIndex: %d", diskIndex) } - } } } @@ -769,7 +751,7 @@ func TestCommonParities(t *testing.T) { } for idx, test := range tests { var metaArr []FileInfo - for i := 0; i < 12; i++ { + for i := range 12 { fi := test.fi1 if i%2 == 0 { fi = test.fi2 diff --git a/cmd/erasure-healing.go b/cmd/erasure-healing.go index ad254e7c70e3e..9ea507f7a3a32 100644 --- a/cmd/erasure-healing.go +++ b/cmd/erasure-healing.go @@ -29,8 +29,10 @@ import ( "time" "github.com/minio/madmin-go/v3" + "github.com/minio/minio/internal/grid" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/sync/errgroup" + "github.com/minio/pkg/v3/sync/errgroup" + "github.com/puzpuzpuz/xsync/v3" ) //go:generate stringer -type=healingMetric -trimprefix=healingMetric $GOFILE @@ -43,8 +45,9 @@ const ( healingMetricCheckAbandonedParts ) -func (er erasureObjects) listAndHeal(bucket, prefix string, scanMode madmin.HealScanMode, healEntry func(string, metaCacheEntry, madmin.HealScanMode) error) error { - ctx, cancel := 
context.WithCancel(context.Background()) +// List a prefix or a single object versions and heal +func (er erasureObjects) listAndHeal(ctx context.Context, bucket, prefix string, recursive bool, scanMode madmin.HealScanMode, healEntry func(string, metaCacheEntry, madmin.HealScanMode) error) error { + ctx, cancel := context.WithCancel(ctx) defer cancel() disks, _ := er.getOnlineDisksWithHealing(false) @@ -71,11 +74,14 @@ func (er erasureObjects) listAndHeal(bucket, prefix string, scanMode madmin.Heal bucket: bucket, path: path, filterPrefix: filterPrefix, - recursive: true, + recursive: recursive, forwardTo: "", minDisks: 1, reportNotFound: false, agreed: func(entry metaCacheEntry) { + if !recursive && prefix != entry.name { + return + } if err := healEntry(bucket, entry, scanMode); err != nil { cancel() } @@ -87,7 +93,9 @@ func (er erasureObjects) listAndHeal(bucket, prefix string, scanMode madmin.Heal // proceed to heal nonetheless. entry, _ = entries.firstFound() } - + if !recursive && prefix != entry.name { + return + } if err := healEntry(bucket, *entry, scanMode); err != nil { cancel() return @@ -105,24 +113,19 @@ func (er erasureObjects) listAndHeal(bucket, prefix string, scanMode madmin.Heal // listAllBuckets lists all buckets from all disks. It also // returns the occurrence of each buckets in all disks -func listAllBuckets(ctx context.Context, storageDisks []StorageAPI, healBuckets map[string]VolInfo, readQuorum int) error { +func listAllBuckets(ctx context.Context, storageDisks []StorageAPI, healBuckets *xsync.MapOf[string, VolInfo], readQuorum int) error { g := errgroup.WithNErrs(len(storageDisks)) - var mu sync.Mutex for index := range storageDisks { - index := index g.Go(func() error { if storageDisks[index] == nil { // we ignore disk not found errors return nil } - if storageDisks[index].Healing() != nil { - // we ignore disks under healing - return nil - } volsInfo, err := storageDisks[index].ListVols(ctx) if err != nil { return err } + for _, volInfo := range volsInfo { // StorageAPI can send volume names which are // incompatible with buckets - these are @@ -130,48 +133,75 @@ func listAllBuckets(ctx context.Context, storageDisks []StorageAPI, healBuckets if isReservedOrInvalidBucket(volInfo.Name, false) { continue } - mu.Lock() - if _, ok := healBuckets[volInfo.Name]; !ok { - healBuckets[volInfo.Name] = volInfo - } - mu.Unlock() + + healBuckets.Compute(volInfo.Name, func(oldValue VolInfo, loaded bool) (newValue VolInfo, del bool) { + if loaded { + newValue = oldValue + newValue.count = oldValue.count + 1 + return newValue, false + } + return VolInfo{ + Name: volInfo.Name, + Created: volInfo.Created, + count: 1, + }, false + }) } + return nil }, index) } - return reduceReadQuorumErrs(ctx, g.Wait(), bucketMetadataOpIgnoredErrs, readQuorum) + + if err := reduceReadQuorumErrs(ctx, g.Wait(), bucketMetadataOpIgnoredErrs, readQuorum); err != nil { + return err + } + + healBuckets.Range(func(volName string, volInfo VolInfo) bool { + if volInfo.count < readQuorum { + healBuckets.Delete(volName) + } + return true + }) + + return nil } +var ( + errLegacyXLMeta = errors.New("legacy XL meta") + errOutdatedXLMeta = errors.New("outdated XL meta") + errPartCorrupt = errors.New("part corrupt") + errPartMissing = errors.New("part missing") +) + // Only heal on disks where we are sure that healing is needed. We can expand // this list as and when we figure out more errors can be added to this list safely. 
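The listAllBuckets rewrite above replaces the mutex-guarded map with an xsync.MapOf, counts on how many drives each bucket was listed, and finally deletes entries seen on fewer than readQuorum drives. The same quorum filter, sketched with a plain map for clarity (names here are illustrative):

package main

import "fmt"

// bucketsWithQuorum merges per-drive bucket listings and keeps only the
// names reported by at least readQuorum drives.
func bucketsWithQuorum(perDrive [][]string, readQuorum int) []string {
	counts := make(map[string]int)
	for _, vols := range perDrive {
		for _, name := range vols {
			counts[name]++
		}
	}
	var out []string
	for name, n := range counts {
		if n >= readQuorum {
			out = append(out, name)
		}
	}
	return out
}

func main() {
	perDrive := [][]string{
		{"photos", "logs"},
		{"photos", "logs"},
		{"photos"}, // "logs" was not listed by this drive
	}
	// With readQuorum=2 both buckets survive; with readQuorum=3 only "photos" does.
	fmt.Println(bucketsWithQuorum(perDrive, 3))
}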
-func shouldHealObjectOnDisk(erErr, dataErr error, meta FileInfo, latestMeta FileInfo) bool { - switch { - case errors.Is(erErr, errFileNotFound) || errors.Is(erErr, errFileVersionNotFound): - return true - case errors.Is(erErr, errFileCorrupt): - return true +func shouldHealObjectOnDisk(erErr error, partsErrs []int, meta FileInfo, latestMeta FileInfo) (bool, bool, error) { + if errors.Is(erErr, errFileNotFound) || errors.Is(erErr, errFileVersionNotFound) || errors.Is(erErr, errFileCorrupt) { + return true, true, erErr } if erErr == nil { if meta.XLV1 { // Legacy means heal always // always check first. - return true + return true, true, errLegacyXLMeta + } + if !latestMeta.Equals(meta) { + return true, true, errOutdatedXLMeta } if !meta.Deleted && !meta.IsRemote() { // If xl.meta was read fine but there may be problem with the part.N files. - if IsErr(dataErr, []error{ - errFileNotFound, - errFileVersionNotFound, - errFileCorrupt, - }...) { - return true + for _, partErr := range partsErrs { + if partErr == checkPartFileNotFound { + return true, false, errPartMissing + } + if partErr == checkPartFileCorrupt { + return true, false, errPartCorrupt + } } } - if !latestMeta.Equals(meta) { - return true - } + return false, false, nil } - return false + return false, false, erErr } const ( @@ -210,6 +240,57 @@ func (fi FileInfo) DataMov() bool { return ok } +func (er *erasureObjects) auditHealObject(ctx context.Context, bucket, object, versionID string, result madmin.HealResultItem, err error) { + if len(logger.AuditTargets()) == 0 { + return + } + + opts := AuditLogOptions{ + Event: "HealObject", + Bucket: bucket, + Object: decodeDirObject(object), + VersionID: versionID, + } + if err != nil { + opts.Error = err.Error() + } + + b, a := result.GetCorruptedCounts() + if b > 0 && b == a { + opts.Error = fmt.Sprintf("unable to heal %d corrupted blocks on drives", b) + } + + b, a = result.GetMissingCounts() + if b > 0 && b == a { + opts.Error = fmt.Sprintf("unable to heal %d missing blocks on drives", b) + } + + opts.Tags = map[string]string{ + "healObject": auditObjectOp{ + Name: opts.Object, + Pool: er.poolIndex + 1, + Set: er.setIndex + 1, + }.String(), + } + + auditLogInternal(ctx, opts) +} + +func objectErrToDriveState(reason error) string { + switch { + case reason == nil: + return madmin.DriveStateOk + case IsErr(reason, errDiskNotFound): + return madmin.DriveStateOffline + case IsErr(reason, errFileNotFound, errFileVersionNotFound, errVolumeNotFound, errPartMissing, errOutdatedXLMeta, errLegacyXLMeta): + return madmin.DriveStateMissing + case IsErr(reason, errFileCorrupt, errPartCorrupt): + return madmin.DriveStateCorrupt + default: + return fmt.Sprintf("%s (%s)", madmin.DriveStateUnknown, reason.Error()) + } +} + // Heals an object by re-writing corrupt/missing erasure blocks. 
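objectErrToDriveState above reduces a heal reason to a madmin drive state by matching sentinel errors, and shouldHealObjectOnDisk now supplies that reason alongside its (heal, metadata-only) flags. A minimal sketch of the sentinel-matching pattern it relies on, with shortened state names that are not the madmin constants:

package main

import (
	"errors"
	"fmt"
)

var (
	errMissing = errors.New("file missing")
	errCorrupt = errors.New("file corrupt")
)

// driveState classifies a (possibly wrapped) error against known sentinels,
// falling back to an "unknown" state that carries the original message.
func driveState(err error) string {
	switch {
	case err == nil:
		return "ok"
	case errors.Is(err, errMissing):
		return "missing"
	case errors.Is(err, errCorrupt):
		return "corrupt"
	default:
		return fmt.Sprintf("unknown (%v)", err)
	}
}

func main() {
	fmt.Println(driveState(nil))                               // ok
	fmt.Println(driveState(fmt.Errorf("part.1: %w", errCorrupt))) // corrupt
}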
func (er *erasureObjects) healObject(ctx context.Context, bucket string, object string, versionID string, opts madmin.HealOpts) (result madmin.HealResultItem, err error) { dryRun := opts.DryRun @@ -218,12 +299,17 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object storageDisks := er.getDisks() storageEndpoints := er.getEndpoints() + defer func() { + er.auditHealObject(ctx, bucket, object, versionID, result, err) + }() + if globalTrace.NumSubscribers(madmin.TraceHealing) > 0 { startTime := time.Now() defer func() { healTrace(healingMetricObject, startTime, bucket, object, &opts, err, &result) }() } + // Initialize heal result object result = madmin.HealResultItem{ Type: madmin.HealItemObject, @@ -257,21 +343,18 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object readQuorum, _, err := objectQuorumFromMeta(ctx, partsMetadata, errs, er.defaultParityCount) if err != nil { - m, err := er.deleteIfDangling(ctx, bucket, object, partsMetadata, errs, nil, ObjectOptions{ + m, derr := er.deleteIfDangling(ctx, bucket, object, partsMetadata, errs, nil, ObjectOptions{ VersionID: versionID, }) errs = make([]error, len(errs)) - for i := range errs { - errs[i] = err - } - if err == nil { - // Dangling object successfully purged, size is '0' - m.Size = 0 - } - // Generate file/version not found with default heal result - err = errFileNotFound - if versionID != "" { - err = errFileVersionNotFound + if derr == nil { + derr = errFileNotFound + if versionID != "" { + derr = errFileVersionNotFound + } + // We did find a new danging object + return er.defaultHealResult(m, storageDisks, storageEndpoints, + errs, bucket, object, versionID), derr } return er.defaultHealResult(m, storageDisks, storageEndpoints, errs, bucket, object, versionID), err @@ -282,26 +365,20 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object // List of disks having latest version of the object xl.meta // (by modtime). - onlineDisks, modTime, etag := listOnlineDisks(storageDisks, partsMetadata, errs, readQuorum) + onlineDisks, quorumModTime, quorumETag := listOnlineDisks(storageDisks, partsMetadata, errs, readQuorum) // Latest FileInfo for reference. If a valid metadata is not // present, it is as good as object not found. - latestMeta, err := pickValidFileInfo(ctx, partsMetadata, modTime, etag, readQuorum) + latestMeta, err := pickValidFileInfo(ctx, partsMetadata, quorumModTime, quorumETag, readQuorum) if err != nil { return result, err } - // List of disks having all parts as per latest metadata. - // NOTE: do not pass in latestDisks to diskWithAllParts since - // the diskWithAllParts needs to reach the drive to ensure - // validity of the metadata content, we should make sure that - // we pass in disks as is for it to be verified. Once verified - // the disksWithAllParts() returns the actual disks that can be - // used here for reconstruction. This is done to ensure that - // we do not skip drives that have inconsistent metadata to be - // skipped from purging when they are stale. 
- availableDisks, dataErrs, _ := disksWithAllParts(ctx, onlineDisks, partsMetadata, - errs, latestMeta, bucket, object, scanMode) + // No modtime quorum + filterDisksByETag := quorumETag != "" + + dataErrsByDisk, dataErrsByPart := checkObjectWithAllParts(ctx, onlineDisks, partsMetadata, + errs, latestMeta, filterDisksByETag, bucket, object, scanMode) var erasure Erasure if !latestMeta.Deleted && !latestMeta.IsRemote() { @@ -322,38 +399,19 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object // data state and a list of outdated disks on which data needs // to be healed. outDatedDisks := make([]StorageAPI, len(storageDisks)) - disksToHealCount := 0 - for i, v := range availableDisks { - driveState := "" - switch { - case v != nil: - driveState = madmin.DriveStateOk - case errs[i] == errDiskNotFound, dataErrs[i] == errDiskNotFound: - driveState = madmin.DriveStateOffline - case errs[i] == errFileNotFound, errs[i] == errFileVersionNotFound, errs[i] == errVolumeNotFound: - fallthrough - case dataErrs[i] == errFileNotFound, dataErrs[i] == errFileVersionNotFound, dataErrs[i] == errVolumeNotFound: - driveState = madmin.DriveStateMissing - default: - // all remaining cases imply corrupt data/metadata - driveState = madmin.DriveStateCorrupt - } - - if shouldHealObjectOnDisk(errs[i], dataErrs[i], partsMetadata[i], latestMeta) { + disksToHealCount, xlMetaToHealCount := 0, 0 + for i := range onlineDisks { + yes, isMeta, reason := shouldHealObjectOnDisk(errs[i], dataErrsByDisk[i], partsMetadata[i], latestMeta) + if yes { outDatedDisks[i] = storageDisks[i] disksToHealCount++ - result.Before.Drives = append(result.Before.Drives, madmin.HealDriveInfo{ - UUID: "", - Endpoint: storageEndpoints[i].String(), - State: driveState, - }) - result.After.Drives = append(result.After.Drives, madmin.HealDriveInfo{ - UUID: "", - Endpoint: storageEndpoints[i].String(), - State: driveState, - }) - continue + if isMeta { + xlMetaToHealCount++ + } } + + driveState := objectErrToDriveState(reason) + result.Before.Drives = append(result.Before.Drives, madmin.HealDriveInfo{ UUID: "", Endpoint: storageEndpoints[i].String(), @@ -366,16 +424,6 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object }) } - if isAllNotFound(errs) { - // File is fully gone, fileInfo is empty. - err := errFileNotFound - if versionID != "" { - err = errFileVersionNotFound - } - return er.defaultHealResult(FileInfo{}, storageDisks, storageEndpoints, errs, - bucket, object, versionID), err - } - if disksToHealCount == 0 { // Nothing to heal! return result, nil @@ -387,24 +435,41 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object return result, nil } - if !latestMeta.XLV1 && !latestMeta.Deleted && disksToHealCount > latestMeta.Erasure.ParityBlocks { + cannotHeal := !latestMeta.XLV1 && !latestMeta.Deleted && xlMetaToHealCount > latestMeta.Erasure.ParityBlocks + if cannotHeal && quorumETag != "" { + // This is an object that is supposed to be removed by the dangling code + // but we noticed that ETag is the same for all objects, let's give it a shot + cannotHeal = false + } + + if !latestMeta.Deleted && !latestMeta.IsRemote() { + // check if there is a part that lost its quorum + for _, partErrs := range dataErrsByPart { + if countPartNotSuccess(partErrs) > latestMeta.Erasure.ParityBlocks { + cannotHeal = true + break + } + } + } + + if cannotHeal { // Allow for dangling deletes, on versions that have DataDir missing etc. 
// this would end up restoring the correct readable versions. - m, err := er.deleteIfDangling(ctx, bucket, object, partsMetadata, errs, dataErrs, ObjectOptions{ + m, err := er.deleteIfDangling(ctx, bucket, object, partsMetadata, errs, dataErrsByPart, ObjectOptions{ VersionID: versionID, }) errs = make([]error, len(errs)) - for i := range errs { - errs[i] = err - } if err == nil { - // Dangling object successfully purged, size is '0' - m.Size = 0 + err = errFileNotFound + if versionID != "" { + err = errFileVersionNotFound + } + // We did find a new danging object + return er.defaultHealResult(m, storageDisks, storageEndpoints, + errs, bucket, object, versionID), err } - // Generate file/version not found with default heal result - err = errFileNotFound - if versionID != "" { - err = errFileVersionNotFound + for i := range errs { + errs[i] = err } return er.defaultHealResult(m, storageDisks, storageEndpoints, errs, bucket, object, versionID), err @@ -425,21 +490,20 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object tmpID := mustGetUUID() migrateDataDir := mustGetUUID() - // Reorder so that we have data disks first and parity disks next. - if !latestMeta.Deleted && len(latestMeta.Erasure.Distribution) != len(availableDisks) { - err := fmt.Errorf("unexpected file distribution (%v) from available disks (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)", - latestMeta.Erasure.Distribution, availableDisks, bucket, object, versionID) - logger.LogOnceIf(ctx, err, "heal-object-available-disks") + if !latestMeta.Deleted && len(latestMeta.Erasure.Distribution) != len(onlineDisks) { + err := fmt.Errorf("unexpected file distribution (%v) from online disks (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)", + latestMeta.Erasure.Distribution, onlineDisks, bucket, object, versionID) + healingLogOnceIf(ctx, err, "heal-object-online-disks") return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs, bucket, object, versionID), err } - latestDisks := shuffleDisks(availableDisks, latestMeta.Erasure.Distribution) + latestDisks := shuffleDisks(onlineDisks, latestMeta.Erasure.Distribution) if !latestMeta.Deleted && len(latestMeta.Erasure.Distribution) != len(outDatedDisks) { err := fmt.Errorf("unexpected file distribution (%v) from outdated disks (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)", latestMeta.Erasure.Distribution, outDatedDisks, bucket, object, versionID) - logger.LogOnceIf(ctx, err, "heal-object-outdated-disks") + healingLogOnceIf(ctx, err, "heal-object-outdated-disks") return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs, bucket, object, versionID), err } @@ -449,7 +513,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object if !latestMeta.Deleted && len(latestMeta.Erasure.Distribution) != len(partsMetadata) { err := fmt.Errorf("unexpected file distribution (%v) from metadata entries (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)", latestMeta.Erasure.Distribution, len(partsMetadata), bucket, object, versionID) - logger.LogOnceIf(ctx, err, "heal-object-metadata-entries") + healingLogOnceIf(ctx, err, "heal-object-metadata-entries") return er.defaultHealResult(latestMeta, storageDisks, storageEndpoints, errs, bucket, object, versionID), err } @@ -505,12 +569,15 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object if disk == 
OfflineDisk { continue } + thisPartErrs := shuffleCheckParts(dataErrsByPart[partIndex], latestMeta.Erasure.Distribution) + if thisPartErrs[i] != checkPartSuccess { + continue + } checksumInfo := copyPartsMetadata[i].Erasure.GetChecksumInfo(partNumber) partPath := pathJoin(object, srcDataDir, fmt.Sprintf("part.%d", partNumber)) readers[i] = newBitrotReader(disk, copyPartsMetadata[i].Data, bucket, partPath, tillOffset, checksumAlgo, checksumInfo.Hash, erasure.ShardSize()) prefer[i] = disk.Hostname() == "" - } writers := make([]io.Writer, len(outDatedDisks)) for i, disk := range outDatedDisks { @@ -519,7 +586,10 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object } partPath := pathJoin(tmpID, dstDataDir, fmt.Sprintf("part.%d", partNumber)) if len(inlineBuffers) > 0 { - inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, erasure.ShardFileSize(latestMeta.Size)+32)) + buf := grid.GetByteBufferCap(int(erasure.ShardFileSize(latestMeta.Size)) + 64) + inlineBuffers[i] = bytes.NewBuffer(buf[:0]) + defer grid.PutByteBuffer(buf) + writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize()) } else { writers[i] = newBitrotWriter(disk, bucket, minioMetaTmpBucket, partPath, @@ -532,7 +602,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object // later to the final location. err = erasure.Heal(ctx, writers, readers, partSize, prefer) closeBitrotReaders(readers) - closeBitrotWriters(writers) + closeErrs := closeBitrotWriters(writers) if err != nil { return result, err } @@ -552,6 +622,13 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object continue } + // A non-nil stale disk which got error on Close() + if closeErrs[i] != nil { + outDatedDisks[i] = nil + disksToHealCount-- + continue + } + partsMetadata[i].DataDir = dstDataDir partsMetadata[i].AddObjectPart(partNumber, "", partSize, partActualSize, partModTime, partIdx, partChecksums) if len(inlineBuffers) > 0 && inlineBuffers[i] != nil { @@ -566,9 +643,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object if disksToHealCount == 0 { return result, fmt.Errorf("all drives had write errors, unable to heal %s/%s", bucket, object) } - } - } defer er.deleteAll(context.Background(), minioMetaTmpBucket, tmpID) @@ -599,7 +674,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object } for i, v := range result.Before.Drives { - if v.Endpoint == disk.String() { + if v.Endpoint == disk.Endpoint().String() { result.After.Drives[i].State = madmin.DriveStateOk } } @@ -615,6 +690,7 @@ func (er *erasureObjects) checkAbandonedParts(ctx context.Context, bucket string if !opts.Remove || opts.DryRun { return nil } + if globalTrace.NumSubscribers(madmin.TraceHealing) > 0 { startTime := time.Now() defer func() { @@ -768,13 +844,7 @@ func (er *erasureObjects) defaultHealResult(lfi FileInfo, storageDisks []Storage }) continue } - driveState := madmin.DriveStateCorrupt - switch errs[index] { - case errFileNotFound, errVolumeNotFound: - driveState = madmin.DriveStateMissing - case nil: - driveState = madmin.DriveStateOk - } + driveState := objectErrToDriveState(errs[index]) result.Before.Drives = append(result.Before.Drives, madmin.HealDriveInfo{ UUID: "", Endpoint: storageEndpoints[index].String(), @@ -867,12 +937,12 @@ func isObjectDirDangling(errs []error) (ok bool) { var foundNotEmpty int var otherFound int for _, readErr := range errs { - switch { - case readErr == nil: + switch readErr 
{ + case nil: found++ - case readErr == errFileNotFound || readErr == errVolumeNotFound: + case errFileNotFound, errVolumeNotFound: notFound++ - case readErr == errVolumeNotEmpty: + case errVolumeNotEmpty: foundNotEmpty++ default: otherFound++ @@ -882,36 +952,53 @@ func isObjectDirDangling(errs []error) (ok bool) { return found < notFound && found > 0 } +func danglingMetaErrsCount(cerrs []error) (notFoundCount int, nonActionableCount int) { + for _, readErr := range cerrs { + if readErr == nil { + continue + } + switch { + case errors.Is(readErr, errFileNotFound) || errors.Is(readErr, errFileVersionNotFound): + notFoundCount++ + default: + // All other errors are non-actionable + nonActionableCount++ + } + } + return notFoundCount, nonActionableCount +} + +func danglingPartErrsCount(results []int) (notFoundCount int, nonActionableCount int) { + for _, partResult := range results { + switch partResult { + case checkPartSuccess: + continue + case checkPartFileNotFound: + notFoundCount++ + default: + // All other errors are non-actionable + nonActionableCount++ + } + } + return notFoundCount, nonActionableCount +} + // Object is considered dangling/corrupted if and only // if total disks - a combination of corrupted and missing // files is lesser than number of data blocks. -func isObjectDangling(metaArr []FileInfo, errs []error, dataErrs []error) (validMeta FileInfo, ok bool) { +func isObjectDangling(metaArr []FileInfo, errs []error, dataErrsByPart map[int][]int) (validMeta FileInfo, ok bool) { // We can consider an object data not reliable // when xl.meta is not found in read quorum disks. // or when xl.meta is not readable in read quorum disks. - danglingErrsCount := func(cerrs []error) (int, int) { - var ( - notFoundCount int - nonActionableCount int - ) - for _, readErr := range cerrs { - if readErr == nil { - continue - } - switch { - case errors.Is(readErr, errFileNotFound) || errors.Is(readErr, errFileVersionNotFound): - notFoundCount++ - default: - // All other errors are non-actionable - nonActionableCount++ - } + notFoundMetaErrs, nonActionableMetaErrs := danglingMetaErrsCount(errs) + + notFoundPartsErrs, nonActionablePartsErrs := 0, 0 + for _, dataErrs := range dataErrsByPart { + if nf, na := danglingPartErrsCount(dataErrs); nf > notFoundPartsErrs { + notFoundPartsErrs, nonActionablePartsErrs = nf, na } - return notFoundCount, nonActionableCount } - notFoundMetaErrs, nonActionableMetaErrs := danglingErrsCount(errs) - notFoundPartsErrs, nonActionablePartsErrs := danglingErrsCount(dataErrs) - for _, m := range metaArr { if m.IsValid() { validMeta = m @@ -922,7 +1009,7 @@ func isObjectDangling(metaArr []FileInfo, errs []error, dataErrs []error) (valid if !validMeta.IsValid() { // validMeta is invalid because all xl.meta is missing apparently // we should figure out if dataDirs are also missing > dataBlocks. - dataBlocks := (len(dataErrs) + 1) / 2 + dataBlocks := (len(metaArr) + 1) / 2 if notFoundPartsErrs > dataBlocks { // Not using parity to ensure that we do not delete // any valid content, if any is recoverable. But if @@ -957,12 +1044,12 @@ func isObjectDangling(metaArr []FileInfo, errs []error, dataErrs []error) (valid // However this requires a bit of a rewrite, leave this up for // future work. 
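The dangling-object check above now tallies missing xl.meta entries across drives and missing part files per part, and flags the object only when a not-found count exceeds the erasure parity, so nothing still recoverable is ever deleted (when no valid metadata exists at all, the separate branch above compares against half the drive count instead). The threshold arithmetic, as a small worked example:

package main

import "fmt"

// dangling mirrors the core threshold test: an object is only treated as
// dangling when missing copies exceed what parity could ever reconstruct.
func dangling(notFoundMeta, notFoundParts, parityBlocks int) bool {
	if notFoundMeta > 0 && notFoundMeta > parityBlocks {
		return true // xl.meta missing beyond parity
	}
	if notFoundParts > 0 && notFoundParts > parityBlocks {
		return true // a part missing beyond parity
	}
	return false
}

func main() {
	// 12 drives with EC:4 parity: losing 4 copies is still recoverable,
	// losing 5 is not.
	fmt.Println(dangling(4, 0, 4)) // false
	fmt.Println(dangling(5, 0, 4)) // true
	fmt.Println(dangling(0, 5, 4)) // true
}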
if notFoundMetaErrs > 0 && notFoundMetaErrs > validMeta.Erasure.ParityBlocks { - // All xl.meta is beyond data blocks missing, this is dangling + // All xl.meta is beyond parity blocks missing, this is dangling return validMeta, true } if !validMeta.IsRemote() && notFoundPartsErrs > 0 && notFoundPartsErrs > validMeta.Erasure.ParityBlocks { - // All data-dir is beyond data blocks missing, this is dangling + // All data-dir is beyond parity blocks missing, this is dangling return validMeta, true } @@ -1039,12 +1126,12 @@ func healTrace(funcName healingMetric, startTime time.Time, bucket, object strin if result != nil { tr.Custom["version-id"] = result.VersionID tr.Custom["disks"] = strconv.Itoa(result.DiskCount) + tr.Bytes = result.ObjectSize } } if err != nil { tr.Error = err.Error() - } else { - tr.HealResult = result } + tr.HealResult = result globalTrace.Publish(tr) } diff --git a/cmd/erasure-healing_test.go b/cmd/erasure-healing_test.go index 9759e01c69d01..c19fddd024e19 100644 --- a/cmd/erasure-healing_test.go +++ b/cmd/erasure-healing_test.go @@ -49,7 +49,7 @@ func TestIsObjectDangling(t *testing.T) { name string metaArr []FileInfo errs []error - dataErrs []error + dataErrs map[int][]int expectedMeta FileInfo expectedDangling bool }{ @@ -165,11 +165,8 @@ func TestIsObjectDangling(t *testing.T) { nil, nil, }, - dataErrs: []error{ - errFileCorrupt, - errFileNotFound, - nil, - errFileCorrupt, + dataErrs: map[int][]int{ + 0: {checkPartFileCorrupt, checkPartFileNotFound, checkPartSuccess, checkPartFileCorrupt}, }, expectedMeta: fi, expectedDangling: false, @@ -188,11 +185,8 @@ func TestIsObjectDangling(t *testing.T) { errFileNotFound, nil, }, - dataErrs: []error{ - errFileNotFound, - errFileCorrupt, - nil, - nil, + dataErrs: map[int][]int{ + 0: {checkPartFileNotFound, checkPartFileCorrupt, checkPartSuccess, checkPartSuccess}, }, expectedMeta: fi, expectedDangling: false, @@ -247,19 +241,61 @@ func TestIsObjectDangling(t *testing.T) { nil, nil, }, - dataErrs: []error{ + dataErrs: map[int][]int{ + 0: {checkPartFileNotFound, checkPartFileNotFound, checkPartSuccess, checkPartFileNotFound}, + }, + expectedMeta: fi, + expectedDangling: true, + }, + { + name: "FileInfoDecided-case4-(missing data-dir for part 2)", + metaArr: []FileInfo{ + {}, + {}, + {}, + fi, + }, + errs: []error{ errFileNotFound, errFileNotFound, nil, - errFileNotFound, + nil, + }, + dataErrs: map[int][]int{ + 0: {checkPartSuccess, checkPartSuccess, checkPartSuccess, checkPartSuccess}, + 1: {checkPartSuccess, checkPartFileNotFound, checkPartFileNotFound, checkPartFileNotFound}, }, expectedMeta: fi, expectedDangling: true, }, + + { + name: "FileInfoDecided-case4-(enough data-dir existing for each part)", + metaArr: []FileInfo{ + {}, + {}, + {}, + fi, + }, + errs: []error{ + errFileNotFound, + errFileNotFound, + nil, + nil, + }, + dataErrs: map[int][]int{ + 0: {checkPartFileNotFound, checkPartSuccess, checkPartSuccess, checkPartSuccess}, + 1: {checkPartSuccess, checkPartFileNotFound, checkPartSuccess, checkPartSuccess}, + 2: {checkPartSuccess, checkPartSuccess, checkPartFileNotFound, checkPartSuccess}, + 3: {checkPartSuccess, checkPartSuccess, checkPartSuccess, checkPartFileNotFound}, + }, + expectedMeta: fi, + expectedDangling: false, + }, + // Add new cases as seen } for _, testCase := range testCases { - testCase := testCase t.Run(testCase.name, func(t *testing.T) { gotMeta, dangling := isObjectDangling(testCase.metaArr, testCase.errs, testCase.dataErrs) if !gotMeta.Equals(testCase.expectedMeta) { @@ -274,14 +310,14 @@ func 
TestIsObjectDangling(t *testing.T) { // Tests both object and bucket healing. func TestHealing(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() obj, fsDirs, err := prepareErasure16(ctx) if err != nil { t.Fatal(err) } - defer obj.Shutdown(context.Background()) + defer obj.Shutdown(t.Context()) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. @@ -316,7 +352,7 @@ func TestHealing(t *testing.T) { } disk := er.getDisks()[0] - fileInfoPreHeal, err := disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) + fileInfoPreHeal, err := disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) if err != nil { t.Fatal(err) } @@ -339,7 +375,7 @@ func TestHealing(t *testing.T) { t.Fatal(err) } - fileInfoPostHeal, err := disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) + fileInfoPostHeal, err := disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) if err != nil { t.Fatal(err) } @@ -358,7 +394,7 @@ func TestHealing(t *testing.T) { // gone down when an object was replaced by a new object. fileInfoOutDated := fileInfoPreHeal fileInfoOutDated.ModTime = time.Now() - err = disk.WriteMetadata(context.Background(), "", bucket, object, fileInfoOutDated) + err = disk.WriteMetadata(t.Context(), "", bucket, object, fileInfoOutDated) if err != nil { t.Fatal(err) } @@ -368,7 +404,7 @@ func TestHealing(t *testing.T) { t.Fatal(err) } - fileInfoPostHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) + fileInfoPostHeal, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) if err != nil { t.Fatal(err) } @@ -420,7 +456,7 @@ func TestHealing(t *testing.T) { t.Fatal(err) } // Stat the bucket to make sure that it was created. - _, err = er.getDisks()[0].StatVol(context.Background(), bucket) + _, err = er.getDisks()[0].StatVol(t.Context(), bucket) if err != nil { t.Fatal(err) } @@ -428,14 +464,14 @@ func TestHealing(t *testing.T) { // Tests both object and bucket healing. func TestHealingVersioned(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() obj, fsDirs, err := prepareErasure16(ctx) if err != nil { t.Fatal(err) } - defer obj.Shutdown(context.Background()) + defer obj.Shutdown(t.Context()) // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. 
@@ -476,11 +512,11 @@ func TestHealingVersioned(t *testing.T) { } disk := er.getDisks()[0] - fileInfoPreHeal1, err := disk.ReadVersion(context.Background(), "", bucket, object, oi1.VersionID, ReadOptions{ReadData: false, Healing: true}) + fileInfoPreHeal1, err := disk.ReadVersion(t.Context(), "", bucket, object, oi1.VersionID, ReadOptions{ReadData: false, Healing: true}) if err != nil { t.Fatal(err) } - fileInfoPreHeal2, err := disk.ReadVersion(context.Background(), "", bucket, object, oi2.VersionID, ReadOptions{ReadData: false, Healing: true}) + fileInfoPreHeal2, err := disk.ReadVersion(t.Context(), "", bucket, object, oi2.VersionID, ReadOptions{ReadData: false, Healing: true}) if err != nil { t.Fatal(err) } @@ -503,11 +539,11 @@ func TestHealingVersioned(t *testing.T) { t.Fatal(err) } - fileInfoPostHeal1, err := disk.ReadVersion(context.Background(), "", bucket, object, oi1.VersionID, ReadOptions{ReadData: false, Healing: true}) + fileInfoPostHeal1, err := disk.ReadVersion(t.Context(), "", bucket, object, oi1.VersionID, ReadOptions{ReadData: false, Healing: true}) if err != nil { t.Fatal(err) } - fileInfoPostHeal2, err := disk.ReadVersion(context.Background(), "", bucket, object, oi2.VersionID, ReadOptions{ReadData: false, Healing: true}) + fileInfoPostHeal2, err := disk.ReadVersion(t.Context(), "", bucket, object, oi2.VersionID, ReadOptions{ReadData: false, Healing: true}) if err != nil { t.Fatal(err) } @@ -529,7 +565,7 @@ func TestHealingVersioned(t *testing.T) { // gone down when an object was replaced by a new object. fileInfoOutDated := fileInfoPreHeal1 fileInfoOutDated.ModTime = time.Now() - err = disk.WriteMetadata(context.Background(), "", bucket, object, fileInfoOutDated) + err = disk.WriteMetadata(t.Context(), "", bucket, object, fileInfoOutDated) if err != nil { t.Fatal(err) } @@ -539,7 +575,7 @@ func TestHealingVersioned(t *testing.T) { t.Fatal(err) } - fileInfoPostHeal1, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) + fileInfoPostHeal1, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) if err != nil { t.Fatal(err) } @@ -549,7 +585,7 @@ func TestHealingVersioned(t *testing.T) { t.Fatal("HealObject failed") } - fileInfoPostHeal2, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) + fileInfoPostHeal2, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) if err != nil { t.Fatal(err) } @@ -601,14 +637,14 @@ func TestHealingVersioned(t *testing.T) { t.Fatal(err) } // Stat the bucket to make sure that it was created. 
- _, err = er.getDisks()[0].StatVol(context.Background(), bucket) + _, err = er.getDisks()[0].StatVol(t.Context(), bucket) if err != nil { t.Fatal(err) } } func TestHealingDanglingObject(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() resetGlobalHealState() @@ -634,7 +670,7 @@ func TestHealingDanglingObject(t *testing.T) { defer removeRoots(fsDirs) // Everything is fine, should return nil - objLayer, disks, err := initObjectLayer(ctx, mustGetPoolEndpoints(0, fsDirs...)) + objLayer, _, err := initObjectLayer(ctx, mustGetPoolEndpoints(0, fsDirs...)) if err != nil { t.Fatal(err) } @@ -650,7 +686,7 @@ func TestHealingDanglingObject(t *testing.T) { t.Fatalf("Failed to make a bucket - %v", err) } - disks = objLayer.(*erasureServerPools).serverPools[0].erasureDisks[0] + disks := objLayer.(*erasureServerPools).serverPools[0].erasureDisks[0] orgDisks := append([]StorageAPI{}, disks...) // Enable versioning. @@ -687,7 +723,7 @@ func TestHealingDanglingObject(t *testing.T) { // Restore... setDisks(orgDisks[:4]...) - fileInfoPreHeal, err := disks[0].ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) + fileInfoPreHeal, err := disks[0].ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) if err != nil { t.Fatal(err) } @@ -696,7 +732,7 @@ func TestHealingDanglingObject(t *testing.T) { t.Fatalf("Expected versions 1, got %d", fileInfoPreHeal.NumVersions) } - if err = objLayer.HealObjects(ctx, bucket, "", madmin.HealOpts{Remove: true}, + if err = objLayer.HealObjects(ctx, bucket, "", madmin.HealOpts{Recursive: true, Remove: true}, func(bucket, object, vid string, scanMode madmin.HealScanMode) error { _, err := objLayer.HealObject(ctx, bucket, object, vid, madmin.HealOpts{ScanMode: scanMode, Remove: true}) return err @@ -704,7 +740,7 @@ func TestHealingDanglingObject(t *testing.T) { t.Fatal(err) } - fileInfoPostHeal, err := disks[0].ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) + fileInfoPostHeal, err := disks[0].ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) if err != nil { t.Fatal(err) } @@ -734,7 +770,7 @@ func TestHealingDanglingObject(t *testing.T) { setDisks(orgDisks[:4]...) 
disk := getDisk(0) - fileInfoPreHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) + fileInfoPreHeal, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) if err != nil { t.Fatal(err) } @@ -743,7 +779,7 @@ func TestHealingDanglingObject(t *testing.T) { t.Fatalf("Expected versions 1, got %d", fileInfoPreHeal.NumVersions) } - if err = objLayer.HealObjects(ctx, bucket, "", madmin.HealOpts{Remove: true}, + if err = objLayer.HealObjects(ctx, bucket, "", madmin.HealOpts{Recursive: true, Remove: true}, func(bucket, object, vid string, scanMode madmin.HealScanMode) error { _, err := objLayer.HealObject(ctx, bucket, object, vid, madmin.HealOpts{ScanMode: scanMode, Remove: true}) return err @@ -752,7 +788,7 @@ func TestHealingDanglingObject(t *testing.T) { } disk = getDisk(0) - fileInfoPostHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) + fileInfoPostHeal, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) if err != nil { t.Fatal(err) } @@ -783,7 +819,7 @@ func TestHealingDanglingObject(t *testing.T) { setDisks(orgDisks[:4]...) disk = getDisk(0) - fileInfoPreHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) + fileInfoPreHeal, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) if err != nil { t.Fatal(err) } @@ -792,7 +828,7 @@ func TestHealingDanglingObject(t *testing.T) { t.Fatalf("Expected versions 3, got %d", fileInfoPreHeal.NumVersions) } - if err = objLayer.HealObjects(ctx, bucket, "", madmin.HealOpts{Remove: true}, + if err = objLayer.HealObjects(ctx, bucket, "", madmin.HealOpts{Recursive: true, Remove: true}, func(bucket, object, vid string, scanMode madmin.HealScanMode) error { _, err := objLayer.HealObject(ctx, bucket, object, vid, madmin.HealOpts{ScanMode: scanMode, Remove: true}) return err @@ -801,7 +837,7 @@ func TestHealingDanglingObject(t *testing.T) { } disk = getDisk(0) - fileInfoPostHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) + fileInfoPostHeal, err = disk.ReadVersion(t.Context(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true}) if err != nil { t.Fatal(err) } @@ -812,7 +848,7 @@ func TestHealingDanglingObject(t *testing.T) { } func TestHealCorrectQuorum(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() resetGlobalHealState() @@ -896,7 +932,7 @@ func TestHealCorrectQuorum(t *testing.T) { } for i := 0; i < nfi.Erasure.ParityBlocks; i++ { - erasureDisks[i].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{ + erasureDisks[i].Delete(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{ Recursive: false, Immediate: false, }) @@ -923,7 +959,7 @@ func TestHealCorrectQuorum(t *testing.T) { } for i := 0; i < nfi.Erasure.ParityBlocks; i++ { - erasureDisks[i].Delete(context.Background(), minioMetaBucket, pathJoin(cfgFile, xlStorageFormatFile), DeleteOptions{ + erasureDisks[i].Delete(t.Context(), minioMetaBucket, pathJoin(cfgFile, xlStorageFormatFile), DeleteOptions{ Recursive: false, Immediate: false, }) @@ -943,7 +979,7 @@ func TestHealCorrectQuorum(t *testing.T) { } func 
TestHealObjectCorruptedPools(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() resetGlobalHealState() @@ -1007,7 +1043,7 @@ func TestHealObjectCorruptedPools(t *testing.T) { er := set.sets[0] erasureDisks := er.getDisks() firstDisk := erasureDisks[0] - err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{ + err = firstDisk.Delete(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{ Recursive: false, Immediate: false, }) @@ -1026,11 +1062,11 @@ func TestHealObjectCorruptedPools(t *testing.T) { t.Fatalf("Failed to getLatestFileInfo - %v", err) } - if _, err = firstDisk.StatInfoFile(context.Background(), bucket, object+"/"+xlStorageFormatFile, false); err != nil { + if _, err = firstDisk.StatInfoFile(t.Context(), bucket, object+"/"+xlStorageFormatFile, false); err != nil { t.Errorf("Expected xl.meta file to be present but stat failed - %v", err) } - err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{ + err = firstDisk.Delete(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{ Recursive: false, Immediate: false, }) @@ -1038,7 +1074,7 @@ func TestHealObjectCorruptedPools(t *testing.T) { t.Errorf("Failure during deleting part.1 - %v", err) } - err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte{}) + err = firstDisk.WriteAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte{}) if err != nil { t.Errorf("Failure during creating part.1 - %v", err) } @@ -1058,7 +1094,7 @@ func TestHealObjectCorruptedPools(t *testing.T) { t.Fatalf("FileInfo not equal after healing: %v != %v", fi, nfi) } - err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{ + err = firstDisk.Delete(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{ Recursive: false, Immediate: false, }) @@ -1067,7 +1103,7 @@ func TestHealObjectCorruptedPools(t *testing.T) { } bdata := bytes.Repeat([]byte("b"), int(nfi.Size)) - err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), bdata) + err = firstDisk.WriteAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), bdata) if err != nil { t.Errorf("Failure during creating part.1 - %v", err) } @@ -1090,7 +1126,7 @@ func TestHealObjectCorruptedPools(t *testing.T) { // Test 4: checks if HealObject returns an error when xl.meta is not found // in more than read quorum number of disks, to create a corrupted situation. 
for i := 0; i <= nfi.Erasure.DataBlocks; i++ { - erasureDisks[i].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{ + erasureDisks[i].Delete(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{ Recursive: false, Immediate: false, }) @@ -1111,7 +1147,7 @@ func TestHealObjectCorruptedPools(t *testing.T) { } for i := 0; i < (nfi.Erasure.DataBlocks + nfi.Erasure.ParityBlocks); i++ { - stats, _ := erasureDisks[i].StatInfoFile(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), false) + stats, _ := erasureDisks[i].StatInfoFile(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), false) if len(stats) != 0 { t.Errorf("Expected xl.meta file to be not present, but succeeded") } @@ -1119,7 +1155,7 @@ func TestHealObjectCorruptedPools(t *testing.T) { } func TestHealObjectCorruptedXLMeta(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() resetGlobalHealState() @@ -1185,7 +1221,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) { t.Fatalf("Failed to getLatestFileInfo - %v", err) } - err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{ + err = firstDisk.Delete(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{ Recursive: false, Immediate: false, }) @@ -1198,7 +1234,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) { t.Fatalf("Failed to heal object - %v", err) } - if _, err = firstDisk.StatInfoFile(context.Background(), bucket, object+"/"+xlStorageFormatFile, false); err != nil { + if _, err = firstDisk.StatInfoFile(t.Context(), bucket, object+"/"+xlStorageFormatFile, false); err != nil { t.Errorf("Expected xl.meta file to be present but stat failed - %v", err) } @@ -1213,7 +1249,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) { } // Test 2: Test with a corrupted xl.meta - err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), []byte("abcd")) + err = firstDisk.WriteAll(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), []byte("abcd")) if err != nil { t.Errorf("Failure during creating part.1 - %v", err) } @@ -1236,7 +1272,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) { // Test 3: checks if HealObject returns an error when xl.meta is not found // in more than read quorum number of disks, to create a corrupted situation. 
for i := 0; i <= nfi2.Erasure.DataBlocks; i++ { - erasureDisks[i].Delete(context.Background(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{ + erasureDisks[i].Delete(t.Context(), bucket, pathJoin(object, xlStorageFormatFile), DeleteOptions{ Recursive: false, Immediate: false, }) @@ -1258,7 +1294,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) { } func TestHealObjectCorruptedParts(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() resetGlobalHealState() @@ -1325,18 +1361,18 @@ func TestHealObjectCorruptedParts(t *testing.T) { t.Fatalf("Failed to getLatestFileInfo - %v", err) } - part1Disk1Origin, err := firstDisk.ReadAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1")) + part1Disk1Origin, err := firstDisk.ReadAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1")) if err != nil { t.Fatalf("Failed to read a file - %v", err) } - part1Disk2Origin, err := secondDisk.ReadAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1")) + part1Disk2Origin, err := secondDisk.ReadAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1")) if err != nil { t.Fatalf("Failed to read a file - %v", err) } // Test 1, remove part.1 - err = firstDisk.Delete(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{ + err = firstDisk.Delete(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), DeleteOptions{ Recursive: false, Immediate: false, }) @@ -1349,7 +1385,7 @@ func TestHealObjectCorruptedParts(t *testing.T) { t.Fatalf("Failed to heal object - %v", err) } - part1Replaced, err := firstDisk.ReadAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1")) + part1Replaced, err := firstDisk.ReadAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1")) if err != nil { t.Fatalf("Failed to read a file - %v", err) } @@ -1359,7 +1395,7 @@ func TestHealObjectCorruptedParts(t *testing.T) { } // Test 2, Corrupt part.1 - err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte("foobytes")) + err = firstDisk.WriteAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte("foobytes")) if err != nil { t.Fatalf("Failed to write a file - %v", err) } @@ -1369,7 +1405,7 @@ func TestHealObjectCorruptedParts(t *testing.T) { t.Fatalf("Failed to heal object - %v", err) } - part1Replaced, err = firstDisk.ReadAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1")) + part1Replaced, err = firstDisk.ReadAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1")) if err != nil { t.Fatalf("Failed to read a file - %v", err) } @@ -1379,12 +1415,12 @@ func TestHealObjectCorruptedParts(t *testing.T) { } // Test 3, Corrupt one part and remove data in another disk - err = firstDisk.WriteAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte("foobytes")) + err = firstDisk.WriteAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1"), []byte("foobytes")) if err != nil { t.Fatalf("Failed to write a file - %v", err) } - err = secondDisk.Delete(context.Background(), bucket, object, DeleteOptions{ + err = secondDisk.Delete(t.Context(), bucket, object, DeleteOptions{ Recursive: true, Immediate: false, }) @@ -1397,7 +1433,7 @@ func TestHealObjectCorruptedParts(t *testing.T) { t.Fatalf("Failed to heal object - %v", err) } - partReconstructed, err := firstDisk.ReadAll(context.Background(), bucket, pathJoin(object, fi.DataDir, 
"part.1")) + partReconstructed, err := firstDisk.ReadAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1")) if err != nil { t.Fatalf("Failed to read a file - %v", err) } @@ -1406,7 +1442,7 @@ func TestHealObjectCorruptedParts(t *testing.T) { t.Fatalf("part.1 not healed correctly") } - partReconstructed, err = secondDisk.ReadAll(context.Background(), bucket, pathJoin(object, fi.DataDir, "part.1")) + partReconstructed, err = secondDisk.ReadAll(t.Context(), bucket, pathJoin(object, fi.DataDir, "part.1")) if err != nil { t.Fatalf("Failed to read a file - %v", err) } @@ -1418,7 +1454,7 @@ func TestHealObjectCorruptedParts(t *testing.T) { // Tests healing of object. func TestHealObjectErasure(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() nDisks := 16 @@ -1475,7 +1511,7 @@ func TestHealObjectErasure(t *testing.T) { } // Delete the whole object folder - err = firstDisk.Delete(context.Background(), bucket, object, DeleteOptions{ + err = firstDisk.Delete(t.Context(), bucket, object, DeleteOptions{ Recursive: true, Immediate: false, }) @@ -1488,7 +1524,7 @@ func TestHealObjectErasure(t *testing.T) { t.Fatalf("Failed to heal object - %v", err) } - if _, err = firstDisk.StatInfoFile(context.Background(), bucket, object+"/"+xlStorageFormatFile, false); err != nil { + if _, err = firstDisk.StatInfoFile(t.Context(), bucket, object+"/"+xlStorageFormatFile, false); err != nil { t.Errorf("Expected xl.meta file to be present but stat failed - %v", err) } @@ -1497,7 +1533,13 @@ func TestHealObjectErasure(t *testing.T) { er.getDisks = func() []StorageAPI { // Nil more than half the disks, to remove write quorum. for i := 0; i <= len(erasureDisks)/2; i++ { - erasureDisks[i] = nil + err := erasureDisks[i].Delete(t.Context(), bucket, object, DeleteOptions{ + Recursive: true, + Immediate: false, + }) + if err != nil { + t.Fatalf("Failed to delete a file - %v", err) + } } return erasureDisks } @@ -1517,7 +1559,7 @@ func TestHealObjectErasure(t *testing.T) { // Tests healing of empty directories func TestHealEmptyDirectoryErasure(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() nDisks := 16 @@ -1553,7 +1595,7 @@ func TestHealEmptyDirectoryErasure(t *testing.T) { z := obj.(*erasureServerPools) er := z.serverPools[0].sets[0] firstDisk := er.getDisks()[0] - err = firstDisk.DeleteVol(context.Background(), pathJoin(bucket, encodeDirObject(object)), true) + err = firstDisk.DeleteVol(t.Context(), pathJoin(bucket, encodeDirObject(object)), true) if err != nil { t.Fatalf("Failed to delete a file - %v", err) } @@ -1565,7 +1607,7 @@ func TestHealEmptyDirectoryErasure(t *testing.T) { } // Check if the empty directory is restored in the first disk - _, err = firstDisk.StatVol(context.Background(), pathJoin(bucket, encodeDirObject(object))) + _, err = firstDisk.StatVol(t.Context(), pathJoin(bucket, encodeDirObject(object))) if err != nil { t.Fatalf("Expected object to be present but stat failed - %v", err) } @@ -1613,7 +1655,7 @@ func TestHealLastDataShard(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() nDisks := 16 diff --git a/cmd/erasure-metadata-utils.go b/cmd/erasure-metadata-utils.go index 82f91d5e8da40..1409d99ecaf3f 100644 --- a/cmd/erasure-metadata-utils.go +++ 
b/cmd/erasure-metadata-utils.go @@ -19,35 +19,80 @@ package cmd import ( "context" + "encoding/binary" "errors" "hash/crc32" - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/sync/errgroup" + "github.com/minio/pkg/v3/sync/errgroup" ) +// counterMap type adds GetValueWithQuorum method to a map[T]int used to count occurrences of values of type T. +type counterMap[T comparable] map[T]int + +// GetValueWithQuorum returns the first key which occurs >= quorum number of times. +func (c counterMap[T]) GetValueWithQuorum(quorum int) (T, bool) { + var zero T + for x, count := range c { + if count >= quorum { + return x, true + } + } + return zero, false +} + // figure out the most commonVersions across disk that satisfies -// the 'writeQuorum' this function returns '0' if quorum cannot +// the 'writeQuorum' this function returns "" if quorum cannot // be achieved and disks have too many inconsistent versions. -func reduceCommonVersions(diskVersions []uint64, writeQuorum int) (commonVersions uint64) { +func reduceCommonVersions(diskVersions [][]byte, writeQuorum int) (versions []byte) { diskVersionsCount := make(map[uint64]int) for _, versions := range diskVersions { - diskVersionsCount[versions]++ + if len(versions) > 0 { + diskVersionsCount[binary.BigEndian.Uint64(versions)]++ + } } - max := 0 + var commonVersions uint64 + maxCnt := 0 for versions, count := range diskVersionsCount { - if max < count { - max = count + if maxCnt < count { + maxCnt = count commonVersions = versions } } - if max >= writeQuorum { - return commonVersions + if maxCnt >= writeQuorum { + for _, versions := range diskVersions { + if binary.BigEndian.Uint64(versions) == commonVersions { + return versions + } + } + } + + return []byte{} +} + +// figure out the most commonVersions across disk that satisfies +// the 'writeQuorum' this function returns '0' if quorum cannot +// be achieved and disks have too many inconsistent versions. +func reduceCommonDataDir(dataDirs []string, writeQuorum int) (dataDir string) { + dataDirsCount := make(map[string]int) + for _, ddir := range dataDirs { + dataDirsCount[ddir]++ + } + + maxCnt := 0 + for ddir, count := range dataDirsCount { + if maxCnt < count { + maxCnt = count + dataDir = ddir + } + } + + if maxCnt >= writeQuorum { + return dataDir } - return 0 + return "" } // Returns number of errors that occurred the most (incl. nil) and the @@ -70,20 +115,20 @@ func reduceErrs(errs []error, ignoredErrs []error) (maxCount int, maxErr error) errorCounts[err]++ } - max := 0 + maxCnt := 0 for err, count := range errorCounts { switch { - case max < count: - max = count + case maxCnt < count: + maxCnt = count maxErr = err // Prefer `nil` over other error values with the same // number of occurrences. - case max == count && err == nil: + case maxCnt == count && err == nil: maxErr = err } } - return max, maxErr + return maxCnt, maxErr } // reduceQuorumErrs behaves like reduceErrs by only for returning @@ -159,7 +204,6 @@ func readAllFileInfo(ctx context.Context, disks []StorageAPI, origbucket string, g := errgroup.WithNErrs(len(disks)) // Read `xl.meta` in parallel across disks. for index := range disks { - index := index g.Go(func() (err error) { if disks[index] == nil { return errDiskNotFound @@ -250,41 +294,41 @@ func shuffleDisksAndPartsMetadata(disks []StorageAPI, partsMetadata []FileInfo, return shuffledDisks, shuffledPartsMetadata } -// Return shuffled partsMetadata depending on distribution. 
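The counterMap helper introduced above counts how often each value is reported across drives and accepts a value only when enough drives agree. A small usage sketch follows; the counterMap type is copied from the hunk above, while the drive reports and the quorum of 3 are assumed sample values, not code from this patch.

```go
// Sketch only: sample inputs and quorum are assumptions for illustration.
package main

import "fmt"

type counterMap[T comparable] map[T]int

// GetValueWithQuorum returns the first key seen at least quorum times.
func (c counterMap[T]) GetValueWithQuorum(quorum int) (T, bool) {
	var zero T
	for x, count := range c {
		if count >= quorum {
			return x, true
		}
	}
	return zero, false
}

func main() {
	// Four drives report the dataDir they hold for the same object version.
	observed := []string{"dir-a", "dir-a", "dir-a", "dir-b"}
	counts := make(counterMap[string])
	for _, d := range observed {
		counts[d]++
	}
	if dir, ok := counts.GetValueWithQuorum(3); ok { // assumed write quorum of 3
		fmt.Println("agreed dataDir:", dir) // dir-a
	}
}
```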
-func shufflePartsMetadata(partsMetadata []FileInfo, distribution []int) (shuffledPartsMetadata []FileInfo) { +func shuffleWithDist[T any](input []T, distribution []int) []T { if distribution == nil { - return partsMetadata + return input } - shuffledPartsMetadata = make([]FileInfo, len(partsMetadata)) - // Shuffle slice xl metadata for expected distribution. - for index := range partsMetadata { + shuffled := make([]T, len(input)) + for index := range input { blockIndex := distribution[index] - shuffledPartsMetadata[blockIndex-1] = partsMetadata[index] + shuffled[blockIndex-1] = input[index] } - return shuffledPartsMetadata + return shuffled +} + +// Return shuffled partsMetadata depending on distribution. +func shufflePartsMetadata(partsMetadata []FileInfo, distribution []int) []FileInfo { + return shuffleWithDist[FileInfo](partsMetadata, distribution) +} + +// shuffleCheckParts - shuffle CheckParts slice depending on the +// erasure distribution. +func shuffleCheckParts(parts []int, distribution []int) []int { + return shuffleWithDist[int](parts, distribution) } // shuffleDisks - shuffle input disks slice depending on the // erasure distribution. Return shuffled slice of disks with // their expected distribution. -func shuffleDisks(disks []StorageAPI, distribution []int) (shuffledDisks []StorageAPI) { - if distribution == nil { - return disks - } - shuffledDisks = make([]StorageAPI, len(disks)) - // Shuffle disks for expected distribution. - for index := range disks { - blockIndex := distribution[index] - shuffledDisks[blockIndex-1] = disks[index] - } - return shuffledDisks +func shuffleDisks(disks []StorageAPI, distribution []int) []StorageAPI { + return shuffleWithDist[StorageAPI](disks, distribution) } // evalDisks - returns a new slice of disks where nil is set if // the corresponding error in errs slice is not nil func evalDisks(disks []StorageAPI, errs []error) []StorageAPI { if len(errs) != len(disks) { - logger.LogIf(GlobalContext, errors.New("unexpected drives/errors slice length")) + bugLogIf(GlobalContext, errors.New("unexpected drives/errors slice length")) return nil } newDisks := make([]StorageAPI, len(disks)) diff --git a/cmd/erasure-metadata-utils_test.go b/cmd/erasure-metadata-utils_test.go index 242acc90f92f1..b14cc25dfa276 100644 --- a/cmd/erasure-metadata-utils_test.go +++ b/cmd/erasure-metadata-utils_test.go @@ -55,7 +55,7 @@ func TestDiskCount(t *testing.T) { // of errors into a single maximal error with in the list. func TestReduceErrs(t *testing.T) { canceledErrs := make([]error, 0, 5) - for i := 0; i < 5; i++ { + for i := range 5 { canceledErrs = append(canceledErrs, fmt.Errorf("error %d: %w", i, context.Canceled)) } // List all of all test cases to validate various cases of reduce errors. @@ -99,11 +99,11 @@ func TestReduceErrs(t *testing.T) { } // Validates list of all the testcases for returning valid errors. 
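The generic shuffleWithDist above folds the repeated distribution loops (parts metadata, check-part results, disks) into one helper: distribution[i] is the 1-based erasure block index that element i must land on. A standalone sketch with arbitrary sample values; in the server the distribution comes from the object's hash-based drive ordering.

```go
// Sketch only: the distribution values are arbitrary examples.
package main

import "fmt"

// shuffleWithDist places input[i] at the 1-based block index distribution[i],
// matching the generic helper introduced above.
func shuffleWithDist[T any](input []T, distribution []int) []T {
	if distribution == nil {
		return input
	}
	shuffled := make([]T, len(input))
	for i := range input {
		shuffled[distribution[i]-1] = input[i]
	}
	return shuffled
}

func main() {
	disks := []string{"d0", "d1", "d2", "d3"}
	distribution := []int{3, 1, 4, 2}
	fmt.Println(shuffleWithDist(disks, distribution)) // [d1 d3 d0 d2]
}
```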
for i, testCase := range testCases { - gotErr := reduceReadQuorumErrs(context.Background(), testCase.errs, testCase.ignoredErrs, 5) + gotErr := reduceReadQuorumErrs(t.Context(), testCase.errs, testCase.ignoredErrs, 5) if gotErr != testCase.err { t.Errorf("Test %d : expected %s, got %s", i+1, testCase.err, gotErr) } - gotNewErr := reduceWriteQuorumErrs(context.Background(), testCase.errs, testCase.ignoredErrs, 6) + gotNewErr := reduceWriteQuorumErrs(t.Context(), testCase.errs, testCase.ignoredErrs, 6) if gotNewErr != errErasureWriteQuorum { t.Errorf("Test %d : expected %s, got %s", i+1, errErasureWriteQuorum, gotErr) } @@ -148,7 +148,7 @@ func TestHashOrder(t *testing.T) { } func TestShuffleDisks(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() nDisks := 16 @@ -196,7 +196,7 @@ func testShuffleDisks(t *testing.T, z *erasureServerPools) { // TestEvalDisks tests the behavior of evalDisks func TestEvalDisks(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() nDisks := 16 @@ -222,7 +222,7 @@ func Test_hashOrder(t *testing.T) { var tmp [16]byte rng.Read(tmp[:]) prefix := hex.EncodeToString(tmp[:]) - for i := 0; i < 10000; i++ { + for range 10000 { rng.Read(tmp[:]) y := hashOrder(fmt.Sprintf("%s/%x", prefix, hex.EncodeToString(tmp[:3])), x) diff --git a/cmd/erasure-metadata.go b/cmd/erasure-metadata.go index 35d7419cb3ddb..f4b8798ba6c9c 100644 --- a/cmd/erasure-metadata.go +++ b/cmd/erasure-metadata.go @@ -26,12 +26,12 @@ import ( "time" "github.com/minio/minio/internal/amztime" + "github.com/minio/minio/internal/bucket/lifecycle" "github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/hash/sha256" xhttp "github.com/minio/minio/internal/http" - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/sync/errgroup" + "github.com/minio/pkg/v3/sync/errgroup" ) // Object was stored with additional erasure codes due to degraded system at upload time @@ -174,6 +174,7 @@ func (fi FileInfo) ToObjectInfo(bucket, object string, versioned bool) ObjectInf } } objInfo.Checksum = fi.Checksum + objInfo.decryptPartsChecksums(nil) objInfo.Inlined = fi.InlineData() // Success. return objInfo @@ -215,6 +216,7 @@ func (fi FileInfo) ReplicationInfoEquals(ofi FileInfo) bool { } // objectPartIndex - returns the index of matching object part number. +// Returns -1 if the part cannot be found. func objectPartIndex(parts []ObjectPartInfo, partNumber int) int { for i, part := range parts { if partNumber == part.Number { @@ -224,6 +226,17 @@ func objectPartIndex(parts []ObjectPartInfo, partNumber int) int { return -1 } +// objectPartIndexNums returns the index of the specified part number. +// Returns -1 if the part cannot be found. +func objectPartIndexNums(parts []int, partNumber int) int { + for i, part := range parts { + if part != 0 && partNumber == part { + return i + } + } + return -1 +} + // AddObjectPart - add a new object part in order. func (fi *FileInfo) AddObjectPart(partNumber int, partETag string, partSize, actualSize int64, modTime time.Time, idx []byte, checksums map[string]string) { partInfo := ObjectPartInfo{ @@ -268,7 +281,7 @@ func (fi FileInfo) ObjectToPartOffset(ctx context.Context, offset int64) (partIn // Continue to towards the next part. 
partOffset -= part.Size } - logger.LogIf(ctx, InvalidRange{}) + internalLogIf(ctx, InvalidRange{}) // Offset beyond the size of the object return InvalidRange. return 0, 0, InvalidRange{} } @@ -276,7 +289,7 @@ func (fi FileInfo) ObjectToPartOffset(ctx context.Context, offset int64) (partIn func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time.Time, etag string, quorum int) (FileInfo, error) { // with less quorum return error. if quorum < 1 { - return FileInfo{}, errErasureReadQuorum + return FileInfo{}, InsufficientReadQuorum{Err: errErasureReadQuorum, Type: RQInsufficientOnlineDrives} } metaHashes := make([]string, len(metaArr)) h := sha256.New() @@ -288,13 +301,14 @@ func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time. mtimeValid := meta.ModTime.Equal(modTime) if mtimeValid || etagOnly { fmt.Fprintf(h, "%v", meta.XLV1) - if !etagOnly { - // Verify dataDir is same only when mtime is valid and etag is not considered. - fmt.Fprintf(h, "%v", meta.GetDataDir()) - } for _, part := range meta.Parts { fmt.Fprintf(h, "part.%d", part.Number) + fmt.Fprintf(h, "part.%d", part.Size) } + // Previously we checked if we had quorum on DataDir value. + // We have removed this check to allow reading objects with different DataDir + // values in a few drives (due to a rebalance-stop race bug) + // provided their etags or ModTimes match. if !meta.Deleted && meta.Size != 0 { fmt.Fprintf(h, "%v+%v", meta.Erasure.DataBlocks, meta.Erasure.ParityBlocks) @@ -342,12 +356,17 @@ func findFileInfoInQuorum(ctx context.Context, metaArr []FileInfo, modTime time. } if maxCount < quorum { - return FileInfo{}, errErasureReadQuorum + return FileInfo{}, InsufficientReadQuorum{Err: errErasureReadQuorum, Type: RQInconsistentMeta} } - // Find the successor mod time in quorum, otherwise leave the - // candidate's successor modTime as found - succModTimeMap := make(map[time.Time]int) + // objProps represents properties that go beyond a single version + type objProps struct { + succModTime time.Time + numVersions int + } + // Find the successor mod time and numVersions in quorum, otherwise leave the + // candidate as found + otherPropsMap := make(counterMap[objProps]) var candidate FileInfo var found bool for i, hash := range metaHashes {
candidate = metaArr[i] found = true } - succModTimeMap[metaArr[i].SuccessorModTime]++ + props := objProps{ + succModTime: metaArr[i].SuccessorModTime, + numVersions: metaArr[i].NumVersions, + } + otherPropsMap[props]++ } } } - var succModTime time.Time - var smodTimeQuorum bool - for smodTime, count := range succModTimeMap { - if count >= quorum { - smodTimeQuorum = true - succModTime = smodTime - break - } - } if found { - if smodTimeQuorum { - candidate.SuccessorModTime = succModTime - candidate.IsLatest = succModTime.IsZero() + // Update candidate FileInfo with succModTime and numVersions in quorum when available + if props, ok := otherPropsMap.GetValueWithQuorum(quorum); ok { + candidate.SuccessorModTime = props.succModTime + candidate.IsLatest = props.succModTime.IsZero() + candidate.NumVersions = props.numVersions } return candidate, nil } - return FileInfo{}, errErasureReadQuorum + return FileInfo{}, InsufficientReadQuorum{Err: errErasureReadQuorum, Type: RQInconsistentMeta} } // pickValidFileInfo - picks one valid FileInfo content and returns from a @@ -387,13 +403,11 @@ func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Tim return findFileInfoInQuorum(ctx, metaArr, modTime, etag, quorum) } -// writeUniqueFileInfo - writes unique `xl.meta` content for each disk concurrently. -func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, origbucket, bucket, prefix string, files []FileInfo, quorum int) ([]StorageAPI, error) { +func writeAllMetadataWithRevert(ctx context.Context, disks []StorageAPI, origbucket, bucket, prefix string, files []FileInfo, quorum int, revert bool) ([]StorageAPI, error) { g := errgroup.WithNErrs(len(disks)) // Start writing `xl.meta` to all disks in parallel. for index := range disks { - index := index g.Go(func() error { if disks[index] == nil { return errDiskNotFound @@ -404,7 +418,7 @@ func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, origbucket, bu if fi.IsValid() { return disks[index].WriteMetadata(ctx, origbucket, bucket, prefix, fi) } - return errCorruptedFormat + return errFileCorrupt }, index) } @@ -412,9 +426,37 @@ func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, origbucket, bu mErrs := g.Wait() err := reduceWriteQuorumErrs(ctx, mErrs, objectOpIgnoredErrs, quorum) + if err != nil && revert { + ng := errgroup.WithNErrs(len(disks)) + for index := range disks { + if mErrs[index] != nil { + continue + } + index := index + ng.Go(func() error { + if disks[index] == nil { + return errDiskNotFound + } + return disks[index].Delete(ctx, bucket, pathJoin(prefix, xlStorageFormatFile), DeleteOptions{ + Recursive: true, + }) + }, index) + } + ng.Wait() + } + return evalDisks(disks, mErrs), err } +func writeAllMetadata(ctx context.Context, disks []StorageAPI, origbucket, bucket, prefix string, files []FileInfo, quorum int) ([]StorageAPI, error) { + return writeAllMetadataWithRevert(ctx, disks, origbucket, bucket, prefix, files, quorum, true) +} + +// writeUniqueFileInfo - writes unique `xl.meta` content for each disk concurrently. 
+func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, origbucket, bucket, prefix string, files []FileInfo, quorum int) ([]StorageAPI, error) { + return writeAllMetadataWithRevert(ctx, disks, origbucket, bucket, prefix, files, quorum, false) +} + func commonParity(parities []int, defaultParityCount int) int { N := len(parities) @@ -455,6 +497,7 @@ func commonParity(parities []int, defaultParityCount int) int { } func listObjectParities(partsMetadata []FileInfo, errs []error) (parities []int) { + totalShards := len(partsMetadata) parities = make([]int, len(partsMetadata)) for index, metadata := range partsMetadata { if errs[index] != nil { @@ -465,14 +508,20 @@ func listObjectParities(partsMetadata []FileInfo, errs []error) (parities []int) parities[index] = -1 continue } + //nolint:gocritic // Delete marker or zero byte objects take highest parity. if metadata.Deleted || metadata.Size == 0 { - parities[index] = len(partsMetadata) / 2 + parities[index] = totalShards / 2 + } else if metadata.TransitionStatus == lifecycle.TransitionComplete { + // For tiered objects, read quorum is N/2+1 to ensure simple majority on xl.meta. + // It is not equal to EcM because the data integrity is entrusted with the warm tier. + // However, we never go below EcM, in case of a EcM=EcN setup. + parities[index] = max(totalShards-(totalShards/2+1), metadata.Erasure.ParityBlocks) } else { parities[index] = metadata.Erasure.ParityBlocks } } - return + return parities } // Returns per object readQuorum and writeQuorum @@ -499,15 +548,7 @@ func objectQuorumFromMeta(ctx context.Context, partsMetaData []FileInfo, errs [] parities := listObjectParities(partsMetaData, errs) parityBlocks := commonParity(parities, defaultParityCount) if parityBlocks < 0 { - return -1, -1, errErasureReadQuorum - } - - if parityBlocks == 0 { - // For delete markers do not use 'defaultParityCount' as it is not expected to be the case. - // Use maximum allowed read quorum instead, writeQuorum+1 is returned for compatibility sake - // but there are no callers that shall be using this. - readQuorum := len(partsMetaData) / 2 - return readQuorum, readQuorum + 1, nil + return -1, -1, InsufficientReadQuorum{Err: errErasureReadQuorum, Type: RQInsufficientOnlineDrives} } dataBlocks := len(partsMetaData) - parityBlocks diff --git a/cmd/erasure-metadata_test.go b/cmd/erasure-metadata_test.go index 6eb518ae42a57..76ee2e1024167 100644 --- a/cmd/erasure-metadata_test.go +++ b/cmd/erasure-metadata_test.go @@ -18,7 +18,8 @@ package cmd import ( - "context" + "fmt" + "slices" "strconv" "testing" "time" @@ -144,7 +145,7 @@ func TestObjectToPartOffset(t *testing.T) { // Test them. 
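The listObjectParities change above treats tiered (transitioned) objects specially: their read quorum drops to a simple majority of the drives, since data integrity is entrusted to the warm tier, but parity never falls below the configured EcM. A standalone sketch of that parity selection; the shard counts and parities below are examples, not values from this patch.

```go
// Sketch only: example shard counts and parities for illustration.
package main

import "fmt"

// tieredParity mirrors the tiered-object branch above: read quorum becomes a
// simple majority (N/2+1), but parity never drops below the configured EcM.
func tieredParity(totalShards, parityBlocks int) int {
	return max(totalShards-(totalShards/2+1), parityBlocks)
}

func main() {
	fmt.Println(tieredParity(16, 4)) // 7 -> read quorum 16-7 = 9, a simple majority
	fmt.Println(tieredParity(15, 3)) // 7 -> read quorum 15-7 = 8
	fmt.Println(tieredParity(4, 2))  // 2 -> stays at EcM for an EcM=EcN setup
}
```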
for _, testCase := range testCases { - index, offset, err := fi.ObjectToPartOffset(context.Background(), testCase.offset) + index, offset, err := fi.ObjectToPartOffset(t.Context(), testCase.offset) if err != testCase.expectedErr { t.Fatalf("%+v: expected = %s, got: %s", testCase, testCase.expectedErr, err) } @@ -158,7 +159,7 @@ func TestObjectToPartOffset(t *testing.T) { } func TestFindFileInfoInQuorum(t *testing.T) { - getNFInfo := func(n int, quorum int, t int64, dataDir string, succModTimes []time.Time) []FileInfo { + getNFInfo := func(n int, quorum int, t int64, dataDir string, succModTimes []time.Time, numVersions []int) []FileInfo { fi := newFileInfo("test", 8, 8) fi.AddObjectPart(1, "etag", 100, 100, UTCNow(), nil, nil) fi.ModTime = time.Unix(t, 0) @@ -171,6 +172,9 @@ func TestFindFileInfoInQuorum(t *testing.T) { fis[i].SuccessorModTime = succModTimes[i] fis[i].IsLatest = succModTimes[i].IsZero() } + if numVersions != nil { + fis[i].NumVersions = numVersions[i] + } quorum-- if quorum == 0 { break @@ -182,66 +186,94 @@ func TestFindFileInfoInQuorum(t *testing.T) { commonSuccModTime := time.Date(2023, time.August, 25, 0, 0, 0, 0, time.UTC) succModTimesInQuorum := make([]time.Time, 16) succModTimesNoQuorum := make([]time.Time, 16) - for i := 0; i < 16; i++ { + commonNumVersions := 2 + numVersionsInQuorum := make([]int, 16) + numVersionsNoQuorum := make([]int, 16) + for i := range 16 { if i < 4 { continue } succModTimesInQuorum[i] = commonSuccModTime + numVersionsInQuorum[i] = commonNumVersions if i < 9 { continue } succModTimesNoQuorum[i] = commonSuccModTime + numVersionsNoQuorum[i] = commonNumVersions } tests := []struct { fis []FileInfo modTime time.Time succmodTimes []time.Time + numVersions []int expectedErr error expectedQuorum int expectedSuccModTime time.Time + expectedNumVersions int expectedIsLatest bool }{ { - fis: getNFInfo(16, 16, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21", nil), + fis: getNFInfo(16, 16, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21", nil, nil), modTime: time.Unix(1603863445, 0), expectedErr: nil, expectedQuorum: 8, }, { - fis: getNFInfo(16, 7, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21", nil), + fis: getNFInfo(16, 7, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21", nil, nil), modTime: time.Unix(1603863445, 0), - expectedErr: errErasureReadQuorum, + expectedErr: InsufficientReadQuorum{}, expectedQuorum: 8, }, { - fis: getNFInfo(16, 16, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21", nil), + fis: getNFInfo(16, 16, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21", nil, nil), modTime: time.Unix(1603863445, 0), - expectedErr: errErasureReadQuorum, + expectedErr: InsufficientReadQuorum{}, expectedQuorum: 0, }, { - fis: getNFInfo(16, 16, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21", succModTimesInQuorum), + fis: getNFInfo(16, 16, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21", succModTimesInQuorum, nil), modTime: time.Unix(1603863445, 0), + succmodTimes: succModTimesInQuorum, expectedErr: nil, expectedQuorum: 12, expectedSuccModTime: commonSuccModTime, expectedIsLatest: false, }, { - fis: getNFInfo(16, 16, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21", succModTimesNoQuorum), + fis: getNFInfo(16, 16, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21", succModTimesNoQuorum, nil), modTime: time.Unix(1603863445, 0), + succmodTimes: succModTimesNoQuorum, expectedErr: nil, expectedQuorum: 12, expectedSuccModTime: time.Time{}, expectedIsLatest: true, }, + { + fis: getNFInfo(16, 16, 1603863445, 
"36a21454-a2ca-11eb-bbaa-93a81c686f21", nil, numVersionsInQuorum), + modTime: time.Unix(1603863445, 0), + numVersions: numVersionsInQuorum, + expectedErr: nil, + expectedQuorum: 12, + expectedIsLatest: true, + expectedNumVersions: 2, + }, + { + fis: getNFInfo(16, 16, 1603863445, "36a21454-a2ca-11eb-bbaa-93a81c686f21", nil, numVersionsNoQuorum), + modTime: time.Unix(1603863445, 0), + numVersions: numVersionsNoQuorum, + expectedErr: nil, + expectedQuorum: 12, + expectedIsLatest: true, + expectedNumVersions: 0, + }, } for _, test := range tests { - test := test t.Run("", func(t *testing.T) { - fi, err := findFileInfoInQuorum(context.Background(), test.fis, test.modTime, "", test.expectedQuorum) - if err != test.expectedErr { + fi, err := findFileInfoInQuorum(t.Context(), test.fis, test.modTime, "", test.expectedQuorum) + _, ok1 := err.(InsufficientReadQuorum) + _, ok2 := test.expectedErr.(InsufficientReadQuorum) + if ok1 != ok2 { t.Errorf("Expected %s, got %s", test.expectedErr, err) } if test.succmodTimes != nil { @@ -252,6 +284,11 @@ func TestFindFileInfoInQuorum(t *testing.T) { t.Errorf("Expected IsLatest to be %v but got %v", test.expectedIsLatest, fi.IsLatest) } } + if test.numVersions != nil && test.expectedNumVersions > 0 { + if test.expectedNumVersions != fi.NumVersions { + t.Errorf("Expected Numversions to be %d but got %d", test.expectedNumVersions, fi.NumVersions) + } + } }) } } @@ -278,7 +315,7 @@ func TestTransitionInfoEquals(t *testing.T) { } var i uint - for i = 0; i < 8; i++ { + for i = range uint(8) { fi := FileInfo{ TransitionTier: inputs[0].tier, TransitionedObjName: inputs[0].remoteObjName, @@ -322,3 +359,124 @@ func TestSkipTierFreeVersion(t *testing.T) { t.Fatal("Expected SkipTierFreeVersion to be set on FileInfo but wasn't") } } + +func TestListObjectParities(t *testing.T) { + mkMetaArr := func(N, parity, agree int) []FileInfo { + fi := newFileInfo("obj-1", N-parity, parity) + fi.TransitionTier = "WARM-TIER" + fi.TransitionedObjName = mustGetUUID() + fi.TransitionStatus = "complete" + fi.Size = 1 << 20 + + metaArr := make([]FileInfo, N) + for i := range N { + fi.Erasure.Index = i + 1 + metaArr[i] = fi + if i < agree { + continue + } + metaArr[i].TransitionTier, metaArr[i].TransitionedObjName = "", "" + metaArr[i].TransitionStatus = "" + } + return metaArr + } + mkParities := func(N, agreedParity, disagreedParity, agree int) []int { + ps := make([]int, N) + for i := range N { + if i < agree { + ps[i] = agreedParity + continue + } + ps[i] = disagreedParity // disagree + } + return ps + } + + mkTest := func(N, parity, agree int) (res struct { + metaArr []FileInfo + errs []error + parities []int + parity int + }, + ) { + res.metaArr = mkMetaArr(N, parity, agree) + res.parities = mkParities(N, N-(N/2+1), parity, agree) + res.errs = make([]error, N) + if agree >= N/2+1 { // simple majority consensus + res.parity = N - (N/2 + 1) + } else { + res.parity = -1 + } + return res + } + + nonTieredTest := func(N, parity, agree int) (res struct { + metaArr []FileInfo + errs []error + parities []int + parity int + }, + ) { + fi := newFileInfo("obj-1", N-parity, parity) + fi.Size = 1 << 20 + metaArr := make([]FileInfo, N) + parities := make([]int, N) + for i := range N { + fi.Erasure.Index = i + 1 + metaArr[i] = fi + parities[i] = parity + if i < agree { + continue + } + metaArr[i].Erasure.Index = 0 // creates invalid fi on remaining drives + parities[i] = -1 // invalid fi are assigned parity -1 + } + res.metaArr = metaArr + res.parities = parities + res.errs = make([]error, N) + if 
agree >= N-parity { + res.parity = parity + } else { + res.parity = -1 + } + + return res + } + tests := []struct { + metaArr []FileInfo + errs []error + parities []int + parity int + }{ + // More than simple majority consensus + mkTest(15, 3, 11), + // No simple majority consensus + mkTest(15, 3, 7), + // Exact simple majority consensus + mkTest(15, 3, 8), + // More than simple majority consensus + mkTest(16, 4, 11), + // No simple majority consensus + mkTest(16, 4, 8), + // Exact simple majority consensus + mkTest(16, 4, 9), + // non-tiered object require read quorum of EcM + nonTieredTest(15, 3, 12), + // non-tiered object with fewer than EcM in consensus + nonTieredTest(15, 3, 11), + // non-tiered object require read quorum of EcM + nonTieredTest(16, 4, 12), + // non-tiered object with fewer than EcM in consensus + nonTieredTest(16, 4, 11), + } + for i, test := range tests { + t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) { + if got := listObjectParities(test.metaArr, test.errs); !slices.Equal(got, test.parities) { + t.Fatalf("Expected parities %v but got %v", test.parities, got) + } + if got := commonParity(test.parities, len(test.metaArr)/2); got != test.parity { + t.Fatalf("Expected common parity %v but got %v", test.parity, got) + } + }) + } +} diff --git a/cmd/erasure-multipart-conditional_test.go b/cmd/erasure-multipart-conditional_test.go new file mode 100644 index 0000000000000..e3c59eebe4a5a --- /dev/null +++ b/cmd/erasure-multipart-conditional_test.go @@ -0,0 +1,225 @@ +// Copyright (c) 2015-2025 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "bytes" + "context" + "testing" + + "github.com/dustin/go-humanize" + xhttp "github.com/minio/minio/internal/http" +) + +// TestNewMultipartUploadConditionalWithReadQuorumFailure tests that conditional +// multipart uploads (with if-match/if-none-match) behave correctly when read quorum +// cannot be reached. +// +// Related to: https://github.com/minio/minio/issues/21603 +// +// Should return an error when read quorum cannot +// be reached, as we cannot reliably determine if the precondition is met. 
+func TestNewMultipartUploadConditionalWithReadQuorumFailure(t *testing.T) { + ctx := context.Background() + + obj, fsDirs, err := prepareErasure16(ctx) + if err != nil { + t.Fatal(err) + } + defer obj.Shutdown(context.Background()) + defer removeRoots(fsDirs) + + z := obj.(*erasureServerPools) + xl := z.serverPools[0].sets[0] + + bucket := "test-bucket" + object := "test-object" + + err = obj.MakeBucket(ctx, bucket, MakeBucketOptions{}) + if err != nil { + t.Fatal(err) + } + + // Put an initial object so it exists + _, err = obj.PutObject(ctx, bucket, object, + mustGetPutObjReader(t, bytes.NewReader([]byte("initial-value")), + int64(len("initial-value")), "", ""), ObjectOptions{}) + if err != nil { + t.Fatal(err) + } + + // Get object info to capture the ETag + objInfo, err := obj.GetObjectInfo(ctx, bucket, object, ObjectOptions{}) + if err != nil { + t.Fatal(err) + } + existingETag := objInfo.ETag + + // Simulate read quorum failure by taking enough disks offline + // With 16 disks (EC 8+8), read quorum is 9. Taking 8 disks offline leaves only 8, + // which is below read quorum. + erasureDisks := xl.getDisks() + z.serverPools[0].erasureDisksMu.Lock() + xl.getDisks = func() []StorageAPI { + for i := range erasureDisks[:8] { + erasureDisks[i] = nil + } + return erasureDisks + } + z.serverPools[0].erasureDisksMu.Unlock() + + t.Run("if-none-match with read quorum failure", func(t *testing.T) { + // Test Case 1: if-none-match (create only if doesn't exist) + // With if-none-match: *, this should only succeed if object doesn't exist. + // Since read quorum fails, we can't determine if object exists. + opts := ObjectOptions{ + UserDefined: map[string]string{ + xhttp.IfNoneMatch: "*", + }, + CheckPrecondFn: func(oi ObjectInfo) bool { + // Precondition fails if object exists (ETag is not empty) + return oi.ETag != "" + }, + } + + _, err := obj.NewMultipartUpload(ctx, bucket, object, opts) + if !isErrReadQuorum(err) { + t.Errorf("Expected read quorum error when if-none-match is used with quorum failure, got: %v", err) + } + }) + + t.Run("if-match with wrong ETag and read quorum failure", func(t *testing.T) { + // Test Case 2: if-match with WRONG ETag + // This should fail even without quorum issues, but with quorum failure + // we can't verify the ETag at all. + opts := ObjectOptions{ + UserDefined: map[string]string{ + xhttp.IfMatch: "wrong-etag-12345", + }, + HasIfMatch: true, + CheckPrecondFn: func(oi ObjectInfo) bool { + // Precondition fails if ETags don't match + return oi.ETag != "wrong-etag-12345" + }, + } + + _, err := obj.NewMultipartUpload(ctx, bucket, object, opts) + if !isErrReadQuorum(err) { + t.Logf("Got error (as expected): %v", err) + t.Logf("But expected read quorum error, not object-not-found error") + } + }) + + t.Run("if-match with correct ETag and read quorum failure", func(t *testing.T) { + // Test Case 3: if-match with CORRECT ETag but read quorum failure + // Even with the correct ETag, we shouldn't proceed if we can't verify it. 
+ opts := ObjectOptions{ + UserDefined: map[string]string{ + xhttp.IfMatch: existingETag, + }, + HasIfMatch: true, + CheckPrecondFn: func(oi ObjectInfo) bool { + // Precondition fails if ETags don't match + return oi.ETag != existingETag + }, + } + + _, err := obj.NewMultipartUpload(ctx, bucket, object, opts) + if !isErrReadQuorum(err) { + t.Errorf("Expected read quorum error when if-match is used with quorum failure, got: %v", err) + } + }) +} + +// TestCompleteMultipartUploadConditionalWithReadQuorumFailure tests that conditional +// complete multipart upload operations behave correctly when read quorum cannot be reached. +func TestCompleteMultipartUploadConditionalWithReadQuorumFailure(t *testing.T) { + ctx := context.Background() + + obj, fsDirs, err := prepareErasure16(ctx) + if err != nil { + t.Fatal(err) + } + defer obj.Shutdown(context.Background()) + defer removeRoots(fsDirs) + + z := obj.(*erasureServerPools) + xl := z.serverPools[0].sets[0] + + bucket := "test-bucket" + object := "test-object" + + err = obj.MakeBucket(ctx, bucket, MakeBucketOptions{}) + if err != nil { + t.Fatal(err) + } + + // Put an initial object + _, err = obj.PutObject(ctx, bucket, object, + mustGetPutObjReader(t, bytes.NewReader([]byte("initial-value")), + int64(len("initial-value")), "", ""), ObjectOptions{}) + if err != nil { + t.Fatal(err) + } + + // Start a multipart upload WITHOUT conditional checks (this should work) + res, err := obj.NewMultipartUpload(ctx, bucket, object, ObjectOptions{}) + if err != nil { + t.Fatal(err) + } + + // Upload a part + partData := bytes.Repeat([]byte("a"), 5*humanize.MiByte) + md5Hex := getMD5Hash(partData) + _, err = obj.PutObjectPart(ctx, bucket, object, res.UploadID, 1, + mustGetPutObjReader(t, bytes.NewReader(partData), int64(len(partData)), md5Hex, ""), + ObjectOptions{}) + if err != nil { + t.Fatal(err) + } + + // Now simulate read quorum failure + erasureDisks := xl.getDisks() + z.serverPools[0].erasureDisksMu.Lock() + xl.getDisks = func() []StorageAPI { + for i := range erasureDisks[:8] { + erasureDisks[i] = nil + } + return erasureDisks + } + z.serverPools[0].erasureDisksMu.Unlock() + + t.Run("complete multipart with if-none-match and read quorum failure", func(t *testing.T) { + // Try to complete the multipart upload with if-none-match + // This should fail because we can't verify the condition due to read quorum failure + opts := ObjectOptions{ + UserDefined: map[string]string{ + xhttp.IfNoneMatch: "*", + }, + CheckPrecondFn: func(oi ObjectInfo) bool { + return oi.ETag != "" + }, + } + + parts := []CompletePart{{PartNumber: 1, ETag: md5Hex}} + _, err := obj.CompleteMultipartUpload(ctx, bucket, object, res.UploadID, parts, opts) + if !isErrReadQuorum(err) { + t.Errorf("Expected read quorum error, got: %v", err) + } + }) +} diff --git a/cmd/erasure-multipart.go b/cmd/erasure-multipart.go index 21c48c5929a3c..52d63698af28b 100644 --- a/cmd/erasure-multipart.go +++ b/cmd/erasure-multipart.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2023 MinIO, Inc. +// Copyright (c) 2015-2025 MinIO, Inc. 
// // This file is part of MinIO Object Storage stack // @@ -39,8 +39,9 @@ import ( xhttp "github.com/minio/minio/internal/http" xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/mimedb" - "github.com/minio/pkg/v2/sync/errgroup" + "github.com/minio/pkg/v3/mimedb" + "github.com/minio/pkg/v3/sync/errgroup" + "github.com/minio/sio" ) func (er erasureObjects) getUploadIDDir(bucket, object, uploadID string) string { @@ -80,6 +81,14 @@ func (er erasureObjects) checkUploadIDExists(ctx context.Context, bucket, object return fi, nil, err } + if readQuorum < 0 { + return fi, nil, errErasureReadQuorum + } + + if writeQuorum < 0 { + return fi, nil, errErasureWriteQuorum + } + quorum := readQuorum if write { quorum = writeQuorum @@ -88,14 +97,13 @@ func (er erasureObjects) checkUploadIDExists(ctx context.Context, bucket, object // List all online disks. _, modTime, etag := listOnlineDisks(storageDisks, partsMetadata, errs, quorum) - var reducedErr error if write { - reducedErr = reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) + err = reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) } else { - reducedErr = reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum) + err = reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum) } - if reducedErr != nil { - return fi, nil, reducedErr + if err != nil { + return fi, nil, err } // Pick one from the first valid metadata. @@ -103,34 +111,9 @@ func (er erasureObjects) checkUploadIDExists(ctx context.Context, bucket, object return fi, partsMetadata, err } -// Removes part.meta given by partName belonging to a multipart upload from minioMetaBucket -func (er erasureObjects) removePartMeta(bucket, object, uploadID, dataDir string, partNumber int) { - uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) - curpartPath := pathJoin(uploadIDPath, dataDir, fmt.Sprintf("part.%d", partNumber)) - storageDisks := er.getDisks() - - g := errgroup.WithNErrs(len(storageDisks)) - for index, disk := range storageDisks { - if disk == nil { - continue - } - index := index - g.Go(func() error { - _ = storageDisks[index].Delete(context.TODO(), minioMetaMultipartBucket, curpartPath+".meta", DeleteOptions{ - Recursive: false, - Immediate: false, - }) - - return nil - }, index) - } - g.Wait() -} - -// Removes part given by partName belonging to a multipart upload from minioMetaBucket -func (er erasureObjects) removeObjectPart(bucket, object, uploadID, dataDir string, partNumber int) { - uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) - curpartPath := pathJoin(uploadIDPath, dataDir, fmt.Sprintf("part.%d", partNumber)) +// cleanupMultipartPath removes all extraneous files and parts from the multipart folder, this is used per CompleteMultipart. +// do not use this function outside of completeMultipartUpload() +func (er erasureObjects) cleanupMultipartPath(ctx context.Context, paths ...string) { storageDisks := er.getDisks() g := errgroup.WithNErrs(len(storageDisks)) @@ -140,18 +123,7 @@ func (er erasureObjects) removeObjectPart(bucket, object, uploadID, dataDir stri } index := index g.Go(func() error { - // Ignoring failure to remove parts that weren't present in CompleteMultipartUpload - // requests. xl.meta is the authoritative source of truth on which parts constitute - // the object. The presence of parts that don't belong in the object doesn't affect correctness. 
-			_ = storageDisks[index].Delete(context.TODO(), minioMetaMultipartBucket, curpartPath, DeleteOptions{
-				Recursive: false,
-				Immediate: false,
-			})
-			_ = storageDisks[index].Delete(context.TODO(), minioMetaMultipartBucket, curpartPath+".meta", DeleteOptions{
-				Recursive: false,
-				Immediate: false,
-			})
-
+			_ = storageDisks[index].DeleteBulk(ctx, minioMetaMultipartBucket, paths...)
 			return nil
 		}, index)
 	}
@@ -159,7 +131,7 @@ func (er erasureObjects) removeObjectPart(bucket, object, uploadID, dataDir stri
 }
 
 // Clean-up the old multipart uploads. Should be run in a Go routine.
-func (er erasureObjects) cleanupStaleUploads(ctx context.Context, expiry time.Duration) {
+func (er erasureObjects) cleanupStaleUploads(ctx context.Context) {
 	// run multiple cleanup's local to this server.
 	var wg sync.WaitGroup
 	for _, disk := range er.getLocalDisks() {
@@ -167,7 +139,7 @@ func (er erasureObjects) cleanupStaleUploads(ctx context.Context, expiry time.Du
 			wg.Add(1)
 			go func(disk StorageAPI) {
 				defer wg.Done()
-				er.cleanupStaleUploadsOnDisk(ctx, disk, expiry)
+				er.cleanupStaleUploadsOnDisk(ctx, disk)
 			}(disk)
 		}
 	}
@@ -193,46 +165,71 @@ func (er erasureObjects) deleteAll(ctx context.Context, bucket, prefix string) {
 }
 
 // Remove the old multipart uploads on the given disk.
-func (er erasureObjects) cleanupStaleUploadsOnDisk(ctx context.Context, disk StorageAPI, expiry time.Duration) {
-	now := time.Now()
-	diskPath := disk.Endpoint().Path
+func (er erasureObjects) cleanupStaleUploadsOnDisk(ctx context.Context, disk StorageAPI) {
+	drivePath := disk.Endpoint().Path
 
-	readDirFn(pathJoin(diskPath, minioMetaMultipartBucket), func(shaDir string, typ os.FileMode) error {
-		readDirFn(pathJoin(diskPath, minioMetaMultipartBucket, shaDir), func(uploadIDDir string, typ os.FileMode) error {
+	readDirFn(pathJoin(drivePath, minioMetaMultipartBucket), func(shaDir string, typ os.FileMode) error {
+		readDirFn(pathJoin(drivePath, minioMetaMultipartBucket, shaDir), func(uploadIDDir string, typ os.FileMode) error {
 			uploadIDPath := pathJoin(shaDir, uploadIDDir)
-			fi, err := disk.ReadVersion(ctx, "", minioMetaMultipartBucket, uploadIDPath, "", ReadOptions{})
-			if err != nil {
+			var modTime time.Time
+			// Upload IDs are of the form base64_url(<UUID>x<modTime>), we can extract the time from the UUID.
+			if b64, err := base64.RawURLEncoding.DecodeString(uploadIDDir); err == nil {
+				if split := strings.Split(string(b64), "x"); len(split) == 2 {
+					t, err := strconv.ParseInt(split[1], 10, 64)
+					if err == nil {
+						modTime = time.Unix(0, t)
+					}
+				}
+			}
+			// Fallback for older uploads without time in the ID.
+			if modTime.IsZero() {
+				wait := deleteMultipartCleanupSleeper.Timer(ctx)
+				fi, err := disk.ReadVersion(ctx, "", minioMetaMultipartBucket, uploadIDPath, "", ReadOptions{})
+				if err != nil {
+					return nil
+				}
+				modTime = fi.ModTime
+				wait()
+			}
+			if time.Since(modTime) < globalAPIConfig.getStaleUploadsExpiry() {
 				return nil
 			}
 			w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout())
 			return w.Run(func() error {
-				wait := deletedCleanupSleeper.Timer(ctx)
-				if now.Sub(fi.ModTime) > expiry {
-					removeAll(pathJoin(diskPath, minioMetaMultipartBucket, uploadIDPath))
-				}
+				wait := deleteMultipartCleanupSleeper.Timer(ctx)
+				pathUUID := mustGetUUID()
+				targetPath := pathJoin(drivePath, minioMetaTmpDeletedBucket, pathUUID)
+				renameAll(pathJoin(drivePath, minioMetaMultipartBucket, uploadIDPath), targetPath, pathJoin(drivePath, minioMetaBucket))
				wait()
 				return nil
 			})
 		})
+		// Get the modtime of the shaDir.
vi, err := disk.StatVol(ctx, pathJoin(minioMetaMultipartBucket, shaDir)) if err != nil { return nil } + // Modtime is returned in the Created field. See (*xlStorage).StatVol + if time.Since(vi.Created) < globalAPIConfig.getStaleUploadsExpiry() { + return nil + } w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) return w.Run(func() error { - wait := deletedCleanupSleeper.Timer(ctx) - if now.Sub(vi.Created) > expiry { - // We are not deleting shaDir recursively here, if shaDir is empty - // and its older then we can happily delete it. - Remove(pathJoin(diskPath, minioMetaMultipartBucket, shaDir)) - } + wait := deleteMultipartCleanupSleeper.Timer(ctx) + pathUUID := mustGetUUID() + targetPath := pathJoin(drivePath, minioMetaTmpDeletedBucket, pathUUID) + + // We are not deleting shaDir recursively here, if shaDir is empty + // and its older then we can happily delete it. + Rename(pathJoin(drivePath, minioMetaMultipartBucket, shaDir), targetPath) wait() return nil }) }) - readDirFn(pathJoin(diskPath, minioMetaTmpBucket), func(tmpDir string, typ os.FileMode) error { - if tmpDir == ".trash/" { // do not remove .trash/ here, it has its own routines + readDirFn(pathJoin(drivePath, minioMetaTmpBucket), func(tmpDir string, typ os.FileMode) error { + if strings.HasPrefix(tmpDir, ".trash") { + // do not remove .trash/ here, it has its own routines return nil } vi, err := disk.StatVol(ctx, pathJoin(minioMetaTmpBucket, tmpDir)) @@ -241,9 +238,12 @@ func (er erasureObjects) cleanupStaleUploadsOnDisk(ctx context.Context, disk Sto } w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) return w.Run(func() error { - wait := deletedCleanupSleeper.Timer(ctx) - if now.Sub(vi.Created) > expiry { - removeAll(pathJoin(diskPath, minioMetaTmpBucket, tmpDir)) + wait := deleteMultipartCleanupSleeper.Timer(ctx) + if time.Since(vi.Created) > globalAPIConfig.getStaleUploadsExpiry() { + pathUUID := mustGetUUID() + targetPath := pathJoin(drivePath, minioMetaTmpDeletedBucket, pathUUID) + + renameAll(pathJoin(drivePath, minioMetaTmpBucket, tmpDir), targetPath, pathJoin(drivePath, minioMetaBucket)) } wait() return nil @@ -259,7 +259,7 @@ func (er erasureObjects) cleanupStaleUploadsOnDisk(ctx context.Context, disk Sto // towards simplification of multipart APIs. // The resulting ListMultipartsInfo structure is unmarshalled directly as XML. func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, err error) { - auditObjectErasureSet(ctx, object, &er) + auditObjectErasureSet(ctx, "ListMultipartUploads", object, &er) result.MaxUploads = maxUploads result.KeyMarker = keyMarker @@ -270,10 +270,13 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec var disk StorageAPI disks := er.getOnlineLocalDisks() if len(disks) == 0 { - // using er.getOnlineLocalDisks() has one side-affect where - // on a pooled setup all disks are remote, add a fallback - disks = er.getOnlineDisks() + // If no local, get non-healing disks. 
+ var ok bool + if disks, ok = er.getOnlineDisksWithHealing(false); !ok { + disks = er.getOnlineDisks() + } } + for _, disk = range disks { if disk == nil { continue @@ -319,7 +322,7 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec uploads = append(uploads, MultipartInfo{ Bucket: bucket, Object: object, - UploadID: base64.RawURLEncoding.EncodeToString([]byte(fmt.Sprintf("%s.%s", globalDeploymentID(), uploadID))), + UploadID: base64.RawURLEncoding.EncodeToString(fmt.Appendf(nil, "%s.%s", globalDeploymentID(), uploadID)), Initiated: startTime, }) populatedUploadIDs.Add(uploadID) @@ -372,20 +375,29 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec // operation(s) on the object. func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (*NewMultipartUploadResult, error) { if opts.CheckPrecondFn != nil { - // Lock the object before reading. - lk := er.NewNSLock(bucket, object) - lkctx, err := lk.GetRLock(ctx, globalOperationTimeout) - if err != nil { - return nil, err + if !opts.NoLock { + ns := er.NewNSLock(bucket, object) + lkctx, err := ns.GetLock(ctx, globalOperationTimeout) + if err != nil { + return nil, err + } + ctx = lkctx.Context() + defer ns.Unlock(lkctx) + opts.NoLock = true } - rctx := lkctx.Context() - obj, err := er.getObjectInfo(rctx, bucket, object, opts) - lk.RUnlock(lkctx) - if err != nil && !isErrVersionNotFound(err) { + + obj, err := er.getObjectInfo(ctx, bucket, object, opts) + if err == nil && opts.CheckPrecondFn(obj) { + return nil, PreConditionFailed{} + } + if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) { return nil, err } - if opts.CheckPrecondFn(obj) { - return nil, PreConditionFailed{} + + // if object doesn't exist return error for If-Match conditional requests + // If-None-Match should be allowed to proceed for non-existent objects + if err != nil && opts.HasIfMatch && (isErrObjectNotFound(err) || isErrVersionNotFound(err)) { + return nil, err } } @@ -401,36 +413,33 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string, parityDrives = er.defaultParityCount } - // If we have offline disks upgrade the number of erasure codes for this object. - parityOrig := parityDrives + if globalStorageClass.AvailabilityOptimized() { + // If we have offline disks upgrade the number of erasure codes for this object. + parityOrig := parityDrives - var offlineDrives int - for _, disk := range onlineDisks { - if disk == nil { - parityDrives++ - offlineDrives++ - continue - } - if !disk.IsOnline() { - parityDrives++ - offlineDrives++ - continue + var offlineDrives int + for _, disk := range onlineDisks { + if disk == nil || !disk.IsOnline() { + parityDrives++ + offlineDrives++ + continue + } } - } - if offlineDrives >= (len(onlineDisks)+1)/2 { - // if offline drives are more than 50% of the drives - // we have no quorum, we shouldn't proceed just - // fail at that point. - return nil, toObjectErr(errErasureWriteQuorum, bucket, object) - } + if offlineDrives >= (len(onlineDisks)+1)/2 { + // if offline drives are more than 50% of the drives + // we have no quorum, we shouldn't proceed just + // fail at that point. 
+ return nil, toObjectErr(errErasureWriteQuorum, bucket, object) + } - if parityDrives >= len(onlineDisks)/2 { - parityDrives = len(onlineDisks) / 2 - } + if parityDrives >= len(onlineDisks)/2 { + parityDrives = len(onlineDisks) / 2 + } - if parityOrig != parityDrives { - userDefined[minIOErasureUpgraded] = strconv.Itoa(parityOrig) + "->" + strconv.Itoa(parityDrives) + if parityOrig != parityDrives { + userDefined[minIOErasureUpgraded] = strconv.Itoa(parityOrig) + "->" + strconv.Itoa(parityDrives) + } } dataDrives := len(onlineDisks) - parityDrives @@ -452,6 +461,14 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string, } fi.DataDir = mustGetUUID() + if ckSum := userDefined[ReplicationSsecChecksumHeader]; ckSum != "" { + v, err := base64.StdEncoding.DecodeString(ckSum) + if err == nil { + fi.Checksum = v + } + delete(userDefined, ReplicationSsecChecksumHeader) + } + // Initialize erasure metadata. for index := range partsMetadata { partsMetadata[index] = fi @@ -469,6 +486,7 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string, if opts.WantChecksum != nil && opts.WantChecksum.Type.IsSet() { userDefined[hash.MinIOMultipartChecksum] = opts.WantChecksum.Type.String() + userDefined[hash.MinIOMultipartChecksumType] = opts.WantChecksum.Type.ObjType() } modTime := opts.MTime @@ -485,17 +503,19 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string, partsMetadata[index].ModTime = modTime partsMetadata[index].Metadata = userDefined } - uploadUUID := mustGetUUID() - uploadID := base64.RawURLEncoding.EncodeToString([]byte(fmt.Sprintf("%s.%s", globalDeploymentID(), uploadUUID))) + uploadUUID := fmt.Sprintf("%sx%d", mustGetUUID(), modTime.UnixNano()) + uploadID := base64.RawURLEncoding.EncodeToString(fmt.Appendf(nil, "%s.%s", globalDeploymentID(), uploadUUID)) uploadIDPath := er.getUploadIDDir(bucket, object, uploadUUID) // Write updated `xl.meta` to all disks. - if _, err := writeUniqueFileInfo(ctx, onlineDisks, bucket, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil { + if _, err := writeAllMetadata(ctx, onlineDisks, bucket, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil { return nil, toObjectErr(err, bucket, object) } + return &NewMultipartUploadResult{ UploadID: uploadID, ChecksumAlgo: userDefined[hash.MinIOMultipartChecksum], + ChecksumType: userDefined[hash.MinIOMultipartChecksumType], }, nil } @@ -506,77 +526,44 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string, // Implements S3 compatible initiate multipart API. func (er erasureObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (*NewMultipartUploadResult, error) { if !opts.NoAuditLog { - auditObjectErasureSet(ctx, object, &er) + auditObjectErasureSet(ctx, "NewMultipartUpload", object, &er) } return er.newMultipartUpload(ctx, bucket, object, opts) } // renamePart - renames multipart part to its relevant location under uploadID. -func renamePart(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, writeQuorum int) ([]StorageAPI, error) { - g := errgroup.WithNErrs(len(disks)) - - // Rename file on all underlying storage disks. 
- for index := range disks { - index := index - g.Go(func() error { - if disks[index] == nil { - return errDiskNotFound - } - return disks[index].RenameFile(ctx, srcBucket, srcEntry, dstBucket, dstEntry) - }, index) +func (er erasureObjects) renamePart(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, optsMeta []byte, writeQuorum int, skipParent string) ([]StorageAPI, error) { + paths := []string{ + dstEntry, + dstEntry + ".meta", } - // Wait for all renames to finish. - errs := g.Wait() - - // Do not need to undo partial successful operation since those will be cleaned up - // in 24hrs via multipart cleaner, never rename() back to `.minio.sys/tmp` as there - // is no way to clean them. - - // We can safely allow RenameFile errors up to len(er.getDisks()) - writeQuorum - // otherwise return failure. Cleanup successful renames. - return evalDisks(disks, errs), reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) -} + // cleanup existing paths first across all drives. + er.cleanupMultipartPath(ctx, paths...) -// writeAllDisks - writes 'b' to all provided disks. -// If write cannot reach quorum, the files will be deleted from all disks. -func writeAllDisks(ctx context.Context, disks []StorageAPI, dstBucket, dstEntry string, b []byte, writeQuorum int) ([]StorageAPI, error) { g := errgroup.WithNErrs(len(disks)) - // Write file to all underlying storage disks. + // Rename file on all underlying storage disks. for index := range disks { - index := index g.Go(func() error { if disks[index] == nil { return errDiskNotFound } - return disks[index].WriteAll(ctx, dstBucket, dstEntry, b) + return disks[index].RenamePart(ctx, srcBucket, srcEntry, dstBucket, dstEntry, optsMeta, skipParent) }, index) } // Wait for all renames to finish. errs := g.Wait() - // We can safely allow RenameFile errors up to len(er.getDisks()) - writeQuorum - // otherwise return failure. Cleanup successful renames. err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) - if errors.Is(err, errErasureWriteQuorum) { - // Remove all written - g := errgroup.WithNErrs(len(disks)) - for index := range disks { - if disks[index] == nil || errs[index] != nil { - continue - } - index := index - g.Go(func() error { - return disks[index].Delete(ctx, dstBucket, dstEntry, DeleteOptions{Immediate: true}) - }, index) - } - // Ignore these errors. - g.WaitErr() + if err != nil { + er.cleanupMultipartPath(ctx, paths...) } + // We can safely allow RenameFile errors up to len(er.getDisks()) - writeQuorum + // otherwise return failure. Cleanup successful renames. return evalDisks(disks, errs), err } @@ -587,29 +574,19 @@ func writeAllDisks(ctx context.Context, disks []StorageAPI, dstBucket, dstEntry // Implements S3 compatible Upload Part API. func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, err error) { if !opts.NoAuditLog { - auditObjectErasureSet(ctx, object, &er) + auditObjectErasureSet(ctx, "PutObjectPart", object, &er) } data := r.Reader // Validate input data size and it can never be less than zero. if data.Size() < -1 { - logger.LogIf(ctx, errInvalidArgument, logger.ErrorKind) + bugLogIf(ctx, errInvalidArgument, logger.ErrorKind) return pi, toObjectErr(errInvalidArgument) } - // Read lock for upload id. - // Only held while reading the upload metadata. 
- uploadIDRLock := er.NewNSLock(bucket, pathJoin(object, uploadID)) - rlkctx, err := uploadIDRLock.GetRLock(ctx, globalOperationTimeout) - if err != nil { - return PartInfo{}, err - } - rctx := rlkctx.Context() - defer uploadIDRLock.RUnlock(rlkctx) - uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) // Validates if upload ID exists. - fi, _, err := er.checkUploadIDExists(rctx, bucket, object, uploadID, true) + fi, _, err := er.checkUploadIDExists(ctx, bucket, object, uploadID, true) if err != nil { if errors.Is(err, errVolumeNotFound) { return pi, toObjectErr(err, bucket) @@ -617,18 +594,6 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo return pi, toObjectErr(err, bucket, object, uploadID) } - // Write lock for this part ID, only hold it if we are planning to read from the - // streamto avoid any concurrent updates. - // - // Must be held throughout this call. - partIDLock := er.NewNSLock(bucket, pathJoin(object, uploadID, strconv.Itoa(partID))) - plkctx, err := partIDLock.GetLock(ctx, globalOperationTimeout) - if err != nil { - return PartInfo{}, err - } - pctx := plkctx.Context() - defer partIDLock.Unlock(plkctx) - onlineDisks := er.getDisks() writeQuorum := fi.WriteQuorum(er.defaultWQuorum()) @@ -658,7 +623,7 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo } }() - erasure, err := NewErasure(pctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize) + erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize) if err != nil { return pi, toObjectErr(err, bucket, object) } @@ -668,17 +633,13 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo switch size := data.Size(); { case size == 0: buffer = make([]byte, 1) // Allocate at least a byte to reach EOF - case size == -1: - if size := data.ActualSize(); size > 0 && size < fi.Erasure.BlockSize { - // Account for padding and forced compression overhead and encryption. - buffer = make([]byte, data.ActualSize()+256+32+32, data.ActualSize()*2+512) + case size >= fi.Erasure.BlockSize || size == -1: + if int64(globalBytePoolCap.Load().Width()) < fi.Erasure.BlockSize { + buffer = make([]byte, fi.Erasure.BlockSize, 2*fi.Erasure.BlockSize) } else { - buffer = globalBytePoolCap.Get() - defer globalBytePoolCap.Put(buffer) + buffer = globalBytePoolCap.Load().Get() + defer globalBytePoolCap.Load().Put(buffer) } - case size >= fi.Erasure.BlockSize: - buffer = globalBytePoolCap.Get() - defer globalBytePoolCap.Put(buffer) case size < fi.Erasure.BlockSize: // No need to allocate fully fi.Erasure.BlockSize buffer if the incoming data is smaller. buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1)) @@ -699,10 +660,11 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo if data.Size() > bigFileThreshold { // Add input readahead. // We use 2 buffers, so we always have a full buffer of input. 
- bufA := globalBytePoolCap.Get() - bufB := globalBytePoolCap.Get() - defer globalBytePoolCap.Put(bufA) - defer globalBytePoolCap.Put(bufB) + pool := globalBytePoolCap.Load() + bufA := pool.Get() + bufB := pool.Get() + defer pool.Put(bufA) + defer pool.Put(bufB) ra, err := readahead.NewReaderBuffer(data, [][]byte{bufA[:fi.Erasure.BlockSize], bufB[:fi.Erasure.BlockSize]}) if err == nil { toEncode = ra @@ -710,11 +672,14 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo } } - n, err := erasure.Encode(pctx, toEncode, writers, buffer, writeQuorum) - closeBitrotWriters(writers) + n, err := erasure.Encode(ctx, toEncode, writers, buffer, writeQuorum) + closeErrs := closeBitrotWriters(writers) if err != nil { return pi, toObjectErr(err, bucket, object) } + if closeErr := reduceWriteQuorumErrs(ctx, closeErrs, objectOpIgnoredErrs, writeQuorum); closeErr != nil { + return pi, toObjectErr(closeErr, bucket, object) + } // Should return IncompleteBody{} error when reader has fewer bytes // than specified in request header. @@ -730,10 +695,6 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo // Rename temporary part file to its final location. partPath := pathJoin(uploadIDPath, fi.DataDir, partSuffix) - onlineDisks, err = renamePart(ctx, onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, writeQuorum) - if err != nil { - return pi, toObjectErr(err, minioMetaMultipartBucket, partPath) - } md5hex := r.MD5CurrentHexString() if opts.PreserveETag != "" { @@ -745,39 +706,88 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo index = opts.IndexCB() } + actualSize := data.ActualSize() + if actualSize < 0 { + _, encrypted := crypto.IsEncrypted(fi.Metadata) + compressed := fi.IsCompressed() + switch { + case compressed: + // ... nothing changes for compressed stream. + // if actualSize is -1 we have no known way to + // determine what is the actualSize. + case encrypted: + decSize, err := sio.DecryptedSize(uint64(n)) + if err == nil { + actualSize = int64(decSize) + } + default: + actualSize = n + } + } + partInfo := ObjectPartInfo{ Number: partID, ETag: md5hex, Size: n, - ActualSize: data.ActualSize(), + ActualSize: actualSize, ModTime: UTCNow(), Index: index, Checksums: r.ContentCRC(), } - fi.Parts = []ObjectPartInfo{partInfo} - partFI, err := fi.MarshalMsg(nil) + partFI, err := partInfo.MarshalMsg(nil) if err != nil { return pi, toObjectErr(err, minioMetaMultipartBucket, partPath) } - // Write part metadata to all disks. - onlineDisks, err = writeAllDisks(ctx, onlineDisks, minioMetaMultipartBucket, partPath+".meta", partFI, writeQuorum) + // Serialize concurrent part uploads. + partIDLock := er.NewNSLock(bucket, pathJoin(object, uploadID, strconv.Itoa(partID))) + plkctx, err := partIDLock.GetLock(ctx, globalOperationTimeout) + if err != nil { + return PartInfo{}, err + } + + ctx = plkctx.Context() + defer partIDLock.Unlock(plkctx) + + // Read lock for upload id, only held while reading the upload metadata. 
+ uploadIDRLock := er.NewNSLock(bucket, pathJoin(object, uploadID)) + rlkctx, err := uploadIDRLock.GetRLock(ctx, globalOperationTimeout) if err != nil { + return PartInfo{}, err + } + ctx = rlkctx.Context() + defer uploadIDRLock.RUnlock(rlkctx) + + onlineDisks, err = er.renamePart(ctx, onlineDisks, minioMetaTmpBucket, tmpPartPath, minioMetaMultipartBucket, partPath, partFI, writeQuorum, uploadIDPath) + if err != nil { + if errors.Is(err, errUploadIDNotFound) { + return pi, toObjectErr(errUploadIDNotFound, bucket, object, uploadID) + } + if errors.Is(err, errFileNotFound) { + // An in-quorum errFileNotFound means that client stream + // prematurely closed and we do not find any xl.meta or + // part.1's - in such a scenario we must return as if client + // disconnected. This means that erasure.Encode() CreateFile() + // did not do anything. + return pi, IncompleteBody{Bucket: bucket, Object: object} + } + return pi, toObjectErr(err, minioMetaMultipartBucket, partPath) } // Return success. return PartInfo{ - PartNumber: partInfo.Number, - ETag: partInfo.ETag, - LastModified: partInfo.ModTime, - Size: partInfo.Size, - ActualSize: partInfo.ActualSize, - ChecksumCRC32: partInfo.Checksums["CRC32"], - ChecksumCRC32C: partInfo.Checksums["CRC32C"], - ChecksumSHA1: partInfo.Checksums["SHA1"], - ChecksumSHA256: partInfo.Checksums["SHA256"], + PartNumber: partInfo.Number, + ETag: partInfo.ETag, + LastModified: partInfo.ModTime, + Size: partInfo.Size, + ActualSize: partInfo.ActualSize, + ChecksumCRC32: partInfo.Checksums["CRC32"], + ChecksumCRC32C: partInfo.Checksums["CRC32C"], + ChecksumSHA1: partInfo.Checksums["SHA1"], + ChecksumSHA256: partInfo.Checksums["SHA256"], + ChecksumCRC64NVME: partInfo.Checksums["CRC64NVME"], }, nil } @@ -788,7 +798,7 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo // Does not contain currently uploaded parts by design. func (er erasureObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) { if !opts.NoAuditLog { - auditObjectErasureSet(ctx, object, &er) + auditObjectErasureSet(ctx, "GetMultipartInfo", object, &er) } result := MultipartInfo{ @@ -797,14 +807,6 @@ func (er erasureObjects) GetMultipartInfo(ctx context.Context, bucket, object, u UploadID: uploadID, } - uploadIDLock := er.NewNSLock(bucket, pathJoin(object, uploadID)) - lkctx, err := uploadIDLock.GetRLock(ctx, globalOperationTimeout) - if err != nil { - return MultipartInfo{}, err - } - ctx = lkctx.Context() - defer uploadIDLock.RUnlock(lkctx) - fi, _, err := er.checkUploadIDExists(ctx, bucket, object, uploadID, false) if err != nil { if errors.Is(err, errVolumeNotFound) { @@ -817,6 +819,60 @@ func (er erasureObjects) GetMultipartInfo(ctx context.Context, bucket, object, u return result, nil } +func (er erasureObjects) listParts(ctx context.Context, onlineDisks []StorageAPI, partPath string, readQuorum int) ([]int, error) { + g := errgroup.WithNErrs(len(onlineDisks)) + + objectParts := make([][]string, len(onlineDisks)) + // List uploaded parts from drives. 
+ for index := range onlineDisks { + g.Go(func() (err error) { + if onlineDisks[index] == nil { + return errDiskNotFound + } + objectParts[index], err = onlineDisks[index].ListDir(ctx, minioMetaMultipartBucket, minioMetaMultipartBucket, partPath, -1) + return err + }, index) + } + + if err := reduceReadQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, readQuorum); err != nil { + return nil, err + } + + partQuorumMap := make(map[int]int) + for _, driveParts := range objectParts { + partsWithMetaCount := make(map[int]int, len(driveParts)) + // part files can be either part.N or part.N.meta + for _, partPath := range driveParts { + var partNum int + if _, err := fmt.Sscanf(partPath, "part.%d", &partNum); err == nil { + partsWithMetaCount[partNum]++ + continue + } + if _, err := fmt.Sscanf(partPath, "part.%d.meta", &partNum); err == nil { + partsWithMetaCount[partNum]++ + } + } + // Include only part.N.meta files with corresponding part.N + for partNum, cnt := range partsWithMetaCount { + if cnt < 2 { + continue + } + partQuorumMap[partNum]++ + } + } + + var partNums []int + for partNum, count := range partQuorumMap { + if count < readQuorum { + continue + } + partNums = append(partNums, partNum) + } + + sort.Ints(partNums) + return partNums, nil +} + // ListObjectParts - lists all previously uploaded parts for a given // object and uploadID. Takes additional input of part-number-marker // to indicate where the listing should begin from. @@ -826,16 +882,8 @@ func (er erasureObjects) GetMultipartInfo(ctx context.Context, bucket, object, u // replied back to the client. func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, err error) { if !opts.NoAuditLog { - auditObjectErasureSet(ctx, object, &er) - } - - uploadIDLock := er.NewNSLock(bucket, pathJoin(object, uploadID)) - lkctx, err := uploadIDLock.GetRLock(ctx, globalOperationTimeout) - if err != nil { - return ListPartsInfo{}, err + auditObjectErasureSet(ctx, "ListObjectParts", object, &er) } - ctx = lkctx.Context() - defer uploadIDLock.RUnlock(lkctx) fi, _, err := er.checkUploadIDExists(ctx, bucket, object, uploadID, false) if err != nil { @@ -843,6 +891,14 @@ func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, up } uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) + if partNumberMarker < 0 { + partNumberMarker = 0 + } + + // Limit output to maxPartsList. + if maxParts > maxPartsList { + maxParts = maxPartsList + } // Populate the result stub. result.Bucket = bucket @@ -852,128 +908,183 @@ func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, up result.PartNumberMarker = partNumberMarker result.UserDefined = cloneMSS(fi.Metadata) result.ChecksumAlgorithm = fi.Metadata[hash.MinIOMultipartChecksum] + result.ChecksumType = fi.Metadata[hash.MinIOMultipartChecksumType] - if partNumberMarker < 0 { - partNumberMarker = 0 + if maxParts == 0 { + return result, nil } - // Limit output to maxPartsList. - if maxParts > maxPartsList-partNumberMarker { - maxParts = maxPartsList - partNumberMarker + onlineDisks := er.getDisks() + readQuorum := fi.ReadQuorum(er.defaultRQuorum()) + // Read Part info for all parts + partPath := pathJoin(uploadIDPath, fi.DataDir) + SlashSeparator + + // List parts in quorum + partNums, err := er.listParts(ctx, onlineDisks, partPath, readQuorum) + if err != nil { + // This means that fi.DataDir, is not yet populated so we + // return an empty response. 
+ if errors.Is(err, errFileNotFound) { + return result, nil + } + return result, toObjectErr(err, bucket, object, uploadID) } - if maxParts == 0 { + if len(partNums) == 0 { return result, nil } - // Read Part info for all parts - partPath := pathJoin(uploadIDPath, fi.DataDir) + "/" - req := ReadMultipleReq{ - Bucket: minioMetaMultipartBucket, - Prefix: partPath, - MaxSize: 1 << 20, // Each part should realistically not be > 1MiB. - MaxResults: maxParts + 1, - MetadataOnly: true, + start := objectPartIndexNums(partNums, partNumberMarker) + if partNumberMarker > 0 && start == -1 { + // Marker not present among what is present on the + // server, we return an empty list. + return result, nil } - start := partNumberMarker + 1 - end := start + maxParts + if partNumberMarker > 0 && start != -1 { + if start+1 >= len(partNums) { + // Marker indicates that we are the end + // of the list, so we simply return empty + return result, nil + } - // Parts are 1 based, so index 0 is part one, etc. - for i := start; i <= end; i++ { - req.Files = append(req.Files, fmt.Sprintf("part.%d.meta", i)) + partNums = partNums[start+1:] } - var disk StorageAPI - disks := er.getOnlineLocalDisks() - if len(disks) == 0 { - // using er.getOnlineLocalDisks() has one side-affect where - // on a pooled setup all disks are remote, add a fallback - disks = er.getOnlineDisks() + result.Parts = make([]PartInfo, 0, len(partNums)) + partMetaPaths := make([]string, len(partNums)) + for i, part := range partNums { + partMetaPaths[i] = pathJoin(partPath, fmt.Sprintf("part.%d.meta", part)) } - for _, disk = range disks { - if disk == nil { - continue - } + // Read parts in quorum + objParts, err := readParts(ctx, onlineDisks, minioMetaMultipartBucket, partMetaPaths, + partNums, readQuorum) + if err != nil { + return result, toObjectErr(err, bucket, object, uploadID) + } - if !disk.IsOnline() { - continue + count := maxParts + for _, objPart := range objParts { + result.Parts = append(result.Parts, PartInfo{ + PartNumber: objPart.Number, + LastModified: objPart.ModTime, + ETag: objPart.ETag, + Size: objPart.Size, + ActualSize: objPart.ActualSize, + ChecksumCRC32: objPart.Checksums["CRC32"], + ChecksumCRC32C: objPart.Checksums["CRC32C"], + ChecksumSHA1: objPart.Checksums["SHA1"], + ChecksumSHA256: objPart.Checksums["SHA256"], + ChecksumCRC64NVME: objPart.Checksums["CRC64NVME"], + }) + count-- + if count == 0 { + break } + } - break + if len(objParts) > len(result.Parts) { + result.IsTruncated = true + // Make sure to fill next part number marker if IsTruncated is true for subsequent listing. + result.NextPartNumberMarker = result.Parts[len(result.Parts)-1].PartNumber } - g := errgroup.WithNErrs(len(req.Files)).WithConcurrency(32) + return result, nil +} - partsInfo := make([]ObjectPartInfo, len(req.Files)) - for i, file := range req.Files { - file := file - partN := i + start - i := i +func readParts(ctx context.Context, disks []StorageAPI, bucket string, partMetaPaths []string, partNumbers []int, readQuorum int) ([]ObjectPartInfo, error) { + g := errgroup.WithNErrs(len(disks)) - g.Go(func() error { - buf, err := disk.ReadAll(ctx, minioMetaMultipartBucket, pathJoin(partPath, file)) - if err != nil { - return err + objectPartInfos := make([][]*ObjectPartInfo, len(disks)) + // Rename file on all underlying storage disks. + for index := range disks { + g.Go(func() (err error) { + if disks[index] == nil { + return errDiskNotFound } + objectPartInfos[index], err = disks[index].ReadParts(ctx, bucket, partMetaPaths...) 
+ return err + }, index) + } - var pfi FileInfo - _, err = pfi.UnmarshalMsg(buf) - if err != nil { - return err - } + if err := reduceReadQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, readQuorum); err != nil { + return nil, err + } - if len(pfi.Parts) != 1 { - return errors.New("invalid number of parts expected 1, got 0") + partInfosInQuorum := make([]ObjectPartInfo, len(partMetaPaths)) + for pidx := range partMetaPaths { + // partMetaQuorumMap uses + // - path/to/part.N as key to collate errors from failed drives. + // - part ETag to collate part metadata + partMetaQuorumMap := make(map[string]int, len(partNumbers)) + var pinfos []*ObjectPartInfo + for idx := range disks { + if len(objectPartInfos[idx]) != len(partMetaPaths) { + partMetaQuorumMap[partMetaPaths[pidx]]++ + continue } - if partN != pfi.Parts[0].Number { - return fmt.Errorf("part.%d.meta has incorrect corresponding part number: expected %d, got %d", partN, partN, pfi.Parts[0].Number) + pinfo := objectPartInfos[idx][pidx] + if pinfo != nil && pinfo.ETag != "" { + pinfos = append(pinfos, pinfo) + partMetaQuorumMap[pinfo.ETag]++ + continue } + partMetaQuorumMap[partMetaPaths[pidx]]++ + } - partsInfo[i] = pfi.Parts[0] - return nil - }, i) - } - - g.Wait() - - for _, part := range partsInfo { - if part.Number != 0 && !part.ModTime.IsZero() { - fi.AddObjectPart(part.Number, part.ETag, part.Size, part.ActualSize, part.ModTime, part.Index, part.Checksums) + var maxQuorum int + var maxETag string + var maxPartMeta string + for etag, quorum := range partMetaQuorumMap { + if maxQuorum < quorum { + maxQuorum = quorum + maxETag = etag + maxPartMeta = etag + } + } + // found is a representative ObjectPartInfo which either has the maximally occurring ETag or an error. + var found *ObjectPartInfo + for _, pinfo := range pinfos { + if pinfo == nil { + continue + } + if maxETag != "" && pinfo.ETag == maxETag { + found = pinfo + break + } + if pinfo.ETag == "" && maxPartMeta != "" && path.Base(maxPartMeta) == fmt.Sprintf("part.%d.meta", pinfo.Number) { + found = pinfo + break + } } - } - // Only parts with higher part numbers will be listed. - parts := fi.Parts - result.Parts = make([]PartInfo, 0, len(parts)) - for _, part := range parts { - result.Parts = append(result.Parts, PartInfo{ - PartNumber: part.Number, - ETag: part.ETag, - LastModified: part.ModTime, - ActualSize: part.ActualSize, - Size: part.Size, - ChecksumCRC32: part.Checksums["CRC32"], - ChecksumCRC32C: part.Checksums["CRC32C"], - ChecksumSHA1: part.Checksums["SHA1"], - ChecksumSHA256: part.Checksums["SHA256"], - }) - if len(result.Parts) >= maxParts { - break + if found != nil && found.ETag != "" && partMetaQuorumMap[maxETag] >= readQuorum { + partInfosInQuorum[pidx] = *found + continue + } + partInfosInQuorum[pidx] = ObjectPartInfo{ + Number: partNumbers[pidx], + Error: InvalidPart{ + PartNumber: partNumbers[pidx], + }.Error(), } } + return partInfosInQuorum, nil +} - // If listed entries are more than maxParts, we set IsTruncated as true. - if len(parts) > len(result.Parts) { - result.IsTruncated = true - // Make sure to fill next part number marker if IsTruncated is - // true for subsequent listing. 
- nextPartNumberMarker := result.Parts[len(result.Parts)-1].PartNumber - result.NextPartNumberMarker = nextPartNumberMarker +func objPartToPartErr(part ObjectPartInfo) error { + if strings.Contains(part.Error, "file not found") { + return InvalidPart{PartNumber: part.Number} } - return result, nil + if strings.Contains(part.Error, "Specified part could not be found") { + return InvalidPart{PartNumber: part.Number} + } + if strings.Contains(part.Error, errErasureReadQuorum.Error()) { + return errErasureReadQuorum + } + return errors.New(part.Error) } // CompleteMultipartUpload - completes an ongoing multipart @@ -984,18 +1095,35 @@ func (er erasureObjects) ListObjectParts(ctx context.Context, bucket, object, up // Implements S3 compatible Complete multipart API. func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, err error) { if !opts.NoAuditLog { - auditObjectErasureSet(ctx, object, &er) + auditObjectErasureSet(ctx, "CompleteMultipartUpload", object, &er) } - // Hold write locks to verify uploaded parts, also disallows any - // parallel PutObjectPart() requests. - uploadIDLock := er.NewNSLock(bucket, pathJoin(object, uploadID)) - wlkctx, err := uploadIDLock.GetLock(ctx, globalOperationTimeout) - if err != nil { - return oi, err + if opts.CheckPrecondFn != nil { + if !opts.NoLock { + ns := er.NewNSLock(bucket, object) + lkctx, err := ns.GetLock(ctx, globalOperationTimeout) + if err != nil { + return ObjectInfo{}, err + } + ctx = lkctx.Context() + defer ns.Unlock(lkctx) + opts.NoLock = true + } + + obj, err := er.getObjectInfo(ctx, bucket, object, opts) + if err == nil && opts.CheckPrecondFn(obj) { + return ObjectInfo{}, PreConditionFailed{} + } + if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) { + return ObjectInfo{}, err + } + + // if object doesn't exist return error for If-Match conditional requests + // If-None-Match should be allowed to proceed for non-existent objects + if err != nil && opts.HasIfMatch && (isErrObjectNotFound(err) || isErrVersionNotFound(err)) { + return ObjectInfo{}, err + } } - ctx = wlkctx.Context() - defer uploadIDLock.Unlock(wlkctx) fi, partsMetadata, err := er.checkUploadIDExists(ctx, bucket, object, uploadID, true) if err != nil { @@ -1008,42 +1136,41 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str uploadIDPath := er.getUploadIDDir(bucket, object, uploadID) onlineDisks := er.getDisks() writeQuorum := fi.WriteQuorum(er.defaultWQuorum()) + readQuorum := fi.ReadQuorum(er.defaultRQuorum()) // Read Part info for all parts - partPath := pathJoin(uploadIDPath, fi.DataDir) + "/" - req := ReadMultipleReq{ - Bucket: minioMetaMultipartBucket, - Prefix: partPath, - MaxSize: 1 << 20, // Each part should realistically not be > 1MiB. 
- Files: make([]string, 0, len(parts)), - AbortOn404: true, - MetadataOnly: true, - } - for _, part := range parts { - req.Files = append(req.Files, fmt.Sprintf("part.%d.meta", part.PartNumber)) - } - partInfoFiles, err := readMultipleFiles(ctx, onlineDisks, req, writeQuorum) + partPath := pathJoin(uploadIDPath, fi.DataDir) + SlashSeparator + partMetaPaths := make([]string, len(parts)) + partNumbers := make([]int, len(parts)) + for idx, part := range parts { + partMetaPaths[idx] = pathJoin(partPath, fmt.Sprintf("part.%d.meta", part.PartNumber)) + partNumbers[idx] = part.PartNumber + } + + partInfoFiles, err := readParts(ctx, onlineDisks, minioMetaMultipartBucket, partMetaPaths, partNumbers, readQuorum) if err != nil { return oi, err } + if len(partInfoFiles) != len(parts) { // Should only happen through internal error err := fmt.Errorf("unexpected part result count: %d, want %d", len(partInfoFiles), len(parts)) - logger.LogIf(ctx, err) + bugLogIf(ctx, err) return oi, toObjectErr(err, bucket, object) } // Checksum type set when upload started. var checksumType hash.ChecksumType if cs := fi.Metadata[hash.MinIOMultipartChecksum]; cs != "" { - checksumType = hash.NewChecksumType(cs) + checksumType = hash.NewChecksumType(cs, fi.Metadata[hash.MinIOMultipartChecksumType]) if opts.WantChecksum != nil && !opts.WantChecksum.Type.Is(checksumType) { return oi, InvalidArgument{ Bucket: bucket, Object: fi.Name, - Err: fmt.Errorf("checksum type mismatch"), + Err: fmt.Errorf("checksum type mismatch. got %q (%s) expected %q (%s)", checksumType.String(), checksumType.ObjType(), opts.WantChecksum.Type.String(), opts.WantChecksum.Type.ObjType()), } } + checksumType |= hash.ChecksumMultipart | hash.ChecksumIncludesMultipart } var checksumCombined []byte @@ -1051,7 +1178,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str // However, in case of encryption, the persisted part ETags don't match // what we have sent to the client during PutObjectPart. The reason is // that ETags are encrypted. Hence, the client will send a list of complete - // part ETags of which non can match the ETag of any part. For example + // part ETags of which may not match the ETag of any part. For example // ETag (client): 30902184f4e62dd8f98f0aaff810c626 // ETag (server-internal): 20000f00ce5dc16e3f3b124f586ae1d88e9caa1c598415c2759bbb50e84a59f630902184f4e62dd8f98f0aaff810c626 // @@ -1087,35 +1214,22 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str opts.EncryptFn = metadataEncrypter(key) } - for i, part := range partInfoFiles { - partID := parts[i].PartNumber - if part.Error != "" || !part.Exists { - return oi, InvalidPart{ - PartNumber: partID, - } - } - - var pfi FileInfo - _, err := pfi.UnmarshalMsg(part.Data) - if err != nil { - // Maybe crash or similar. 
- logger.LogIf(ctx, err) - return oi, InvalidPart{ - PartNumber: partID, - } + for idx, part := range partInfoFiles { + if part.Error != "" { + err = objPartToPartErr(part) + bugLogIf(ctx, err) + return oi, err } - partI := pfi.Parts[0] - partNumber := partI.Number - if partID != partNumber { - logger.LogIf(ctx, fmt.Errorf("part.%d.meta has incorrect corresponding part number: expected %d, got %d", partID, partID, partI.Number)) + if parts[idx].PartNumber != part.Number { + internalLogIf(ctx, fmt.Errorf("part.%d.meta has incorrect corresponding part number: expected %d, got %d", parts[idx].PartNumber, parts[idx].PartNumber, part.Number)) return oi, InvalidPart{ - PartNumber: partID, + PartNumber: part.Number, } } // Add the current part. - fi.AddObjectPart(partI.Number, partI.ETag, partI.Size, partI.ActualSize, partI.ModTime, partI.Index, partI.Checksums) + fi.AddObjectPart(part.Number, part.ETag, part.Size, part.ActualSize, part.ModTime, part.Index, part.Checksums) } // Calculate full object size. @@ -1134,6 +1248,9 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str // Allocate parts similar to incoming slice. fi.Parts = make([]ObjectPartInfo, len(parts)) + var checksum hash.Checksum + checksum.Type = checksumType + // Validate each part and then commit to disk. for i, part := range parts { partIdx := objectPartIndex(currentFI.Parts, part.PartNumber) @@ -1167,10 +1284,11 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str } } wantCS := map[string]string{ - hash.ChecksumCRC32.String(): part.ChecksumCRC32, - hash.ChecksumCRC32C.String(): part.ChecksumCRC32C, - hash.ChecksumSHA1.String(): part.ChecksumSHA1, - hash.ChecksumSHA256.String(): part.ChecksumSHA256, + hash.ChecksumCRC32.String(): part.ChecksumCRC32, + hash.ChecksumCRC32C.String(): part.ChecksumCRC32C, + hash.ChecksumSHA1.String(): part.ChecksumSHA1, + hash.ChecksumSHA256.String(): part.ChecksumSHA256, + hash.ChecksumCRC64NVME.String(): part.ChecksumCRC64NVME, } if wantCS[checksumType.String()] != crc { return oi, InvalidPart{ @@ -1185,6 +1303,15 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str PartNumber: part.PartNumber, } } + if checksumType.FullObjectRequested() { + if err := checksum.AddPart(*cs, expPart.ActualSize); err != nil { + return oi, InvalidPart{ + PartNumber: part.PartNumber, + ExpETag: "", + GotETag: err.Error(), + } + } + } checksumCombined = append(checksumCombined, cs.Raw...) } @@ -1215,30 +1342,44 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str } if opts.WantChecksum != nil { - err := opts.WantChecksum.Matches(checksumCombined) - if err != nil { - return oi, err + if checksumType.FullObjectRequested() { + if opts.WantChecksum.Encoded != checksum.Encoded { + err := hash.ChecksumMismatch{ + Want: opts.WantChecksum.Encoded, + Got: checksum.Encoded, + } + return oi, err + } + } else { + err := opts.WantChecksum.Matches(checksumCombined, len(parts)) + if err != nil { + return oi, err + } } } - // Hold namespace to complete the transaction - lk := er.NewNSLock(bucket, object) - lkctx, err := lk.GetLock(ctx, globalOperationTimeout) - if err != nil { - return oi, err + // Accept encrypted checksum from incoming request. 
+ if opts.UserDefined[ReplicationSsecChecksumHeader] != "" { + if v, err := base64.StdEncoding.DecodeString(opts.UserDefined[ReplicationSsecChecksumHeader]); err == nil { + fi.Checksum = v + } + delete(opts.UserDefined, ReplicationSsecChecksumHeader) } - ctx = lkctx.Context() - defer lk.Unlock(lkctx) if checksumType.IsSet() { checksumType |= hash.ChecksumMultipart | hash.ChecksumIncludesMultipart - cs := hash.NewChecksumFromData(checksumType, checksumCombined) - fi.Checksum = cs.AppendTo(nil, checksumCombined) + checksum.Type = checksumType + if !checksumType.FullObjectRequested() { + checksum = *hash.NewChecksumFromData(checksumType, checksumCombined) + } + fi.Checksum = checksum.AppendTo(nil, checksumCombined) if opts.EncryptFn != nil { fi.Checksum = opts.EncryptFn("object-checksum", fi.Checksum) } } - delete(fi.Metadata, hash.MinIOMultipartChecksum) // Not needed in final object. + // Remove superfluous internal headers. + delete(fi.Metadata, hash.MinIOMultipartChecksum) + delete(fi.Metadata, hash.MinIOMultipartChecksumType) // Save the final object size and modtime. fi.Size = objectSize @@ -1258,7 +1399,13 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str } // Save the consolidated actual size. - fi.Metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10) + if opts.ReplicationRequest { + if v := opts.UserDefined[ReservedMetadataPrefix+"Actual-Object-Size"]; v != "" { + fi.Metadata[ReservedMetadataPrefix+"actual-size"] = v + } + } else { + fi.Metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10) + } if opts.DataMovement { fi.SetDataMov() @@ -1277,10 +1424,10 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str } } + paths := make([]string, 0, len(currentFI.Parts)) // Remove parts that weren't present in CompleteMultipartUpload request. for _, curpart := range currentFI.Parts { - // Remove part.meta which is not needed anymore. - er.removePartMeta(bucket, object, uploadID, currentFI.DataDir, curpart.Number) + paths = append(paths, pathJoin(uploadIDPath, currentFI.DataDir, fmt.Sprintf("part.%d.meta", curpart.Number))) if objectPartIndex(fi.Parts, curpart.Number) == -1 { // Delete the missing part files. e.g, @@ -1289,10 +1436,22 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str // Request 3: PutObjectPart 2 // Request 4: CompleteMultipartUpload --part 2 // N.B. 1st part is not present. This part should be removed from the storage. - er.removeObjectPart(bucket, object, uploadID, currentFI.DataDir, curpart.Number) + paths = append(paths, pathJoin(uploadIDPath, currentFI.DataDir, fmt.Sprintf("part.%d", curpart.Number))) } } + if !opts.NoLock { + lk := er.NewNSLock(bucket, object) + lkctx, err := lk.GetLock(ctx, globalOperationTimeout) + if err != nil { + return ObjectInfo{}, err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx) + } + + er.cleanupMultipartPath(ctx, paths...) // cleanup all part.N.meta, and skipped part.N's before final rename(). + defer func() { if err == nil { er.deleteAll(context.Background(), minioMetaMultipartBucket, uploadIDPath) @@ -1300,24 +1459,28 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str }() // Rename the multipart object to final location. 
- onlineDisks, versionsDisparity, err := renameData(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, + onlineDisks, versions, oldDataDir, err := renameData(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, bucket, object, writeQuorum) if err != nil { - return oi, toObjectErr(err, bucket, object) + return oi, toObjectErr(err, bucket, object, uploadID) + } + + if err = er.commitRenameDataDir(ctx, bucket, object, oldDataDir, onlineDisks, writeQuorum); err != nil { + return ObjectInfo{}, toObjectErr(err, bucket, object, uploadID) } - if !opts.Speedtest && versionsDisparity { - globalMRFState.addPartialOp(partialOperation{ - bucket: bucket, - object: object, - queued: time.Now(), - allVersions: true, - setIndex: er.setIndex, - poolIndex: er.poolIndex, + if !opts.Speedtest && len(versions) > 0 { + globalMRFState.addPartialOp(PartialOperation{ + Bucket: bucket, + Object: object, + Queued: time.Now(), + Versions: versions, + SetIndex: er.setIndex, + PoolIndex: er.poolIndex, }) } - if !opts.Speedtest && !versionsDisparity { + if !opts.Speedtest && len(versions) == 0 { // Check if there is any offline disk and add it to the MRF list for _, disk := range onlineDisks { if disk != nil && disk.IsOnline() { @@ -1328,7 +1491,7 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str } } - for i := 0; i < len(onlineDisks); i++ { + for i := range len(onlineDisks) { if onlineDisks[i] != nil && onlineDisks[i].IsOnline() { // Object info is the same in all disks, so we can pick // the first meta from online disk @@ -1353,28 +1516,13 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str // operation. func (er erasureObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (err error) { if !opts.NoAuditLog { - auditObjectErasureSet(ctx, object, &er) - } - - lk := er.NewNSLock(bucket, pathJoin(object, uploadID)) - lkctx, err := lk.GetLock(ctx, globalOperationTimeout) - if err != nil { - return err - } - ctx = lkctx.Context() - defer lk.Unlock(lkctx) - - // Validates if upload ID exists. - if _, _, err = er.checkUploadIDExists(ctx, bucket, object, uploadID, false); err != nil { - if errors.Is(err, errVolumeNotFound) { - return toObjectErr(err, bucket) - } - return toObjectErr(err, bucket, object, uploadID) + auditObjectErasureSet(ctx, "AbortMultipartUpload", object, &er) } // Cleanup all uploaded parts. - er.deleteAll(ctx, minioMetaMultipartBucket, er.getUploadIDDir(bucket, object, uploadID)) + defer er.deleteAll(ctx, minioMetaMultipartBucket, er.getUploadIDDir(bucket, object, uploadID)) - // Successfully purged. - return nil + // Validates if upload ID exists. + _, _, err = er.checkUploadIDExists(ctx, bucket, object, uploadID, false) + return toObjectErr(err, bucket, object, uploadID) } diff --git a/cmd/erasure-object-conditional_test.go b/cmd/erasure-object-conditional_test.go new file mode 100644 index 0000000000000..8268d40c3abfb --- /dev/null +++ b/cmd/erasure-object-conditional_test.go @@ -0,0 +1,150 @@ +// Copyright (c) 2015-2025 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+//
+// This program is distributed in the hope that it will be useful
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package cmd
+
+import (
+	"bytes"
+	"context"
+	"testing"
+
+	xhttp "github.com/minio/minio/internal/http"
+)
+
+// TestPutObjectConditionalWithReadQuorumFailure tests that conditional
+// PutObject operations (with if-match/if-none-match) behave correctly when read quorum
+// cannot be reached.
+//
+// Related to: https://github.com/minio/minio/issues/21603
+//
+// Should return an error when read quorum cannot
+// be reached, as we cannot reliably determine if the precondition is met.
+func TestPutObjectConditionalWithReadQuorumFailure(t *testing.T) {
+	ctx := context.Background()
+
+	obj, fsDirs, err := prepareErasure16(ctx)
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer obj.Shutdown(context.Background())
+	defer removeRoots(fsDirs)
+
+	z := obj.(*erasureServerPools)
+	xl := z.serverPools[0].sets[0]
+
+	bucket := "test-bucket"
+	object := "test-object"
+
+	err = obj.MakeBucket(ctx, bucket, MakeBucketOptions{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Put an initial object so it exists
+	_, err = obj.PutObject(ctx, bucket, object,
+		mustGetPutObjReader(t, bytes.NewReader([]byte("initial-value")),
+			int64(len("initial-value")), "", ""), ObjectOptions{})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Get object info to capture the ETag
+	objInfo, err := obj.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
+	if err != nil {
+		t.Fatal(err)
+	}
+	existingETag := objInfo.ETag
+
+	// Simulate read quorum failure by taking enough disks offline
+	// With 16 disks (EC 8+8), read quorum is 9. Taking 8 disks offline leaves only 8,
+	// which is below read quorum.
+	erasureDisks := xl.getDisks()
+	z.serverPools[0].erasureDisksMu.Lock()
+	xl.getDisks = func() []StorageAPI {
+		for i := range erasureDisks[:8] {
+			erasureDisks[i] = nil
+		}
+		return erasureDisks
+	}
+	z.serverPools[0].erasureDisksMu.Unlock()
+
+	t.Run("if-none-match with read quorum failure", func(t *testing.T) {
+		// Test Case 1: if-none-match (create only if doesn't exist)
+		// With if-none-match: *, this should only succeed if object doesn't exist.
+		// Since read quorum fails, we can't determine if object exists.
+		opts := ObjectOptions{
+			UserDefined: map[string]string{
+				xhttp.IfNoneMatch: "*",
+			},
+			CheckPrecondFn: func(oi ObjectInfo) bool {
+				// Precondition fails if object exists (ETag is not empty)
+				return oi.ETag != ""
+			},
+		}
+
+		_, err := obj.PutObject(ctx, bucket, object,
+			mustGetPutObjReader(t, bytes.NewReader([]byte("new-value")),
+				int64(len("new-value")), "", ""), opts)
+		if !isErrReadQuorum(err) {
+			t.Errorf("Expected read quorum error when if-none-match is used with quorum failure, got: %v", err)
+		}
+	})
+
+	t.Run("if-match with read quorum failure", func(t *testing.T) {
+		// Test Case 2: if-match (update only if ETag matches)
+		// With if-match: <etag>, this should only succeed if object exists with matching ETag.
+		// Since read quorum fails, we can't determine if object exists or ETag matches.
+ opts := ObjectOptions{ + UserDefined: map[string]string{ + xhttp.IfMatch: existingETag, + }, + CheckPrecondFn: func(oi ObjectInfo) bool { + // Precondition fails if ETag doesn't match + return oi.ETag != existingETag + }, + } + + _, err := obj.PutObject(ctx, bucket, object, + mustGetPutObjReader(t, bytes.NewReader([]byte("updated-value")), + int64(len("updated-value")), "", ""), opts) + if !isErrReadQuorum(err) { + t.Errorf("Expected read quorum error when if-match is used with quorum failure, got: %v", err) + } + }) + + t.Run("if-match wrong etag with read quorum failure", func(t *testing.T) { + // Test Case 3: if-match with wrong ETag + // Even if the ETag doesn't match, we should still get read quorum error + // because we can't read the object to check the condition. + opts := ObjectOptions{ + UserDefined: map[string]string{ + xhttp.IfMatch: "wrong-etag", + }, + CheckPrecondFn: func(oi ObjectInfo) bool { + // Precondition fails if ETag doesn't match + return oi.ETag != "wrong-etag" + }, + } + + _, err := obj.PutObject(ctx, bucket, object, + mustGetPutObjReader(t, bytes.NewReader([]byte("should-fail")), + int64(len("should-fail")), "", ""), opts) + if !isErrReadQuorum(err) { + t.Errorf("Expected read quorum error when if-match is used with quorum failure (even with wrong ETag), got: %v", err) + } + }) +} diff --git a/cmd/erasure-object.go b/cmd/erasure-object.go index a22d0416ee4f0..78fbe6f098819 100644 --- a/cmd/erasure-object.go +++ b/cmd/erasure-object.go @@ -20,12 +20,16 @@ package cmd import ( "bytes" "context" + "encoding/base64" "errors" "fmt" "io" + "maps" "net/http" "path" "runtime" + "slices" + "sort" "strconv" "strings" "sync" @@ -40,13 +44,14 @@ import ( "github.com/minio/minio/internal/config/storageclass" "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/event" + "github.com/minio/minio/internal/grid" "github.com/minio/minio/internal/hash" xhttp "github.com/minio/minio/internal/http" xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/mimedb" - "github.com/minio/pkg/v2/sync/errgroup" - "github.com/minio/pkg/v2/wildcard" + "github.com/minio/pkg/v3/mimedb" + "github.com/minio/pkg/v3/sync/errgroup" + "github.com/minio/sio" ) // list all errors which can be ignored in object operations. @@ -68,7 +73,7 @@ func countOnlineDisks(onlineDisks []StorageAPI) (online int) { // update metadata. func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, err error) { if !dstOpts.NoAuditLog { - auditObjectErasureSet(ctx, dstObject, &er) + auditObjectErasureSet(ctx, "CopyObject", dstObject, &er) } // This call shouldn't be used for anything other than metadata updates or adding self referential versions. 
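The new test above pins down the expected behaviour for conditional writes when the erasure set cannot reach read quorum: the precondition cannot be evaluated, so the write must fail rather than guess. Below is a minimal standalone sketch of that decision rule; the types and helper names are hypothetical stand-ins for illustration, not MinIO APIs.

package sketch

import "errors"

// Hypothetical, simplified stand-ins for the server-side types.
var errReadQuorum = errors.New("read quorum not reached")

type headResult struct {
	ETag  string
	Found bool
}

// decideConditionalWrite mirrors the rule the test asserts: an If-Match or
// If-None-Match write may only proceed when the current state of the object
// is known. A read-quorum failure makes that state unknowable, so the write
// is rejected instead of being silently applied.
func decideConditionalWrite(cur headResult, headErr error, ifMatch, ifNoneMatch string) error {
	if headErr != nil {
		if errors.Is(headErr, errReadQuorum) {
			return headErr // cannot evaluate the precondition reliably
		}
		if !cur.Found && ifNoneMatch == "*" {
			return nil // object absent, create-only write may proceed
		}
		return headErr // If-Match against a missing object cannot succeed
	}
	if ifNoneMatch == "*" {
		return errors.New("precondition failed: object already exists")
	}
	if ifMatch != "" && ifMatch != cur.ETag {
		return errors.New("precondition failed: etag mismatch")
	}
	return nil
}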
@@ -95,15 +100,19 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d if srcOpts.VersionID != "" { metaArr, errs = readAllFileInfo(ctx, storageDisks, "", srcBucket, srcObject, srcOpts.VersionID, true, false) } else { - metaArr, errs = readAllXL(ctx, storageDisks, srcBucket, srcObject, true, false, true) + metaArr, errs = readAllXL(ctx, storageDisks, srcBucket, srcObject, true, false) } readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount) if err != nil { - if errors.Is(err, errErasureReadQuorum) && !strings.HasPrefix(srcBucket, minioMetaBucket) { + if shouldCheckForDangling(err, errs, srcBucket) { _, derr := er.deleteIfDangling(context.Background(), srcBucket, srcObject, metaArr, errs, nil, srcOpts) - if derr != nil { - err = derr + if derr == nil { + if srcOpts.VersionID != "" { + err = errFileVersionNotFound + } else { + err = errFileNotFound + } } } return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject) @@ -145,11 +154,12 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d modTime = dstOpts.MTime fi.ModTime = dstOpts.MTime } + // check inline before overwriting metadata. + inlineData := fi.InlineData() fi.Metadata = srcInfo.UserDefined srcInfo.UserDefined["etag"] = srcInfo.ETag - inlineData := fi.InlineData() freeVersionID := fi.TierFreeVersionID() freeVersionMarker := fi.TierFreeVersion() @@ -192,7 +202,7 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d // Read(Closer). When err != nil, the returned reader is always nil. func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, err error) { if !opts.NoAuditLog { - auditObjectErasureSet(ctx, object, &er) + auditObjectErasureSet(ctx, "GetObject", object, &er) } var unlockOnDefer bool @@ -244,6 +254,23 @@ func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object stri }, toObjectErr(errMethodNotAllowed, bucket, object) } + // Set NoDecryption for SSE-C objects and if replication request + if crypto.SSEC.IsEncrypted(objInfo.UserDefined) && opts.ReplicationRequest { + opts.NoDecryption = true + } + + if objInfo.Size == 0 { + if _, _, err := rs.GetOffsetLength(objInfo.Size); err != nil { + // Make sure to return object info to provide extra information. + return &GetObjectReader{ + ObjInfo: objInfo, + }, err + } + + // Zero byte objects don't even need to further initialize pipes etc. + return NewGetObjectReaderFromReader(bytes.NewReader(nil), objInfo, opts) + } + if objInfo.IsRemote() { gr, err := getTransitionedObjectReader(ctx, bucket, object, rs, h, objInfo, opts) if err != nil { @@ -253,18 +280,13 @@ func (er erasureObjects) GetObjectNInfo(ctx context.Context, bucket, object stri return gr.WithCleanupFuncs(nsUnlocker), nil } - if objInfo.Size == 0 { - // Zero byte objects don't even need to further initialize pipes etc. 
- return NewGetObjectReaderFromReader(bytes.NewReader(nil), objInfo, opts) - } - - fn, off, length, err := NewGetObjectReader(rs, objInfo, opts) + fn, off, length, err := NewGetObjectReader(rs, objInfo, opts, h) if err != nil { return nil, err } if unlockOnDefer { - unlockOnDefer = fi.InlineData() + unlockOnDefer = fi.InlineData() || len(fi.Data) > 0 } pr, pw := xioutil.WaitPipe() @@ -376,24 +398,16 @@ func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, obje // that we have some parts or data blocks missing or corrupted // - attempt a heal to successfully heal them for future calls. if written == partLength { - var scan madmin.HealScanMode - switch { - case errors.Is(err, errFileNotFound): - scan = madmin.HealNormalScan - case errors.Is(err, errFileCorrupt): - scan = madmin.HealDeepScan - } - switch scan { - case madmin.HealNormalScan, madmin.HealDeepScan: + if errors.Is(err, errFileNotFound) || errors.Is(err, errFileCorrupt) { healOnce.Do(func() { - globalMRFState.addPartialOp(partialOperation{ - bucket: bucket, - object: object, - versionID: fi.VersionID, - queued: time.Now(), - setIndex: er.setIndex, - poolIndex: er.poolIndex, - scanMode: scan, + globalMRFState.addPartialOp(PartialOperation{ + Bucket: bucket, + Object: object, + VersionID: fi.VersionID, + Queued: time.Now(), + SetIndex: er.setIndex, + PoolIndex: er.poolIndex, + BitrotScan: errors.Is(err, errFileCorrupt), }) }) // Healing is triggered and we have written @@ -407,11 +421,6 @@ func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, obje return toObjectErr(err, bucket, object) } } - for i, r := range readers { - if r == nil { - onlineDisks[i] = OfflineDisk - } - } // Track total bytes read from disk and written to the client. totalBytesRead += partLength // partOffset will be valid only for the first part, hence reset it to 0 for @@ -425,7 +434,7 @@ func (er erasureObjects) getObjectWithFileInfo(ctx context.Context, bucket, obje // GetObjectInfo - reads object metadata and replies back ObjectInfo. 
func (er erasureObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (info ObjectInfo, err error) { if !opts.NoAuditLog { - auditObjectErasureSet(ctx, object, &er) + auditObjectErasureSet(ctx, "GetObjectInfo", object, &er) } if !opts.NoLock { @@ -442,7 +451,7 @@ func (er erasureObjects) GetObjectInfo(ctx context.Context, bucket, object strin return er.getObjectInfo(ctx, bucket, object, opts) } -func auditDanglingObjectDeletion(ctx context.Context, bucket, object, versionID string, tags map[string]interface{}) { +func auditDanglingObjectDeletion(ctx context.Context, bucket, object, versionID string, tags map[string]string) { if len(logger.AuditTargets()) == 0 { return } @@ -458,137 +467,114 @@ func auditDanglingObjectDeletion(ctx context.Context, bucket, object, versionID auditLogInternal(ctx, opts) } -func joinErrs(errs []error) []string { - s := make([]string, len(errs)) +func joinErrs(errs []error) string { + var s string for i := range s { + if s != "" { + s += "," + } if errs[i] == nil { - s[i] = "" + s += "" } else { - s[i] = errs[i].Error() + s += errs[i].Error() } } return s } -func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object string, metaArr []FileInfo, errs []error, dataErrs []error, opts ObjectOptions) (FileInfo, error) { - var err error - m, ok := isObjectDangling(metaArr, errs, dataErrs) - if ok { - tags := make(map[string]interface{}, 4) - tags["set"] = er.setIndex - tags["pool"] = er.poolIndex - tags["merrs"] = joinErrs(errs) - tags["derrs"] = joinErrs(dataErrs) - if m.IsValid() { - tags["size"] = m.Size - tags["mtime"] = m.ModTime.Format(http.TimeFormat) - tags["data"] = m.Erasure.DataBlocks - tags["parity"] = m.Erasure.ParityBlocks - } else { - tags["invalid-meta"] = true - tags["data"] = er.setDriveCount - er.defaultParityCount - tags["parity"] = er.defaultParityCount - } - - // count the number of offline disks - offline := 0 - for i := 0; i < max(len(errs), len(dataErrs)); i++ { - if i < len(errs) && errors.Is(errs[i], errDiskNotFound) || i < len(dataErrs) && errors.Is(dataErrs[i], errDiskNotFound) { - offline++ - } - } - if offline > 0 { - tags["offline"] = offline - } - - _, file, line, cok := runtime.Caller(1) - if cok { - tags["caller"] = fmt.Sprintf("%s:%d", file, line) - } - - defer auditDanglingObjectDeletion(ctx, bucket, object, m.VersionID, tags) - - err = errFileNotFound - if opts.VersionID != "" { - err = errFileVersionNotFound - } - - fi := FileInfo{ - VersionID: m.VersionID, - } - if opts.VersionID != "" { - fi.VersionID = opts.VersionID - } - fi.SetTierFreeVersionID(mustGetUUID()) - disks := er.getDisks() - g := errgroup.WithNErrs(len(disks)) - for index := range disks { - index := index - g.Go(func() error { - if disks[index] == nil { - return errDiskNotFound +func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object string, metaArr []FileInfo, errs []error, dataErrsByPart map[int][]int, opts ObjectOptions) (FileInfo, error) { + m, ok := isObjectDangling(metaArr, errs, dataErrsByPart) + if !ok { + // We only come here if we cannot figure out if the object + // can be deleted safely, in such a scenario return ReadQuorum error. 
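deleteIfDangling now records its audit context as plain string tags (strconv/fmt for numeric fields, one comma-joined value for the per-disk metadata errors) instead of map[string]interface{}. The sketch below shows the shape of that tag map; the keys follow the diff, but the helper itself is an illustrative simplification, not the exact server code.

package sketch

import (
	"fmt"
	"strconv"
	"strings"
)

// danglingAuditTags builds string-only audit tags for a dangling-object
// deletion. Nil per-disk errors are rendered with an explicit placeholder
// here so disk positions stay readable in the joined value.
func danglingAuditTags(setIdx, poolIdx, dataBlocks, parityBlocks int, metaErrs []error) map[string]string {
	errStrs := make([]string, 0, len(metaErrs))
	for _, err := range metaErrs {
		if err == nil {
			errStrs = append(errStrs, "<nil>")
			continue
		}
		errStrs = append(errStrs, err.Error())
	}
	return map[string]string{
		"set":   strconv.Itoa(setIdx),
		"pool":  strconv.Itoa(poolIdx),
		"d:p":   fmt.Sprintf("%d:%d", dataBlocks, parityBlocks),
		"merrs": strings.Join(errStrs, ","),
	}
}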
+ return FileInfo{}, errErasureReadQuorum + } + tags := make(map[string]string, 16) + tags["set"] = strconv.Itoa(er.setIndex) + tags["pool"] = strconv.Itoa(er.poolIndex) + tags["merrs"] = joinErrs(errs) + tags["derrs"] = fmt.Sprintf("%v", dataErrsByPart) + if m.IsValid() { + tags["sz"] = strconv.FormatInt(m.Size, 10) + tags["mt"] = m.ModTime.Format(iso8601Format) + tags["d:p"] = fmt.Sprintf("%d:%d", m.Erasure.DataBlocks, m.Erasure.ParityBlocks) + } else { + tags["invalid"] = "1" + tags["d:p"] = fmt.Sprintf("%d:%d", er.setDriveCount-er.defaultParityCount, er.defaultParityCount) + } + + // count the number of offline disks + offline := 0 + for i := range len(errs) { + var found bool + switch { + case errors.Is(errs[i], errDiskNotFound): + found = true + default: + for p := range dataErrsByPart { + if dataErrsByPart[p][i] == checkPartDiskNotFound { + found = true + break } - return disks[index].DeleteVersion(ctx, bucket, object, fi, false, DeleteOptions{}) - }, index) - } - - rmDisks := make(map[string]string, len(disks)) - for index, err := range g.Wait() { - var errStr, diskName string - if err != nil { - errStr = err.Error() - } else { - errStr = "" } - if disks[index] != nil { - diskName = disks[index].String() - } else { - diskName = fmt.Sprintf("disk-%d", index) - } - rmDisks[diskName] = errStr } - tags["cleanupResult"] = rmDisks + if found { + offline++ + } } - return m, err -} - -func fileInfoFromRaw(ri RawFileInfo, bucket, object string, readData, inclFreeVers, allParts bool) (FileInfo, error) { - var xl xlMetaV2 - if err := xl.LoadOrConvert(ri.Buf); err != nil { - return FileInfo{}, err + if offline > 0 { + tags["offline"] = strconv.Itoa(offline) } - fi, err := xl.ToFileInfo(bucket, object, "", inclFreeVers, allParts) - if err != nil { - return FileInfo{}, err + _, file, line, cok := runtime.Caller(1) + if cok { + tags["caller"] = fmt.Sprintf("%s:%d", file, line) } - if !fi.IsValid() { - return FileInfo{}, errCorruptedFormat - } + defer auditDanglingObjectDeletion(ctx, bucket, object, m.VersionID, tags) - versionID := fi.VersionID - if versionID == "" { - versionID = nullVersionID + fi := FileInfo{ + VersionID: m.VersionID, } - - fileInfo, err := xl.ToFileInfo(bucket, object, versionID, inclFreeVers, allParts) - if err != nil { - return FileInfo{}, err + if opts.VersionID != "" { + fi.VersionID = opts.VersionID + } + fi.SetTierFreeVersionID(mustGetUUID()) + disks := er.getDisks() + g := errgroup.WithNErrs(len(disks)) + for index := range disks { + g.Go(func() error { + if disks[index] == nil { + return errDiskNotFound + } + return disks[index].DeleteVersion(ctx, bucket, object, fi, false, DeleteOptions{}) + }, index) } - if readData { - fileInfo.Data = xl.data.find(versionID) + for index, err := range g.Wait() { + var errStr string + if err != nil { + errStr = err.Error() + } else { + errStr = "" + } + tags[fmt.Sprintf("ddisk-%d", index)] = errStr } - return fileInfo, nil + return m, nil +} + +func fileInfoFromRaw(ri RawFileInfo, bucket, object string, readData, inclFreeVers bool) (FileInfo, error) { + return getFileInfo(ri.Buf, bucket, object, "", fileInfoOpts{ + Data: readData, + InclFreeVersions: inclFreeVers, + }) } func readAllRawFileInfo(ctx context.Context, disks []StorageAPI, bucket, object string, readData bool) ([]RawFileInfo, []error) { rawFileInfos := make([]RawFileInfo, len(disks)) g := errgroup.WithNErrs(len(disks)) for index := range disks { - index := index g.Go(func() (err error) { if disks[index] == nil { return errDiskNotFound @@ -605,7 +591,7 @@ func 
readAllRawFileInfo(ctx context.Context, disks []StorageAPI, bucket, object return rawFileInfos, g.Wait() } -func pickLatestQuorumFilesInfo(ctx context.Context, rawFileInfos []RawFileInfo, errs []error, bucket, object string, readData, inclFreeVers, allParts bool) ([]FileInfo, []error) { +func pickLatestQuorumFilesInfo(ctx context.Context, rawFileInfos []RawFileInfo, errs []error, bucket, object string, readData, inclFreeVers bool) ([]FileInfo, []error) { metadataArray := make([]*xlMetaV2, len(rawFileInfos)) metaFileInfos := make([]FileInfo, len(rawFileInfos)) metadataShallowVersions := make([][]xlMetaV2ShallowVersion, len(rawFileInfos)) @@ -642,7 +628,7 @@ func pickLatestQuorumFilesInfo(ctx context.Context, rawFileInfos []RawFileInfo, readQuorum := (len(rawFileInfos) + 1) / 2 meta := &xlMetaV2{versions: mergeXLV2Versions(readQuorum, false, 1, metadataShallowVersions...)} - lfi, err := meta.ToFileInfo(bucket, object, "", inclFreeVers, allParts) + lfi, err := meta.ToFileInfo(bucket, object, "", inclFreeVers, true) if err != nil { for i := range errs { if errs[i] == nil { @@ -654,7 +640,7 @@ func pickLatestQuorumFilesInfo(ctx context.Context, rawFileInfos []RawFileInfo, if !lfi.IsValid() { for i := range errs { if errs[i] == nil { - errs[i] = errCorruptedFormat + errs[i] = errFileCorrupt } } return metaFileInfos, errs @@ -671,7 +657,7 @@ func pickLatestQuorumFilesInfo(ctx context.Context, rawFileInfos []RawFileInfo, } // make sure to preserve this for diskmtime based healing bugfix. - metaFileInfos[index], errs[index] = metadataArray[index].ToFileInfo(bucket, object, versionID, inclFreeVers, allParts) + metaFileInfos[index], errs[index] = metadataArray[index].ToFileInfo(bucket, object, versionID, inclFreeVers, true) if errs[index] != nil { continue } @@ -705,20 +691,16 @@ func shouldCheckForDangling(err error, errs []error, bucket string) bool { // Check if we have a read quorum issue case errors.Is(err, errErasureReadQuorum): return true - // Check if the object is inexistent in most disks but not all of them - case errors.Is(err, errFileNotFound) || errors.Is(err, errFileVersionNotFound): - for i := range errs { - if errs[i] == nil { - return true - } - } + // Check if the object is non-existent on most disks but not all of them + case (errors.Is(err, errFileNotFound) || errors.Is(err, errFileVersionNotFound)) && (countErrs(errs, nil) > 0): + return true } return false } -func readAllXL(ctx context.Context, disks []StorageAPI, bucket, object string, readData, inclFreeVers, allParts bool) ([]FileInfo, []error) { +func readAllXL(ctx context.Context, disks []StorageAPI, bucket, object string, readData, inclFreeVers bool) ([]FileInfo, []error) { rawFileInfos, errs := readAllRawFileInfo(ctx, disks, bucket, object, readData) - return pickLatestQuorumFilesInfo(ctx, rawFileInfos, errs, bucket, object, readData, inclFreeVers, allParts) + return pickLatestQuorumFilesInfo(ctx, rawFileInfos, errs, bucket, object, readData, inclFreeVers) } func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object string, opts ObjectOptions, readData bool) (FileInfo, []FileInfo, []StorageAPI, error) { @@ -733,8 +715,9 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s disks := er.getDisks() ropts := ReadOptions{ - ReadData: readData, - Healing: false, + ReadData: readData, + InclFreeVersions: opts.InclFreeVersions, + Healing: false, } mrfCheck := make(chan FileInfo) @@ -774,7 +757,7 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s // 
Read the latest version rfi, err = disk.ReadXL(ctx, bucket, object, readData) if err == nil { - fi, err = fileInfoFromRaw(rfi, bucket, object, readData, opts.InclFreeVersions, true) + fi, err = fileInfoFromRaw(rfi, bucket, object, readData, opts.InclFreeVersions) } } @@ -820,17 +803,15 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s // additionally do not heal delete markers inline, let them be // healed upon regular heal process. if missingBlocks > 0 && missingBlocks < fi.Erasure.DataBlocks { - globalMRFState.addPartialOp(partialOperation{ - bucket: fi.Volume, - object: fi.Name, - versionID: fi.VersionID, - queued: time.Now(), - setIndex: er.setIndex, - poolIndex: er.poolIndex, + globalMRFState.addPartialOp(PartialOperation{ + Bucket: fi.Volume, + Object: fi.Name, + VersionID: fi.VersionID, + Queued: time.Now(), + SetIndex: er.setIndex, + PoolIndex: er.poolIndex, }) } - - return }() validResp := 0 @@ -846,6 +827,13 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s minDisks = er.setDriveCount - er.defaultParityCount } + if minDisks == er.setDriveCount/2 { + // when data and parity are same we must atleast + // wait for response from 1 extra drive to avoid + // split-brain. + minDisks++ + } + calcQuorum := func(metaArr []FileInfo, errs []error) (FileInfo, []FileInfo, []StorageAPI, time.Time, string, error) { readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount) if err != nil { @@ -884,6 +872,20 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s if success { validResp++ } + + if totalResp >= minDisks && opts.FastGetObjInfo { + rw.Lock() + ok := countErrs(errs, errFileNotFound) >= minDisks || countErrs(errs, errFileVersionNotFound) >= minDisks + rw.Unlock() + if ok { + err = errFileNotFound + if opts.VersionID != "" { + err = errFileVersionNotFound + } + break + } + } + if totalResp < er.setDriveCount { if !opts.FastGetObjInfo { continue @@ -894,14 +896,16 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s } rw.Lock() + // when its a versioned bucket and empty versionID - at totalResp == setDriveCount + // we must use rawFileInfo to resolve versions to figure out the latest version. if opts.VersionID == "" && totalResp == er.setDriveCount { fi, onlineMeta, onlineDisks, modTime, etag, err = calcQuorum(pickLatestQuorumFilesInfo(ctx, - rawArr, errs, bucket, object, readData, opts.InclFreeVersions, true)) + rawArr, errs, bucket, object, readData, opts.InclFreeVersions)) } else { fi, onlineMeta, onlineDisks, modTime, etag, err = calcQuorum(metaArr, errs) } rw.Unlock() - if err == nil && fi.InlineData() { + if err == nil && (fi.InlineData() || len(fi.Data) > 0) { break } } @@ -911,8 +915,22 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s // not we simply ignore it, since we can't tell for sure if its dangling object. if totalResp == er.setDriveCount && shouldCheckForDangling(err, errs, bucket) { _, derr := er.deleteIfDangling(context.Background(), bucket, object, metaArr, errs, nil, opts) - if derr != nil { - err = derr + if derr == nil { + if opts.VersionID != "" { + err = errFileVersionNotFound + } else { + err = errFileNotFound + } + } + } + // when we have insufficient read quorum and inconsistent metadata return + // file not found, since we can't possibly have a way to recover this object + // anyway. 
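getObjectFileInfo now waits for one extra response whenever data and parity drive counts are equal, since N/2 answers alone could describe either half of a split set. A small worked sketch of that threshold, assuming a set of driveCount drives with parityCount parity drives (illustrative helper, not the server function):

package sketch

// minResponsesForRead returns how many drive responses must be collected
// before metadata can be trusted. With equal data and parity (for example
// 8+8 on a 16-drive set) exactly half the drives could agree among
// themselves, so one extra response is required to rule out split-brain.
func minResponsesForRead(driveCount, parityCount int) int {
	minDisks := driveCount - parityCount
	if minDisks == driveCount/2 {
		minDisks++ // e.g. 8+8 -> wait for 9 responses, not 8
	}
	return minDisks
}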
+ if v, ok := err.(InsufficientReadQuorum); ok && v.Type == RQInconsistentMeta { + if opts.VersionID != "" { + err = errFileVersionNotFound + } else { + err = errFileNotFound } } return fi, nil, nil, toObjectErr(err, bucket, object) @@ -921,7 +939,7 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s if !fi.Deleted && len(fi.Erasure.Distribution) != len(onlineDisks) { err := fmt.Errorf("unexpected file distribution (%v) from online disks (%v), looks like backend disks have been manually modified refusing to heal %s/%s(%s)", fi.Erasure.Distribution, onlineDisks, bucket, object, opts.VersionID) - logger.LogOnceIf(ctx, err, "get-object-file-info-manually-modified") + storageLogOnceIf(ctx, err, "get-object-file-info-manually-modified") return fi, nil, nil, toObjectErr(err, bucket, object, opts.VersionID) } @@ -998,7 +1016,7 @@ func (er erasureObjects) getObjectInfoAndQuorum(ctx context.Context, bucket, obj } // Similar to rename but renames data from srcEntry to dstEntry at dataDir -func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry string, metadata []FileInfo, dstBucket, dstEntry string, writeQuorum int) ([]StorageAPI, bool, error) { +func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry string, metadata []FileInfo, dstBucket, dstEntry string, writeQuorum int) ([]StorageAPI, []byte, string, error) { g := errgroup.WithNErrs(len(disks)) fvID := mustGetUUID() @@ -1006,10 +1024,10 @@ func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry str metadata[index].SetTierFreeVersionID(fvID) } - diskVersions := make([]uint64, len(disks)) + diskVersions := make([][]byte, len(disks)) + dataDirs := make([]string, len(disks)) // Rename file on all underlying storage disks. for index := range disks { - index := index g.Go(func() error { if disks[index] == nil { return errDiskNotFound @@ -1025,11 +1043,12 @@ func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry str if !fi.IsValid() { return errFileCorrupt } - sign, err := disks[index].RenameData(ctx, srcBucket, srcEntry, fi, dstBucket, dstEntry, RenameOptions{}) + resp, err := disks[index].RenameData(ctx, srcBucket, srcEntry, fi, dstBucket, dstEntry, RenameOptions{}) if err != nil { return err } - diskVersions[index] = sign + diskVersions[index] = resp.Sign + dataDirs[index] = resp.OldDataDir return nil }, index) } @@ -1037,8 +1056,6 @@ func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry str // Wait for all renames to finish. errs := g.Wait() - var versionsDisparity bool - err := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum) if err != nil { dg := errgroup.WithNErrs(len(disks)) @@ -1052,27 +1069,35 @@ func renameData(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry str // caller this dangling object will be now scheduled to be removed // via active healing. 
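renameData now collects a per-disk signature and the previous data directory, then keeps only the values that enough disks agree on (the reduceCommonVersions / reduceCommonDataDir calls in the diff). A generic sketch of that reduction, using a hypothetical helper over plain strings rather than the real signature and data-dir types:

package sketch

// reduceCommonValue returns the value reported by at least quorum entries,
// or the zero value when no single answer reaches quorum. Empty entries are
// treated as disks that did not respond.
func reduceCommonValue(values []string, quorum int) string {
	counts := make(map[string]int, len(values))
	for _, v := range values {
		if v == "" {
			continue
		}
		counts[v]++
		if counts[v] >= quorum {
			return v
		}
	}
	return ""
}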
dg.Go(func() error { - return disks[index].DeleteVersion(context.Background(), dstBucket, dstEntry, metadata[index], false, DeleteOptions{UndoWrite: true}) + return disks[index].DeleteVersion(context.Background(), dstBucket, dstEntry, metadata[index], false, DeleteOptions{ + UndoWrite: true, + OldDataDir: dataDirs[index], + }) }, index) } dg.Wait() } + var dataDir string + var versions []byte if err == nil { - versions := reduceCommonVersions(diskVersions, writeQuorum) + versions = reduceCommonVersions(diskVersions, writeQuorum) for index, dversions := range diskVersions { if errs[index] != nil { continue } - if versions != dversions { - versionsDisparity = true + if !bytes.Equal(dversions, versions) { + if len(dversions) > len(versions) { + versions = dversions + } break } } + dataDir = reduceCommonDataDir(dataDirs, writeQuorum) } // We can safely allow RenameData errors up to len(er.getDisks()) - writeQuorum // otherwise return failure. - return evalDisks(disks, errs), versionsDisparity, err + return evalDisks(disks, errs), versions, dataDir, err } func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { @@ -1100,7 +1125,7 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r * // Validate input data size and it can never be less than zero. if data.Size() < -1 { - logger.LogIf(ctx, errInvalidArgument, logger.ErrorKind) + bugLogIf(ctx, errInvalidArgument, logger.ErrorKind) return ObjectInfo{}, toObjectErr(errInvalidArgument) } @@ -1130,8 +1155,8 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r * case size == 0: buffer = make([]byte, 1) // Allocate at least a byte to reach EOF case size >= fi.Erasure.BlockSize: - buffer = globalBytePoolCap.Get() - defer globalBytePoolCap.Put(buffer) + buffer = globalBytePoolCap.Load().Get() + defer globalBytePoolCap.Load().Put(buffer) case size < fi.Erasure.BlockSize: // No need to allocate fully blockSizeV1 buffer if the incoming data is smaller. buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1)) @@ -1141,7 +1166,6 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r * buffer = buffer[:fi.Erasure.BlockSize] } - shardFileSize := erasure.ShardFileSize(data.Size()) writers := make([]io.Writer, len(onlineDisks)) inlineBuffers := make([]*bytes.Buffer, len(onlineDisks)) for i, disk := range onlineDisks { @@ -1149,17 +1173,23 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r * continue } if disk.IsOnline() { - inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, shardFileSize)) + buf := grid.GetByteBufferCap(int(erasure.ShardFileSize(data.Size())) + 64) + inlineBuffers[i] = bytes.NewBuffer(buf[:0]) + defer grid.PutByteBuffer(buf) writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize()) } } n, erasureErr := erasure.Encode(ctx, data, writers, buffer, writeQuorum) - closeBitrotWriters(writers) + closeErrs := closeBitrotWriters(writers) if erasureErr != nil { return ObjectInfo{}, toObjectErr(erasureErr, minioMetaBucket, key) } + if closeErr := reduceWriteQuorumErrs(ctx, closeErrs, objectOpIgnoredErrs, writeQuorum); closeErr != nil { + return ObjectInfo{}, toObjectErr(closeErr, minioMetaBucket, key) + } + // Should return IncompleteBody{} error when reader has fewer bytes // than specified in request header. 
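Closing the bitrot writers now returns per-writer errors, and both putMetacacheObject and putObject reduce those against the write quorum instead of assuming a successful Encode implies every inline buffer was flushed. A simplified sketch of that check (the real reduceWriteQuorumErrs also tolerates a set of ignorable errors, which is omitted here):

package sketch

// haveWriteQuorum reports whether enough writers finished without error to
// consider the write durable.
func haveWriteQuorum(closeErrs []error, writeQuorum int) bool {
	ok := 0
	for _, err := range closeErrs {
		if err == nil {
			ok++
		}
	}
	return ok >= writeQuorum
}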
if n < data.Size() { @@ -1196,7 +1226,7 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r * partsMetadata[index].SetInlineData() } - for i := 0; i < len(onlineDisks); i++ { + for i := range len(onlineDisks) { if onlineDisks[i] != nil && onlineDisks[i].IsOnline() { // Object info is the same in all disks, so we can pick // the first meta from online disk @@ -1220,48 +1250,10 @@ func (er erasureObjects) PutObject(ctx context.Context, bucket string, object st return er.putObject(ctx, bucket, object, data, opts) } -// Heal up to two versions of one object when there is disparity between disks -func healObjectVersionsDisparity(bucket string, entry metaCacheEntry, scanMode madmin.HealScanMode) error { - if entry.isDir() { - return nil - } - // We might land at .metacache, .trash, .multipart - // no need to heal them skip, only when bucket - // is '.minio.sys' - if bucket == minioMetaBucket { - if wildcard.Match("buckets/*/.metacache/*", entry.name) { - return nil - } - if wildcard.Match("tmp/*", entry.name) { - return nil - } - if wildcard.Match("multipart/*", entry.name) { - return nil - } - if wildcard.Match("tmp-old/*", entry.name) { - return nil - } - } - - fivs, err := entry.fileInfoVersions(bucket) - if err != nil { - healObject(bucket, entry.name, "", madmin.HealDeepScan) - return err - } - - if len(fivs.Versions) <= 2 { - for _, version := range fivs.Versions { - healObject(bucket, entry.name, version.VersionID, scanMode) - } - } - - return nil -} - // putObject wrapper for erasureObjects PutObject func (er erasureObjects) putObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) { if !opts.NoAuditLog { - auditObjectErasureSet(ctx, object, &er) + auditObjectErasureSet(ctx, "PutObject", object, &er) } data := r.Reader @@ -1282,14 +1274,20 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st if err == nil && opts.CheckPrecondFn(obj) { return objInfo, PreConditionFailed{} } - if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !isErrReadQuorum(err) { + if err != nil && !isErrVersionNotFound(err) && !isErrObjectNotFound(err) { + return objInfo, err + } + + // if object doesn't exist return error for If-Match conditional requests + // If-None-Match should be allowed to proceed for non-existent objects + if err != nil && opts.HasIfMatch && (isErrObjectNotFound(err) || isErrVersionNotFound(err)) { return objInfo, err } } // Validate input data size and it can never be less than -1. 
if data.Size() < -1 { - logger.LogIf(ctx, errInvalidArgument, logger.ErrorKind) + bugLogIf(ctx, errInvalidArgument, logger.ErrorKind) return ObjectInfo{}, toObjectErr(errInvalidArgument) } @@ -1297,25 +1295,21 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st storageDisks := er.getDisks() - parityDrives := len(storageDisks) / 2 - if !opts.MaxParity { - // Get parity and data drive count based on storage class metadata - parityDrives = globalStorageClass.GetParityForSC(userDefined[xhttp.AmzStorageClass]) - if parityDrives < 0 { - parityDrives = er.defaultParityCount - } - + // Get parity and data drive count based on storage class metadata + parityDrives := globalStorageClass.GetParityForSC(userDefined[xhttp.AmzStorageClass]) + if parityDrives < 0 { + parityDrives = er.defaultParityCount + } + if opts.MaxParity { + parityDrives = len(storageDisks) / 2 + } + if !opts.MaxParity && globalStorageClass.AvailabilityOptimized() { // If we have offline disks upgrade the number of erasure codes for this object. parityOrig := parityDrives var offlineDrives int for _, disk := range storageDisks { - if disk == nil { - parityDrives++ - offlineDrives++ - continue - } - if !disk.IsOnline() { + if disk == nil || !disk.IsOnline() { parityDrives++ offlineDrives++ continue @@ -1356,9 +1350,11 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st } fi.DataDir = mustGetUUID() - fi.Checksum = opts.WantChecksum.AppendTo(nil, nil) - if opts.EncryptFn != nil { - fi.Checksum = opts.EncryptFn("object-checksum", fi.Checksum) + if ckSum := userDefined[ReplicationSsecChecksumHeader]; ckSum != "" { + if v, err := base64.StdEncoding.DecodeString(ckSum); err == nil { + fi.Checksum = v + } + delete(userDefined, ReplicationSsecChecksumHeader) } uniqueID := mustGetUUID() tempObj := uniqueID @@ -1383,8 +1379,8 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st case size == 0: buffer = make([]byte, 1) // Allocate at least a byte to reach EOF case size >= fi.Erasure.BlockSize || size == -1: - buffer = globalBytePoolCap.Get() - defer globalBytePoolCap.Put(buffer) + buffer = globalBytePoolCap.Load().Get() + defer globalBytePoolCap.Load().Put(buffer) case size < fi.Erasure.BlockSize: // No need to allocate fully blockSizeV1 buffer if the incoming data is smaller. buffer = make([]byte, size, 2*size+int64(fi.Erasure.ParityBlocks+fi.Erasure.DataBlocks-1)) @@ -1399,25 +1395,13 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st defer er.deleteAll(context.Background(), minioMetaTmpBucket, tempObj) - shardFileSize := erasure.ShardFileSize(data.Size()) - writers := make([]io.Writer, len(onlineDisks)) var inlineBuffers []*bytes.Buffer - if shardFileSize >= 0 { - if !opts.Versioned && shardFileSize < smallFileThreshold { - inlineBuffers = make([]*bytes.Buffer, len(onlineDisks)) - } else if shardFileSize < smallFileThreshold/8 { - inlineBuffers = make([]*bytes.Buffer, len(onlineDisks)) - } - } else { - // If compressed, use actual size to determine. 
- if sz := erasure.ShardFileSize(data.ActualSize()); sz > 0 { - if !opts.Versioned && sz < smallFileThreshold { - inlineBuffers = make([]*bytes.Buffer, len(onlineDisks)) - } else if sz < smallFileThreshold/8 { - inlineBuffers = make([]*bytes.Buffer, len(onlineDisks)) - } - } + if globalStorageClass.ShouldInline(erasure.ShardFileSize(data.ActualSize()), opts.Versioned) { + inlineBuffers = make([]*bytes.Buffer, len(onlineDisks)) } + + shardFileSize := erasure.ShardFileSize(data.Size()) + writers := make([]io.Writer, len(onlineDisks)) for i, disk := range onlineDisks { if disk == nil { continue @@ -1428,11 +1412,9 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st } if len(inlineBuffers) > 0 { - sz := shardFileSize - if sz < 0 { - sz = data.ActualSize() - } - inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, sz)) + buf := grid.GetByteBufferCap(int(shardFileSize) + 64) + inlineBuffers[i] = bytes.NewBuffer(buf[:0]) + defer grid.PutByteBuffer(buf) writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize()) continue } @@ -1441,25 +1423,30 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st } toEncode := io.Reader(data) - if data.Size() > bigFileThreshold { + if data.Size() >= bigFileThreshold { // We use 2 buffers, so we always have a full buffer of input. - bufA := globalBytePoolCap.Get() - bufB := globalBytePoolCap.Get() - defer globalBytePoolCap.Put(bufA) - defer globalBytePoolCap.Put(bufB) + pool := globalBytePoolCap.Load() + bufA := pool.Get() + bufB := pool.Get() + defer pool.Put(bufA) + defer pool.Put(bufB) ra, err := readahead.NewReaderBuffer(data, [][]byte{bufA[:fi.Erasure.BlockSize], bufB[:fi.Erasure.BlockSize]}) if err == nil { toEncode = ra defer ra.Close() } - logger.LogIf(ctx, err) + bugLogIf(ctx, err) } n, erasureErr := erasure.Encode(ctx, toEncode, writers, buffer, writeQuorum) - closeBitrotWriters(writers) + closeErrs := closeBitrotWriters(writers) if erasureErr != nil { return ObjectInfo{}, toObjectErr(erasureErr, bucket, object) } + if closeErr := reduceWriteQuorumErrs(ctx, closeErrs, objectOpIgnoredErrs, writeQuorum); closeErr != nil { + return ObjectInfo{}, toObjectErr(closeErr, bucket, object) + } + // Should return IncompleteBody{} error when reader has fewer bytes // than specified in request header. if n < data.Size() { @@ -1470,21 +1457,47 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st if opts.IndexCB != nil { compIndex = opts.IndexCB() } - if !opts.NoLock { - lk := er.NewNSLock(bucket, object) - lkctx, err := lk.GetLock(ctx, globalOperationTimeout) - if err != nil { - return ObjectInfo{}, err - } - ctx = lkctx.Context() - defer lk.Unlock(lkctx) - } modTime := opts.MTime if opts.MTime.IsZero() { modTime = UTCNow() } + kind, encrypted := crypto.IsEncrypted(userDefined) + actualSize := data.ActualSize() + if actualSize < 0 { + compressed := fi.IsCompressed() + switch { + case compressed: + // ... nothing changes for compressed stream. + // if actualSize is -1 we have no known way to + // determine what is the actualSize. + case encrypted: + decSize, err := sio.DecryptedSize(uint64(n)) + if err == nil { + actualSize = int64(decSize) + } + default: + actualSize = n + } + } + // If ServerSideChecksum is wanted for this object, it takes precedence + // over opts.WantChecksum. 
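When the incoming stream size is unknown (-1), putObject now back-computes the plaintext size after encoding: compressed streams stay unknown, SSE streams derive it from the encrypted byte count via sio.DecryptedSize, and everything else uses the bytes written. A standalone sketch of that fallback order, assuming n is the on-wire byte count produced by the encoder:

package sketch

import "github.com/minio/sio"

// resolveActualSize keeps -1 for compressed streams (the ratio cannot be
// inverted from n alone), converts encrypted sizes back to plaintext sizes,
// and otherwise reports the raw written count.
func resolveActualSize(n int64, compressed, encrypted bool) int64 {
	actualSize := int64(-1)
	switch {
	case compressed:
		// unknown: nothing to derive from n
	case encrypted:
		if decSize, err := sio.DecryptedSize(uint64(n)); err == nil {
			actualSize = int64(decSize)
		}
	default:
		actualSize = n
	}
	return actualSize
}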
+ if opts.WantServerSideChecksumType.IsSet() { + serverSideChecksum := r.RawServerSideChecksumResult() + if serverSideChecksum != nil { + fi.Checksum = serverSideChecksum.AppendTo(nil, nil) + if opts.EncryptFn != nil { + fi.Checksum = opts.EncryptFn("object-checksum", fi.Checksum) + } + } + } else if fi.Checksum == nil && opts.WantChecksum != nil { + // Trailing headers checksums should now be filled. + fi.Checksum = opts.WantChecksum.AppendTo(nil, nil) + if opts.EncryptFn != nil { + fi.Checksum = opts.EncryptFn("object-checksum", fi.Checksum) + } + } for i, w := range writers { if w == nil { onlineDisks[i] = nil @@ -1496,12 +1509,12 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st partsMetadata[i].Data = nil } // No need to add checksum to part. We already have it on the object. - partsMetadata[i].AddObjectPart(1, "", n, data.ActualSize(), modTime, compIndex, nil) + partsMetadata[i].AddObjectPart(1, "", n, actualSize, modTime, compIndex, nil) partsMetadata[i].Versioned = opts.Versioned || opts.VersionSuspended + partsMetadata[i].Checksum = fi.Checksum } userDefined["etag"] = r.MD5CurrentHexString() - kind, _ := crypto.IsEncrypted(userDefined) if opts.PreserveETag != "" { if !opts.ReplicationRequest { userDefined["etag"] = opts.PreserveETag @@ -1537,16 +1550,35 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st } } + if !opts.NoLock { + lk := er.NewNSLock(bucket, object) + lkctx, err := lk.GetLock(ctx, globalOperationTimeout) + if err != nil { + return ObjectInfo{}, err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx) + } + // Rename the successfully written temporary object to final location. - onlineDisks, versionsDisparity, err := renameData(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, bucket, object, writeQuorum) + onlineDisks, versions, oldDataDir, err := renameData(ctx, onlineDisks, minioMetaTmpBucket, tempObj, partsMetadata, bucket, object, writeQuorum) if err != nil { if errors.Is(err, errFileNotFound) { - return ObjectInfo{}, toObjectErr(errErasureWriteQuorum, bucket, object) + // An in-quorum errFileNotFound means that client stream + // prematurely closed and we do not find any xl.meta or + // part.1's - in such a scenario we must return as if client + // disconnected. This means that erasure.Encode() CreateFile() + // did not do anything. + return ObjectInfo{}, IncompleteBody{Bucket: bucket, Object: object} } return ObjectInfo{}, toObjectErr(err, bucket, object) } - for i := 0; i < len(onlineDisks); i++ { + if err = er.commitRenameDataDir(ctx, bucket, object, oldDataDir, onlineDisks, writeQuorum); err != nil { + return ObjectInfo{}, toObjectErr(err, bucket, object) + } + + for i := range len(onlineDisks) { if onlineDisks[i] != nil && onlineDisks[i].IsOnline() { // Object info is the same in all disks, so we can pick // the first meta from online disk @@ -1560,10 +1592,10 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st // When there is versions disparity we are healing // the content implicitly for all versions, we can // avoid triggering another MRF heal for offline drives. - if !versionsDisparity { + if len(versions) == 0 { // Whether a disk was initially or becomes offline // during this upload, send it to the MRF list. 
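After the rename, the upload either queues one PartialOperation carrying the version payload returned by renameData (when the disks disagreed) or falls back to a generic heal as soon as one offline disk is seen. A compact sketch of that branching, with a hypothetical queue interface standing in for globalMRFState:

package sketch

type partialOp struct {
	Bucket, Object string
	Versions       []byte // non-empty when disks returned differing version listings
}

type mrfQueue interface{ Add(partialOp) }

// queueHealAfterWrite mirrors the post-rename decision: a version disparity
// heals all reported versions in one operation, otherwise a single offline
// disk is enough to queue a plain partial-write heal.
func queueHealAfterWrite(q mrfQueue, bucket, object string, versions []byte, diskOnline []bool) {
	if len(versions) > 0 {
		q.Add(partialOp{Bucket: bucket, Object: object, Versions: versions})
		return
	}
	for _, online := range diskOnline {
		if !online {
			q.Add(partialOp{Bucket: bucket, Object: object})
			return
		}
	}
}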
- for i := 0; i < len(onlineDisks); i++ { + for i := range len(onlineDisks) { if onlineDisks[i] != nil && onlineDisks[i].IsOnline() { continue } @@ -1572,13 +1604,13 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st break } } else { - globalMRFState.addPartialOp(partialOperation{ - bucket: bucket, - object: object, - queued: time.Now(), - allVersions: true, - setIndex: er.setIndex, - poolIndex: er.poolIndex, + globalMRFState.addPartialOp(PartialOperation{ + Bucket: bucket, + Object: object, + Queued: time.Now(), + Versions: versions, + SetIndex: er.setIndex, + PoolIndex: er.poolIndex, }) } } @@ -1603,7 +1635,6 @@ func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object g := errgroup.WithNErrs(len(disks)) for index := range disks { - index := index g.Go(func() error { if disks[index] == nil { return errDiskNotFound @@ -1621,7 +1652,7 @@ func (er erasureObjects) deleteObjectVersion(ctx context.Context, bucket, object func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) { if !opts.NoAuditLog { for _, obj := range objects { - auditObjectErasureSet(ctx, obj.ObjectV.ObjectName, &er) + auditObjectErasureSet(ctx, "DeleteObjects", obj.ObjectName, &er) } } @@ -1703,8 +1734,21 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objec } dedupVersions := make([]FileInfoVersions, 0, len(versionsMap)) - for _, version := range versionsMap { - dedupVersions = append(dedupVersions, version) + for _, fivs := range versionsMap { + // Removal of existing versions and adding a delete marker in the same + // request is supported. At the same time, we cannot allow adding + // two delete markers on top of any object. To avoid this situation, + // we will sort deletions to execute existing deletion first, + // then add only one delete marker if requested + sort.SliceStable(fivs.Versions, func(i, j int) bool { + return !fivs.Versions[i].Deleted + }) + if idx := slices.IndexFunc(fivs.Versions, func(fi FileInfo) bool { + return fi.Deleted + }); idx > -1 { + fivs.Versions = fivs.Versions[:idx+1] + } + dedupVersions = append(dedupVersions, fivs) } // Initialize list of errors. @@ -1729,12 +1773,6 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objec continue } for _, v := range dedupVersions[i].Versions { - if err == errFileNotFound || err == errFileVersionNotFound { - if !dobjects[v.Idx].DeleteMarker { - // Not delete marker, if not found, ok. 
- continue - } - } delObjErrs[index][v.Idx] = err } } @@ -1754,6 +1792,13 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objec } } err := reduceWriteQuorumErrs(ctx, diskErrs, objectOpIgnoredErrs, writeQuorums[objIndex]) + if err == nil { + dobjects[objIndex].found = true + } else if isErrVersionNotFound(err) || isErrObjectNotFound(err) { + if !dobjects[objIndex].DeleteMarker { + err = nil + } + } if objects[objIndex].VersionID != "" { errs[objIndex] = toObjectErr(err, bucket, objects[objIndex].ObjectName, objects[objIndex].VersionID) } else { @@ -1788,35 +1833,50 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objec return dobjects, errs } +func (er erasureObjects) commitRenameDataDir(ctx context.Context, bucket, object, dataDir string, onlineDisks []StorageAPI, writeQuorum int) error { + if dataDir == "" { + return nil + } + g := errgroup.WithNErrs(len(onlineDisks)) + for index := range onlineDisks { + g.Go(func() error { + if onlineDisks[index] == nil { + return nil + } + return onlineDisks[index].Delete(ctx, bucket, pathJoin(object, dataDir), DeleteOptions{ + Recursive: true, + }) + }, index) + } + + return reduceWriteQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, writeQuorum) +} + func (er erasureObjects) deletePrefix(ctx context.Context, bucket, prefix string) error { disks := er.getDisks() + // Assume (N/2 + 1) quorum for Delete() + // this is a theoretical assumption such that + // for delete's we do not need to honor storage + // class for objects that have reduced quorum + // due to storage class - this only needs to be honored + // for Read() requests alone that we already do. + writeQuorum := len(disks)/2 + 1 + g := errgroup.WithNErrs(len(disks)) - dirPrefix := encodeDirObject(prefix) for index := range disks { - index := index g.Go(func() error { if disks[index] == nil { return nil } - // Deletes - // - The prefix and its children - // - The prefix__XLDIR__ - defer disks[index].Delete(ctx, bucket, dirPrefix, DeleteOptions{ - Recursive: true, - Immediate: true, - }) return disks[index].Delete(ctx, bucket, prefix, DeleteOptions{ Recursive: true, Immediate: true, }) }, index) } - for _, err := range g.Wait() { - if err != nil { - return err - } - } - return nil + + // return errors if any during deletion + return reduceWriteQuorumErrs(ctx, g.Wait(), objectOpIgnoredErrs, writeQuorum) } // DeleteObject - deletes an object, this call doesn't necessary reply @@ -1824,14 +1884,7 @@ func (er erasureObjects) deletePrefix(ctx context.Context, bucket, prefix string // response to the client request. 
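DeleteObjects now orders each object's requested versions so that concrete version deletions execute before a delete marker, and trims the list after the first marker so a single request never stacks two markers on the same object. A sketch of that ordering on a simplified version type; the sort.SliceStable / slices.IndexFunc combination mirrors the diff, while the type and comparator here are illustrative:

package sketch

import (
	"slices"
	"sort"
)

type versionToDelete struct {
	VersionID string
	Deleted   bool // true when this entry asks for a delete marker
}

// orderDeletions puts concrete version deletions first, then keeps at most
// one trailing delete marker.
func orderDeletions(versions []versionToDelete) []versionToDelete {
	sort.SliceStable(versions, func(i, j int) bool {
		return !versions[i].Deleted && versions[j].Deleted
	})
	if idx := slices.IndexFunc(versions, func(v versionToDelete) bool {
		return v.Deleted
	}); idx > -1 {
		versions = versions[:idx+1]
	}
	return versions
}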
func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) { if !opts.NoAuditLog { - auditObjectErasureSet(ctx, object, &er) - } - - if opts.DeletePrefix { - if globalCacheConfig.Enabled() { - return ObjectInfo{}, toObjectErr(errMethodNotAllowed, bucket, object) - } - return ObjectInfo{}, toObjectErr(er.deletePrefix(ctx, bucket, object), bucket, object) + auditObjectErasureSet(ctx, "DeleteObject", object, &er) } var lc *lifecycle.Lifecycle @@ -1839,9 +1892,18 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string var replcfg *replication.Config if opts.Expiration.Expire { // Check if the current bucket has a configured lifecycle policy - lc, _ = globalLifecycleSys.Get(bucket) - rcfg, _ = globalBucketObjectLockSys.Get(bucket) - replcfg, _ = getReplicationConfig(ctx, bucket) + lc, err = globalLifecycleSys.Get(bucket) + if err != nil && !errors.Is(err, BucketLifecycleNotFound{Bucket: bucket}) { + return objInfo, err + } + rcfg, err = globalBucketObjectLockSys.Get(bucket) + if err != nil { + return objInfo, err + } + replcfg, err = getReplicationConfig(ctx, bucket) + if err != nil { + return objInfo, err + } } // expiration attempted on a bucket with no lifecycle @@ -1860,23 +1922,79 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string } } + if opts.DeletePrefix { + if opts.Expiration.Expire { + // Expire all versions expiration must still verify the state() on disk + // via a getObjectInfo() call as follows, any read quorum issues we + // must not proceed further for safety reasons. attempt a MRF heal + // while we see such quorum errors. + goi, _, gerr := er.getObjectInfoAndQuorum(ctx, bucket, object, opts) + if gerr != nil && goi.Name == "" { + if _, ok := gerr.(InsufficientReadQuorum); ok { + // Add an MRF heal for next time. + er.addPartial(bucket, object, opts.VersionID) + + return objInfo, InsufficientWriteQuorum{} + } + return objInfo, gerr + } + + // Add protection and re-verify the ILM rules for qualification + // based on the latest objectInfo and see if the object still + // qualifies for deletion. + if gerr == nil { + var isErr bool + evt := evalActionFromLifecycle(ctx, *lc, rcfg, replcfg, goi) + switch evt.Action { + case lifecycle.DeleteAllVersionsAction, lifecycle.DelMarkerDeleteAllVersionsAction: + // opts.DeletePrefix is used only in the above lifecycle Expiration actions. + default: + // object has been modified since lifecycle action was previously evaluated + isErr = true + } + if isErr { + if goi.VersionID != "" { + return goi, VersionNotFound{ + Bucket: bucket, + Object: object, + VersionID: goi.VersionID, + } + } + return goi, ObjectNotFound{ + Bucket: bucket, + Object: object, + } + } + } + } // Delete marker and any latest that qualifies shall be expired permanently. + + return ObjectInfo{}, toObjectErr(er.deletePrefix(ctx, bucket, object), bucket, object) + } + storageDisks := er.getDisks() versionFound := true objInfo = ObjectInfo{VersionID: opts.VersionID} // version id needed in Delete API response. goi, _, gerr := er.getObjectInfoAndQuorum(ctx, bucket, object, opts) + tryDel := false if gerr != nil && goi.Name == "" { if _, ok := gerr.(InsufficientReadQuorum); ok { - return objInfo, InsufficientWriteQuorum{} + if opts.Versioned || opts.VersionSuspended || countOnlineDisks(storageDisks) < len(storageDisks)/2+1 { + // Add an MRF heal for next time. 
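DeleteObject no longer gives up outright when the object's metadata cannot be read at quorum: versioned (or version-suspended) buckets queue an MRF heal and fail the request, while an unversioned delete may still proceed as long as enough drives are online to reach write quorum. A sketch of that gate, with illustrative parameters:

package sketch

// canAttemptBlindDelete reports whether a delete should proceed even though
// read quorum failed. Versioned buckets must not guess, and an unversioned
// delete still needs a simple majority of drives online.
func canAttemptBlindDelete(versioned, versionSuspended bool, onlineDisks, totalDisks int) bool {
	if versioned || versionSuspended {
		return false
	}
	return onlineDisks >= totalDisks/2+1
}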
+ er.addPartial(bucket, object, opts.VersionID) + return objInfo, InsufficientWriteQuorum{} + } + tryDel = true // only for unversioned objects if there is write quorum } // For delete marker replication, versionID being replicated will not exist on disk if opts.DeleteMarker { versionFound = false - } else { + } else if !tryDel { return objInfo, gerr } } + if opts.EvalMetadataFn != nil { - dsc, err := opts.EvalMetadataFn(&goi, err) + dsc, err := opts.EvalMetadataFn(&goi, gerr) if err != nil { return ObjectInfo{}, err } @@ -1932,7 +2050,7 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string if opts.VersionPurgeStatus().Empty() && opts.DeleteMarkerReplicationStatus().Empty() { markDelete = false } - if opts.VersionPurgeStatus() == Complete { + if opts.VersionPurgeStatus() == replication.VersionPurgeComplete { markDelete = false } // now, since VersionPurgeStatus() is already set, we can let the @@ -2032,11 +2150,11 @@ func (er erasureObjects) DeleteObject(ctx context.Context, bucket, object string // Send the successful but partial upload/delete, however ignore // if the channel is blocked by other items. func (er erasureObjects) addPartial(bucket, object, versionID string) { - globalMRFState.addPartialOp(partialOperation{ - bucket: bucket, - object: object, - versionID: versionID, - queued: time.Now(), + globalMRFState.addPartialOp(PartialOperation{ + Bucket: bucket, + Object: object, + VersionID: versionID, + Queued: time.Now(), }) } @@ -2061,15 +2179,19 @@ func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object s if opts.VersionID != "" { metaArr, errs = readAllFileInfo(ctx, disks, "", bucket, object, opts.VersionID, false, false) } else { - metaArr, errs = readAllXL(ctx, disks, bucket, object, false, false, true) + metaArr, errs = readAllXL(ctx, disks, bucket, object, false, false) } readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount) if err != nil { - if errors.Is(err, errErasureReadQuorum) && !strings.HasPrefix(bucket, minioMetaBucket) { + if shouldCheckForDangling(err, errs, bucket) { _, derr := er.deleteIfDangling(context.Background(), bucket, object, metaArr, errs, nil, opts) - if derr != nil { - err = derr + if derr == nil { + if opts.VersionID != "" { + err = errFileVersionNotFound + } else { + err = errFileNotFound + } } } return ObjectInfo{}, toObjectErr(err, bucket, object) @@ -2101,9 +2223,7 @@ func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object s return ObjectInfo{}, err } } - for k, v := range objInfo.UserDefined { - fi.Metadata[k] = v - } + maps.Copy(fi.Metadata, objInfo.UserDefined) fi.ModTime = opts.MTime fi.VersionID = opts.VersionID @@ -2116,14 +2236,16 @@ func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object s // PutObjectTags - replace or add tags to an existing object func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) (ObjectInfo, error) { - // Lock the object before updating tags. - lk := er.NewNSLock(bucket, object) - lkctx, err := lk.GetLock(ctx, globalOperationTimeout) - if err != nil { - return ObjectInfo{}, err + if !opts.NoLock { + // Lock the object before updating tags. 
+ lk := er.NewNSLock(bucket, object) + lkctx, err := lk.GetLock(ctx, globalOperationTimeout) + if err != nil { + return ObjectInfo{}, err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx) } - ctx = lkctx.Context() - defer lk.Unlock(lkctx) disks := er.getDisks() @@ -2134,15 +2256,19 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin if opts.VersionID != "" { metaArr, errs = readAllFileInfo(ctx, disks, "", bucket, object, opts.VersionID, false, false) } else { - metaArr, errs = readAllXL(ctx, disks, bucket, object, false, false, true) + metaArr, errs = readAllXL(ctx, disks, bucket, object, false, false) } readQuorum, _, err := objectQuorumFromMeta(ctx, metaArr, errs, er.defaultParityCount) if err != nil { - if errors.Is(err, errErasureReadQuorum) && !strings.HasPrefix(bucket, minioMetaBucket) { + if shouldCheckForDangling(err, errs, bucket) { _, derr := er.deleteIfDangling(context.Background(), bucket, object, metaArr, errs, nil, opts) - if derr != nil { - err = derr + if derr == nil { + if opts.VersionID != "" { + err = errFileVersionNotFound + } else { + err = errFileNotFound + } } } return ObjectInfo{}, toObjectErr(err, bucket, object) @@ -2167,9 +2293,7 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin fi.Metadata[xhttp.AmzObjectTagging] = tags fi.ReplicationState = opts.PutReplicationState() - for k, v := range opts.UserDefined { - fi.Metadata[k] = v - } + maps.Copy(fi.Metadata, opts.UserDefined) if err = er.updateObjectMeta(ctx, bucket, object, fi, onlineDisks); err != nil { return ObjectInfo{}, toObjectErr(err, bucket, object) @@ -2187,7 +2311,6 @@ func (er erasureObjects) updateObjectMetaWithOpts(ctx context.Context, bucket, o // Start writing `xl.meta` to all disks in parallel. for index := range onlineDisks { - index := index g.Go(func() error { if onlineDisks[index] == nil { return errDiskNotFound @@ -2225,19 +2348,21 @@ func (er erasureObjects) GetObjectTags(ctx context.Context, bucket, object strin // TransitionObject - transition object content to target tier. func (er erasureObjects) TransitionObject(ctx context.Context, bucket, object string, opts ObjectOptions) error { - tgtClient, err := globalTierConfigMgr.getDriver(opts.Transition.Tier) + tgtClient, err := globalTierConfigMgr.getDriver(ctx, opts.Transition.Tier) if err != nil { return err } - // Acquire write lock before starting to transition the object. - lk := er.NewNSLock(bucket, object) - lkctx, err := lk.GetLock(ctx, globalDeleteOperationTimeout) - if err != nil { - return err + if !opts.NoLock { + // Acquire write lock before starting to transition the object. 
+ lk := er.NewNSLock(bucket, object) + lkctx, err := lk.GetLock(ctx, globalDeleteOperationTimeout) + if err != nil { + return err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx) } - ctx = lkctx.Context() - defer lk.Unlock(lkctx) fi, metaArr, onlineDisks, err := er.getObjectFileInfo(ctx, bucket, object, opts, true) if err != nil { @@ -2275,6 +2400,7 @@ func (er erasureObjects) TransitionObject(ctx context.Context, bucket, object st destObj, err := genTransitionObjName(bucket) if err != nil { + traceFn(ILMTransition, nil, err) return err } @@ -2285,9 +2411,14 @@ func (er erasureObjects) TransitionObject(ctx context.Context, bucket, object st }() var rv remoteVersionID - rv, err = tgtClient.Put(ctx, destObj, pr, fi.Size) + rv, err = tgtClient.PutWithMeta(ctx, destObj, pr, fi.Size, map[string]string{ + "name": object, // preserve the original name of the object on the remote tier object metadata. + // this is just for future reverse lookup() purposes (applies only for new objects) + // does not apply retro-actively on already transitioned objects. + }) pr.CloseWithError(err) if err != nil { + traceFn(ILMTransition, nil, err) return err } fi.TransitionStatus = lifecycle.TransitionComplete @@ -2345,7 +2476,7 @@ func (er erasureObjects) updateRestoreMetadata(ctx context.Context, bucket, obje }, ObjectOptions{ VersionID: oi.VersionID, }); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to update transition restore metadata for %s/%s(%s): %s", bucket, object, oi.VersionID, err)) + storageLogIf(ctx, fmt.Errorf("Unable to update transition restore metadata for %s/%s(%s): %s", bucket, object, oi.VersionID, err)) return err } return nil diff --git a/cmd/erasure-object_test.go b/cmd/erasure-object_test.go index a30803f8585f6..03f452f53805a 100644 --- a/cmd/erasure-object_test.go +++ b/cmd/erasure-object_test.go @@ -36,7 +36,7 @@ import ( ) func TestRepeatPutObjectPart(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() var objLayer ObjectLayer @@ -50,7 +50,7 @@ func TestRepeatPutObjectPart(t *testing.T) { } // cleaning up of temporary test directories - defer objLayer.Shutdown(context.Background()) + defer objLayer.Shutdown(t.Context()) defer removeRoots(disks) err = objLayer.MakeBucket(ctx, "bucket1", MakeBucketOptions{}) @@ -91,7 +91,7 @@ func TestErasureDeleteObjectBasic(t *testing.T) { {"bucket", "dir/obj", nil}, } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() // Create an instance of xl backend @@ -99,7 +99,7 @@ func TestErasureDeleteObjectBasic(t *testing.T) { if err != nil { t.Fatal(err) } - defer xl.Shutdown(context.Background()) + defer xl.Shutdown(t.Context()) err = xl.MakeBucket(ctx, "bucket", MakeBucketOptions{}) if err != nil { @@ -112,7 +112,6 @@ func TestErasureDeleteObjectBasic(t *testing.T) { t.Fatalf("Erasure Object upload failed: %s", err) } for _, test := range testCases { - test := test t.Run("", func(t *testing.T) { _, err := xl.GetObjectInfo(ctx, "bucket", "dir/obj", ObjectOptions{}) if err != nil { @@ -131,8 +130,77 @@ func TestErasureDeleteObjectBasic(t *testing.T) { removeRoots(fsDirs) } +func TestDeleteObjectsVersionedTwoPools(t *testing.T) { + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + obj, fsDirs, err := prepareErasurePools() + if err != nil { + t.Fatal("Unable to initialize 'Erasure' object layer.", err) + } + // Remove all dirs. 
+ for _, dir := range fsDirs { + defer os.RemoveAll(dir) + } + + bucketName := "bucket" + objectName := "myobject" + err = obj.MakeBucket(ctx, bucketName, MakeBucketOptions{ + VersioningEnabled: true, + }) + if err != nil { + t.Fatal(err) + } + + z, ok := obj.(*erasureServerPools) + if !ok { + t.Fatal("unexpected object layer type") + } + + versions := make([]string, 2) + for i := range z.serverPools { + objInfo, err := z.serverPools[i].PutObject(ctx, bucketName, objectName, + mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{ + Versioned: true, + }) + if err != nil { + t.Fatalf("Erasure Object upload failed: %s", err) + } + versions[i] = objInfo.VersionID + } + + // Remove and check the version in the second pool, then + // remove and check the version in the first pool + for testIdx, vid := range []string{versions[1], versions[0]} { + names := []ObjectToDelete{ + { + ObjectV: ObjectV{ + ObjectName: objectName, + VersionID: vid, + }, + }, + } + _, delErrs := obj.DeleteObjects(ctx, bucketName, names, ObjectOptions{ + Versioned: true, + }) + for i := range delErrs { + if delErrs[i] != nil { + t.Errorf("Test %d: Failed to remove object `%v` with the error: `%v`", testIdx, names[i], delErrs[i]) + } + _, statErr := obj.GetObjectInfo(ctx, bucketName, objectName, ObjectOptions{ + VersionID: names[i].VersionID, + }) + switch statErr.(type) { + case VersionNotFound: + default: + t.Errorf("Test %d: Object %s is not removed", testIdx, objectName) + } + } + } +} + func TestDeleteObjectsVersioned(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() obj, fsDirs, err := prepareErasure(ctx, 16) @@ -177,7 +245,6 @@ func TestDeleteObjectsVersioned(t *testing.T) { VersionID: objInfo.VersionID, }, } - } names = append(names, ObjectToDelete{ ObjectV: ObjectV{ @@ -197,7 +264,7 @@ func TestDeleteObjectsVersioned(t *testing.T) { for i, test := range testCases { _, statErr := obj.GetObjectInfo(ctx, test.bucket, test.object, ObjectOptions{ - VersionID: names[i].ObjectV.VersionID, + VersionID: names[i].VersionID, }) switch statErr.(type) { case VersionNotFound: @@ -212,7 +279,7 @@ func TestDeleteObjectsVersioned(t *testing.T) { } func TestErasureDeleteObjectsErasureSet(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() obj, fsDirs, err := prepareErasureSets32(ctx) @@ -285,7 +352,7 @@ func TestErasureDeleteObjectsErasureSet(t *testing.T) { } func TestErasureDeleteObjectDiskNotFound(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() // Create an instance of xl backend. @@ -294,7 +361,7 @@ func TestErasureDeleteObjectDiskNotFound(t *testing.T) { t.Fatal(err) } // Cleanup backend directories - defer obj.Shutdown(context.Background()) + defer obj.Shutdown(t.Context()) defer removeRoots(fsDirs) z := obj.(*erasureServerPools) @@ -354,7 +421,7 @@ func TestErasureDeleteObjectDiskNotFound(t *testing.T) { } func TestErasureDeleteObjectDiskNotFoundErasure4(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() // Create an instance of xl backend. 
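The test updates above follow two newer Go idioms: context.Background() becomes t.Context(), a per-test context added in Go 1.24 that is cancelled automatically when the test finishes, and counted loops such as "for f := 0; f < 2; f++" become the Go 1.22 range-over-int form. A small standalone test showing both, independent of the MinIO test harness:

package idioms_test

import "testing"

func TestContextAndRangeIdioms(t *testing.T) {
	// Cancelled automatically just before the test completes,
	// so no explicit cancel/defer is required.
	ctx := t.Context()

	// Go 1.22+ range over an int: iterates f = 0, 1.
	for f := range 2 {
		if ctx.Err() != nil {
			t.Fatalf("context unexpectedly cancelled on iteration %d", f)
		}
	}
}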
@@ -363,7 +430,7 @@ func TestErasureDeleteObjectDiskNotFoundErasure4(t *testing.T) { t.Fatal(err) } // Cleanup backend directories - defer obj.Shutdown(context.Background()) + defer obj.Shutdown(t.Context()) defer removeRoots(fsDirs) z := obj.(*erasureServerPools) @@ -414,7 +481,7 @@ func TestErasureDeleteObjectDiskNotFoundErasure4(t *testing.T) { } func TestErasureDeleteObjectDiskNotFoundErr(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() // Create an instance of xl backend. @@ -423,7 +490,7 @@ func TestErasureDeleteObjectDiskNotFoundErr(t *testing.T) { t.Fatal(err) } // Cleanup backend directories - defer obj.Shutdown(context.Background()) + defer obj.Shutdown(t.Context()) defer removeRoots(fsDirs) z := obj.(*erasureServerPools) @@ -485,7 +552,7 @@ func TestErasureDeleteObjectDiskNotFoundErr(t *testing.T) { } func TestGetObjectNoQuorum(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() // Create an instance of xl backend. @@ -494,7 +561,7 @@ func TestGetObjectNoQuorum(t *testing.T) { t.Fatal(err) } // Cleanup backend directories. - defer obj.Shutdown(context.Background()) + defer obj.Shutdown(t.Context()) defer removeRoots(fsDirs) z := obj.(*erasureServerPools) @@ -557,7 +624,7 @@ func TestGetObjectNoQuorum(t *testing.T) { t.Fatal(err) } - for f := 0; f < 2; f++ { + for f := range 2 { diskErrors := make(map[int]error) for i := 0; i <= f; i++ { diskErrors[i] = nil @@ -594,7 +661,7 @@ func TestGetObjectNoQuorum(t *testing.T) { } func TestHeadObjectNoQuorum(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() // Create an instance of xl backend. @@ -603,7 +670,7 @@ func TestHeadObjectNoQuorum(t *testing.T) { t.Fatal(err) } // Cleanup backend directories. - defer obj.Shutdown(context.Background()) + defer obj.Shutdown(t.Context()) defer removeRoots(fsDirs) z := obj.(*erasureServerPools) @@ -671,7 +738,7 @@ func TestHeadObjectNoQuorum(t *testing.T) { } func TestPutObjectNoQuorum(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() // Create an instance of xl backend. @@ -681,7 +748,7 @@ func TestPutObjectNoQuorum(t *testing.T) { } // Cleanup backend directories. - defer obj.Shutdown(context.Background()) + defer obj.Shutdown(t.Context()) defer removeRoots(fsDirs) z := obj.(*erasureServerPools) @@ -706,7 +773,7 @@ func TestPutObjectNoQuorum(t *testing.T) { // in a 16 disk Erasure setup. The original disks are 'replaced' with // naughtyDisks that fail after 'f' successful StorageAPI method // invocations, where f - [0,4) - for f := 0; f < 2; f++ { + for f := range 2 { diskErrors := make(map[int]error) for i := 0; i <= f; i++ { diskErrors[i] = nil @@ -734,7 +801,7 @@ func TestPutObjectNoQuorum(t *testing.T) { } func TestPutObjectNoQuorumSmall(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() // Create an instance of xl backend. @@ -744,7 +811,7 @@ func TestPutObjectNoQuorumSmall(t *testing.T) { } // Cleanup backend directories. - defer obj.Shutdown(context.Background()) + defer obj.Shutdown(t.Context()) defer removeRoots(fsDirs) z := obj.(*erasureServerPools) @@ -769,7 +836,7 @@ func TestPutObjectNoQuorumSmall(t *testing.T) { // in a 16 disk Erasure setup. 
The original disks are 'replaced' with // naughtyDisks that fail after 'f' successful StorageAPI method // invocations, where f - [0,2) - for f := 0; f < 2; f++ { + for f := range 2 { t.Run("exec-"+strconv.Itoa(f), func(t *testing.T) { diskErrors := make(map[int]error) for i := 0; i <= f; i++ { @@ -801,7 +868,7 @@ func TestPutObjectNoQuorumSmall(t *testing.T) { // Test PutObject twice, one small and another bigger // than small data threshold and checks reading them again func TestPutObjectSmallInlineData(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() const numberOfDisks = 4 @@ -813,7 +880,7 @@ func TestPutObjectSmallInlineData(t *testing.T) { } // Cleanup backend directories. - defer obj.Shutdown(context.Background()) + defer obj.Shutdown(t.Context()) defer removeRoots(fsDirs) bucket := "bucket" @@ -1041,7 +1108,6 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin {parts7, errs7, 11, 11, parts7SC, nil}, } for _, tt := range tests { - tt := tt t.(*testing.T).Run("", func(t *testing.T) { globalStorageClass.Update(tt.storageClassCfg) actualReadQuorum, actualWriteQuorum, err := objectQuorumFromMeta(ctx, tt.parts, tt.errs, storageclass.DefaultParityBlocks(len(erasureDisks))) @@ -1063,7 +1129,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin // In some deployments, one object has data inlined in one disk and not inlined in other disks. func TestGetObjectInlineNotInline(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() // Create a backend with 4 disks named disk{1...4}, this name convention @@ -1083,7 +1149,7 @@ func TestGetObjectInlineNotInline(t *testing.T) { } // cleaning up of temporary test directories - defer objLayer.Shutdown(context.Background()) + defer objLayer.Shutdown(t.Context()) defer removeRoots(fsDirs) // Create a testbucket @@ -1124,7 +1190,7 @@ func TestGetObjectWithOutdatedDisks(t *testing.T) { t.Skip() } - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() // Create an instance of xl backend. @@ -1134,7 +1200,7 @@ func TestGetObjectWithOutdatedDisks(t *testing.T) { } // Cleanup backend directories. - defer obj.Shutdown(context.Background()) + defer obj.Shutdown(t.Context()) defer removeRoots(fsDirs) z := obj.(*erasureServerPools) diff --git a/cmd/erasure-server-pool-decom.go b/cmd/erasure-server-pool-decom.go index 26ff091be532b..834b9fb86c8ac 100644 --- a/cmd/erasure-server-pool-decom.go +++ b/cmd/erasure-server-pool-decom.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2023 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. 
// // This file is part of MinIO Object Storage stack // @@ -25,17 +25,22 @@ import ( "io" "math/rand" "net/http" + "slices" "sort" "strings" "time" "github.com/dustin/go-humanize" "github.com/minio/madmin-go/v3" + "github.com/minio/minio/internal/bucket/lifecycle" + objectlock "github.com/minio/minio/internal/bucket/object/lock" + "github.com/minio/minio/internal/bucket/replication" + "github.com/minio/minio/internal/bucket/versioning" "github.com/minio/minio/internal/hash" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/console" - "github.com/minio/pkg/v2/env" - "github.com/minio/pkg/v2/workers" + "github.com/minio/pkg/v3/console" + "github.com/minio/pkg/v3/env" + "github.com/minio/pkg/v3/workers" ) // PoolDecommissionInfo currently decommissioning information @@ -72,9 +77,6 @@ func (pd *PoolDecommissionInfo) Clone() *PoolDecommissionInfo { if pd == nil { return nil } - if pd.StartTime.IsZero() { - return nil - } return &PoolDecommissionInfo{ StartTime: pd.StartTime, StartSize: pd.StartSize, @@ -97,7 +99,7 @@ func (pd *PoolDecommissionInfo) Clone() *PoolDecommissionInfo { // bucketPop should be called when a bucket is done decommissioning. // Adds the bucket to the list of decommissioned buckets and updates resume numbers. -func (pd *PoolDecommissionInfo) bucketPop(bucket string) { +func (pd *PoolDecommissionInfo) bucketPop(bucket string) bool { pd.DecommissionedBuckets = append(pd.DecommissionedBuckets, bucket) for i, b := range pd.QueuedBuckets { if b == bucket { @@ -109,18 +111,14 @@ func (pd *PoolDecommissionInfo) bucketPop(bucket string) { pd.Prefix = "" // empty this out for the next bucket pd.Object = "" // empty this out for next object } - return + return true } } + return false } func (pd *PoolDecommissionInfo) isBucketDecommissioned(bucket string) bool { - for _, b := range pd.DecommissionedBuckets { - if b == bucket { - return true - } - } - return false + return slices.Contains(pd.DecommissionedBuckets, bucket) } func (pd *PoolDecommissionInfo) bucketPush(bucket decomBucketInfo) { @@ -222,12 +220,12 @@ func (p poolMeta) isBucketDecommissioned(idx int, bucket string) bool { return p.Pools[idx].Decommission.isBucketDecommissioned(bucket) } -func (p *poolMeta) BucketDone(idx int, bucket decomBucketInfo) { +func (p *poolMeta) BucketDone(idx int, bucket decomBucketInfo) bool { if p.Pools[idx].Decommission == nil { // Decommission not in progress. 
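isBucketDecommissioned above is reduced to a single slices.Contains call (standard library, Go 1.21+) in place of the removed search loop. The equivalent on a plain string slice, as a tiny runnable sketch:

package main

import (
	"fmt"
	"slices"
)

func main() {
	decommissioned := []string{"bucket-a", "bucket-b"}

	// Same result as looping over the slice and comparing each entry.
	fmt.Println(slices.Contains(decommissioned, "bucket-a")) // true
	fmt.Println(slices.Contains(decommissioned, "bucket-c")) // false
}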
- return + return false } - p.Pools[idx].Decommission.bucketPop(bucket.String()) + return p.Pools[idx].Decommission.bucketPop(bucket.String()) } func (p poolMeta) ResumeBucketObject(idx int) (bucket, object string) { @@ -235,7 +233,7 @@ func (p poolMeta) ResumeBucketObject(idx int) (bucket, object string) { bucket = p.Pools[idx].Decommission.Bucket object = p.Pools[idx].Decommission.Object } - return + return bucket, object } func (p *poolMeta) TrackCurrentBucketObject(idx int, bucket string, object string) { @@ -356,7 +354,7 @@ func (p *poolMeta) validate(pools []*erasureSets) (bool, error) { update = true } if ok && pi.completed { - return false, fmt.Errorf("pool(%s) = %s is decommissioned, please remove from server command line", humanize.Ordinal(pi.position+1), k) + logger.LogIf(GlobalContext, "decommission", fmt.Errorf("pool(%s) = %s is decommissioned, please remove from server command line", humanize.Ordinal(pi.position+1), k)) } } @@ -469,7 +467,7 @@ func (p poolMeta) save(ctx context.Context, pools []*erasureSets) error { for i, eset := range pools { if err = saveConfig(ctx, eset, poolMetaName, buf); err != nil { if !errors.Is(err, context.Canceled) { - logger.LogIf(ctx, fmt.Errorf("saving pool.bin for pool index %d failed with: %v", i, err)) + storageLogIf(ctx, fmt.Errorf("saving pool.bin for pool index %d failed with: %v", i, err)) } return err } @@ -488,14 +486,11 @@ const ( // in 'pool.bin', this is eventually used for decommissioning the pool. func (z *erasureServerPools) Init(ctx context.Context) error { // Load rebalance metadata if present - err := z.loadRebalanceMeta(ctx) - if err != nil { - return fmt.Errorf("failed to load rebalance data: %w", err) + if err := z.loadRebalanceMeta(ctx); err == nil { + // Start rebalance routine if we can reload rebalance metadata. + z.StartRebalance() } - // Start rebalance routine - z.StartRebalance() - meta := poolMeta{} if err := meta.load(ctx, z.serverPools[0], z.serverPools); err != nil { return err @@ -533,6 +528,10 @@ func (z *erasureServerPools) Init(ctx context.Context) error { if len(poolIndices) > 0 && globalEndpoints[poolIndices[0]].Endpoints[0].IsLocal { go func() { + // Resume decommissioning of pools, but wait 3 minutes for cluster to stabilize. + if err := sleepContext(ctx, 3*time.Minute); err != nil { + return + } r := rand.New(rand.NewSource(time.Now().UnixNano())) for { if err := z.Decommission(ctx, poolIndices...); err != nil { @@ -544,11 +543,11 @@ func (z *erasureServerPools) Init(ctx context.Context) error { return } if configRetriableErrors(err) { - logger.LogIf(ctx, fmt.Errorf("Unable to resume decommission of pools %v: %w: retrying..", pools, err)) + decomLogIf(ctx, fmt.Errorf("Unable to resume decommission of pools %v: %w: retrying..", pools, err)) time.Sleep(time.Second + time.Duration(r.Float64()*float64(5*time.Second))) continue } - logger.LogIf(ctx, fmt.Errorf("Unable to resume decommission of pool %v: %w", pools, err)) + decomLogIf(ctx, fmt.Errorf("Unable to resume decommission of pool %v: %w", pools, err)) return } } @@ -569,6 +568,7 @@ func newPoolMeta(z *erasureServerPools, prevMeta poolMeta) poolMeta { for _, currentPool := range prevMeta.Pools { // Preserve any current pool status. 
if currentPool.CmdLine == pool.endpoints.CmdLine { + currentPool.ID = idx newMeta.Pools = append(newMeta.Pools, currentPool) skip = true break @@ -602,7 +602,7 @@ func (z *erasureServerPools) IsDecommissionRunning() bool { return false } -func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket string, gr *GetObjectReader) (err error) { +func (z *erasureServerPools) decommissionObject(ctx context.Context, idx int, bucket string, gr *GetObjectReader) (err error) { objInfo := gr.ObjInfo defer func() { @@ -617,9 +617,11 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri if objInfo.isMultipart() { res, err := z.NewMultipartUpload(ctx, bucket, objInfo.Name, ObjectOptions{ - VersionID: objInfo.VersionID, - UserDefined: objInfo.UserDefined, - NoAuditLog: true, + VersionID: objInfo.VersionID, + UserDefined: objInfo.UserDefined, + NoAuditLog: true, + SrcPoolIdx: idx, + DataMovement: true, }) if err != nil { return fmt.Errorf("decommissionObject: NewMultipartUpload() %w", err) @@ -654,6 +656,7 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri } } _, err = z.CompleteMultipartUpload(ctx, bucket, objInfo.Name, res.UploadID, parts, ObjectOptions{ + SrcPoolIdx: idx, DataMovement: true, MTime: objInfo.ModTime, NoAuditLog: true, @@ -675,6 +678,7 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri NewPutObjReader(hr), ObjectOptions{ DataMovement: true, + SrcPoolIdx: idx, VersionID: objInfo.VersionID, MTime: objInfo.ModTime, UserDefined: objInfo.UserDefined, @@ -743,7 +747,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool const envDecomWorkers = "_MINIO_DECOMMISSION_WORKERS" workerSize, err := env.GetInt(envDecomWorkers, len(pool.sets)) if err != nil { - logger.LogIf(ctx, fmt.Errorf("invalid workers value err: %v, defaulting to %d", err, len(pool.sets))) + decomLogIf(ctx, fmt.Errorf("invalid workers value err: %v, defaulting to %d", err, len(pool.sets))) workerSize = len(pool.sets) } @@ -756,18 +760,35 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool return err } - vc, _ := globalBucketVersioningSys.Get(bi.Name) + var vc *versioning.Versioning + var lc *lifecycle.Lifecycle + var lr objectlock.Retention + var rcfg *replication.Config + if bi.Name != minioMetaBucket { + vc, err = globalBucketVersioningSys.Get(bi.Name) + if err != nil { + return err + } - // Check if the current bucket has a configured lifecycle policy - lc, _ := globalLifecycleSys.Get(bi.Name) + // Check if the current bucket has a configured lifecycle policy + lc, err = globalLifecycleSys.Get(bi.Name) + if err != nil && !errors.Is(err, BucketLifecycleNotFound{Bucket: bi.Name}) { + return err + } + + // Check if bucket is object locked. + lr, err = globalBucketObjectLockSys.Get(bi.Name) + if err != nil { + return err + } - // Check if bucket is object locked. 
- lr, _ := globalBucketObjectLockSys.Get(bi.Name) - rcfg, _ := getReplicationConfig(ctx, bi.Name) + rcfg, err = getReplicationConfig(ctx, bi.Name) + if err != nil { + return err + } + } for setIdx, set := range pool.sets { - set := set - filterLifecycle := func(bucket, object string, fi FileInfo) bool { if lc == nil { return false @@ -811,7 +832,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool if filterLifecycle(bi.Name, version.Name, version) { expired++ decommissioned++ - stopFn(errors.New("ILM expired object/version will be skipped")) + stopFn(version.Size, errors.New("ILM expired object/version will be skipped")) continue } @@ -819,9 +840,9 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool // to decommission, just skip it, this also includes // any other versions that have already expired. remainingVersions := len(fivs.Versions) - expired - if version.Deleted && remainingVersions == 1 { + if version.Deleted && remainingVersions == 1 && rcfg == nil { decommissioned++ - stopFn(errors.New("DELETE marked object with no other non-current versions will be skipped")) + stopFn(version.Size, errors.New("DELETE marked object with no other non-current versions will be skipped")) continue } @@ -830,6 +851,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool versionID = nullVersionID } + var failure, ignore bool if version.Deleted { _, err := z.DeleteObject(ctx, bi.Name, @@ -842,19 +864,24 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool VersionID: versionID, MTime: version.ModTime, DeleteReplication: version.ReplicationState, + SrcPoolIdx: idx, + DataMovement: true, DeleteMarker: true, // make sure we create a delete marker SkipDecommissioned: true, // make sure we skip the decommissioned pool NoAuditLog: true, }) - var failure bool if err != nil { - if isErrObjectNotFound(err) || isErrVersionNotFound(err) { - err = nil + // This can happen when rebalance stop races with ongoing rebalance workers. + // These rebalance failures can be ignored. + if isErrObjectNotFound(err) || isErrVersionNotFound(err) || isDataMovementOverWriteErr(err) { + ignore = true + stopFn(0, nil) + continue } } - stopFn(err) + stopFn(version.Size, err) if err != nil { - logger.LogIf(ctx, err) + decomLogIf(ctx, err) failure = true } z.poolMetaMutex.Lock() @@ -868,22 +895,26 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool continue } - var failure, ignore bool // gr.Close() is ensured by decommissionObject(). 
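The decommission worker above no longer ignores errors from the per-bucket configuration lookups: for buckets other than the internal metadata bucket it fetches versioning, lifecycle, object-lock retention and replication configuration, tolerates only a missing lifecycle configuration, and aborts on anything else. A rough sketch of that error-handling shape, with placeholder lookup functions standing in for the real MinIO subsystems (globalBucketVersioningSys, globalLifecycleSys, globalBucketObjectLockSys, getReplicationConfig):

package main

import (
	"errors"
	"fmt"
)

var errLifecycleNotFound = errors.New("lifecycle configuration not found")

// Hypothetical lookups; the real code calls the MinIO bucket metadata subsystems.
func getVersioning(bucket string) (string, error)  { return "Enabled", nil }
func getLifecycle(bucket string) (string, error)   { return "", errLifecycleNotFound }
func getObjectLock(bucket string) (string, error)  { return "", nil }
func getReplication(bucket string) (string, error) { return "", nil }

func loadBucketConfigs(bucket string) error {
	if _, err := getVersioning(bucket); err != nil {
		return err // unknown versioning state: do not move data
	}
	// A bucket without lifecycle rules is fine; other lookup errors are fatal.
	if _, err := getLifecycle(bucket); err != nil && !errors.Is(err, errLifecycleNotFound) {
		return err
	}
	if _, err := getObjectLock(bucket); err != nil {
		return err
	}
	if _, err := getReplication(bucket); err != nil {
		return err
	}
	return nil
}

func main() {
	fmt.Println(loadBucketConfigs("testbucket")) // <nil>
}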
- for try := 0; try < 3; try++ { + for range 3 { if version.IsRemote() { if err := z.DecomTieredObject(ctx, bi.Name, version.Name, version, ObjectOptions{ - VersionID: versionID, - MTime: version.ModTime, - UserDefined: version.Metadata, + VersionID: versionID, + MTime: version.ModTime, + UserDefined: version.Metadata, + SrcPoolIdx: idx, + DataMovement: true, }); err != nil { - stopFn(err) - failure = true - logger.LogIf(ctx, err) - continue + if isErrObjectNotFound(err) || isErrVersionNotFound(err) || isDataMovementOverWriteErr(err) { + ignore = true + stopFn(0, nil) + } + } + if !ignore { + stopFn(version.Size, err) + failure = err != nil + decomLogIf(ctx, err) } - stopFn(nil) - failure = false break } gr, err := set.GetObjectNInfo(ctx, @@ -900,31 +931,36 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool if isErrObjectNotFound(err) || isErrVersionNotFound(err) { // object deleted by the application, nothing to do here we move on. ignore = true - stopFn(nil) + stopFn(0, nil) break } if err != nil && !ignore { // if usage-cache.bin is not readable log and ignore it. if bi.Name == minioMetaBucket && strings.Contains(version.Name, dataUsageCacheName) { ignore = true - stopFn(err) - logger.LogIf(ctx, err) + stopFn(version.Size, err) + decomLogIf(ctx, err) break } } if err != nil { failure = true - logger.LogIf(ctx, err) - stopFn(err) + decomLogIf(ctx, err) + stopFn(version.Size, err) continue } - if err = z.decommissionObject(ctx, bi.Name, gr); err != nil { - stopFn(err) + if err = z.decommissionObject(ctx, idx, bi.Name, gr); err != nil { + if isErrObjectNotFound(err) || isErrVersionNotFound(err) || isDataMovementOverWriteErr(err) { + ignore = true + stopFn(0, nil) + break + } + stopFn(version.Size, err) failure = true - logger.LogIf(ctx, err) + decomLogIf(ctx, err) continue } - stopFn(nil) + stopFn(version.Size, nil) failure = false break } @@ -952,16 +988,16 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool NoAuditLog: true, }, ) - stopFn(err) + stopFn(0, err) auditLogDecom(ctx, "DecomDeleteObject", bi.Name, entry.name, "", err) if err != nil { - logger.LogIf(ctx, err) + decomLogIf(ctx, err) } } z.poolMetaMutex.Lock() z.poolMeta.TrackCurrentBucketObject(idx, bi.Name, entry.name) ok, err := z.poolMeta.updateAfter(ctx, idx, z.serverPools, 30*time.Second) - logger.LogIf(ctx, err) + decomLogIf(ctx, err) if ok { globalNotificationSys.ReloadPoolMeta(ctx) } @@ -973,23 +1009,19 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool defer wk.Give() // We will perpetually retry listing if it fails, since we cannot // possibly give up in this matter - for { - if contextCanceled(ctx) { - break - } - + for !contextCanceled(ctx) { err := set.listObjectsToDecommission(ctx, bi, func(entry metaCacheEntry) { wk.Take() go decommissionEntry(entry) }, ) - if err == nil || errors.Is(err, context.Canceled) { + if err == nil || errors.Is(err, context.Canceled) || errors.Is(err, errVolumeNotFound) { break } setN := humanize.Ordinal(setIdx + 1) retryDur := time.Duration(rand.Float64() * float64(5*time.Second)) - logger.LogOnceIf(ctx, fmt.Errorf("listing objects from %s set failed with %v, retrying in %v", setN, err, retryDur), "decom-listing-failed"+setN) + decomLogOnceIf(ctx, fmt.Errorf("listing objects from %s set failed with %v, retrying in %v", setN, err, retryDur), "decom-listing-failed"+setN) time.Sleep(retryDur) } }(setIdx) @@ -1013,7 +1045,7 @@ const ( decomMetricDecommissionRemoveObject ) -func decomTrace(d 
decomMetric, poolIdx int, startTime time.Time, duration time.Duration, path string, err error) madmin.TraceInfo { +func decomTrace(d decomMetric, poolIdx int, startTime time.Time, duration time.Duration, path string, err error, sz int64) madmin.TraceInfo { var errStr string if err != nil { errStr = err.Error() @@ -1026,15 +1058,16 @@ func decomTrace(d decomMetric, poolIdx int, startTime time.Time, duration time.D Duration: duration, Path: path, Error: errStr, + Bytes: sz, } } -func (m *decomMetrics) log(d decomMetric, poolIdx int, paths ...string) func(err error) { +func (m *decomMetrics) log(d decomMetric, poolIdx int, paths ...string) func(z int64, err error) { startTime := time.Now() - return func(err error) { + return func(sz int64, err error) { duration := time.Since(startTime) if globalTrace.NumSubscribers(madmin.TraceDecommission) > 0 { - globalTrace.Publish(decomTrace(d, poolIdx, startTime, duration, strings.Join(paths, " "), err)) + globalTrace.Publish(decomTrace(d, poolIdx, startTime, duration, strings.Join(paths, " "), err, sz)) } } } @@ -1055,8 +1088,10 @@ func (z *erasureServerPools) decommissionInBackground(ctx context.Context, idx i } z.poolMetaMutex.Lock() - z.poolMeta.BucketDone(idx, bucket) // remove from pendingBuckets and persist. - z.poolMeta.save(ctx, z.serverPools) + if z.poolMeta.BucketDone(idx, bucket) { + // remove from pendingBuckets and persist. + decomLogIf(ctx, z.poolMeta.save(ctx, z.serverPools)) + } z.poolMetaMutex.Unlock() continue } @@ -1065,14 +1100,15 @@ func (z *erasureServerPools) decommissionInBackground(ctx context.Context, idx i } stopFn := globalDecommissionMetrics.log(decomMetricDecommissionBucket, idx, bucket.Name) if err := z.decommissionPool(ctx, idx, pool, bucket); err != nil { - stopFn(err) + stopFn(0, err) return err } - stopFn(nil) + stopFn(0, nil) z.poolMetaMutex.Lock() - z.poolMeta.BucketDone(idx, bucket) - z.poolMeta.save(ctx, z.serverPools) + if z.poolMeta.BucketDone(idx, bucket) { + decomLogIf(ctx, z.poolMeta.save(ctx, z.serverPools)) + } z.poolMetaMutex.Unlock() } return nil @@ -1087,14 +1123,33 @@ func (z *erasureServerPools) checkAfterDecom(ctx context.Context, idx int) error pool := z.serverPools[idx] for _, set := range pool.sets { for _, bi := range buckets { - vc, _ := globalBucketVersioningSys.Get(bi.Name) + var vc *versioning.Versioning + var lc *lifecycle.Lifecycle + var lr objectlock.Retention + var rcfg *replication.Config + if bi.Name != minioMetaBucket { + vc, err = globalBucketVersioningSys.Get(bi.Name) + if err != nil { + return err + } + + // Check if the current bucket has a configured lifecycle policy + lc, err = globalLifecycleSys.Get(bi.Name) + if err != nil && !errors.Is(err, BucketLifecycleNotFound{Bucket: bi.Name}) { + return err + } - // Check if the current bucket has a configured lifecycle policy - lc, _ := globalLifecycleSys.Get(bi.Name) + // Check if bucket is object locked. + lr, err = globalBucketObjectLockSys.Get(bi.Name) + if err != nil { + return err + } - // Check if bucket is object locked. 
- lr, _ := globalBucketObjectLockSys.Get(bi.Name) - rcfg, _ := getReplicationConfig(ctx, bi.Name) + rcfg, err = getReplicationConfig(ctx, bi.Name) + if err != nil { + return err + } + } filterLifecycle := func(bucket, object string, fi FileInfo) bool { if lc == nil { @@ -1117,36 +1172,38 @@ func (z *erasureServerPools) checkAfterDecom(ctx context.Context, idx int) error } var versionsFound int - err := set.listObjectsToDecommission(ctx, bi, func(entry metaCacheEntry) { + if err = set.listObjectsToDecommission(ctx, bi, func(entry metaCacheEntry) { if !entry.isObject() { return } + // `.usage-cache.bin` still exists, must be not readable ignore it. + if bi.Name == minioMetaBucket && strings.Contains(entry.name, dataUsageCacheName) { + // skipping bucket usage cache name, as its autogenerated. + return + } + fivs, err := entry.fileInfoVersions(bi.Name) if err != nil { return } - // We need a reversed order for decommissioning, - // to create the appropriate stack. - versionsSorter(fivs.Versions).reverse() + var ignored int for _, version := range fivs.Versions { // Apply lifecycle rules on the objects that are expired. if filterLifecycle(bi.Name, version.Name, version) { + ignored++ continue } - - // `.usage-cache.bin` still exists, must be not readable ignore it. - if bi.Name == minioMetaBucket && strings.Contains(version.Name, dataUsageCacheName) { - // skipping bucket usage cache name, as its autogenerated. + if version.Deleted { + ignored++ continue } - - versionsFound++ } - }) - if err != nil { + + versionsFound += len(fivs.Versions) - ignored + }); err != nil { return err } @@ -1169,8 +1226,8 @@ func (z *erasureServerPools) doDecommissionInRoutine(ctx context.Context, idx in dctx = logger.SetReqInfo(dctx, &logger.ReqInfo{}) if err := z.decommissionInBackground(dctx, idx); err != nil { - logger.LogIf(GlobalContext, err) - logger.LogIf(GlobalContext, z.DecommissionFailed(dctx, idx)) + decomLogIf(GlobalContext, err) + decomLogIf(GlobalContext, z.DecommissionFailed(dctx, idx)) return } @@ -1180,20 +1237,20 @@ func (z *erasureServerPools) doDecommissionInRoutine(ctx context.Context, idx in z.poolMetaMutex.Unlock() if !failed { - logger.Event(dctx, "Decommissioning complete for pool '%s', verifying for any pending objects", poolCmdLine) + decomLogEvent(dctx, "Decommissioning complete for pool '%s', verifying for any pending objects", poolCmdLine) err := z.checkAfterDecom(dctx, idx) if err != nil { - logger.LogIf(ctx, err) + decomLogIf(ctx, err) failed = true } } if failed { // Decommission failed indicate as such. - logger.LogIf(GlobalContext, z.DecommissionFailed(dctx, idx)) + decomLogIf(GlobalContext, z.DecommissionFailed(dctx, idx)) } else { // Complete the decommission.. 
- logger.LogIf(GlobalContext, z.CompleteDecommission(dctx, idx)) + decomLogIf(GlobalContext, z.CompleteDecommission(dctx, idx)) } } @@ -1280,11 +1337,7 @@ func (z *erasureServerPools) Status(ctx context.Context, idx int) (PoolStatus, e poolInfo := z.poolMeta.Pools[idx].Clone() if poolInfo.Decommission != nil { poolInfo.Decommission.TotalSize = pi.Total - if poolInfo.Decommission.Failed || poolInfo.Decommission.Canceled { - poolInfo.Decommission.CurrentSize = pi.Free - } else { - poolInfo.Decommission.CurrentSize = poolInfo.Decommission.StartSize + poolInfo.Decommission.BytesDone - } + poolInfo.Decommission.CurrentSize = pi.Free } else { poolInfo.Decommission = &PoolDecommissionInfo{ TotalSize: pi.Total, @@ -1403,16 +1456,18 @@ func (z *erasureServerPools) getBucketsToDecommission(ctx context.Context) ([]de // Buckets data are dispersed in multiple zones/sets, make // sure to decommission the necessary metadata. - decomBuckets = append(decomBuckets, decomBucketInfo{ - Name: minioMetaBucket, - Prefix: minioConfigPrefix, - }) - decomBuckets = append(decomBuckets, decomBucketInfo{ - Name: minioMetaBucket, - Prefix: bucketMetaPrefix, - }) + decomMetaBuckets := []decomBucketInfo{ + { + Name: minioMetaBucket, + Prefix: minioConfigPrefix, + }, + { + Name: minioMetaBucket, + Prefix: bucketMetaPrefix, + }, + } - return decomBuckets, nil + return append(decomMetaBuckets, decomBuckets...), nil } func (z *erasureServerPools) StartDecommission(ctx context.Context, indices ...int) (err error) { diff --git a/cmd/erasure-server-pool-decom_gen.go b/cmd/erasure-server-pool-decom_gen.go index 7fed2ce452d94..a4c6b56fd4866 100644 --- a/cmd/erasure-server-pool-decom_gen.go +++ b/cmd/erasure-server-pool-decom_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) diff --git a/cmd/erasure-server-pool-decom_gen_test.go b/cmd/erasure-server-pool-decom_gen_test.go index 56d4aedbbc02d..47a08be1981f2 100644 --- a/cmd/erasure-server-pool-decom_gen_test.go +++ b/cmd/erasure-server-pool-decom_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "bytes" "testing" diff --git a/cmd/erasure-server-pool-decom_test.go b/cmd/erasure-server-pool-decom_test.go index ee438aeecc0f2..7f4c8ef178f41 100644 --- a/cmd/erasure-server-pool-decom_test.go +++ b/cmd/erasure-server-pool-decom_test.go @@ -32,9 +32,9 @@ func prepareErasurePools() (ObjectLayer, []string, error) { pools := mustGetPoolEndpoints(0, fsDirs[:16]...) pools = append(pools, mustGetPoolEndpoints(1, fsDirs[16:]...)...) 
- // Everything is fine, should return nil - objLayer, err := newErasureServerPools(context.Background(), pools) + objLayer, _, err := initObjectLayer(context.Background(), pools) if err != nil { + removeRoots(fsDirs) return nil, nil, err } return objLayer, fsDirs, nil @@ -134,7 +134,7 @@ func TestPoolMetaValidate(t *testing.T) { meta: nmeta1, pools: pools, name: "Invalid-Completed-Pool-Not-Removed", - expectedErr: true, + expectedErr: false, expectedUpdate: false, }, { @@ -176,7 +176,6 @@ func TestPoolMetaValidate(t *testing.T) { t.Parallel() for _, testCase := range testCases { - testCase := testCase t.Run(testCase.name, func(t *testing.T) { update, err := testCase.meta.validate(testCase.pools) if testCase.expectedErr { diff --git a/cmd/erasure-server-pool-rebalance.go b/cmd/erasure-server-pool-rebalance.go index 17c1d05fdfa11..c037f0ddc0807 100644 --- a/cmd/erasure-server-pool-rebalance.go +++ b/cmd/erasure-server-pool-rebalance.go @@ -32,11 +32,15 @@ import ( "github.com/dustin/go-humanize" "github.com/lithammer/shortuuid/v4" "github.com/minio/madmin-go/v3" + "github.com/minio/minio/internal/bucket/lifecycle" + objectlock "github.com/minio/minio/internal/bucket/object/lock" + "github.com/minio/minio/internal/bucket/replication" + "github.com/minio/minio/internal/bucket/versioning" "github.com/minio/minio/internal/hash" xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/env" - "github.com/minio/pkg/v2/workers" + "github.com/minio/pkg/v3/env" + "github.com/minio/pkg/v3/workers" ) //go:generate msgp -file $GOFILE -unexported @@ -94,34 +98,24 @@ type rebalanceInfo struct { // rebalanceMeta contains information pertaining to an ongoing rebalance operation. type rebalanceMeta struct { - cancel context.CancelFunc `msg:"-"` // to be invoked on rebalance-stop - lastRefreshedAt time.Time `msg:"-"` - StoppedAt time.Time `msg:"stopTs"` // Time when rebalance-stop was issued. - ID string `msg:"id"` // ID of the ongoing rebalance operation - PercentFreeGoal float64 `msg:"pf"` // Computed from total free space and capacity at the start of rebalance - PoolStats []*rebalanceStats `msg:"rss"` // Per-pool rebalance stats keyed by pool index + StoppedAt time.Time `msg:"stopTs"` // Time when rebalance-stop was issued. 
+ ID string `msg:"id"` // ID of the ongoing rebalance operation + PercentFreeGoal float64 `msg:"pf"` // Computed from total free space and capacity at the start of rebalance + PoolStats []*rebalanceStats `msg:"rss"` // Per-pool rebalance stats keyed by pool index } var errRebalanceNotStarted = errors.New("rebalance not started") func (z *erasureServerPools) loadRebalanceMeta(ctx context.Context) error { r := &rebalanceMeta{} - err := r.load(ctx, z.serverPools[0]) - if err != nil { - if errors.Is(err, errConfigNotFound) { - return nil - } - return err - } - - z.rebalMu.Lock() - if len(r.PoolStats) == len(z.serverPools) { + if err := r.load(ctx, z.serverPools[0]); err == nil { + z.rebalMu.Lock() z.rebalMeta = r - } else { z.updateRebalanceStats(ctx) + z.rebalMu.Unlock() + } else if !errors.Is(err, errConfigNotFound) { + rebalanceLogIf(ctx, fmt.Errorf("failed to load rebalance metadata, continue to restart rebalance as needed: %w", err)) } - z.rebalMu.Unlock() - return nil } @@ -143,25 +137,17 @@ func (z *erasureServerPools) updateRebalanceStats(ctx context.Context) error { } } if ok { - lock := z.serverPools[0].NewNSLock(minioMetaBucket, rebalMetaName) - lkCtx, err := lock.GetLock(ctx, globalOperationTimeout) - if err != nil { - logger.LogIf(ctx, fmt.Errorf("failed to acquire write lock on %s/%s: %w", minioMetaBucket, rebalMetaName, err)) - return err - } - defer lock.Unlock(lkCtx) - - ctx = lkCtx.Context() - - noLockOpts := ObjectOptions{NoLock: true} - return z.rebalMeta.saveWithOpts(ctx, z.serverPools[0], noLockOpts) + return z.rebalMeta.save(ctx, z.serverPools[0]) } return nil } func (z *erasureServerPools) findIndex(index int) int { - for i := 0; i < len(z.rebalMeta.PoolStats); i++ { + if z.rebalMeta == nil { + return 0 + } + for i := range len(z.rebalMeta.PoolStats) { if i == index { return index } @@ -273,6 +259,10 @@ func (z *erasureServerPools) bucketRebalanceDone(bucket string, poolIdx int) { z.rebalMu.Lock() defer z.rebalMu.Unlock() + if z.rebalMeta == nil { + return + } + ps := z.rebalMeta.PoolStats[poolIdx] if ps == nil { return @@ -321,12 +311,14 @@ func (r *rebalanceMeta) loadWithOpts(ctx context.Context, store objectIO, opts O return err } - r.lastRefreshedAt = time.Now() - return nil } func (r *rebalanceMeta) saveWithOpts(ctx context.Context, store objectIO, opts ObjectOptions) error { + if r == nil { + return nil + } + data := make([]byte, 4, r.Msgsize()+4) // Initialize the header. 
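loadRebalanceMeta above is made tolerant: a successful load installs the on-disk state and refreshes the per-pool stats, errConfigNotFound simply means no rebalance was ever recorded, and any other error is only logged so a later rebalance can still be started. A condensed sketch of that decision tree, with a placeholder load function standing in for rebalanceMeta.load:

package main

import (
	"errors"
	"fmt"
)

var errConfigNotFound = errors.New("config file not found")

type rebalanceMeta struct{ ID string }

// loadMeta is a stand-in for (*rebalanceMeta).load against the first pool.
func loadMeta() (*rebalanceMeta, error) { return nil, errConfigNotFound }

func loadRebalanceMeta() *rebalanceMeta {
	r, err := loadMeta()
	switch {
	case err == nil:
		return r // adopt the persisted state
	case errors.Is(err, errConfigNotFound):
		return nil // nothing recorded, nothing to resume
	default:
		fmt.Println("failed to load rebalance metadata, continuing:", err)
		return nil
	}
}

func main() {
	fmt.Println(loadRebalanceMeta()) // <nil>
}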
@@ -345,12 +337,20 @@ func (r *rebalanceMeta) save(ctx context.Context, store objectIO) error { return r.saveWithOpts(ctx, store, ObjectOptions{}) } -func (z *erasureServerPools) IsRebalanceStarted() bool { +func (z *erasureServerPools) IsRebalanceStarted(ctx context.Context) bool { + _ = z.loadRebalanceMeta(ctx) z.rebalMu.RLock() defer z.rebalMu.RUnlock() - if r := z.rebalMeta; r != nil { - if r.StoppedAt.IsZero() { + r := z.rebalMeta + if r == nil { + return false + } + if !r.StoppedAt.IsZero() { + return false + } + for _, ps := range r.PoolStats { + if ps.Participating && ps.Info.Status != rebalCompleted { return true } } @@ -365,14 +365,14 @@ func (z *erasureServerPools) IsPoolRebalancing(poolIndex int) bool { if !r.StoppedAt.IsZero() { return false } - ps := z.rebalMeta.PoolStats[poolIndex] + ps := r.PoolStats[poolIndex] return ps.Participating && ps.Info.Status == rebalStarted } return false } func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int) (err error) { - doneCh := make(chan struct{}) + doneCh := make(chan error, 1) defer xioutil.SafeClose(doneCh) // Save rebalance.bin periodically. @@ -387,57 +387,66 @@ func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int) timer := time.NewTimer(randSleepFor()) defer timer.Stop() - var rebalDone bool - var traceMsg string + + var ( + quit bool + traceMsg string + notify bool // if status changed, notify nodes to reload rebalance metadata + ) for { select { - case <-doneCh: - // rebalance completed for poolIdx + case rebalErr := <-doneCh: + quit = true + notify = true now := time.Now() - z.rebalMu.Lock() - z.rebalMeta.PoolStats[poolIdx].Info.Status = rebalCompleted - z.rebalMeta.PoolStats[poolIdx].Info.EndTime = now - z.rebalMu.Unlock() - - rebalDone = true - traceMsg = fmt.Sprintf("completed at %s", now) - - case <-ctx.Done(): + var status rebalStatus + + switch { + case errors.Is(rebalErr, context.Canceled): + status = rebalStopped + traceMsg = fmt.Sprintf("stopped at %s", now) + case rebalErr == nil: + status = rebalCompleted + traceMsg = fmt.Sprintf("completed at %s", now) + default: + status = rebalFailed + traceMsg = fmt.Sprintf("stopped at %s with err: %v", now, rebalErr) + } - // rebalance stopped for poolIdx - now := time.Now() z.rebalMu.Lock() - z.rebalMeta.PoolStats[poolIdx].Info.Status = rebalStopped + z.rebalMeta.PoolStats[poolIdx].Info.Status = status z.rebalMeta.PoolStats[poolIdx].Info.EndTime = now - z.rebalMeta.cancel = nil // remove the already used context.CancelFunc z.rebalMu.Unlock() - rebalDone = true - traceMsg = fmt.Sprintf("stopped at %s", now) - case <-timer.C: + notify = false traceMsg = fmt.Sprintf("saved at %s", time.Now()) } stopFn := globalRebalanceMetrics.log(rebalanceMetricSaveMetadata, poolIdx, traceMsg) - err := z.saveRebalanceStats(ctx, poolIdx, rebalSaveStats) - stopFn(err) - logger.LogIf(ctx, err) - timer.Reset(randSleepFor()) + err := z.saveRebalanceStats(GlobalContext, poolIdx, rebalSaveStats) + stopFn(0, err) + if err == nil && notify { + globalNotificationSys.LoadRebalanceMeta(GlobalContext, false) + } + rebalanceLogIf(GlobalContext, err) - if rebalDone { + if quit { return } + + timer.Reset(randSleepFor()) } }() - logger.Event(ctx, "Pool %d rebalancing is started", poolIdx+1) + rebalanceLogEvent(ctx, "Pool %d rebalancing is started", poolIdx+1) for { select { case <-ctx.Done(): - return + doneCh <- ctx.Err() + return err default: } @@ -448,17 +457,20 @@ func (z *erasureServerPools) rebalanceBuckets(ctx context.Context, poolIdx int) } stopFn := 
globalRebalanceMetrics.log(rebalanceMetricRebalanceBucket, poolIdx, bucket) - err = z.rebalanceBucket(ctx, bucket, poolIdx) - if err != nil { - stopFn(err) - logger.LogIf(ctx, err) - return + if err = z.rebalanceBucket(ctx, bucket, poolIdx); err != nil { + stopFn(0, err) + if errors.Is(err, errServerNotInitialized) || errors.Is(err, errBucketMetadataNotInitialized) { + continue + } + rebalanceLogIf(GlobalContext, err) + doneCh <- err + return err } - stopFn(nil) + stopFn(0, nil) z.bucketRebalanceDone(bucket, poolIdx) } - logger.Event(ctx, "Pool %d rebalancing is done", poolIdx+1) + rebalanceLogEvent(GlobalContext, "Pool %d rebalancing is done", poolIdx+1) return err } @@ -521,21 +533,43 @@ func (set *erasureObjects) listObjectsToRebalance(ctx context.Context, bucketNam } // rebalanceBucket rebalances objects under bucket in poolIdx pool -func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, poolIdx int) error { +func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, poolIdx int) (err error) { ctx = logger.SetReqInfo(ctx, &logger.ReqInfo{}) - vc, _ := globalBucketVersioningSys.Get(bucket) - // Check if the current bucket has a configured lifecycle policy - lc, _ := globalLifecycleSys.Get(bucket) - // Check if bucket is object locked. - lr, _ := globalBucketObjectLockSys.Get(bucket) - rcfg, _ := getReplicationConfig(ctx, bucket) + + var vc *versioning.Versioning + var lc *lifecycle.Lifecycle + var lr objectlock.Retention + var rcfg *replication.Config + if bucket != minioMetaBucket { + vc, err = globalBucketVersioningSys.Get(bucket) + if err != nil { + return err + } + + // Check if the current bucket has a configured lifecycle policy + lc, err = globalLifecycleSys.Get(bucket) + if err != nil && !errors.Is(err, BucketLifecycleNotFound{Bucket: bucket}) { + return err + } + + // Check if bucket is object locked. + lr, err = globalBucketObjectLockSys.Get(bucket) + if err != nil { + return err + } + + rcfg, err = getReplicationConfig(ctx, bucket) + if err != nil { + return err + } + } pool := z.serverPools[poolIdx] const envRebalanceWorkers = "_MINIO_REBALANCE_WORKERS" workerSize, err := env.GetInt(envRebalanceWorkers, len(pool.sets)) if err != nil { - logger.LogIf(ctx, fmt.Errorf("invalid workers value err: %v, defaulting to %d", err, len(pool.sets))) + rebalanceLogIf(ctx, fmt.Errorf("invalid workers value err: %v, defaulting to %d", err, len(pool.sets))) workerSize = len(pool.sets) } @@ -549,8 +583,6 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, } for setIdx, set := range pool.sets { - set := set - filterLifecycle := func(bucket, object string, fi FileInfo) bool { if lc == nil { return false @@ -563,7 +595,6 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, globalExpiryState.enqueueByDays(objInfo, evt, lcEventSrc_Rebal) return true } - return false } @@ -590,14 +621,18 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, var rebalanced, expired int for _, version := range fivs.Versions { + stopFn := globalRebalanceMetrics.log(rebalanceMetricRebalanceObject, poolIdx, bucket, version.Name, version.VersionID) + // Skip transitioned objects for now. TBD if version.IsRemote() { + stopFn(version.Size, errors.New("ILM Tiered version will be skipped for now")) continue } // Apply lifecycle rules on the objects that are expired. 
if filterLifecycle(bucket, version.Name, version) { expired++ + stopFn(version.Size, errors.New("ILM expired object/version will be skipped")) continue } @@ -607,6 +642,7 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, remainingVersions := len(fivs.Versions) - expired if version.Deleted && remainingVersions == 1 { rebalanced++ + stopFn(version.Size, errors.New("DELETE marked object with no other non-current versions will be skipped")) continue } @@ -615,6 +651,7 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, versionID = nullVersionID } + var failure, ignore bool if version.Deleted { _, err := z.DeleteObject(ctx, bucket, @@ -624,16 +661,26 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, VersionID: versionID, MTime: version.ModTime, DeleteReplication: version.ReplicationState, + SrcPoolIdx: poolIdx, + DataMovement: true, DeleteMarker: true, // make sure we create a delete marker SkipRebalancing: true, // make sure we skip the decommissioned pool NoAuditLog: true, }) - var failure bool - if err != nil && !isErrObjectNotFound(err) && !isErrVersionNotFound(err) { - logger.LogIf(ctx, err) - failure = true + // This can happen when rebalance stop races with ongoing rebalance workers. + // These rebalance failures can be ignored. + if err != nil { + // This can happen when rebalance stop races with ongoing rebalance workers. + // These rebalance failures can be ignored. + if isErrObjectNotFound(err) || isErrVersionNotFound(err) || isDataMovementOverWriteErr(err) { + ignore = true + stopFn(0, nil) + continue + } } - + stopFn(version.Size, err) + rebalanceLogIf(ctx, err) + failure = err != nil if !failure { z.updatePoolStats(poolIdx, bucket, version) rebalanced++ @@ -642,10 +689,8 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, continue } - var failure, ignore bool - for try := 0; try < 3; try++ { + for range 3 { // GetObjectReader.Close is called by rebalanceObject - stopFn := globalRebalanceMetrics.log(rebalanceMetricRebalanceObject, poolIdx, bucket, version.Name, version.VersionID) gr, err := set.GetObjectNInfo(ctx, bucket, encodeDirObject(version.Name), @@ -660,24 +705,31 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, if isErrObjectNotFound(err) || isErrVersionNotFound(err) { // object deleted by the application, nothing to do here we move on. ignore = true - stopFn(nil) + stopFn(0, nil) break } if err != nil { failure = true - logger.LogIf(ctx, err) - stopFn(err) + rebalanceLogIf(ctx, err) + stopFn(0, err) continue } - if err = z.rebalanceObject(ctx, bucket, gr); err != nil { + if err = z.rebalanceObject(ctx, poolIdx, bucket, gr); err != nil { + // This can happen when rebalance stop races with ongoing rebalance workers. + // These rebalance failures can be ignored. 
+ if isErrObjectNotFound(err) || isErrVersionNotFound(err) || isDataMovementOverWriteErr(err) { + ignore = true + stopFn(0, nil) + break + } failure = true - logger.LogIf(ctx, err) - stopFn(err) + rebalanceLogIf(ctx, err) + stopFn(version.Size, err) continue } - stopFn(nil) + stopFn(version.Size, nil) failure = false break } @@ -703,10 +755,10 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, NoAuditLog: true, }, ) - stopFn(err) + stopFn(0, err) auditLogRebalance(ctx, "Rebalance:DeleteObject", bucket, entry.name, "", err) if err != nil { - logger.LogIf(ctx, err) + rebalanceLogIf(ctx, err) } } } @@ -724,7 +776,7 @@ func (z *erasureServerPools) rebalanceBucket(ctx context.Context, bucket string, return } setN := humanize.Ordinal(setIdx + 1) - logger.LogOnceIf(ctx, fmt.Errorf("listing objects from %s set failed with %v", setN, err), "rebalance-listing-failed"+setN) + rebalanceLogIf(ctx, fmt.Errorf("listing objects from %s set failed with %v", setN, err), "rebalance-listing-failed"+setN) }(setIdx) } @@ -743,7 +795,7 @@ func (z *erasureServerPools) saveRebalanceStats(ctx context.Context, poolIdx int lock := z.serverPools[0].NewNSLock(minioMetaBucket, rebalMetaName) lkCtx, err := lock.GetLock(ctx, globalOperationTimeout) if err != nil { - logger.LogIf(ctx, fmt.Errorf("failed to acquire write lock on %s/%s: %w", minioMetaBucket, rebalMetaName, err)) + rebalanceLogIf(ctx, fmt.Errorf("failed to acquire write lock on %s/%s: %w", minioMetaBucket, rebalMetaName, err)) return err } defer lock.Unlock(lkCtx) @@ -751,18 +803,27 @@ func (z *erasureServerPools) saveRebalanceStats(ctx context.Context, poolIdx int ctx = lkCtx.Context() noLockOpts := ObjectOptions{NoLock: true} r := &rebalanceMeta{} - if err := r.loadWithOpts(ctx, z.serverPools[0], noLockOpts); err != nil { + err = r.loadWithOpts(ctx, z.serverPools[0], noLockOpts) + if err != nil && !errors.Is(err, errConfigNotFound) { return err } z.rebalMu.Lock() defer z.rebalMu.Unlock() + // if not found, we store the memory metadata back + // when rebalance status changed, will notify all nodes update status to memory, we can treat the memory metadata is the latest status + if errors.Is(err, errConfigNotFound) { + r = z.rebalMeta + } + switch opts { case rebalSaveStoppedAt: r.StoppedAt = time.Now() case rebalSaveStats: - r.PoolStats[poolIdx] = z.rebalMeta.PoolStats[poolIdx] + if z.rebalMeta != nil { + r.PoolStats[poolIdx] = z.rebalMeta.PoolStats[poolIdx] + } } z.rebalMeta = r @@ -784,7 +845,7 @@ func auditLogRebalance(ctx context.Context, apiName, bucket, object, versionID s }) } -func (z *erasureServerPools) rebalanceObject(ctx context.Context, bucket string, gr *GetObjectReader) (err error) { +func (z *erasureServerPools) rebalanceObject(ctx context.Context, poolIdx int, bucket string, gr *GetObjectReader) (err error) { oi := gr.ObjInfo defer func() { @@ -799,9 +860,11 @@ func (z *erasureServerPools) rebalanceObject(ctx context.Context, bucket string, if oi.isMultipart() { res, err := z.NewMultipartUpload(ctx, bucket, oi.Name, ObjectOptions{ - VersionID: oi.VersionID, - UserDefined: oi.UserDefined, - NoAuditLog: true, + VersionID: oi.VersionID, + UserDefined: oi.UserDefined, + NoAuditLog: true, + DataMovement: true, + SrcPoolIdx: poolIdx, }) if err != nil { return fmt.Errorf("rebalanceObject: NewMultipartUpload() %w", err) @@ -853,6 +916,7 @@ func (z *erasureServerPools) rebalanceObject(ctx context.Context, bucket string, oi.Name, NewPutObjReader(hr), ObjectOptions{ + SrcPoolIdx: poolIdx, DataMovement: true, VersionID: 
oi.VersionID, MTime: oi.ModTime, @@ -876,7 +940,7 @@ func (z *erasureServerPools) StartRebalance() { return } ctx, cancel := context.WithCancel(GlobalContext) - z.rebalMeta.cancel = cancel // to be used when rebalance-stop is called + z.rebalCancel = cancel // to be used when rebalance-stop is called z.rebalMu.Unlock() z.rebalMu.RLock() @@ -903,7 +967,7 @@ func (z *erasureServerPools) StartRebalance() { go func(idx int) { stopfn := globalRebalanceMetrics.log(rebalanceMetricRebalanceBuckets, idx) err := z.rebalanceBuckets(ctx, idx) - stopfn(err) + stopfn(0, err) }(poolIdx) } } @@ -919,10 +983,9 @@ func (z *erasureServerPools) StopRebalance() error { return nil } - if cancel := r.cancel; cancel != nil { - // cancel != nil only on pool leaders - r.cancel = nil + if cancel := z.rebalCancel; cancel != nil { cancel() + z.rebalCancel = nil } return nil } @@ -943,7 +1006,9 @@ const ( rebalanceMetricSaveMetadata ) -func rebalanceTrace(r rebalanceMetric, poolIdx int, startTime time.Time, duration time.Duration, err error, path string) madmin.TraceInfo { +var errDataMovementSrcDstPoolSame = errors.New("source and destination pool are the same") + +func rebalanceTrace(r rebalanceMetric, poolIdx int, startTime time.Time, duration time.Duration, err error, path string, sz int64) madmin.TraceInfo { var errStr string if err != nil { errStr = err.Error() @@ -956,15 +1021,16 @@ func rebalanceTrace(r rebalanceMetric, poolIdx int, startTime time.Time, duratio Duration: duration, Path: path, Error: errStr, + Bytes: sz, } } -func (p *rebalanceMetrics) log(r rebalanceMetric, poolIdx int, paths ...string) func(err error) { +func (p *rebalanceMetrics) log(r rebalanceMetric, poolIdx int, paths ...string) func(sz int64, err error) { startTime := time.Now() - return func(err error) { + return func(sz int64, err error) { duration := time.Since(startTime) if globalTrace.NumSubscribers(madmin.TraceRebalance) > 0 { - globalTrace.Publish(rebalanceTrace(r, poolIdx, startTime, duration, err, strings.Join(paths, " "))) + globalTrace.Publish(rebalanceTrace(r, poolIdx, startTime, duration, err, strings.Join(paths, " "), sz)) } } } diff --git a/cmd/erasure-server-pool-rebalance_gen.go b/cmd/erasure-server-pool-rebalance_gen.go index a0cbd56f4e251..842adc1103325 100644 --- a/cmd/erasure-server-pool-rebalance_gen.go +++ b/cmd/erasure-server-pool-rebalance_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) @@ -614,6 +614,7 @@ func (z *rebalanceMetrics) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z rebalanceMetrics) EncodeMsg(en *msgp.Writer) (err error) { // map header, size 0 + _ = z err = en.Append(0x80) if err != nil { return @@ -625,6 +626,7 @@ func (z rebalanceMetrics) EncodeMsg(en *msgp.Writer) (err error) { func (z rebalanceMetrics) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) // map header, size 0 + _ = z o = append(o, 0x80) return } diff --git a/cmd/erasure-server-pool-rebalance_gen_test.go b/cmd/erasure-server-pool-rebalance_gen_test.go index 0f0b8f46684f2..4365ccfa04213 100644 --- a/cmd/erasure-server-pool-rebalance_gen_test.go +++ b/cmd/erasure-server-pool-rebalance_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package cmd + import ( "bytes" "testing" diff --git a/cmd/erasure-server-pool.go b/cmd/erasure-server-pool.go index d6109686c206a..6b12ec4fced88 100644 --- a/cmd/erasure-server-pool.go +++ b/cmd/erasure-server-pool.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2021 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. // // This file is part of MinIO Object Storage stack // @@ -43,16 +43,19 @@ import ( "github.com/minio/minio/internal/config/storageclass" xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/sync/errgroup" - "github.com/minio/pkg/v2/wildcard" + "github.com/minio/pkg/v3/sync/errgroup" + "github.com/minio/pkg/v3/wildcard" + "github.com/minio/pkg/v3/workers" + "github.com/puzpuzpuz/xsync/v3" ) type erasureServerPools struct { poolMetaMutex sync.RWMutex poolMeta poolMeta - rebalMu sync.RWMutex - rebalMeta *rebalanceMeta + rebalMu sync.RWMutex + rebalMeta *rebalanceMeta + rebalCancel context.CancelFunc deploymentID [16]byte distributionAlgo string @@ -63,6 +66,8 @@ type erasureServerPools struct { decommissionCancelers []context.CancelFunc s3Peer *S3PeerSys + + mpCache *xsync.MapOf[string, MultipartInfo] } func (z *erasureServerPools) SinglePool() bool { @@ -103,8 +108,13 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServ // Initialize byte pool once for all sets, bpool size is set to // setCount * setDriveCount with each memory upto blockSizeV2. - globalBytePoolCap = bpool.NewBytePoolCap(n, blockSizeV2, blockSizeV2*2) - globalBytePoolCap.Populate() + buffers := bpool.NewBytePoolCap(n, blockSizeV2, blockSizeV2*2) + if n >= 16384 { + // pre-populate buffers only n >= 16384 which is (32Gi/2Mi) + // for all setups smaller than this avoid pre-alloc. + buffers.Populate() + } + globalBytePoolCap.Store(buffers) var localDrives []StorageAPI local := endpointServerPools.FirstLocal() @@ -163,7 +173,10 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServ if !globalIsDistErasure { globalLocalDrivesMu.Lock() - globalLocalDrives = localDrives + globalLocalDrivesMap = make(map[string]StorageAPI, len(localDrives)) + for _, drive := range localDrives { + globalLocalDrivesMap[drive.Endpoint().String()] = drive + } globalLocalDrivesMu.Unlock() } @@ -179,19 +192,12 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServ globalLeaderLock = newSharedLock(GlobalContext, z, "leader.lock") }) - // Enable background operations on - // - // - Disk auto healing - // - MRF (most recently failed) healing - // - Background expiration routine for lifecycle policies + // Start self healing after the object initialization + // so various tasks will be useful bootstrapTrace("initAutoHeal", func() { initAutoHeal(GlobalContext, z) }) - bootstrapTrace("initHealMRF", func() { - go globalMRFState.healRoutine(z) - }) - // initialize the object layer. 
defer setObjectLayer(z) @@ -207,7 +213,7 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServ logger.Fatal(err, "Unable to initialize backend") } retry := time.Duration(r.Float64() * float64(5*time.Second)) - logger.LogIf(ctx, fmt.Errorf("Unable to initialize backend: %w, retrying in %s", err, retry)) + storageLogIf(ctx, fmt.Errorf("Unable to initialize backend: %w, retrying in %s", err, retry)) time.Sleep(retry) attempt++ continue @@ -215,20 +221,44 @@ func newErasureServerPools(ctx context.Context, endpointServerPools EndpointServ break } + // initialize the incomplete uploads cache + z.mpCache = xsync.NewMapOf[string, MultipartInfo]() + + go z.cleanupStaleMPCache(ctx) + return z, nil } -func (z *erasureServerPools) NewNSLock(bucket string, objects ...string) RWLocker { - poolID := hashKey(z.distributionAlgo, "", len(z.serverPools), z.deploymentID) - if len(objects) >= 1 { - poolID = hashKey(z.distributionAlgo, objects[0], len(z.serverPools), z.deploymentID) +func (z *erasureServerPools) cleanupStaleMPCache(ctx context.Context) { + timer := time.NewTimer(globalAPIConfig.getStaleUploadsCleanupInterval()) + defer timer.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-timer.C: + z.mpCache.Range(func(id string, info MultipartInfo) bool { + if time.Since(info.Initiated) >= globalAPIConfig.getStaleUploadsExpiry() { + z.mpCache.Delete(id) + // No need to notify to peers, each node will delete its own cache. + } + return true + }) + + // Reset for the next interval + timer.Reset(globalAPIConfig.getStaleUploadsCleanupInterval()) + } } - return z.serverPools[poolID].NewNSLock(bucket, objects...) +} + +func (z *erasureServerPools) NewNSLock(bucket string, objects ...string) RWLocker { + return z.serverPools[0].NewNSLock(bucket, objects...) } // GetDisksID will return disks by their ID. func (z *erasureServerPools) GetDisksID(ids ...string) []StorageAPI { - idMap := make(map[string]struct{}) + idMap := make(map[string]struct{}, len(ids)) for _, id := range ids { idMap[id] = struct{}{} } @@ -329,7 +359,7 @@ func (p serverPoolsAvailableSpace) TotalAvailable() uint64 { // FilterMaxUsed will filter out any pools that has used percent bigger than max, // unless all have that, in which case all are preserved. -func (p serverPoolsAvailableSpace) FilterMaxUsed(max int) { +func (p serverPoolsAvailableSpace) FilterMaxUsed(maxUsed int) { // We aren't modifying p, only entries in it, so we don't need to receive a pointer. if len(p) <= 1 { // Nothing to do. @@ -337,7 +367,7 @@ func (p serverPoolsAvailableSpace) FilterMaxUsed(max int) { } var ok bool for _, z := range p { - if z.Available > 0 && z.MaxUsedPct < max { + if z.Available > 0 && z.MaxUsedPct < maxUsed { ok = true break } @@ -350,7 +380,7 @@ func (p serverPoolsAvailableSpace) FilterMaxUsed(max int) { // Remove entries that are above. for i, z := range p { - if z.Available > 0 && z.MaxUsedPct < max { + if z.Available > 0 && z.MaxUsedPct < maxUsed { continue } p[i].Available = 0 @@ -376,7 +406,7 @@ func (z *erasureServerPools) getAvailablePoolIdx(ctx context.Context, bucket, ob } } // Should not happen, but print values just in case. 
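// --- illustrative sketch (not part of the patch) ---
// cleanupStaleMPCache above sweeps the new in-memory multipart cache on a
// timer and drops entries older than the stale-uploads expiry. This sketch
// shows the same sweep loop with a mutex-guarded map standing in for the
// patch's xsync.MapOf; the interval and expiry values are made up here, the
// real ones come from globalAPIConfig.
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type uploadCache struct {
	mu sync.Mutex
	m  map[string]time.Time // uploadID -> initiated time
}

func (c *uploadCache) cleanupLoop(ctx context.Context, interval, expiry time.Duration) {
	timer := time.NewTimer(interval)
	defer timer.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-timer.C:
			c.mu.Lock()
			for id, initiated := range c.m {
				if time.Since(initiated) >= expiry {
					delete(c.m, id) // each node prunes only its own cache
				}
			}
			c.mu.Unlock()
			timer.Reset(interval) // re-arm for the next sweep
		}
	}
}

func main() {
	c := &uploadCache{m: map[string]time.Time{"upload-1": time.Now().Add(-48 * time.Hour)}}
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	go c.cleanupLoop(ctx, time.Second, 24*time.Hour)
	<-ctx.Done()
	c.mu.Lock()
	fmt.Println("entries left:", len(c.m)) // 0: the stale upload was swept
	c.mu.Unlock()
}
// --- end sketch ---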
- logger.LogIf(ctx, fmt.Errorf("reached end of serverPools (total: %v, atTotal: %v, choose: %v)", total, atTotal, choose)) + storageLogIf(ctx, fmt.Errorf("reached end of serverPools (total: %v, atTotal: %v, choose: %v)", total, atTotal, choose)) return -1 } @@ -391,7 +421,6 @@ func (z *erasureServerPools) getServerPoolsAvailableSpace(ctx context.Context, b nSets := make([]int, len(z.serverPools)) g := errgroup.WithNErrs(len(z.serverPools)) for index := range z.serverPools { - index := index // Skip suspended pools or pools participating in rebalance for any new // I/O. if z.IsSuspended(index) || z.IsPoolRebalancing(index) { @@ -457,7 +486,13 @@ type PoolObjInfo struct { Err error } -func (z *erasureServerPools) getPoolInfoExistingWithOpts(ctx context.Context, bucket, object string, opts ObjectOptions) (PoolObjInfo, error) { +type poolErrs struct { + Index int + Err error +} + +func (z *erasureServerPools) getPoolInfoExistingWithOpts(ctx context.Context, bucket, object string, opts ObjectOptions) (PoolObjInfo, []poolErrs, error) { + var noReadQuorumPools []poolErrs poolObjInfos := make([]PoolObjInfo, len(z.serverPools)) poolOpts := make([]ObjectOptions, len(z.serverPools)) for i := range z.serverPools { @@ -507,8 +542,9 @@ func (z *erasureServerPools) getPoolInfoExistingWithOpts(ctx context.Context, bu } if pinfo.Err == nil { // found a pool - return pinfo, nil + return pinfo, z.poolsWithObject(poolObjInfos, opts), nil } + if isErrReadQuorum(pinfo.Err) && !opts.MetadataChg { // read quorum is returned when the object is visibly // present but its unreadable, we simply ask the writes to @@ -517,30 +553,49 @@ func (z *erasureServerPools) getPoolInfoExistingWithOpts(ctx context.Context, bu // with enough disks online but sufficiently inconsistent to // break parity threshold, allow them to be overwritten // or allow new versions to be added. - return pinfo, nil + + return pinfo, z.poolsWithObject(poolObjInfos, opts), nil } defPool = pinfo - if !isErrObjectNotFound(pinfo.Err) { - return pinfo, pinfo.Err + if !isErrObjectNotFound(pinfo.Err) && !isErrVersionNotFound(pinfo.Err) { + return pinfo, noReadQuorumPools, pinfo.Err } // No object exists or its a delete marker, // check objInfo to confirm. if pinfo.ObjInfo.DeleteMarker && pinfo.ObjInfo.Name != "" { - return pinfo, nil + return pinfo, noReadQuorumPools, nil } } if opts.ReplicationRequest && opts.DeleteMarker && defPool.Index >= 0 { // If the request is a delete marker replication request, return a default pool // in cases where the object does not exist. // This is to ensure that the delete marker is replicated to the destination. - return defPool, nil + return defPool, noReadQuorumPools, nil } - return PoolObjInfo{}, toObjectErr(errFileNotFound, bucket, object) + return PoolObjInfo{}, noReadQuorumPools, toObjectErr(errFileNotFound, bucket, object) +} + +// return all pools with read quorum error or no error for an object with given opts.Note that this array is +// returned in the order of recency of object ModTime. +func (z *erasureServerPools) poolsWithObject(pools []PoolObjInfo, opts ObjectOptions) (errs []poolErrs) { + for _, pool := range pools { + if opts.SkipDecommissioned && z.IsSuspended(pool.Index) { + continue + } + // Skip object if it's from pools participating in a rebalance operation. 
+ if opts.SkipRebalancing && z.IsPoolRebalancing(pool.Index) { + continue + } + if isErrReadQuorum(pool.Err) || pool.Err == nil { + errs = append(errs, poolErrs{Err: pool.Err, Index: pool.Index}) + } + } + return errs } func (z *erasureServerPools) getPoolIdxExistingWithOpts(ctx context.Context, bucket, object string, opts ObjectOptions) (idx int, err error) { - pinfo, err := z.getPoolInfoExistingWithOpts(ctx, bucket, object, opts) + pinfo, _, err := z.getPoolInfoExistingWithOpts(ctx, bucket, object, opts) if err != nil { return -1, err } @@ -580,15 +635,18 @@ func (z *erasureServerPools) getPoolIdxNoLock(ctx context.Context, bucket, objec // if none are found falls back to most available space pool, this function is // designed to be only used by PutObject, CopyObject (newObject creation) and NewMultipartUpload. func (z *erasureServerPools) getPoolIdx(ctx context.Context, bucket, object string, size int64) (idx int, err error) { - idx, err = z.getPoolIdxExistingWithOpts(ctx, bucket, object, ObjectOptions{ + pinfo, _, err := z.getPoolInfoExistingWithOpts(ctx, bucket, object, ObjectOptions{ SkipDecommissioned: true, SkipRebalancing: true, }) + if err != nil && !isErrObjectNotFound(err) { - return idx, err + return -1, err } - if isErrObjectNotFound(err) { + idx = pinfo.Index + if isErrObjectNotFound(err) || pinfo.Err == nil { + // will generate a temp object idx = z.getAvailablePoolIdx(ctx, bucket, object, size) if idx < 0 { return -1, toObjectErr(errDiskFull) @@ -602,7 +660,6 @@ func (z *erasureServerPools) Shutdown(ctx context.Context) error { g := errgroup.WithNErrs(len(z.serverPools)) for index := range z.serverPools { - index := index g.Go(func() error { return z.serverPools[index].Shutdown(ctx) }, index) @@ -610,13 +667,22 @@ func (z *erasureServerPools) Shutdown(ctx context.Context) error { for _, err := range g.Wait() { if err != nil { - logger.LogIf(ctx, err) + storageLogIf(ctx, err) } // let's the rest shutdown } return nil } +// Legacy returns 'true' if distribution algo is CRCMOD +func (z *erasureServerPools) Legacy() (ok bool) { + ok = true + for _, set := range z.serverPools { + ok = ok && set.Legacy() + } + return ok +} + func (z *erasureServerPools) BackendInfo() (b madmin.BackendInfo) { b.Type = madmin.Erasure @@ -636,7 +702,7 @@ func (z *erasureServerPools) BackendInfo() (b madmin.BackendInfo) { b.StandardSCParity = scParity b.RRSCParity = rrSCParity - return + return b } func (z *erasureServerPools) LocalStorageInfo(ctx context.Context, metrics bool) StorageInfo { @@ -645,7 +711,6 @@ func (z *erasureServerPools) LocalStorageInfo(ctx context.Context, metrics bool) storageInfos := make([]StorageInfo, len(z.serverPools)) g := errgroup.WithNErrs(len(z.serverPools)) for index := range z.serverPools { - index := index g.Go(func() error { storageInfos[index] = z.serverPools[index].LocalStorageInfo(ctx, metrics) return nil @@ -664,7 +729,7 @@ func (z *erasureServerPools) LocalStorageInfo(ctx context.Context, metrics bool) } func (z *erasureServerPools) StorageInfo(ctx context.Context, metrics bool) StorageInfo { - return globalNotificationSys.StorageInfo(z, metrics) + return globalNotificationSys.StorageInfo(ctx, z, metrics) } func (z *erasureServerPools) NSScanner(ctx context.Context, updates chan<- DataUsageInfo, wantCycle uint32, healScanMode madmin.HealScanMode) error { @@ -688,12 +753,17 @@ func (z *erasureServerPools) NSScanner(ctx context.Context, updates chan<- DataU updates <- DataUsageInfo{} // no buckets found update data usage to reflect latest state return nil } - + 
totalResults := 0 + resultIndex := -1 + for _, z := range z.serverPools { + totalResults += len(z.sets) + } + results = make([]dataUsageCache, totalResults) // Collect for each set in serverPools. for _, z := range z.serverPools { for _, erObj := range z.sets { + resultIndex++ wg.Add(1) - results = append(results, dataUsageCache{}) go func(i int, erObj *erasureObjects) { updates := make(chan dataUsageCache, 1) defer xioutil.SafeClose(updates) @@ -709,7 +779,7 @@ func (z *erasureServerPools) NSScanner(ctx context.Context, updates chan<- DataU // Start scanner. Blocks until done. err := erObj.nsScanner(ctx, allBuckets, wantCycle, updates, healScanMode) if err != nil { - logger.LogIf(ctx, err) + scannerLogIf(ctx, err) mu.Lock() if firstErr == nil { firstErr = err @@ -719,7 +789,7 @@ func (z *erasureServerPools) NSScanner(ctx context.Context, updates chan<- DataU mu.Unlock() return } - }(len(results)-1, erObj) + }(resultIndex, erObj) } } updateCloser := make(chan chan struct{}) @@ -1020,26 +1090,27 @@ func (z *erasureServerPools) PutObject(ctx context.Context, bucket string, objec object = encodeDirObject(object) if z.SinglePool() { - return z.serverPools[0].PutObject(ctx, bucket, object, data, opts) - } - - if !opts.NoLock { - ns := z.NewNSLock(bucket, object) - lkctx, err := ns.GetLock(ctx, globalOperationTimeout) + _, err := z.getPoolIdx(ctx, bucket, object, data.Size()) if err != nil { return ObjectInfo{}, err } - ctx = lkctx.Context() - defer ns.Unlock(lkctx) - opts.NoLock = true + return z.serverPools[0].PutObject(ctx, bucket, object, data, opts) } - idx, err := z.getPoolIdxNoLock(ctx, bucket, object, data.Size()) + idx, err := z.getPoolIdx(ctx, bucket, object, data.Size()) if err != nil { return ObjectInfo{}, err } - // Overwrite the object at the right pool + if opts.DataMovement && idx == opts.SrcPoolIdx { + return ObjectInfo{}, DataMovementOverwriteErr{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + Err: errDataMovementSrcDstPoolSame, + } + } + return z.serverPools[idx].PutObject(ctx, bucket, object, data, opts) } @@ -1076,7 +1147,8 @@ func (z *erasureServerPools) DeleteObject(ctx context.Context, bucket string, ob gopts := opts gopts.NoLock = true - pinfo, err := z.getPoolInfoExistingWithOpts(ctx, bucket, object, gopts) + + pinfo, noReadQuorumPools, err := z.getPoolInfoExistingWithOpts(ctx, bucket, object, gopts) if err != nil { if _, ok := err.(InsufficientReadQuorum); ok { return objInfo, InsufficientWriteQuorum{} @@ -1090,8 +1162,79 @@ func (z *erasureServerPools) DeleteObject(ctx context.Context, bucket string, ob return pinfo.ObjInfo, nil } - objInfo, err = z.serverPools[pinfo.Index].DeleteObject(ctx, bucket, object, opts) + // Datamovement must never be allowed on the same pool. + if opts.DataMovement && opts.SrcPoolIdx == pinfo.Index { + return pinfo.ObjInfo, DataMovementOverwriteErr{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + Err: errDataMovementSrcDstPoolSame, + } + } + + if opts.DataMovement { + objInfo, err = z.serverPools[pinfo.Index].DeleteObject(ctx, bucket, object, opts) + objInfo.Name = decodeDirObject(object) + return objInfo, err + } + + // Delete concurrently in all server pools with read quorum error for unversioned objects. + if len(noReadQuorumPools) > 0 && !opts.Versioned && !opts.VersionSuspended { + return z.deleteObjectFromAllPools(ctx, bucket, object, opts, noReadQuorumPools) + } + + // All replication requests needs to go to pool with the object. 
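// --- illustrative sketch (not part of the patch) ---
// The NSScanner change above stops appending to `results` inside the loop
// that also launches goroutines; instead it pre-sizes the slice and gives each
// goroutine a fixed resultIndex slot. A minimal sketch of that pattern, with a
// placeholder work function:
package main

import (
	"fmt"
	"sync"
)

func main() {
	sets := [][]string{{"s0-d0", "s0-d1"}, {"s1-d0"}}

	total := 0
	for _, s := range sets {
		total += len(s)
	}
	results := make([]string, total) // pre-sized: one slot per worker

	var wg sync.WaitGroup
	idx := -1
	for _, s := range sets {
		for _, name := range s {
			idx++
			wg.Add(1)
			go func(i int, name string) { // i is this worker's private slot
				defer wg.Done()
				results[i] = "scanned " + name
			}(idx, name)
		}
	}
	wg.Wait()
	fmt.Println(results)
}
// --- end sketch ---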
+ if opts.ReplicationRequest { + objInfo, err = z.serverPools[pinfo.Index].DeleteObject(ctx, bucket, object, opts) + objInfo.Name = decodeDirObject(object) + return objInfo, err + } + + for _, pool := range z.serverPools { + objInfo, err := pool.DeleteObject(ctx, bucket, object, opts) + if err != nil && !isErrObjectNotFound(err) && !isErrVersionNotFound(err) { + objInfo.Name = decodeDirObject(object) + return objInfo, err + } + if err == nil { + objInfo.Name = decodeDirObject(object) + return objInfo, nil + } + } + objInfo.Name = decodeDirObject(object) + if opts.VersionID != "" { + return objInfo, VersionNotFound{Bucket: bucket, Object: object, VersionID: opts.VersionID} + } + return objInfo, ObjectNotFound{Bucket: bucket, Object: object} +} + +func (z *erasureServerPools) deleteObjectFromAllPools(ctx context.Context, bucket string, object string, opts ObjectOptions, poolIndices []poolErrs) (objInfo ObjectInfo, err error) { + derrs := make([]error, len(poolIndices)) + dobjects := make([]ObjectInfo, len(poolIndices)) + + // Delete concurrently in all server pools that reported no error or read quorum error + // where the read quorum issue is from metadata inconsistency. + var wg sync.WaitGroup + for idx, pe := range poolIndices { + if v, ok := pe.Err.(InsufficientReadQuorum); ok && v.Type != RQInconsistentMeta { + derrs[idx] = InsufficientWriteQuorum{} + continue + } + wg.Add(1) + pool := z.serverPools[pe.Index] + go func(idx int, pool *erasureSets) { + defer wg.Done() + dobjects[idx], derrs[idx] = pool.DeleteObject(ctx, bucket, object, opts) + }(idx, pool) + } + wg.Wait() + + // the poolIndices array is pre-sorted in order of latest ModTime, we care only about pool with latest object though + // the delete call tries to clean up other pools during DeleteObject call. + objInfo = dobjects[0] + objInfo.Name = decodeDirObject(object) + err = derrs[0] return objInfo, err } @@ -1118,88 +1261,41 @@ func (z *erasureServerPools) DeleteObjects(ctx context.Context, bucket string, o ctx = lkctx.Context() defer multiDeleteLock.Unlock(lkctx) - // Fetch location of up to 10 objects concurrently. - poolObjIdxMap := map[int][]ObjectToDelete{} - origIndexMap := map[int][]int{} + dObjectsByPool := make([][]DeletedObject, len(z.serverPools)) + dErrsByPool := make([][]error, len(z.serverPools)) - // Always perform 1/10th of the number of objects per delete - concurrent := len(objects) / 10 - if concurrent <= 10 { - // if we cannot get 1/10th then choose the number of - // objects as concurrent. - concurrent = len(objects) - } - - var mu sync.Mutex - eg := errgroup.WithNErrs(len(objects)).WithConcurrency(concurrent) - for j, obj := range objects { - j := j - obj := obj + eg := errgroup.WithNErrs(len(z.serverPools)).WithConcurrency(len(z.serverPools)) + for i, pool := range z.serverPools { + pool := pool eg.Go(func() error { - pinfo, err := z.getPoolInfoExistingWithOpts(ctx, bucket, obj.ObjectName, ObjectOptions{ - NoLock: true, - }) - if err != nil { - if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) { - derrs[j] = err - } - dobjects[j] = DeletedObject{ - ObjectName: decodeDirObject(obj.ObjectName), - VersionID: obj.VersionID, - } - return nil - } - - // Delete marker already present we are not going to create new delete markers. 
- if pinfo.ObjInfo.DeleteMarker && obj.VersionID == "" { - dobjects[j] = DeletedObject{ - DeleteMarker: pinfo.ObjInfo.DeleteMarker, - DeleteMarkerVersionID: pinfo.ObjInfo.VersionID, - DeleteMarkerMTime: DeleteMarkerMTime{pinfo.ObjInfo.ModTime}, - ObjectName: decodeDirObject(pinfo.ObjInfo.Name), - } - return nil - } - - idx := pinfo.Index - - mu.Lock() - defer mu.Unlock() - - poolObjIdxMap[idx] = append(poolObjIdxMap[idx], obj) - origIndexMap[idx] = append(origIndexMap[idx], j) + dObjectsByPool[i], dErrsByPool[i] = pool.DeleteObjects(ctx, bucket, objects, opts) return nil - }, j) + }, i) } - eg.Wait() // wait to check all the pools. - if len(poolObjIdxMap) > 0 { - // Delete concurrently in all server pools. - var wg sync.WaitGroup - wg.Add(len(z.serverPools)) - for idx, pool := range z.serverPools { - go func(idx int, pool *erasureSets) { - defer wg.Done() - objs := poolObjIdxMap[idx] - if len(objs) > 0 { - orgIndexes := origIndexMap[idx] - deletedObjects, errs := pool.DeleteObjects(ctx, bucket, objs, opts) - mu.Lock() - for i, derr := range errs { - if derr != nil { - derrs[orgIndexes[i]] = derr - } - deletedObjects[i].ObjectName = decodeDirObject(deletedObjects[i].ObjectName) - dobjects[orgIndexes[i]] = deletedObjects[i] - } - mu.Unlock() - } - }(idx, pool) + for i := range dobjects { + // Iterate over pools + for pool := range z.serverPools { + if dErrsByPool[pool][i] == nil && dObjectsByPool[pool][i].found { + // A fast exit when the object is found and removed + dobjects[i] = dObjectsByPool[pool][i] + derrs[i] = nil + break + } + if derrs[i] == nil { + // No error related to this object is found, assign this pool result + // whether it is nil because there is no object found or because of + // some other errors such erasure quorum errors. + dobjects[i] = dObjectsByPool[pool][i] + derrs[i] = dErrsByPool[pool][i] + } } - wg.Wait() } + for i := range dobjects { + dobjects[i].ObjectName = decodeDirObject(dobjects[i].ObjectName) + } return dobjects, derrs } @@ -1255,24 +1351,31 @@ func (z *erasureServerPools) CopyObject(ctx context.Context, srcBucket, srcObjec } putOpts := ObjectOptions{ - ServerSideEncryption: dstOpts.ServerSideEncryption, - UserDefined: srcInfo.UserDefined, - Versioned: dstOpts.Versioned, - VersionID: dstOpts.VersionID, - MTime: dstOpts.MTime, - NoLock: true, + ServerSideEncryption: dstOpts.ServerSideEncryption, + UserDefined: srcInfo.UserDefined, + Versioned: dstOpts.Versioned, + VersionID: dstOpts.VersionID, + MTime: dstOpts.MTime, + NoLock: true, + EncryptFn: dstOpts.EncryptFn, + WantChecksum: dstOpts.WantChecksum, + WantServerSideChecksumType: dstOpts.WantServerSideChecksumType, } return z.serverPools[poolIdx].PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts) } +func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { + return z.listObjectsGeneric(ctx, bucket, prefix, marker, delimiter, maxKeys, true) +} + func (z *erasureServerPools) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (ListObjectsV2Info, error) { marker := continuationToken if marker == "" { marker = startAfter } - loi, err := z.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys) + loi, err := z.listObjectsGeneric(ctx, bucket, prefix, marker, delimiter, maxKeys, false) if err != nil { return ListObjectsV2Info{}, err } @@ -1308,8 +1411,7 @@ func (z *erasureServerPools) ListObjectVersions(ctx 
context.Context, bucket, pre // It requests unique blocks with a specific prefix. // We skip scanning the parent directory for // more objects matching the prefix. - ri := logger.GetReqInfo(ctx) - if ri != nil && strings.Contains(ri.UserAgent, `1.0 Veeam/1.0 Backup`) && strings.HasSuffix(prefix, ".blk") { + if isVeeamClient(ctx) && strings.HasSuffix(prefix, ".blk") { opts.BaseDir = prefix opts.Transient = true } @@ -1320,7 +1422,7 @@ func (z *erasureServerPools) ListObjectVersions(ctx context.Context, bucket, pre merged, err := z.listPath(ctx, &opts) if err != nil && err != io.EOF { if !isErrBucketNotFound(err) { - logger.LogOnceIf(ctx, err, "erasure-list-objects-path-"+bucket) + storageLogOnceIf(ctx, err, "erasure-list-objects-path-"+bucket) } return loi, toObjectErr(err, bucket) } @@ -1381,9 +1483,9 @@ func maxKeysPlusOne(maxKeys int, addOne bool) int { return maxKeys } -func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (ListObjectsInfo, error) { - var loi ListObjectsInfo +func (z *erasureServerPools) listObjectsGeneric(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int, v1 bool) (loi ListObjectsInfo, err error) { opts := listPathOptions{ + V1: v1, Bucket: bucket, Prefix: prefix, Separator: delimiter, @@ -1393,9 +1495,67 @@ func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, ma AskDisks: globalAPIConfig.getListQuorum(), } opts.setBucketMeta(ctx) + listFn := func(ctx context.Context, opts listPathOptions, limitTo int) (ListObjectsInfo, error) { + var loi ListObjectsInfo + merged, err := z.listPath(ctx, &opts) + if err != nil && err != io.EOF { + if !isErrBucketNotFound(err) { + storageLogOnceIf(ctx, err, "erasure-list-objects-path-"+bucket) + } + return loi, toObjectErr(err, bucket) + } + merged.forwardPast(opts.Marker) + defer merged.truncate(0) // Release when returning + + if contextCanceled(ctx) { + return ListObjectsInfo{}, ctx.Err() + } + // Default is recursive, if delimiter is set then list non recursive. + objects := merged.fileInfos(bucket, prefix, delimiter) + loi.IsTruncated = err == nil && len(objects) > 0 + if limitTo > 0 && len(objects) > limitTo { + objects = objects[:limitTo] + loi.IsTruncated = true + } + for _, obj := range objects { + if obj.IsDir && obj.ModTime.IsZero() && delimiter != "" { + // Only add each once. + // With slash delimiter we only get the directory once. + found := false + if delimiter != slashSeparator { + for _, p := range loi.Prefixes { + if found { + break + } + found = p == obj.Name + } + } + if !found { + loi.Prefixes = append(loi.Prefixes, obj.Name) + } + } else { + loi.Objects = append(loi.Objects, obj) + } + } + if loi.IsTruncated { + last := objects[len(objects)-1] + loi.NextMarker = last.Name + } + + if loi.IsTruncated && merged.lastSkippedEntry > loi.NextMarker { + // An object hidden by ILM was found during a truncated listing. 
Set the next marker + // as the last skipped entry if it is lexically higher loi.NextMarker as an optimization + loi.NextMarker = merged.lastSkippedEntry + } + + if loi.NextMarker != "" { + loi.NextMarker = opts.encodeMarker(loi.NextMarker) + } + return loi, nil + } ri := logger.GetReqInfo(ctx) - hadoop := ri != nil && strings.Contains(ri.UserAgent, `Hadoop `) && strings.Contains(ri.UserAgent, "scala/") + hadoop := ri != nil && strings.Contains(ri.UserAgent, "Hadoop ") && strings.Contains(ri.UserAgent, "scala/") matches := func() bool { if prefix == "" { return false @@ -1420,7 +1580,8 @@ func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, ma } return false } - if hadoop && matches() && delimiter == SlashSeparator && maxKeys == 2 && marker == "" { + + if hadoop && delimiter == SlashSeparator && maxKeys == 2 && marker == "" { // Optimization for Spark/Hadoop workload where spark sends a garbage // request of this kind // @@ -1457,25 +1618,57 @@ func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, ma // df.write.parquet("s3a://testbucket/parquet/") // } // } - objInfo, err := z.GetObjectInfo(ctx, bucket, path.Dir(prefix), ObjectOptions{NoLock: true}) - if err == nil { - if opts.Lifecycle != nil { - evt := evalActionFromLifecycle(ctx, *opts.Lifecycle, opts.Retention, opts.Replication.Config, objInfo) - if evt.Action.Delete() { - globalExpiryState.enqueueByDays(objInfo, evt, lcEventSrc_s3ListObjects) - if !evt.Action.DeleteRestored() { - // Skip entry if ILM action was DeleteVersionAction or DeleteAction - return loi, nil + if matches() { + objInfo, err := z.GetObjectInfo(ctx, bucket, path.Dir(prefix), ObjectOptions{NoLock: true}) + if err == nil || objInfo.IsLatest && objInfo.DeleteMarker { + if opts.Lifecycle != nil { + evt := evalActionFromLifecycle(ctx, *opts.Lifecycle, opts.Retention, opts.Replication.Config, objInfo) + if evt.Action.Delete() { + globalExpiryState.enqueueByDays(objInfo, evt, lcEventSrc_s3ListObjects) + if !evt.Action.DeleteRestored() { + // Skip entry if ILM action was DeleteVersionAction or DeleteAction + return loi, nil + } } } + return loi, nil + } + if isErrBucketNotFound(err) { + return loi, err + } + if contextCanceled(ctx) { + return ListObjectsInfo{}, ctx.Err() } - return loi, nil - } - if isErrBucketNotFound(err) { - return loi, err } - if contextCanceled(ctx) { - return ListObjectsInfo{}, ctx.Err() + // Hadoop makes the max-keys=2 listing call just to find if the directory is empty or not, or in the case + // of an object to check for object existence. For versioned buckets, MinIO's non-recursive + // call will report top level prefixes in deleted state, whereas spark/hadoop interpret this as non-empty + // and throw a 404 exception. This is especially a problem for spark jobs overwriting the same partition + // repeatedly. This workaround recursively lists the top 3 entries including delete markers to reflect the + // correct state of the directory in the list results. 
+ if strings.HasSuffix(opts.Prefix, SlashSeparator) { + li, err := listFn(ctx, opts, maxKeys) + if err != nil { + return loi, err + } + if len(li.Objects) == 0 { + prefixes := li.Prefixes[:0] + for _, prefix := range li.Prefixes { + objInfo, _ := z.GetObjectInfo(ctx, bucket, pathJoin(prefix, "_SUCCESS"), ObjectOptions{NoLock: true}) + if objInfo.IsLatest && objInfo.DeleteMarker { + continue + } + prefixes = append(prefixes, prefix) + } + if len(prefixes) > 0 { + objInfo, _ := z.GetObjectInfo(ctx, bucket, pathJoin(opts.Prefix, "_SUCCESS"), ObjectOptions{NoLock: true}) + if objInfo.IsLatest && objInfo.DeleteMarker { + return loi, nil + } + } + li.Prefixes = prefixes + } + return li, nil } } @@ -1509,54 +1702,7 @@ func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, ma return ListObjectsInfo{}, ctx.Err() } } - - merged, err := z.listPath(ctx, &opts) - if err != nil && err != io.EOF { - if !isErrBucketNotFound(err) { - logger.LogOnceIf(ctx, err, "erasure-list-objects-path-"+bucket) - } - return loi, toObjectErr(err, bucket) - } - - merged.forwardPast(opts.Marker) - defer merged.truncate(0) // Release when returning - - if contextCanceled(ctx) { - return ListObjectsInfo{}, ctx.Err() - } - - // Default is recursive, if delimiter is set then list non recursive. - objects := merged.fileInfos(bucket, prefix, delimiter) - loi.IsTruncated = err == nil && len(objects) > 0 - if maxKeys > 0 && len(objects) > maxKeys { - objects = objects[:maxKeys] - loi.IsTruncated = true - } - for _, obj := range objects { - if obj.IsDir && obj.ModTime.IsZero() && delimiter != "" { - // Only add each once. - // With slash delimiter we only get the directory once. - found := false - if delimiter != slashSeparator { - for _, p := range loi.Prefixes { - if found { - break - } - found = p == obj.Name - } - } - if !found { - loi.Prefixes = append(loi.Prefixes, obj.Name) - } - } else { - loi.Objects = append(loi.Objects, obj) - } - } - if loi.IsTruncated { - last := objects[len(objects)-1] - loi.NextMarker = opts.encodeMarker(last.Name) - } - return loi, nil + return listFn(ctx, opts, maxKeys) } func (z *erasureServerPools) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) { @@ -1564,15 +1710,34 @@ func (z *erasureServerPools) ListMultipartUploads(ctx context.Context, bucket, p return ListMultipartsInfo{}, err } - if z.SinglePool() { - return z.serverPools[0].ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) - } - poolResult := ListMultipartsInfo{} poolResult.MaxUploads = maxUploads poolResult.KeyMarker = keyMarker poolResult.Prefix = prefix poolResult.Delimiter = delimiter + + // if no prefix provided, return the list from cache + if prefix == "" { + if _, err := z.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil { + return ListMultipartsInfo{}, toObjectErr(err, bucket) + } + + z.mpCache.Range(func(_ string, mp MultipartInfo) bool { + if mp.Bucket == bucket { + poolResult.Uploads = append(poolResult.Uploads, mp) + } + return true + }) + sort.Slice(poolResult.Uploads, func(i int, j int) bool { + return poolResult.Uploads[i].Initiated.Before(poolResult.Uploads[j].Initiated) + }) + return poolResult, nil + } + + if z.SinglePool() { + return z.serverPools[0].ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) + } + for idx, pool := range z.serverPools { if z.IsSuspended(idx) { continue @@ -1584,15 +1749,27 @@ func (z 
*erasureServerPools) ListMultipartUploads(ctx context.Context, bucket, p } poolResult.Uploads = append(poolResult.Uploads, result.Uploads...) } + return poolResult, nil } // Initiate a new multipart upload on a hashedSet based on object name. -func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (*NewMultipartUploadResult, error) { +func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (mp *NewMultipartUploadResult, err error) { if err := checkNewMultipartArgs(ctx, bucket, object); err != nil { return nil, err } + defer func() { + if err == nil && mp != nil { + z.mpCache.Store(mp.UploadID, MultipartInfo{ + Bucket: bucket, + Object: object, + UploadID: mp.UploadID, + Initiated: time.Now(), + }) + } + }() + if z.SinglePool() { return z.serverPools[0].NewMultipartUpload(ctx, bucket, object, opts) } @@ -1621,6 +1798,15 @@ func (z *erasureServerPools) NewMultipartUpload(ctx context.Context, bucket, obj return nil, err } + if opts.DataMovement && idx == opts.SrcPoolIdx { + return nil, DataMovementOverwriteErr{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + Err: errDataMovementSrcDstPoolSame, + } + } + return z.serverPools[idx].NewMultipartUpload(ctx, bucket, object, opts) } @@ -1641,6 +1827,10 @@ func (z *erasureServerPools) PutObjectPart(ctx context.Context, bucket, object, } if z.SinglePool() { + _, err := z.getPoolIdx(ctx, bucket, object, data.Size()) + if err != nil { + return PartInfo{}, err + } return z.serverPools[0].PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts) } @@ -1672,9 +1862,18 @@ func (z *erasureServerPools) GetMultipartInfo(ctx context.Context, bucket, objec return MultipartInfo{}, err } + uploadIDLock := z.NewNSLock(bucket, pathJoin(object, uploadID)) + lkctx, err := uploadIDLock.GetRLock(ctx, globalOperationTimeout) + if err != nil { + return MultipartInfo{}, err + } + ctx = lkctx.Context() + defer uploadIDLock.RUnlock(lkctx) + if z.SinglePool() { return z.serverPools[0].GetMultipartInfo(ctx, bucket, object, uploadID, opts) } + for idx, pool := range z.serverPools { if z.IsSuspended(idx) { continue @@ -1690,6 +1889,7 @@ func (z *erasureServerPools) GetMultipartInfo(ctx context.Context, bucket, objec // any other unhandled error return right here. return MultipartInfo{}, err } + return MultipartInfo{}, InvalidUploadID{ Bucket: bucket, Object: object, @@ -1703,9 +1903,18 @@ func (z *erasureServerPools) ListObjectParts(ctx context.Context, bucket, object return ListPartsInfo{}, err } + uploadIDLock := z.NewNSLock(bucket, pathJoin(object, uploadID)) + lkctx, err := uploadIDLock.GetRLock(ctx, globalOperationTimeout) + if err != nil { + return ListPartsInfo{}, err + } + ctx = lkctx.Context() + defer uploadIDLock.RUnlock(lkctx) + if z.SinglePool() { return z.serverPools[0].ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts) } + for idx, pool := range z.serverPools { if z.IsSuspended(idx) { continue @@ -1719,6 +1928,7 @@ func (z *erasureServerPools) ListObjectParts(ctx context.Context, bucket, object } return ListPartsInfo{}, err } + return ListPartsInfo{}, InvalidUploadID{ Bucket: bucket, Object: object, @@ -1727,11 +1937,26 @@ func (z *erasureServerPools) ListObjectParts(ctx context.Context, bucket, object } // Aborts an in-progress multipart operation on hashedSet based on the object name. 
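// --- illustrative sketch (not part of the patch) ---
// GetMultipartInfo and ListObjectParts above now take a shared lock on the
// bucket/object/uploadID namespace, while Abort and Complete (next hunks) take
// an exclusive one. This sketch shows the same read-vs-write locking idiom
// with a plain RWMutex registry as a stand-in for MinIO's distributed NSLock.
package main

import (
	"fmt"
	"path"
	"sync"
)

type nsLocker struct {
	mu    sync.Mutex
	locks map[string]*sync.RWMutex
}

func (n *nsLocker) forKey(key string) *sync.RWMutex {
	n.mu.Lock()
	defer n.mu.Unlock()
	if n.locks[key] == nil {
		n.locks[key] = &sync.RWMutex{}
	}
	return n.locks[key]
}

func main() {
	locker := &nsLocker{locks: map[string]*sync.RWMutex{}}
	key := path.Join("testbucket", "object.bin", "upload-id-1234")

	lk := locker.forKey(key)
	lk.RLock() // ListObjectParts / GetMultipartInfo style: shared
	fmt.Println("listing parts under", key)
	lk.RUnlock()

	lk.Lock() // AbortMultipartUpload / CompleteMultipartUpload style: exclusive
	fmt.Println("completing upload under", key)
	lk.Unlock()
}
// --- end sketch ---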
-func (z *erasureServerPools) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error { +func (z *erasureServerPools) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (err error) { if err := checkAbortMultipartArgs(ctx, bucket, object, uploadID); err != nil { return err } + defer func() { + if err == nil { + z.mpCache.Delete(uploadID) + globalNotificationSys.DeleteUploadID(ctx, uploadID) + } + }() + + lk := z.NewNSLock(bucket, pathJoin(object, uploadID)) + lkctx, err := lk.GetLock(ctx, globalOperationTimeout) + if err != nil { + return err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx) + if z.SinglePool() { return z.serverPools[0].AbortMultipartUpload(ctx, bucket, object, uploadID, opts) } @@ -1763,6 +1988,23 @@ func (z *erasureServerPools) CompleteMultipartUpload(ctx context.Context, bucket return objInfo, err } + defer func() { + if err == nil { + z.mpCache.Delete(uploadID) + globalNotificationSys.DeleteUploadID(ctx, uploadID) + } + }() + + // Hold write locks to verify uploaded parts, also disallows any + // parallel PutObjectPart() requests. + uploadIDLock := z.NewNSLock(bucket, pathJoin(object, uploadID)) + wlkctx, err := uploadIDLock.GetLock(ctx, globalOperationTimeout) + if err != nil { + return objInfo, err + } + ctx = wlkctx.Context() + defer uploadIDLock.Unlock(wlkctx) + if z.SinglePool() { return z.serverPools[0].CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts) } @@ -1805,6 +2047,12 @@ func (z *erasureServerPools) GetBucketInfo(ctx context.Context, bucket string, o return bucketInfo, nil } +// ClearUploadID deletes given uploadID from cache +func (z *erasureServerPools) ClearUploadID(uploadID string) error { + z.mpCache.Delete(uploadID) + return nil +} + // DeleteBucket - deletes a bucket on all serverPools simultaneously, // even if one of the serverPools fail to delete buckets, we proceed to // undo a successful operation. 
@@ -1829,6 +2077,34 @@ func (z *erasureServerPools) DeleteBucket(ctx context.Context, bucket string, op defer lk.Unlock(lkctx) } + if !opts.Force { + results := make(chan itemOrErr[ObjectInfo]) + + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + err := z.Walk(ctx, bucket, "", results, WalkOptions{Limit: 1}) + if err != nil { + s3LogIf(ctx, fmt.Errorf("unable to verify if the bucket %s is empty: %w", bucket, err)) + return toObjectErr(err, bucket) + } + + select { + case <-ctx.Done(): + return ctx.Err() + case r, found := <-results: + if found { + if r.Err != nil { + s3LogIf(ctx, fmt.Errorf("unable to verify if the bucket %s is empty: %w", bucket, r.Err)) + return toObjectErr(r.Err, bucket) + } + return toObjectErr(errVolumeNotEmpty, bucket) + } + } + + // Always pass force to the lower level + opts.Force = true + } + err := z.s3Peer.DeleteBucket(ctx, bucket, opts) if err == nil || isErrBucketNotFound(err) { // If site replication is configured, hold on to deleted bucket state until sites sync @@ -1867,35 +2143,40 @@ func (z *erasureServerPools) ListBuckets(ctx context.Context, opts BucketOptions if opts.Cached { listBucketsCache.InitOnce(time.Second, cachevalue.Opts{ReturnLastGood: true, NoWait: true}, - func() ([]BucketInfo, error) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + func(ctx context.Context) ([]BucketInfo, error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() buckets, err = z.s3Peer.ListBuckets(ctx, opts) if err != nil { return nil, err } - for i := range buckets { - createdAt, err := globalBucketMetadataSys.CreatedAt(buckets[i].Name) - if err == nil { - buckets[i].Created = createdAt + if !opts.NoMetadata { + for i := range buckets { + createdAt, err := globalBucketMetadataSys.CreatedAt(buckets[i].Name) + if err == nil { + buckets[i].Created = createdAt + } } } return buckets, nil }, ) - return listBucketsCache.Get() + return listBucketsCache.GetWithCtx(ctx) } buckets, err = z.s3Peer.ListBuckets(ctx, opts) if err != nil { return nil, err } - for i := range buckets { - createdAt, err := globalBucketMetadataSys.CreatedAt(buckets[i].Name) - if err == nil { - buckets[i].Created = createdAt + + if !opts.NoMetadata { + for i := range buckets { + createdAt, err := globalBucketMetadataSys.CreatedAt(buckets[i].Name) + if err == nil { + buckets[i].Created = createdAt + } } } return buckets, nil @@ -1920,7 +2201,7 @@ func (z *erasureServerPools) HealFormat(ctx context.Context, dryRun bool) (madmi for _, pool := range z.serverPools { result, err := pool.HealFormat(ctx, dryRun) if err != nil && !errors.Is(err, errNoHealRequired) { - logger.LogOnceIf(ctx, err, "erasure-heal-format") + healingLogOnceIf(ctx, err, "erasure-heal-format") continue } // Count errNoHealRequired across all serverPools, @@ -1948,177 +2229,208 @@ func (z *erasureServerPools) HealBucket(ctx context.Context, bucket string, opts } // Walk a bucket, optionally prefix recursively, until we have returned -// all the content to objectInfo channel, it is callers responsibility -// to allocate a receive channel for ObjectInfo, upon any unhandled -// error walker returns error. Optionally if context.Done() is received -// then Walk() stops the walker. -func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts WalkOptions) error { +// all the contents of the provided bucket+prefix. 
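// --- illustrative sketch (not part of the patch) ---
// DeleteBucket above verifies emptiness by walking the bucket with
// WalkOptions{Limit: 1} and reading a single itemOrErr message from the
// results channel. This sketch mimics that consumer side; itemOrErr and the
// one-shot walker are simplified stand-ins for the MinIO types.
package main

import (
	"context"
	"fmt"
)

type itemOrErr[T any] struct {
	Item T
	Err  error
}

// walkOne emulates Walk with Limit: 1 — it sends at most one result, then closes.
func walkOne(ctx context.Context, objects []string, results chan<- itemOrErr[string]) {
	defer close(results)
	if len(objects) == 0 {
		return
	}
	select {
	case results <- itemOrErr[string]{Item: objects[0]}:
	case <-ctx.Done():
	}
}

func main() {
	ctx := context.Background()
	results := make(chan itemOrErr[string])
	go walkOne(ctx, []string{"prefix/a.txt"}, results)

	if r, found := <-results; found {
		if r.Err != nil {
			fmt.Println("cannot verify bucket:", r.Err)
			return
		}
		fmt.Println("bucket not empty, refusing delete")
		return
	}
	fmt.Println("bucket empty, safe to delete")
}
// --- end sketch ---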
+func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, results chan<- itemOrErr[ObjectInfo], opts WalkOptions) error { if err := checkListObjsArgs(ctx, bucket, prefix, ""); err != nil { - // Upon error close the channel. xioutil.SafeClose(results) return err } - - vcfg, _ := globalBucketVersioningSys.Get(bucket) - - ctx, cancel := context.WithCancel(ctx) - go func() { - defer cancel() - defer xioutil.SafeClose(results) - - for _, erasureSet := range z.serverPools { - var wg sync.WaitGroup - for _, set := range erasureSet.sets { - set := set - wg.Add(1) - go func() { - defer wg.Done() - - disks, infos, _ := set.getOnlineDisksWithHealingAndInfo(true) - if len(disks) == 0 { - cancel() + parentCtx := ctx + ctx, cancelCause := context.WithCancelCause(ctx) + var entries []chan metaCacheEntry + + for poolIdx, erasureSet := range z.serverPools { + for setIdx, set := range erasureSet.sets { + listOut := make(chan metaCacheEntry, 1) + entries = append(entries, listOut) + disks, infos, _ := set.getOnlineDisksWithHealingAndInfo(true) + if len(disks) == 0 { + xioutil.SafeClose(results) + err := fmt.Errorf("Walk: no online disks found in (set:%d pool:%d) %w", setIdx, poolIdx, errErasureReadQuorum) + cancelCause(err) + return err + } + go func() { + defer xioutil.SafeClose(listOut) + send := func(e metaCacheEntry) { + if e.isDir() { + // Ignore directories. return } - - send := func(objInfo ObjectInfo) bool { - select { - case <-ctx.Done(): - return false - case results <- objInfo: - return true - } + select { + case listOut <- e: + case <-ctx.Done(): } + } - askDisks := getListQuorum(opts.AskDisks, set.setDriveCount) - if askDisks == -1 { - newDisks := getQuorumDisks(disks, infos, (len(disks)+1)/2) - if newDisks != nil { - // If we found disks signature in quorum, we proceed to list - // from a single drive, shuffling of the drives is subsequently. - disks = newDisks - askDisks = 1 - } else { - // If we did not find suitable disks, perform strict quorum listing - // as no disk agrees on quorum anymore. - askDisks = getListQuorum("strict", set.setDriveCount) - } + askDisks := getListQuorum(opts.AskDisks, set.setDriveCount) + if askDisks == -1 { + newDisks := getQuorumDisks(disks, infos, (len(disks)+1)/2) + if newDisks != nil { + // If we found disks signature in quorum, we proceed to list + // from a single drive, shuffling of the drives is subsequently. + disks = newDisks + askDisks = 1 + } else { + // If we did not find suitable disks, perform strict quorum listing + // as no disk agrees on quorum anymore. 
+ askDisks = getListQuorum("strict", set.setDriveCount) } + } - // Special case: ask all disks if the drive count is 4 - if set.setDriveCount == 4 || askDisks > len(disks) { - askDisks = len(disks) // use all available drives - } + // Special case: ask all disks if the drive count is 4 + if set.setDriveCount == 4 || askDisks > len(disks) { + askDisks = len(disks) // use all available drives + } - var fallbackDisks []StorageAPI - if askDisks > 0 && len(disks) > askDisks { - rand.Shuffle(len(disks), func(i, j int) { - disks[i], disks[j] = disks[j], disks[i] - }) - fallbackDisks = disks[askDisks:] - disks = disks[:askDisks] - } + var fallbackDisks []StorageAPI + if askDisks > 0 && len(disks) > askDisks { + rand.Shuffle(len(disks), func(i, j int) { + disks[i], disks[j] = disks[j], disks[i] + }) + fallbackDisks = disks[askDisks:] + disks = disks[:askDisks] + } - requestedVersions := 0 - if opts.LatestOnly { - requestedVersions = 1 - } - loadEntry := func(entry metaCacheEntry) { - if entry.isDir() { - return - } + requestedVersions := 0 + if opts.LatestOnly { + requestedVersions = 1 + } + + // However many we ask, versions must exist on ~50% + listingQuorum := (askDisks + 1) / 2 + + // How to resolve partial results. + resolver := metadataResolutionParams{ + dirQuorum: listingQuorum, + objQuorum: listingQuorum, + bucket: bucket, + requestedVersions: requestedVersions, + } + + path := baseDirFromPrefix(prefix) + filterPrefix := strings.Trim(strings.TrimPrefix(prefix, path), slashSeparator) + if path == prefix { + filterPrefix = "" + } - if opts.LatestOnly { - fi, err := entry.fileInfo(bucket) - if err != nil { - cancel() - return - } - if opts.Filter != nil { - if opts.Filter(fi) { - if !send(fi.ToObjectInfo(bucket, fi.Name, vcfg != nil && vcfg.Versioned(fi.Name))) { - return - } - } - } else { - if !send(fi.ToObjectInfo(bucket, fi.Name, vcfg != nil && vcfg.Versioned(fi.Name))) { - return - } - } - - } else { - fivs, err := entry.fileInfoVersions(bucket) - if err != nil { - cancel() - return - } - - // Note: entry.fileInfoVersions returns versions sorted in reverse chronological order based on ModTime - if opts.VersionsSort == WalkVersionsSortAsc { - versionsSorter(fivs.Versions).reverse() - } - - for _, version := range fivs.Versions { - if opts.Filter != nil { - if opts.Filter(version) { - if !send(version.ToObjectInfo(bucket, version.Name, vcfg != nil && vcfg.Versioned(version.Name))) { - return - } - } - } else { - if !send(version.ToObjectInfo(bucket, version.Name, vcfg != nil && vcfg.Versioned(version.Name))) { - return - } - } - } + lopts := listPathRawOptions{ + disks: disks, + fallbackDisks: fallbackDisks, + bucket: bucket, + path: path, + filterPrefix: filterPrefix, + recursive: true, + forwardTo: opts.Marker, + perDiskLimit: opts.Limit, + minDisks: listingQuorum, + reportNotFound: false, + agreed: send, + partial: func(entries metaCacheEntries, _ []error) { + entry, ok := entries.resolve(&resolver) + if ok { + send(*entry) } - } + }, + finished: nil, + } - // However many we ask, versions must exist on ~50% - listingQuorum := (askDisks + 1) / 2 + if err := listPathRaw(ctx, lopts); err != nil { + cancelCause(fmt.Errorf("listPathRaw returned %w: opts(%#v)", err, lopts)) + return + } + }() + } + } - // How to resolve partial results. - resolver := metadataResolutionParams{ - dirQuorum: listingQuorum, - objQuorum: listingQuorum, - bucket: bucket, - requestedVersions: requestedVersions, + // Convert and filter merged entries. 
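// --- illustrative sketch (not part of the patch) ---
// The listing setup above shuffles the online disks, keeps askDisks of them as
// the primary listing set and leaves the remainder as fallbacks, so repeated
// walks spread load rather than always hitting the same drives. A minimal
// sketch of that split, with a placeholder Disk type:
package main

import (
	"fmt"
	"math/rand"
)

type Disk string

func splitListingDisks(disks []Disk, askDisks int) (primary, fallback []Disk) {
	if askDisks <= 0 || askDisks >= len(disks) {
		return disks, nil // ask everything; nothing left to fall back to
	}
	rand.Shuffle(len(disks), func(i, j int) {
		disks[i], disks[j] = disks[j], disks[i]
	})
	return disks[:askDisks], disks[askDisks:]
}

func main() {
	disks := []Disk{"d1", "d2", "d3", "d4", "d5", "d6"}
	primary, fallback := splitListingDisks(disks, 3)
	fmt.Println("ask:", primary, "fallback:", fallback)
}
// --- end sketch ---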
+ merged := make(chan metaCacheEntry, 100) + vcfg, _ := globalBucketVersioningSys.Get(bucket) + errCh := make(chan error, 1) + go func() { + sentErr := false + sendErr := func(err error) { + if !sentErr { + select { + case results <- itemOrErr[ObjectInfo]{Err: err}: + sentErr = true + case <-parentCtx.Done(): + } + } + } + defer func() { + select { + case <-ctx.Done(): + sendErr(ctx.Err()) + default: + } + xioutil.SafeClose(results) + cancelCause(nil) + }() + send := func(oi ObjectInfo) bool { + select { + case results <- itemOrErr[ObjectInfo]{Item: oi}: + return true + case <-ctx.Done(): + sendErr(context.Cause(ctx)) + return false + } + } + for entry := range merged { + if opts.LatestOnly { + fi, err := entry.fileInfo(bucket) + if err != nil { + sendErr(err) + return + } + if opts.Filter != nil { + if opts.Filter(fi) { + if !send(fi.ToObjectInfo(bucket, fi.Name, vcfg != nil && vcfg.Versioned(fi.Name))) { + return + } } - - path := baseDirFromPrefix(prefix) - filterPrefix := strings.Trim(strings.TrimPrefix(prefix, path), slashSeparator) - if path == prefix { - filterPrefix = "" + } else { + if !send(fi.ToObjectInfo(bucket, fi.Name, vcfg != nil && vcfg.Versioned(fi.Name))) { + return } + } + continue + } + fivs, err := entry.fileInfoVersions(bucket) + if err != nil { + sendErr(err) + return + } - lopts := listPathRawOptions{ - disks: disks, - fallbackDisks: fallbackDisks, - bucket: bucket, - path: path, - filterPrefix: filterPrefix, - recursive: true, - forwardTo: opts.Marker, - minDisks: 1, - reportNotFound: false, - agreed: loadEntry, - partial: func(entries metaCacheEntries, _ []error) { - entry, ok := entries.resolve(&resolver) - if ok { - loadEntry(*entry) - } - }, - finished: nil, - } + // Note: entry.fileInfoVersions returns versions sorted in reverse chronological order based on ModTime + if opts.VersionsSort == WalkVersionsSortAsc { + versionsSorter(fivs.Versions).reverse() + } - if err := listPathRaw(ctx, lopts); err != nil { - logger.LogIf(ctx, fmt.Errorf("listPathRaw returned %w: opts(%#v)", err, lopts)) - cancel() + for _, version := range fivs.Versions { + if opts.Filter != nil { + if opts.Filter(version) { + if !send(version.ToObjectInfo(bucket, version.Name, vcfg != nil && vcfg.Versioned(version.Name))) { + return + } + } + } else { + if !send(version.ToObjectInfo(bucket, version.Name, vcfg != nil && vcfg.Versioned(version.Name))) { return } - }() + } } - wg.Wait() } + if err := <-errCh; err != nil { + sendErr(err) + } + }() + go func() { + defer close(errCh) + // Merge all entries from all disks. + // We leave quorum at 1, since entries are already resolved to have the desired quorum. + // mergeEntryChannels will close 'merged' channel upon completion or cancellation. + errCh <- mergeEntryChannels(ctx, entries, merged, 1) }() return nil @@ -2127,6 +2439,7 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, re // HealObjectFn closure function heals the object. 
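// --- illustrative sketch (not part of the patch) ---
// The rewritten Walk ends with a fan-in stage: every erasure set streams its
// already-sorted entries on its own channel, and a merge goroutine combines
// them into one ordered stream for the converter loop. This is a simplified
// k-way merge over plain strings; the real mergeEntryChannels additionally
// resolves duplicate versions of the same object across sets.
package main

import "fmt"

// mergeSorted merges k ascending channels into one ascending channel.
func mergeSorted(ins []chan string, out chan<- string) {
	defer close(out)
	heads := make([]string, len(ins))
	ok := make([]bool, len(ins))
	for i, ch := range ins {
		heads[i], ok[i] = <-ch
	}
	for {
		best := -1
		for i := range ins {
			if ok[i] && (best == -1 || heads[i] < heads[best]) {
				best = i
			}
		}
		if best == -1 {
			return
		}
		out <- heads[best]
		heads[best], ok[best] = <-ins[best]
	}
}

func main() {
	a, b := make(chan string, 2), make(chan string, 2)
	a <- "pics/1.jpg"
	a <- "pics/3.jpg"
	close(a)
	b <- "pics/2.jpg"
	close(b)

	out := make(chan string)
	go mergeSorted([]chan string{a, b}, out)
	for name := range out {
		fmt.Println(name) // pics/1.jpg, pics/2.jpg, pics/3.jpg
	}
}
// --- end sketch ---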
type HealObjectFn func(bucket, object, versionID string, scanMode madmin.HealScanMode) error +// List a prefix or a single object versions and heal func (z *erasureServerPools) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, healObjectFn HealObjectFn) error { healEntry := func(bucket string, entry metaCacheEntry, scanMode madmin.HealScanMode) error { if entry.isDir() { @@ -2156,7 +2469,7 @@ func (z *erasureServerPools) HealObjects(ctx context.Context, bucket, prefix str if opts.Remove && !opts.DryRun { err := z.CheckAbandonedParts(ctx, bucket, entry.name, opts) if err != nil { - logger.LogIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", bucket, entry.name, err)) + healingLogIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", bucket, entry.name, err)) } } for _, version := range fivs.Versions { @@ -2172,23 +2485,29 @@ func (z *erasureServerPools) HealObjects(ctx context.Context, bucket, prefix str ctx, cancel := context.WithCancel(ctx) defer cancel() - var poolErrs [][]error + poolErrs := make([][]error, len(z.serverPools)) for idx, erasureSet := range z.serverPools { + if opts.Pool != nil && *opts.Pool != idx { + continue + } if z.IsSuspended(idx) { continue } errs := make([]error, len(erasureSet.sets)) - var wg sync.WaitGroup + wk, _ := workers.New(3) for idx, set := range erasureSet.sets { - wg.Add(1) + if opts.Set != nil && *opts.Set != idx { + continue + } + wk.Take() go func(idx int, set *erasureObjects) { - defer wg.Done() + defer wk.Give() - errs[idx] = set.listAndHeal(bucket, prefix, opts.ScanMode, healEntry) + errs[idx] = set.listAndHeal(ctx, bucket, prefix, opts.Recursive, opts.ScanMode, healEntry) }(idx, set) } - wg.Wait() - poolErrs = append(poolErrs, errs) + wk.Wait() + poolErrs[idx] = errs } for _, errs := range poolErrs { for _, err := range errs { @@ -2236,15 +2555,22 @@ func (z *erasureServerPools) HealObject(ctx context.Context, bucket, object, ver } } + hr := madmin.HealResultItem{ + Type: madmin.HealItemObject, + Bucket: bucket, + Object: object, + VersionID: versionID, + } + // At this stage, all errors are 'not found' if versionID != "" { - return madmin.HealResultItem{}, VersionNotFound{ + return hr, VersionNotFound{ Bucket: bucket, Object: object, VersionID: versionID, } } - return madmin.HealResultItem{}, ObjectNotFound{ + return hr, ObjectNotFound{ Bucket: bucket, Object: object, } @@ -2272,6 +2598,7 @@ const ( type HealthOptions struct { Maintenance bool DeploymentType string + NoLogging bool } // HealthResult returns the current state of the system, also @@ -2296,6 +2623,24 @@ type HealthResult struct { UsingDefaults bool } +func (hr HealthResult) String() string { + var str strings.Builder + for i, es := range hr.ESHealth { + str.WriteString("(Pool: ") + str.WriteString(strconv.Itoa(es.PoolID)) + str.WriteString(" Set: ") + str.WriteString(strconv.Itoa(es.SetID)) + str.WriteString(" Healthy: ") + str.WriteString(strconv.FormatBool(es.Healthy)) + if i == 0 { + str.WriteString(")") + } else { + str.WriteString(") | ") + } + } + return str.String() +} + // Health - returns current status of the object layer health, // provides if write access exists across sets, additionally // can be used to query scenarios if health may be lost @@ -2319,16 +2664,10 @@ func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) Hea for _, disk := range storageInfo.Disks { if opts.Maintenance { - var skip bool globalLocalDrivesMu.RLock() - for _, drive := range globalLocalDrives { - if 
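// --- illustrative sketch (not part of the patch) ---
// HealObjects above replaces the unbounded per-set goroutines with a workers
// pool of size 3, so at most three erasure sets heal in parallel. The
// Take/Give/Wait calls below mirror how the patch uses
// github.com/minio/pkg/v3/workers; the heal function is only a placeholder.
package main

import (
	"fmt"
	"time"

	"github.com/minio/pkg/v3/workers"
)

func healSet(idx int) {
	time.Sleep(100 * time.Millisecond) // stand-in for set.listAndHeal
	fmt.Println("healed set", idx)
}

func main() {
	wk, err := workers.New(3) // at most 3 sets healing at once
	if err != nil {
		panic(err)
	}
	for idx := 0; idx < 16; idx++ {
		wk.Take() // blocks while 3 workers are busy
		go func(idx int) {
			defer wk.Give()
			healSet(idx)
		}(idx)
	}
	wk.Wait() // wait for all outstanding workers to finish
}
// --- end sketch ---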
drive != nil && drive.Endpoint().String() == disk.Endpoint { - skip = true - break - } - } + _, ok := globalLocalDrivesMap[disk.Endpoint] globalLocalDrivesMu.RUnlock() - if skip { + if ok { continue } } @@ -2360,7 +2699,7 @@ func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) Hea // Check if disks are healing on in-case of VMware vsphere deployments. if opts.Maintenance && opts.DeploymentType == vmware { if drivesHealing > 0 { - logger.LogIf(logger.SetReqInfo(ctx, reqInfo), fmt.Errorf("Total drives to be healed %d", drivesHealing)) + healingLogIf(logger.SetReqInfo(ctx, reqInfo), fmt.Errorf("Total drives to be healed %d", drivesHealing)) } } @@ -2419,18 +2758,18 @@ func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) Hea }) healthy := erasureSetUpCount[poolIdx][setIdx].online >= poolWriteQuorums[poolIdx] - if !healthy { - logger.LogIf(logger.SetReqInfo(ctx, reqInfo), - fmt.Errorf("Write quorum may be lost on pool: %d, set: %d, expected write quorum: %d", - poolIdx, setIdx, poolWriteQuorums[poolIdx])) + if !healthy && !opts.NoLogging { + storageLogIf(logger.SetReqInfo(ctx, reqInfo), + fmt.Errorf("Write quorum could not be established on pool: %d, set: %d, expected write quorum: %d, drives-online: %d", + poolIdx, setIdx, poolWriteQuorums[poolIdx], erasureSetUpCount[poolIdx][setIdx].online), logger.FatalKind) } result.Healthy = result.Healthy && healthy healthyRead := erasureSetUpCount[poolIdx][setIdx].online >= poolReadQuorums[poolIdx] - if !healthyRead { - logger.LogIf(logger.SetReqInfo(ctx, reqInfo), - fmt.Errorf("Read quorum may be lost on pool: %d, set: %d, expected read quorum: %d", - poolIdx, setIdx, poolReadQuorums[poolIdx])) + if !healthyRead && !opts.NoLogging { + storageLogIf(logger.SetReqInfo(ctx, reqInfo), + fmt.Errorf("Read quorum could not be established on pool: %d, set: %d, expected read quorum: %d, drives-online: %d", + poolIdx, setIdx, poolReadQuorums[poolIdx], erasureSetUpCount[poolIdx][setIdx].online)) } result.HealthyRead = result.HealthyRead && healthyRead } @@ -2452,7 +2791,19 @@ func (z *erasureServerPools) PutObjectMetadata(ctx context.Context, bucket, obje return z.serverPools[0].PutObjectMetadata(ctx, bucket, object, opts) } + if !opts.NoLock { + // Lock the object before updating metadata. + lk := z.NewNSLock(bucket, object) + lkctx, err := lk.GetLock(ctx, globalOperationTimeout) + if err != nil { + return ObjectInfo{}, err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx) + } + opts.MetadataChg = true + opts.NoLock = true // We don't know the size here set 1GiB at least. idx, err := z.getPoolIdxExistingWithOpts(ctx, bucket, object, opts) if err != nil { @@ -2469,7 +2820,19 @@ func (z *erasureServerPools) PutObjectTags(ctx context.Context, bucket, object s return z.serverPools[0].PutObjectTags(ctx, bucket, object, tags, opts) } + if !opts.NoLock { + // Lock the object before updating tags. + lk := z.NewNSLock(bucket, object) + lkctx, err := lk.GetLock(ctx, globalOperationTimeout) + if err != nil { + return ObjectInfo{}, err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx) + } + opts.MetadataChg = true + opts.NoLock = true // We don't know the size here set 1GiB at least. idx, err := z.getPoolIdxExistingWithOpts(ctx, bucket, object, opts) @@ -2487,8 +2850,19 @@ func (z *erasureServerPools) DeleteObjectTags(ctx context.Context, bucket, objec return z.serverPools[0].DeleteObjectTags(ctx, bucket, object, opts) } - opts.MetadataChg = true + if !opts.NoLock { + // Lock the object before deleting tags. 
+ lk := z.NewNSLock(bucket, object) + lkctx, err := lk.GetLock(ctx, globalOperationTimeout) + if err != nil { + return ObjectInfo{}, err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx) + } + opts.MetadataChg = true + opts.NoLock = true idx, err := z.getPoolIdxExistingWithOpts(ctx, bucket, object, opts) if err != nil { return ObjectInfo{}, err @@ -2519,8 +2893,20 @@ func (z *erasureServerPools) TransitionObject(ctx context.Context, bucket, objec return z.serverPools[0].TransitionObject(ctx, bucket, object, opts) } + if !opts.NoLock { + // Acquire write lock before starting to transition the object. + lk := z.NewNSLock(bucket, object) + lkctx, err := lk.GetLock(ctx, globalDeleteOperationTimeout) + if err != nil { + return err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx) + } + // Avoid transitioning an object from a pool being decommissioned. opts.SkipDecommissioned = true + opts.NoLock = true idx, err := z.getPoolIdxExistingWithOpts(ctx, bucket, object, opts) if err != nil { return err @@ -2536,8 +2922,20 @@ func (z *erasureServerPools) RestoreTransitionedObject(ctx context.Context, buck return z.serverPools[0].RestoreTransitionedObject(ctx, bucket, object, opts) } + if !opts.NoLock { + // Acquire write lock before restoring transitioned object + lk := z.NewNSLock(bucket, object) + lkctx, err := lk.GetLock(ctx, globalDeleteOperationTimeout) + if err != nil { + return err + } + ctx = lkctx.Context() + defer lk.Unlock(lkctx) + } + // Avoid restoring object from a pool being decommissioned. opts.SkipDecommissioned = true + opts.NoLock = true idx, err := z.getPoolIdxExistingWithOpts(ctx, bucket, object, opts) if err != nil { return err @@ -2594,5 +2992,14 @@ func (z *erasureServerPools) DecomTieredObject(ctx context.Context, bucket, obje return err } + if opts.DataMovement && idx == opts.SrcPoolIdx { + return DataMovementOverwriteErr{ + Bucket: bucket, + Object: object, + VersionID: opts.VersionID, + Err: errDataMovementSrcDstPoolSame, + } + } + return z.serverPools[idx].DecomTieredObject(ctx, bucket, object, fi, opts) } diff --git a/cmd/erasure-sets.go b/cmd/erasure-sets.go index 58503558bc29e..9ceb80856bf57 100644 --- a/cmd/erasure-sets.go +++ b/cmd/erasure-sets.go @@ -31,16 +31,15 @@ import ( "time" "github.com/dchest/siphash" - "github.com/dustin/go-humanize" "github.com/google/uuid" "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7/pkg/set" "github.com/minio/minio-go/v7/pkg/tags" "github.com/minio/minio/internal/dsync" - xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/console" - "github.com/minio/pkg/v2/sync/errgroup" + "github.com/minio/pkg/v3/console" + "github.com/minio/pkg/v3/sync/errgroup" + "github.com/puzpuzpuz/xsync/v3" ) // setsDsyncLockers is encapsulated type for Close() @@ -81,10 +80,6 @@ type erasureSets struct { poolIndex int - // A channel to send the set index to the MRF when - // any disk belonging to that set is connected - setReconnectEvent chan int - // Distribution algorithm of choice. 
distributionAlgo string deploymentID [16]byte @@ -92,13 +87,15 @@ type erasureSets struct { lastConnectDisksOpTime time.Time } +var staleUploadsCleanupIntervalChangedCh = make(chan struct{}) + func (s *erasureSets) getDiskMap() map[Endpoint]StorageAPI { diskMap := make(map[Endpoint]StorageAPI) s.erasureDisksMu.RLock() defer s.erasureDisksMu.RUnlock() - for i := 0; i < s.setCount; i++ { + for i := range s.setCount { for j := 0; j < s.setDriveCount; j++ { disk := s.erasureDisks[i][j] if disk == OfflineDisk { @@ -115,26 +112,19 @@ func (s *erasureSets) getDiskMap() map[Endpoint]StorageAPI { // Initializes a new StorageAPI from the endpoint argument, returns // StorageAPI and also `format` which exists on the disk. -func connectEndpoint(endpoint Endpoint) (StorageAPI, *formatErasureV3, []byte, error) { +func connectEndpoint(endpoint Endpoint) (StorageAPI, *formatErasureV3, error) { disk, err := newStorageAPI(endpoint, storageOpts{ cleanUp: false, healthCheck: false, }) if err != nil { - return nil, nil, nil, err + return nil, nil, err } - format, formatData, err := loadFormatErasureWithData(disk, false) + format, err := loadFormatErasure(disk, false) if err != nil { - if errors.Is(err, errUnformattedDisk) { - info, derr := disk.DiskInfo(context.TODO(), DiskInfoOptions{}) - if derr != nil && info.RootDisk { - disk.Close() - return nil, nil, nil, fmt.Errorf("Drive: %s is a root drive", disk) - } - } disk.Close() - return nil, nil, nil, fmt.Errorf("Drive: %s returned %w", disk, err) // make sure to '%w' to wrap the error + return nil, nil, fmt.Errorf("Drive: %s returned %w", disk, err) // make sure to '%w' to wrap the error } disk.Close() @@ -143,10 +133,10 @@ func connectEndpoint(endpoint Endpoint) (StorageAPI, *formatErasureV3, []byte, e healthCheck: true, }) if err != nil { - return nil, nil, nil, err + return nil, nil, err } - return disk, format, formatData, nil + return disk, format, nil } // findDiskIndex - returns the i,j'th position of the input `diskID` against the reference @@ -160,7 +150,7 @@ func findDiskIndexByDiskID(refFormat *formatErasureV3, diskID string) (int, int, if diskID == offlineDiskUUID { return -1, -1, fmt.Errorf("DriveID: %s is offline", diskID) } - for i := 0; i < len(refFormat.Erasure.Sets); i++ { + for i := range len(refFormat.Erasure.Sets) { for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ { if refFormat.Erasure.Sets[i][j] == diskID { return i, j, nil @@ -184,7 +174,7 @@ func findDiskIndex(refFormat, format *formatErasureV3) (int, int, error) { return -1, -1, fmt.Errorf("DriveID: %s is offline", format.Erasure.This) } - for i := 0; i < len(refFormat.Erasure.Sets); i++ { + for i := range len(refFormat.Erasure.Sets) { for j := 0; j < len(refFormat.Erasure.Sets[0]); j++ { if refFormat.Erasure.Sets[i][j] == format.Erasure.This { return i, j, nil @@ -195,9 +185,14 @@ func findDiskIndex(refFormat, format *formatErasureV3) (int, int, error) { return -1, -1, fmt.Errorf("DriveID: %s not found", format.Erasure.This) } +// Legacy returns 'true' if distribution algo is CRCMOD +func (s *erasureSets) Legacy() (ok bool) { + return s.distributionAlgo == formatErasureVersionV2DistributionAlgoV1 +} + // connectDisks - attempt to connect all the endpoints, loads format // and re-arranges the disks in proper position. 
-func (s *erasureSets) connectDisks() { +func (s *erasureSets) connectDisks(log bool) { defer func() { s.lastConnectDisksOpTime = time.Now() }() @@ -227,17 +222,22 @@ func (s *erasureSets) connectDisks() { wg.Add(1) go func(endpoint Endpoint) { defer wg.Done() - disk, format, formatData, err := connectEndpoint(endpoint) + disk, format, err := connectEndpoint(endpoint) if err != nil { if endpoint.IsLocal && errors.Is(err, errUnformattedDisk) { globalBackgroundHealState.pushHealLocalDisks(endpoint) - } else { - printEndpointError(endpoint, err, true) + } else if !errors.Is(err, errDriveIsRoot) { + if log { + printEndpointError(endpoint, err, true) + } } return } - if disk.IsLocal() && disk.Healing() != nil { - globalBackgroundHealState.pushHealLocalDisks(disk.Endpoint()) + if disk.IsLocal() { + h := disk.Healing() + if h != nil && !h.Finished { + globalBackgroundHealState.pushHealLocalDisks(disk.Endpoint()) + } } s.erasureDisksMu.Lock() setIndex, diskIndex, err := findDiskIndex(s.format, format) @@ -261,8 +261,6 @@ func (s *erasureSets) connectDisks() { } disk.SetDiskID(format.Erasure.This) - disk.SetDiskLoc(s.poolIndex, setIndex, diskIndex) - disk.SetFormatData(formatData) s.erasureDisks[setIndex][diskIndex] = disk if disk.IsLocal() { @@ -270,13 +268,7 @@ func (s *erasureSets) connectDisks() { if globalIsDistErasure { globalLocalSetDrives[s.poolIndex][setIndex][diskIndex] = disk } - for i, ldisk := range globalLocalDrives { - _, k, l := ldisk.GetDiskLoc() - if k == setIndex && l == diskIndex { - globalLocalDrives[i] = disk - break - } - } + globalLocalDrivesMap[disk.Endpoint().String()] = disk globalLocalDrivesMu.Unlock() } s.erasureDisksMu.Unlock() @@ -295,7 +287,7 @@ func (s *erasureSets) monitorAndConnectEndpoints(ctx context.Context, monitorInt time.Sleep(time.Duration(r.Float64() * float64(time.Second))) // Pre-emptively connect the disks if possible. 
- s.connectDisks() + s.connectDisks(false) monitor := time.NewTimer(monitorInterval) defer monitor.Stop() @@ -309,7 +301,7 @@ func (s *erasureSets) monitorAndConnectEndpoints(ctx context.Context, monitorInt console.Debugln("running drive monitoring") } - s.connectDisks() + s.connectDisks(true) // Reset the timer for next interval monitor.Reset(monitorInterval) @@ -378,7 +370,6 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [ setDriveCount: setDriveCount, defaultParityCount: defaultParityCount, format: format, - setReconnectEvent: make(chan int), distributionAlgo: format.Erasure.DistributionAlgo, deploymentID: uuid.MustParse(format.ID), poolIndex: poolIdx, @@ -386,7 +377,7 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [ mutex := newNSLock(globalIsDistErasure) - for i := 0; i < setCount; i++ { + for i := range setCount { s.erasureDisks[i] = make([]StorageAPI, setDriveCount) } @@ -399,9 +390,9 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [ var wg sync.WaitGroup var lk sync.Mutex - for i := 0; i < setCount; i++ { + for i := range setCount { lockerEpSet := set.NewStringSet() - for j := 0; j < setDriveCount; j++ { + for j := range setDriveCount { wg.Add(1) go func(i int, endpoint Endpoint) { defer wg.Done() @@ -418,13 +409,13 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [ } wg.Wait() - for i := 0; i < setCount; i++ { + for i := range setCount { wg.Add(1) go func(i int) { defer wg.Done() var innerWg sync.WaitGroup - for j := 0; j < setDriveCount; j++ { + for j := range setDriveCount { disk := storageDisks[i*setDriveCount+j] if disk == nil { continue @@ -448,27 +439,17 @@ func newErasureSets(ctx context.Context, endpoints PoolEndpoints, storageDisks [ diskID, err := disk.GetDiskID() if err != nil { if !errors.Is(err, errUnformattedDisk) { - logger.LogIf(ctx, err) + bootLogIf(ctx, err) } return } if diskID == "" { return } - m, n, err := findDiskIndexByDiskID(format, diskID) - if err != nil { - logger.LogIf(ctx, err) - return - } - if m != i || n != j { - logger.LogIf(ctx, fmt.Errorf("Detected unexpected drive ordering refusing to use the drive - poolID: %s, found drive mounted at (set=%s, drive=%s) expected mount at (set=%s, drive=%s): %s(%s)", humanize.Ordinal(poolIdx+1), humanize.Ordinal(m+1), humanize.Ordinal(n+1), humanize.Ordinal(i+1), humanize.Ordinal(j+1), disk, diskID)) - s.erasureDisks[i][j] = &unrecognizedDisk{storage: disk} - return - } - disk.SetDiskLoc(s.poolIndex, m, n) - s.erasureDisks[m][n] = disk + s.erasureDisks[i][j] = disk }(disk, i, j) } + innerWg.Wait() // Initialize erasure objects for a given set. 
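
[Reviewer note, not part of the patch] The hunk that follows reworks `cleanupStaleUploads` so that the loop re-arms its timer from the current configuration on every iteration and also wakes up on the new `staleUploadsCleanupIntervalChangedCh`, letting a runtime change to the stale-uploads cleanup interval take effect without waiting out the previously armed timer. A minimal, self-contained sketch of that timer-plus-notification pattern; `intervalChangedCh` and `getInterval` below are illustrative placeholders, not MinIO identifiers:

```go
package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

// Placeholder stand-ins for globalAPIConfig.getStaleUploadsCleanupInterval()
// and staleUploadsCleanupIntervalChangedCh; names are illustrative only.
var (
	intervalChangedCh = make(chan struct{}, 1)
	currentInterval   atomic.Int64 // nanoseconds
)

func getInterval() time.Duration { return time.Duration(currentInterval.Load()) }

// cleanupLoop waits for either the timer to fire or a notification that the
// interval changed; in both cases it re-arms the timer with the current value,
// so a new interval applies on the very next iteration.
func cleanupLoop(ctx context.Context) {
	timer := time.NewTimer(getInterval())
	defer timer.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-timer.C:
			fmt.Println("cleanup pass at", time.Now().Format(time.TimeOnly))
		case <-intervalChangedCh:
			// No work to do here; fall through and re-arm with the new interval.
		}
		timer.Reset(getInterval())
	}
}

func main() {
	currentInterval.Store(int64(200 * time.Millisecond))
	go func() {
		time.Sleep(300 * time.Millisecond)
		currentInterval.Store(int64(50 * time.Millisecond))
		intervalChangedCh <- struct{}{} // wake the loop so the new interval applies immediately
	}()
	ctx, cancel := context.WithTimeout(context.Background(), 600*time.Millisecond)
	defer cancel()
	cleanupLoop(ctx)
}
```
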
@@ -550,46 +531,47 @@ func (s *erasureSets) cleanupStaleUploads(ctx context.Context) { if set == nil { return } - set.cleanupStaleUploads(ctx, globalAPIConfig.getStaleUploadsExpiry()) + set.cleanupStaleUploads(ctx) }(set) } wg.Wait() - - // Reset for the next interval - timer.Reset(globalAPIConfig.getStaleUploadsCleanupInterval()) + case <-staleUploadsCleanupIntervalChangedCh: } + + // Reset for the next interval + timer.Reset(globalAPIConfig.getStaleUploadsCleanupInterval()) } } type auditObjectOp struct { - Name string `json:"name"` - Pool int `json:"poolId"` - Set int `json:"setId"` - Disks []string `json:"disks"` + Name string `json:"name"` + Pool int `json:"poolId"` + Set int `json:"setId"` +} + +func (op auditObjectOp) String() string { + // Flatten the auditObjectOp + return fmt.Sprintf("name=%s,pool=%d,set=%d", op.Name, op.Pool, op.Set) } // Add erasure set information to the current context -func auditObjectErasureSet(ctx context.Context, object string, set *erasureObjects) { +func auditObjectErasureSet(ctx context.Context, api, object string, set *erasureObjects) { if len(logger.AuditTargets()) == 0 { return } op := auditObjectOp{ - Name: decodeDirObject(object), - Pool: set.poolIndex + 1, - Set: set.setIndex + 1, - Disks: set.getEndpointStrings(), + Name: decodeDirObject(object), + Pool: set.poolIndex + 1, + Set: set.setIndex + 1, } - logger.GetReqInfo(ctx).AppendTags("objectLocation", op) + logger.GetReqInfo(ctx).AppendTags(api, op.String()) } // NewNSLock - initialize a new namespace RWLocker instance. func (s *erasureSets) NewNSLock(bucket string, objects ...string) RWLocker { - if len(objects) == 1 { - return s.getHashedSet(objects[0]).NewNSLock(bucket, objects...) - } - return s.getHashedSet("").NewNSLock(bucket, objects...) + return s.sets[0].NewNSLock(bucket, objects...) } // SetDriveCount returns the current drives per set. @@ -611,7 +593,6 @@ func (s *erasureSets) StorageInfo(ctx context.Context) StorageInfo { g := errgroup.WithNErrs(len(s.sets)) for index := range s.sets { - index := index g.Go(func() error { storageInfos[index] = s.sets[index].StorageInfo(ctx) return nil @@ -636,7 +617,6 @@ func (s *erasureSets) LocalStorageInfo(ctx context.Context, metrics bool) Storag g := errgroup.WithNErrs(len(s.sets)) for index := range s.sets { - index := index g.Go(func() error { storageInfos[index] = s.sets[index].LocalStorageInfo(ctx, metrics) return nil @@ -659,7 +639,6 @@ func (s *erasureSets) Shutdown(ctx context.Context) error { g := errgroup.WithNErrs(len(s.sets)) for index := range s.sets { - index := index g.Go(func() error { return s.sets[index].Shutdown(ctx) }, index) @@ -670,14 +649,6 @@ func (s *erasureSets) Shutdown(ctx context.Context) error { return err } } - select { - case _, ok := <-s.setReconnectEvent: - if ok { - xioutil.SafeClose(s.setReconnectEvent) - } - default: - xioutil.SafeClose(s.setReconnectEvent) - } return nil } @@ -728,11 +699,9 @@ func (s *erasureSets) getHashedSet(input string) (set *erasureObjects) { } // listDeletedBuckets lists deleted buckets from all disks. 
-func listDeletedBuckets(ctx context.Context, storageDisks []StorageAPI, delBuckets map[string]VolInfo, readQuorum int) error { +func listDeletedBuckets(ctx context.Context, storageDisks []StorageAPI, delBuckets *xsync.MapOf[string, VolInfo], readQuorum int) error { g := errgroup.WithNErrs(len(storageDisks)) - var mu sync.Mutex for index := range storageDisks { - index := index g.Go(func() error { if storageDisks[index] == nil { // we ignore disk not found errors @@ -749,11 +718,7 @@ func listDeletedBuckets(ctx context.Context, storageDisks []StorageAPI, delBucke vi, err := storageDisks[index].StatVol(ctx, pathJoin(minioMetaBucket, bucketMetaPrefix, deletedBucketsPrefix, volName)) if err == nil { vi.Name = strings.TrimSuffix(volName, SlashSeparator) - mu.Lock() - if _, ok := delBuckets[volName]; !ok { - delBuckets[volName] = vi - } - mu.Unlock() + delBuckets.Store(volName, vi) } } return nil @@ -899,11 +864,14 @@ func (s *erasureSets) CopyObject(ctx context.Context, srcBucket, srcObject, dstB } putOpts := ObjectOptions{ - ServerSideEncryption: dstOpts.ServerSideEncryption, - UserDefined: srcInfo.UserDefined, - Versioned: dstOpts.Versioned, - VersionID: dstOpts.VersionID, - MTime: dstOpts.MTime, + ServerSideEncryption: dstOpts.ServerSideEncryption, + UserDefined: srcInfo.UserDefined, + Versioned: dstOpts.Versioned, + VersionID: dstOpts.VersionID, + MTime: dstOpts.MTime, + EncryptFn: dstOpts.EncryptFn, + WantChecksum: dstOpts.WantChecksum, + WantServerSideChecksumType: dstOpts.WantServerSideChecksumType, } return dstSet.putObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, putOpts) @@ -1083,7 +1051,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H if !reflect.DeepEqual(s.format, refFormat) { // Format is corrupted and unrecognized by the running instance. 
- logger.LogIf(ctx, fmt.Errorf("Unable to heal the newly replaced drives due to format.json inconsistencies, please engage MinIO support for further assistance: %w", + healingLogIf(ctx, fmt.Errorf("Unable to heal the newly replaced drives due to format.json inconsistencies, please engage MinIO support for further assistance: %w", errCorruptedFormat)) return res, errCorruptedFormat } @@ -1112,7 +1080,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H continue } if err := saveFormatErasure(storageDisks[index], format, formatOpID); err != nil { - logger.LogIf(ctx, fmt.Errorf("Drive %s failed to write updated 'format.json': %v", storageDisks[index], err)) + healingLogIf(ctx, fmt.Errorf("Drive %s failed to write updated 'format.json': %v", storageDisks[index], err)) storageDisks[index].Close() tmpNewFormats[index] = nil // this disk failed to write new format } @@ -1127,7 +1095,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H m, n, err := findDiskIndexByDiskID(refFormat, format.Erasure.This) if err != nil { - logger.LogIf(ctx, err) + healingLogIf(ctx, err) continue } @@ -1137,8 +1105,6 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H if disk := storageDisks[index]; disk != nil { if disk.IsLocal() { - disk.SetDiskLoc(s.poolIndex, m, n) - xldisk, ok := disk.(*xlStorageDiskIDCheck) if ok { _, commonDeletes := calcCommonWritesDeletes(currentDisksInfo[m], (s.setDriveCount+1)/2) @@ -1155,7 +1121,6 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H if err != nil { continue } - disk.SetDiskLoc(s.poolIndex, m, n) } s.erasureDisks[m][n] = disk @@ -1165,13 +1130,7 @@ func (s *erasureSets) HealFormat(ctx context.Context, dryRun bool) (res madmin.H if globalIsDistErasure { globalLocalSetDrives[s.poolIndex][m][n] = disk } - for i, ldisk := range globalLocalDrives { - _, k, l := ldisk.GetDiskLoc() - if k == m && l == n { - globalLocalDrives[i] = disk - break - } - } + globalLocalDrivesMap[disk.Endpoint().String()] = disk globalLocalDrivesMu.Unlock() } } diff --git a/cmd/erasure-sets_test.go b/cmd/erasure-sets_test.go index f8cb16b7d2188..63311d557d02a 100644 --- a/cmd/erasure-sets_test.go +++ b/cmd/erasure-sets_test.go @@ -40,13 +40,12 @@ func BenchmarkCrcHash(b *testing.B) { {1024}, } for _, testCase := range cases { - testCase := testCase key := randString(testCase.key) b.Run("", func(b *testing.B) { b.SetBytes(1024) b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { crcHashMod(key, 16) } }) @@ -65,13 +64,12 @@ func BenchmarkSipHash(b *testing.B) { {1024}, } for _, testCase := range cases { - testCase := testCase key := randString(testCase.key) b.Run("", func(b *testing.B) { b.SetBytes(1024) b.ReportAllocs() b.ResetTimer() - for i := 0; i < b.N; i++ { + for b.Loop() { sipHashMod(key, 16, testUUID) } }) @@ -159,12 +157,12 @@ func TestCrcHashMod(t *testing.T) { // TestNewErasure - tests initialization of all input disks // and constructs a valid `Erasure` object func TestNewErasureSets(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() nDisks := 16 // Maximum disks. var erasureDisks []string - for i := 0; i < nDisks; i++ { + for range nDisks { // Do not attempt to create this path, the test validates // so that newErasureSets initializes non existing paths // and successfully returns initialized object layer. 
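
[Reviewer note, not part of the patch] The test edits above, and many similar ones throughout this changeset, move to the newer Go testing idioms: integer range loops such as `for range nDisks` (Go 1.22+), `for b.Loop()` in place of `for i := 0; i < b.N; i++` (Go 1.24+), and `t.Context()` in place of a hand-rolled `context.WithCancel(context.Background())` (Go 1.24+). A minimal sketch of the three idioms side by side; `crcOf` here is a throwaway stand-in, not MinIO's `crcHashMod`:

```go
// idioms_test.go — build with `go test` on Go 1.24 or newer.
package idioms

import (
	"fmt"
	"hash/crc32"
	"testing"
)

// crcOf is a placeholder for whatever function is under test; it maps a key
// onto [0, cardinality) via CRC32, purely so the loops have something to call.
func crcOf(key string, cardinality int) int {
	return int(crc32.ChecksumIEEE([]byte(key)) % uint32(cardinality))
}

func TestCrcOf(t *testing.T) {
	// t.Context is canceled automatically when the test finishes (Go 1.24+),
	// replacing the manual context.WithCancel + defer cancel() pattern.
	ctx := t.Context()

	for i := range 16 { // integer range loop (Go 1.22+), replaces i := 0; i < 16; i++
		select {
		case <-ctx.Done():
			t.Fatal("test context canceled unexpectedly")
		default:
		}
		if got := crcOf(fmt.Sprintf("object-%d", i), 16); got < 0 || got >= 16 {
			t.Fatalf("hash mod out of range: %d", got)
		}
	}
}

func BenchmarkCrcOf(b *testing.B) {
	b.ReportAllocs()
	for b.Loop() { // replaces `for i := 0; i < b.N; i++` (Go 1.24+)
		crcOf("object", 16)
	}
}
```
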
diff --git a/cmd/erasure.go b/cmd/erasure.go index 94ddb2f63ac84..4e6674c35b1c8 100644 --- a/cmd/erasure.go +++ b/cmd/erasure.go @@ -21,6 +21,7 @@ import ( "context" "errors" "fmt" + "maps" "math/rand" "os" "runtime" @@ -31,8 +32,7 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/dsync" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/sync/errgroup" + "github.com/minio/pkg/v3/sync/errgroup" ) // list all errors that can be ignore in a bucket operation. @@ -91,6 +91,11 @@ func (er erasureObjects) defaultWQuorum() int { return dataCount } +// defaultRQuorum read quorum based on setDriveCount and defaultParityCount +func (er erasureObjects) defaultRQuorum() int { + return er.setDriveCount - er.defaultParityCount +} + func diskErrToDriveState(err error) (state string) { switch { case errors.Is(err, errDiskNotFound) || errors.Is(err, context.DeadlineExceeded): @@ -103,13 +108,15 @@ func diskErrToDriveState(err error) (state string) { state = madmin.DriveStatePermission case errors.Is(err, errFaultyDisk): state = madmin.DriveStateFaulty + case errors.Is(err, errDriveIsRoot): + state = madmin.DriveStateRootMount case err == nil: state = madmin.DriveStateOk default: state = fmt.Sprintf("%s (cause: %s)", madmin.DriveStateUnknown, err) } - return + return state } func getOnlineOfflineDisksStats(disksInfo []madmin.Disk) (onlineDisks, offlineDisks madmin.BackendDisks) { @@ -169,7 +176,6 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint, metrics bool) (disks g := errgroup.WithNErrs(len(disks)) for index := range disks { - index := index g.Go(func() error { di := madmin.Disk{ Endpoint: endpoints[index].String(), @@ -197,11 +203,9 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint, metrics bool) (disks di.State = diskErrToDriveState(err) di.FreeInodes = info.FreeInodes di.UsedInodes = info.UsedInodes - if info.Healing { - if hi := disks[index].Healing(); hi != nil { - hd := hi.toHealingDisk() - di.HealInfo = &hd - } + if hi := disks[index].Healing(); hi != nil { + hd := hi.toHealingDisk() + di.HealInfo = &hd } di.Metrics = &madmin.DiskMetrics{ LastMinute: make(map[string]madmin.TimedAction, len(info.Metrics.LastMinute)), @@ -215,9 +219,7 @@ func getDisksInfo(disks []StorageAPI, endpoints []Endpoint, metrics bool) (disks di.Metrics.LastMinute[k] = v.asTimedAction() } } - for k, v := range info.Metrics.APICalls { - di.Metrics.APICalls[k] = v - } + maps.Copy(di.Metrics.APICalls, info.Metrics.APICalls) if info.Total > 0 { di.Utilization = float64(info.Used / info.Total * 100) } @@ -273,17 +275,16 @@ func (er erasureObjects) LocalStorageInfo(ctx context.Context, metrics bool) Sto } // getOnlineDisksWithHealingAndInfo - returns online disks and overall healing status. 
-// Disks are randomly ordered, but in the following groups: +// Disks are ordered in the following groups: // - Non-scanning disks // - Non-healing disks // - Healing disks (if inclHealing is true) -func (er erasureObjects) getOnlineDisksWithHealingAndInfo(inclHealing bool) (newDisks []StorageAPI, newInfos []DiskInfo, healing bool) { +func (er erasureObjects) getOnlineDisksWithHealingAndInfo(inclHealing bool) (newDisks []StorageAPI, newInfos []DiskInfo, healing int) { var wg sync.WaitGroup disks := er.getDisks() infos := make([]DiskInfo, len(disks)) r := rand.New(rand.NewSource(time.Now().UnixNano())) for _, i := range r.Perm(len(disks)) { - i := i wg.Add(1) go func() { defer wg.Done() @@ -316,7 +317,7 @@ func (er erasureObjects) getOnlineDisksWithHealingAndInfo(inclHealing bool) (new continue } if info.Healing { - healing = true + healing++ if inclHealing { healingDisks = append(healingDisks, disks[i]) healingInfos = append(healingInfos, infos[i]) @@ -344,32 +345,32 @@ func (er erasureObjects) getOnlineDisksWithHealingAndInfo(inclHealing bool) (new return newDisks, newInfos, healing } -func (er erasureObjects) getOnlineDisksWithHealing(inclHealing bool) (newDisks []StorageAPI, healing bool) { - newDisks, _, healing = er.getOnlineDisksWithHealingAndInfo(inclHealing) - return +func (er erasureObjects) getOnlineDisksWithHealing(inclHealing bool) ([]StorageAPI, bool) { + newDisks, _, healing := er.getOnlineDisksWithHealingAndInfo(inclHealing) + return newDisks, healing > 0 } // Clean-up previously deleted objects. from .minio.sys/tmp/.trash/ func (er erasureObjects) cleanupDeletedObjects(ctx context.Context) { - // run multiple cleanup's local to this server. var wg sync.WaitGroup for _, disk := range er.getLocalDisks() { - if disk != nil { - wg.Add(1) - go func(disk StorageAPI) { - defer wg.Done() - diskPath := disk.Endpoint().Path - readDirFn(pathJoin(diskPath, minioMetaTmpDeletedBucket), func(ddir string, typ os.FileMode) error { - w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) - return w.Run(func() error { - wait := deletedCleanupSleeper.Timer(ctx) - removeAll(pathJoin(diskPath, minioMetaTmpDeletedBucket, ddir)) - wait() - return nil - }) - }) - }(disk) + if disk == nil { + continue } + wg.Add(1) + go func(disk StorageAPI) { + defer wg.Done() + drivePath := disk.Endpoint().Path + readDirFn(pathJoin(drivePath, minioMetaTmpDeletedBucket), func(ddir string, typ os.FileMode) error { + w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) + return w.Run(func() error { + wait := deleteCleanupSleeper.Timer(ctx) + removeAll(pathJoin(drivePath, minioMetaTmpDeletedBucket, ddir)) + wait() + return nil + }) + }) + }(disk) } wg.Wait() } @@ -384,7 +385,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa // Collect disks we can use. 
disks, healing := er.getOnlineDisksWithHealing(false) if len(disks) == 0 { - logger.LogIf(ctx, errors.New("data-scanner: all drives are offline or being healed, skipping scanner cycle")) + scannerLogIf(ctx, errors.New("data-scanner: all drives are offline or being healed, skipping scanner cycle")) return nil } @@ -449,7 +450,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa if cache.Info.LastUpdate.Equal(lastSave) { continue } - logger.LogOnceIf(ctx, cache.save(ctx, er, dataUsageCacheName), "nsscanner-cache-update") + scannerLogOnceIf(ctx, cache.save(ctx, er, dataUsageCacheName), "nsscanner-cache-update") updates <- cache.clone() lastSave = cache.Info.LastUpdate @@ -458,7 +459,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa // Save final state... cache.Info.NextCycle = wantCycle cache.Info.LastUpdate = time.Now() - logger.LogOnceIf(ctx, cache.save(ctx, er, dataUsageCacheName), "nsscanner-channel-closed") + scannerLogOnceIf(ctx, cache.save(ctx, er, dataUsageCacheName), "nsscanner-channel-closed") updates <- cache.clone() return } @@ -494,7 +495,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa // Load cache for bucket cacheName := pathJoin(bucket.Name, dataUsageCacheName) cache := dataUsageCache{} - logger.LogIf(ctx, cache.load(ctx, er, cacheName)) + scannerLogIf(ctx, cache.load(ctx, er, cacheName)) if cache.Info.Name == "" { cache.Info.Name = bucket.Name } @@ -530,9 +531,9 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa cache, err = disk.NSScanner(ctx, cache, updates, healScanMode, nil) if err != nil { if !cache.Info.LastUpdate.IsZero() && cache.Info.LastUpdate.After(before) { - logger.LogIf(ctx, cache.save(ctx, er, cacheName)) + scannerLogIf(ctx, cache.save(ctx, er, cacheName)) } else { - logger.LogIf(ctx, err) + scannerLogIf(ctx, err) } // This ensures that we don't close // bucketResults channel while the @@ -543,6 +544,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa } wg.Wait() + // Flatten for upstream, but save full state. 
var root dataUsageEntry if r := cache.root(); r != nil { root = cache.flatten(*r) @@ -558,7 +560,7 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa } // Save cache - logger.LogIf(ctx, cache.save(ctx, er, cacheName)) + scannerLogIf(ctx, cache.save(ctx, er, cacheName)) } }(i) } diff --git a/cmd/erasure_test.go b/cmd/erasure_test.go index d9f1f21fb6eba..078771fd19a3d 100644 --- a/cmd/erasure_test.go +++ b/cmd/erasure_test.go @@ -19,7 +19,6 @@ package cmd import ( "bytes" - "context" "crypto/rand" "io" "testing" @@ -52,11 +51,11 @@ func TestErasureEncodeDecode(t *testing.T) { buffer := make([]byte, len(data), 2*len(data)) copy(buffer, data) - erasure, err := NewErasure(context.Background(), test.dataBlocks, test.parityBlocks, blockSizeV2) + erasure, err := NewErasure(t.Context(), test.dataBlocks, test.parityBlocks, blockSizeV2) if err != nil { t.Fatalf("Test %d: failed to create erasure: %v", i, err) } - encoded, err := erasure.EncodeData(context.Background(), buffer) + encoded, err := erasure.EncodeData(t.Context(), buffer) if err != nil { t.Fatalf("Test %d: failed to encode data: %v", i, err) } @@ -69,7 +68,7 @@ func TestErasureEncodeDecode(t *testing.T) { } if test.reconstructParity { - err = erasure.DecodeDataAndParityBlocks(context.Background(), encoded) + err = erasure.DecodeDataAndParityBlocks(t.Context(), encoded) } else { err = erasure.DecodeDataBlocks(encoded) } @@ -98,7 +97,7 @@ func TestErasureEncodeDecode(t *testing.T) { } decodedData := new(bytes.Buffer) - if _, err = writeDataBlocks(context.Background(), decodedData, decoded, test.dataBlocks, 0, int64(len(data))); err != nil { + if _, err = writeDataBlocks(t.Context(), decodedData, decoded, test.dataBlocks, 0, int64(len(data))); err != nil { t.Errorf("Test %d: failed to write data blocks: %v", i, err) } if !bytes.Equal(decodedData.Bytes(), data) { @@ -127,7 +126,7 @@ func newErasureTestSetup(tb testing.TB, dataBlocks int, parityBlocks int, blockS if err != nil { return nil, err } - err = disks[i].MakeVol(context.Background(), "testbucket") + err = disks[i].MakeVol(tb.Context(), "testbucket") if err != nil { return nil, err } diff --git a/cmd/etcd.go b/cmd/etcd.go index 028a72780e643..e3cdebb7994cc 100644 --- a/cmd/etcd.go +++ b/cmd/etcd.go @@ -22,7 +22,6 @@ import ( "errors" "fmt" - "github.com/minio/minio/internal/logger" etcd "go.etcd.io/etcd/client/v3" ) @@ -48,7 +47,7 @@ func saveKeyEtcdWithTTL(ctx context.Context, client *etcd.Client, key string, da return etcdErrToErr(err, client.Endpoints()) } _, err = client.Put(timeoutCtx, key, string(data), etcd.WithLease(lease.ID)) - logger.LogIf(ctx, err) + etcdLogIf(ctx, err) return etcdErrToErr(err, client.Endpoints()) } @@ -59,7 +58,7 @@ func saveKeyEtcd(ctx context.Context, client *etcd.Client, key string, data []by return saveKeyEtcdWithTTL(ctx, client, key, data, opts[0].ttl) } _, err := client.Put(timeoutCtx, key, string(data)) - logger.LogIf(ctx, err) + etcdLogIf(ctx, err) return etcdErrToErr(err, client.Endpoints()) } @@ -68,7 +67,7 @@ func deleteKeyEtcd(ctx context.Context, client *etcd.Client, key string) error { defer cancel() _, err := client.Delete(timeoutCtx, key) - logger.LogIf(ctx, err) + etcdLogIf(ctx, err) return etcdErrToErr(err, client.Endpoints()) } @@ -77,7 +76,7 @@ func readKeyEtcd(ctx context.Context, client *etcd.Client, key string) ([]byte, defer cancel() resp, err := client.Get(timeoutCtx, key) if err != nil { - logger.LogOnceIf(ctx, err, "etcd-retrieve-keys") + etcdLogOnceIf(ctx, err, "etcd-retrieve-keys") return nil, 
etcdErrToErr(err, client.Endpoints()) } if resp.Count == 0 { diff --git a/cmd/event-notification.go b/cmd/event-notification.go index 9f56c4968282d..ceda47ef905a5 100644 --- a/cmd/event-notification.go +++ b/cmd/event-notification.go @@ -28,9 +28,8 @@ import ( "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/event" xhttp "github.com/minio/minio/internal/http" - "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/pubsub" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) // EventNotifier - notifies external systems about events in MinIO. @@ -50,23 +49,18 @@ func NewEventNotifier(ctx context.Context) *EventNotifier { } // GetARNList - returns available ARNs. -func (evnot *EventNotifier) GetARNList(onlyActive bool) []string { +func (evnot *EventNotifier) GetARNList() []string { arns := []string{} if evnot == nil { return arns } - region := globalSite.Region - for targetID, target := range evnot.targetList.TargetMap() { + region := globalSite.Region() + for targetID := range evnot.targetList.TargetMap() { // httpclient target is part of ListenNotification // which doesn't need to be listed as part of the ARN list // This list is only meant for external targets, filter // this out pro-actively. if !strings.HasPrefix(targetID.ID, "httpclient+") { - if onlyActive { - if _, err := target.IsActive(); err != nil { - continue - } - } arns = append(arns, targetID.ToARN(region).String()) } } @@ -75,18 +69,19 @@ func (evnot *EventNotifier) GetARNList(onlyActive bool) []string { } // Loads notification policies for all buckets into EventNotifier. -func (evnot *EventNotifier) set(bucket BucketInfo, meta BucketMetadata) { +func (evnot *EventNotifier) set(bucket string, meta BucketMetadata) { config := meta.notificationConfig if config == nil { return } - config.SetRegion(globalSite.Region) - if err := config.Validate(globalSite.Region, globalEventNotifier.targetList); err != nil { + region := globalSite.Region() + config.SetRegion(region) + if err := config.Validate(region, globalEventNotifier.targetList); err != nil { if _, ok := err.(*event.ErrARNNotFound); !ok { - logger.LogIf(GlobalContext, err) + internalLogIf(GlobalContext, err) } } - evnot.AddRulesMap(bucket.Name, config.ToRulesMap()) + evnot.AddRulesMap(bucket, config.ToRulesMap()) } // Targets returns all the registered targets diff --git a/cmd/fmt-gen.go b/cmd/fmt-gen.go new file mode 100644 index 0000000000000..c616c4dc04feb --- /dev/null +++ b/cmd/fmt-gen.go @@ -0,0 +1,119 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package cmd + +import ( + "encoding/json" + "log" + "os" + "path/filepath" + + "github.com/klauspost/compress/zip" + "github.com/minio/cli" +) + +var fmtGenFlags = []cli.Flag{ + cli.IntFlag{ + Name: "parity", + Usage: "specify erasure code parity", + }, + cli.StringFlag{ + Name: "deployment-id", + Usage: "deployment-id of the MinIO cluster for which format.json is needed", + }, + cli.StringFlag{ + Name: "address", + Value: ":" + GlobalMinioDefaultPort, + Usage: "bind to a specific ADDRESS:PORT, ADDRESS can be an IP or hostname", + EnvVar: "MINIO_ADDRESS", + }, +} + +var fmtGenCmd = cli.Command{ + Name: "fmt-gen", + Usage: "Generate format.json files for an erasure server pool", + Flags: append(fmtGenFlags, GlobalFlags...), + Action: fmtGenMain, + Hidden: true, + CustomHelpTemplate: `NAME: + {{.HelpName}} - {{.Usage}} + +USAGE: + {{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR1 [DIR2..] + {{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR{1...64} + {{.HelpName}} {{if .VisibleFlags}}[FLAGS] {{end}}DIR{1...64} DIR{65...128} + +DIR: + DIR points to a directory on a filesystem. When you want to combine + multiple drives into a single large system, pass one directory per + filesystem separated by space. You may also use a '...' convention + to abbreviate the directory arguments. Remote directories in a + distributed setup are encoded as HTTP(s) URIs. +{{if .VisibleFlags}} +FLAGS: + {{range .VisibleFlags}}{{.}} + {{end}}{{end}} +EXAMPLES: + 1. Generate format.json.zip containing format.json files for all drives in a distributed MinIO server pool of 32 nodes with 32 drives each. + {{.Prompt}} {{.HelpName}} http://node{1...32}.example.com/mnt/export{1...32} + +`, +} + +func fmtGenMain(ctxt *cli.Context) { + deploymentID := ctxt.String("deployment-id") + err := buildServerCtxt(ctxt, &globalServerCtxt) + if err != nil { + log.Fatalln(err) + } + handleCommonArgs(globalServerCtxt) + pools, _, err := createServerEndpoints(globalMinioAddr, globalServerCtxt.Layout.pools, globalServerCtxt.Layout.legacy) + if err != nil { + log.Fatalln(err) + } + + zipFile, err := os.Create("format.json.zip") + if err != nil { + log.Fatalf("failed to create format.json.zip: %v", err) + } + defer zipFile.Close() + fmtZipW := zip.NewWriter(zipFile) + defer fmtZipW.Close() + for _, pool := range pools { // for each pool + setCount, setDriveCount := pool.SetCount, pool.DrivesPerSet + format := newFormatErasureV3(setCount, setDriveCount) + format.ID = deploymentID + for i := range setCount { // for each erasure set + for j := range setDriveCount { + newFormat := format.Clone() + newFormat.Erasure.This = format.Erasure.Sets[i][j] + if deploymentID != "" { + newFormat.ID = deploymentID + } + drive := pool.Endpoints[i*setDriveCount+j] + fmtBytes, err := json.Marshal(newFormat) + if err != nil { + //nolint:gocritic + log.Fatalf("failed to marshal format.json for %s: %v", drive.String(), err) + } + fmtJSON := filepath.Join(drive.Host, drive.Path, minioMetaBucket, "format.json") + embedFileInZip(fmtZipW, fmtJSON, fmtBytes, 0o600) + } + } + } +} diff --git a/cmd/format-erasure.go b/cmd/format-erasure.go index eae47446184fa..6ff6d9e99c93f 100644 --- a/cmd/format-erasure.go +++ b/cmd/format-erasure.go @@ -24,17 +24,15 @@ import ( "fmt" "io/fs" "os" - "reflect" "sync" "github.com/dustin/go-humanize" - jsoniter "github.com/json-iterator/go" "github.com/minio/minio/internal/color" "github.com/minio/minio/internal/config" "github.com/minio/minio/internal/config/storageclass" xioutil "github.com/minio/minio/internal/ioutil" 
"github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/sync/errgroup" + "github.com/minio/pkg/v3/sync/errgroup" ) const ( @@ -159,9 +157,9 @@ func newFormatErasureV3(numSets int, setLen int) *formatErasureV3 { format.Erasure.DistributionAlgo = formatErasureVersionV3DistributionAlgoV3 format.Erasure.Sets = make([][]string, numSets) - for i := 0; i < numSets; i++ { + for i := range numSets { format.Erasure.Sets[i] = make([]string, setLen) - for j := 0; j < setLen; j++ { + for j := range setLen { format.Erasure.Sets[i][j] = mustGetUUID() } } @@ -179,7 +177,7 @@ func formatGetBackendErasureVersion(b []byte) (string, error) { return "", fmt.Errorf(`format.Version expected: %s, got: %s`, formatMetaVersionV1, meta.Version) } if meta.Format != formatBackendErasure && meta.Format != formatBackendErasureSingle { - return "", fmt.Errorf(`found backend type %s, expected %s or %s - to migrate to a supported backend visit https://min.io/docs/minio/linux/operations/install-deploy-manage/migrate-fs-gateway.html`, meta.Format, formatBackendErasure, formatBackendErasureSingle) + return "", fmt.Errorf(`found backend type %s, expected %s or %s - to migrate to a supported backend visit https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-migrate-fs-gateway.html`, meta.Format, formatBackendErasure, formatBackendErasureSingle) } // Erasure backend found, proceed to detect version. format := &formatErasureVersionDetect{} @@ -278,7 +276,7 @@ func formatErasureMigrateV2ToV3(data []byte, export, version string) ([]byte, er tmpOld := pathJoin(export, minioMetaTmpDeletedBucket, mustGetUUID()) if err := renameAll(pathJoin(export, minioMetaMultipartBucket), tmpOld, export); err != nil && err != errFileNotFound { - logger.LogIf(GlobalContext, fmt.Errorf("unable to rename (%s -> %s) %w, drive may be faulty please investigate", + bootLogIf(GlobalContext, fmt.Errorf("unable to rename (%s -> %s) %w, drive may be faulty please investigate", pathJoin(export, minioMetaMultipartBucket), tmpOld, osErrToFileErr(err))) @@ -326,12 +324,11 @@ func loadFormatErasureAll(storageDisks []StorageAPI, heal bool) ([]*formatErasur // Load format from each disk in parallel for index := range storageDisks { - index := index g.Go(func() error { if storageDisks[index] == nil { return errDiskNotFound } - format, formatData, err := loadFormatErasureWithData(storageDisks[index], heal) + format, err := loadFormatErasure(storageDisks[index], heal) if err != nil { return err } @@ -340,7 +337,6 @@ func loadFormatErasureAll(storageDisks []StorageAPI, heal bool) ([]*formatErasur // If no healing required, make the disks valid and // online. storageDisks[index].SetDiskID(format.Erasure.This) - storageDisks[index].SetFormatData(formatData) } return nil }, index) @@ -380,7 +376,6 @@ func saveFormatErasure(disk StorageAPI, format *formatErasureV3, healID string) } disk.SetDiskID(format.Erasure.This) - disk.SetFormatData(formatData) if healID != "" { ctx := context.Background() ht := initHealingTracker(disk, healID) @@ -389,56 +384,32 @@ func saveFormatErasure(disk StorageAPI, format *formatErasureV3, healID string) return nil } -// loadFormatErasureWithData - loads format.json from disk. -func loadFormatErasureWithData(disk StorageAPI, heal bool) (format *formatErasureV3, data []byte, err error) { - data, err = disk.ReadAll(context.TODO(), minioMetaBucket, formatConfigFile) +// loadFormatErasure - loads format.json from disk. 
+func loadFormatErasure(disk StorageAPI, heal bool) (format *formatErasureV3, err error) { + data, err := disk.ReadAll(context.TODO(), minioMetaBucket, formatConfigFile) if err != nil { // 'file not found' and 'volume not found' as // same. 'volume not found' usually means its a fresh disk. if errors.Is(err, errFileNotFound) || errors.Is(err, errVolumeNotFound) { - return nil, nil, errUnformattedDisk + return nil, errUnformattedDisk } - return nil, nil, err + return nil, err } // Try to decode format json into formatConfigV1 struct. format = &formatErasureV3{} if err = json.Unmarshal(data, format); err != nil { - return nil, nil, err + return nil, err } if heal { info, err := disk.DiskInfo(context.Background(), DiskInfoOptions{NoOp: heal}) if err != nil { - return nil, nil, err + return nil, err } format.Info = info } - // Success. - return format, data, nil -} - -// loadFormatErasure - loads format.json from disk. -func loadFormatErasure(disk StorageAPI) (format *formatErasureV3, err error) { - buf, err := disk.ReadAll(context.TODO(), minioMetaBucket, formatConfigFile) - if err != nil { - // 'file not found' and 'volume not found' as - // same. 'volume not found' usually means its a fresh disk. - if err == errFileNotFound || err == errVolumeNotFound { - return nil, errUnformattedDisk - } - return nil, err - } - - json := jsoniter.ConfigCompatibleWithStandardLibrary - - // Try to decode format json into formatConfigV1 struct. - format = &formatErasureV3{} - if err = json.Unmarshal(buf, format); err != nil { - return nil, err - } - // Success. return format, nil } @@ -471,115 +442,8 @@ func checkFormatErasureValues(formats []*formatErasureV3, disks []StorageAPI, se return fmt.Errorf("%s drive is already being used in another erasure deployment. (Number of drives specified: %d but the number of drives found in the %s drive's format.json: %d)", disks[i], len(formats), humanize.Ordinal(i+1), len(formatErasure.Erasure.Sets)*len(formatErasure.Erasure.Sets[0])) } - // Only if custom erasure drive count is set, verify if the - // set_drive_count was manually set - we need to honor what is - // present on the drives. - if globalCustomErasureDriveCount && len(formatErasure.Erasure.Sets[0]) != setDriveCount { - return fmt.Errorf("%s drive is already formatted with %d drives per erasure set. This cannot be changed to %d, please revert your MINIO_ERASURE_SET_DRIVE_COUNT setting", disks[i], len(formatErasure.Erasure.Sets[0]), setDriveCount) - } - } - return nil -} - -// Get Deployment ID for the Erasure sets from format.json. -// This need not be in quorum. Even if one of the format.json -// file has this value, we assume it is valid. -// If more than one format.json's have different id, it is considered a corrupt -// backend format. -func formatErasureGetDeploymentID(refFormat *formatErasureV3, formats []*formatErasureV3) (string, error) { - var deploymentID string - for _, format := range formats { - if format == nil || format.ID == "" { - continue - } - if reflect.DeepEqual(format.Erasure.Sets, refFormat.Erasure.Sets) { - // Found an ID in one of the format.json file - // Set deploymentID for the first time. - if deploymentID == "" { - deploymentID = format.ID - } else if deploymentID != format.ID { - // DeploymentID found earlier doesn't match with the - // current format.json's ID. 
- return "", fmt.Errorf("Deployment IDs do not match expected %s, got %s: %w", - deploymentID, format.ID, errCorruptedFormat) - } - } - } - return deploymentID, nil -} - -// formatErasureFixDeploymentID - Add deployment id if it is not present. -func formatErasureFixDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, refFormat *formatErasureV3, formats []*formatErasureV3) (err error) { - for index := range formats { - // If the Erasure sets do not match, set those formats to nil, - // We do not have to update the ID on those format.json file. - if formats[index] != nil && !reflect.DeepEqual(formats[index].Erasure.Sets, refFormat.Erasure.Sets) { - formats[index] = nil - } - } - - refFormat.ID, err = formatErasureGetDeploymentID(refFormat, formats) - if err != nil { - return err - } - - // If ID is set, then some other node got the lock - // before this node could and generated an ID - // for the deployment. No need to generate one. - if refFormat.ID != "" { - return nil - } - - // ID is generated for the first time, - // We set the ID in all the formats and update. - refFormat.ID = mustGetUUID() - for _, format := range formats { - if format != nil { - format.ID = refFormat.ID - } - } - // Deployment ID needs to be set on all the disks. - // Save `format.json` across all disks. - return saveFormatErasureAll(GlobalContext, storageDisks, formats) -} - -// Update only the valid local disks which have not been updated before. -func formatErasureFixLocalDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, refFormat *formatErasureV3) error { - // If this server was down when the deploymentID was updated - // then we make sure that we update the local disks with the deploymentID. - - // Initialize errs to collect errors inside go-routine. - g := errgroup.WithNErrs(len(storageDisks)) - - for index := range storageDisks { - index := index - g.Go(func() error { - if endpoints[index].IsLocal && storageDisks[index] != nil && storageDisks[index].IsOnline() { - format, err := loadFormatErasure(storageDisks[index]) - if err != nil { - // Disk can be offline etc. - // ignore the errors seen here. - return nil - } - if format.ID != "" { - return nil - } - if !reflect.DeepEqual(format.Erasure.Sets, refFormat.Erasure.Sets) { - return nil - } - format.ID = refFormat.ID - // Heal the drive if we fixed its deployment ID. - if err := saveFormatErasure(storageDisks[index], format, mustGetUUID()); err != nil { - logger.LogIf(GlobalContext, err) - return fmt.Errorf("Unable to save format.json, %w", err) - } - } - return nil - }, index) - } - for _, err := range g.Wait() { - if err != nil { - return err + if len(formatErasure.Erasure.Sets[0]) != setDriveCount { + return fmt.Errorf("%s drive is already formatted with %d drives per erasure set. This cannot be changed to %d", disks[i], len(formatErasure.Erasure.Sets[0]), setDriveCount) } } return nil @@ -649,7 +513,7 @@ func formatErasureV3Check(reference *formatErasureV3, format *formatErasureV3) e } // Make sure that the diskID is found in the set. - for i := 0; i < len(tmpFormat.Erasure.Sets); i++ { + for i := range len(tmpFormat.Erasure.Sets) { for j := 0; j < len(tmpFormat.Erasure.Sets[i]); j++ { if this == tmpFormat.Erasure.Sets[i][j] { return nil @@ -665,7 +529,6 @@ func saveFormatErasureAll(ctx context.Context, storageDisks []StorageAPI, format // Write `format.json` to all disks. 
for index := range storageDisks { - index := index g.Go(func() error { if formats[index] == nil { return errDiskNotFound @@ -701,7 +564,6 @@ func initStorageDisksWithErrors(endpoints Endpoints, opts storageOpts) ([]Storag storageDisks := make([]StorageAPI, len(endpoints)) g := errgroup.WithNErrs(len(endpoints)) for index := range endpoints { - index := index g.Go(func() (err error) { storageDisks[index], err = newStorageAPI(endpoints[index], opts) return err @@ -735,7 +597,6 @@ func formatErasureV3ThisEmpty(formats []*formatErasureV3) bool { func fixFormatErasureV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*formatErasureV3) error { g := errgroup.WithNErrs(len(formats)) for i := range formats { - i := i g.Go(func() error { if formats[i] == nil || !endpoints[i].IsLocal { return nil @@ -774,9 +635,9 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount, return nil, err } - for i := 0; i < setCount; i++ { + for i := range setCount { hostCount := make(map[string]int, setDriveCount) - for j := 0; j < setDriveCount; j++ { + for j := range setDriveCount { disk := storageDisks[i*setDriveCount+j] newFormat := format.Clone() newFormat.Erasure.This = format.Erasure.Sets[i][j] @@ -797,7 +658,7 @@ func initFormatErasure(ctx context.Context, storageDisks []StorageAPI, setCount, return } logger.Info(" * Set %v:", i+1) - for j := 0; j < setDriveCount; j++ { + for j := range setDriveCount { disk := storageDisks[i*setDriveCount+j] logger.Info(" - Drive: %s", disk.String()) } diff --git a/cmd/format-erasure_test.go b/cmd/format-erasure_test.go index 4a4dec8d08006..732f6728e01e6 100644 --- a/cmd/format-erasure_test.go +++ b/cmd/format-erasure_test.go @@ -21,7 +21,6 @@ import ( "crypto/sha256" "encoding/hex" "encoding/json" - "errors" "os" "reflect" "testing" @@ -49,7 +48,7 @@ func TestFixFormatV3(t *testing.T) { format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1 formats := make([]*formatErasureV3, 8) - for j := 0; j < 8; j++ { + for j := range 8 { newFormat := format.Clone() newFormat.Erasure.This = format.Erasure.Sets[0][j] formats[j] = newFormat @@ -80,7 +79,7 @@ func TestFormatErasureEmpty(t *testing.T) { format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1 formats := make([]*formatErasureV3, 16) - for j := 0; j < 16; j++ { + for j := range 16 { newFormat := format.Clone() newFormat.Erasure.This = format.Erasure.Sets[0][j] formats[j] = newFormat @@ -277,8 +276,8 @@ func TestGetFormatErasureInQuorumCheck(t *testing.T) { format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1 formats := make([]*formatErasureV3, 32) - for i := 0; i < setCount; i++ { - for j := 0; j < setDriveCount; j++ { + for i := range setCount { + for j := range setDriveCount { newFormat := format.Clone() newFormat.Erasure.This = format.Erasure.Sets[i][j] formats[i*setDriveCount+j] = newFormat @@ -391,18 +390,17 @@ func BenchmarkGetFormatErasureInQuorumOld(b *testing.B) { format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1 formats := make([]*formatErasureV3, 15*200) - for i := 0; i < setCount; i++ { - for j := 0; j < setDriveCount; j++ { + for i := range setCount { + for j := range setDriveCount { newFormat := format.Clone() newFormat.Erasure.This = format.Erasure.Sets[i][j] formats[i*setDriveCount+j] = newFormat } } - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { _, _ = getFormatErasureInQuorumOld(formats) } } @@ -415,78 +413,21 @@ func BenchmarkGetFormatErasureInQuorum(b 
*testing.B) { format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1 formats := make([]*formatErasureV3, 15*200) - for i := 0; i < setCount; i++ { - for j := 0; j < setDriveCount; j++ { + for i := range setCount { + for j := range setDriveCount { newFormat := format.Clone() newFormat.Erasure.This = format.Erasure.Sets[i][j] formats[i*setDriveCount+j] = newFormat } } - b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { _, _ = getFormatErasureInQuorum(formats) } } -// Tests formatErasureGetDeploymentID() -func TestGetErasureID(t *testing.T) { - setCount := 2 - setDriveCount := 8 - - format := newFormatErasureV3(setCount, setDriveCount) - format.Erasure.DistributionAlgo = formatErasureVersionV2DistributionAlgoV1 - formats := make([]*formatErasureV3, 16) - - for i := 0; i < setCount; i++ { - for j := 0; j < setDriveCount; j++ { - newFormat := format.Clone() - newFormat.Erasure.This = format.Erasure.Sets[i][j] - formats[i*setDriveCount+j] = newFormat - } - } - - // Return a format from list of formats in quorum. - quorumFormat, err := getFormatErasureInQuorum(formats) - if err != nil { - t.Fatal(err) - } - - // Check if the reference format and input formats are same. - var id string - if id, err = formatErasureGetDeploymentID(quorumFormat, formats); err != nil { - t.Fatal(err) - } - - if id == "" { - t.Fatal("ID cannot be empty.") - } - - formats[0] = nil - if id, err = formatErasureGetDeploymentID(quorumFormat, formats); err != nil { - t.Fatal(err) - } - if id == "" { - t.Fatal("ID cannot be empty.") - } - - formats[1].Erasure.Sets[0][0] = "bad-uuid" - if id, err = formatErasureGetDeploymentID(quorumFormat, formats); err != nil { - t.Fatal(err) - } - - if id == "" { - t.Fatal("ID cannot be empty.") - } - - formats[2].ID = "bad-id" - if _, err = formatErasureGetDeploymentID(quorumFormat, formats); !errors.Is(err, errCorruptedFormat) { - t.Fatalf("Unexpected error %s", err) - } -} - // Initialize new format sets. 
func TestNewFormatSets(t *testing.T) { setCount := 2 @@ -497,8 +438,8 @@ func TestNewFormatSets(t *testing.T) { formats := make([]*formatErasureV3, 32) errs := make([]error, 32) - for i := 0; i < setCount; i++ { - for j := 0; j < setDriveCount; j++ { + for i := range setCount { + for j := range setDriveCount { newFormat := format.Clone() newFormat.Erasure.This = format.Erasure.Sets[i][j] formats[i*setDriveCount+j] = newFormat diff --git a/cmd/ftp-server-driver.go b/cmd/ftp-server-driver.go index 2b55357db3def..7f21eeb8da33d 100644 --- a/cmd/ftp-server-driver.go +++ b/cmd/ftp-server-driver.go @@ -24,7 +24,10 @@ import ( "errors" "fmt" "io" + "net" + "net/http" "os" + "path" "strings" "time" @@ -33,7 +36,7 @@ import ( "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio/internal/auth" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" + "github.com/minio/pkg/v3/mimedb" ftp "goftp.io/server/v2" ) @@ -82,15 +85,20 @@ func (m *minioFileInfo) Mode() os.FileMode { return os.ModePerm } +var minFileDate = time.Date(1980, 1, 1, 0, 0, 0, 0, time.UTC) // Workaround for Filezilla + func (m *minioFileInfo) ModTime() time.Time { - return m.info.LastModified + if !m.info.LastModified.IsZero() { + return m.info.LastModified + } + return minFileDate } func (m *minioFileInfo) IsDir() bool { return m.isDir } -func (m *minioFileInfo) Sys() interface{} { +func (m *minioFileInfo) Sys() any { return nil } @@ -99,7 +107,7 @@ type ftpMetrics struct{} var globalFtpMetrics ftpMetrics -func ftpTrace(s *ftp.Context, startTime time.Time, source, path string, err error) madmin.TraceInfo { +func ftpTrace(s *ftp.Context, startTime time.Time, source, objPath string, err error, sz int64) madmin.TraceInfo { var errStr string if err != nil { errStr = err.Error() @@ -108,34 +116,42 @@ func ftpTrace(s *ftp.Context, startTime time.Time, source, path string, err erro TraceType: madmin.TraceFTP, Time: startTime, NodeName: globalLocalNodeName, - FuncName: fmt.Sprintf("ftp USER=%s COMMAND=%s PARAM=%s ISLOGIN=%t, Source=%s", s.Sess.LoginUser(), s.Cmd, s.Param, s.Sess.IsLogin(), source), + FuncName: s.Cmd, Duration: time.Since(startTime), - Path: path, + Path: objPath, Error: errStr, + Bytes: sz, + Custom: map[string]string{ + "user": s.Sess.LoginUser(), + "cmd": s.Cmd, + "param": s.Param, + "login": fmt.Sprintf("%t", s.Sess.IsLogin()), + "source": source, + }, } } -func (m *ftpMetrics) log(s *ftp.Context, paths ...string) func(err error) { +func (m *ftpMetrics) log(s *ftp.Context, paths ...string) func(sz int64, err error) { startTime := time.Now() source := getSource(2) - return func(err error) { - globalTrace.Publish(ftpTrace(s, startTime, source, strings.Join(paths, " "), err)) + return func(sz int64, err error) { + globalTrace.Publish(ftpTrace(s, startTime, source, strings.Join(paths, " "), err, sz)) } } // Stat implements ftpDriver -func (driver *ftpDriver) Stat(ctx *ftp.Context, path string) (fi os.FileInfo, err error) { - stopFn := globalFtpMetrics.log(ctx, path) - defer stopFn(err) +func (driver *ftpDriver) Stat(ctx *ftp.Context, objPath string) (fi os.FileInfo, err error) { + stopFn := globalFtpMetrics.log(ctx, objPath) + defer stopFn(0, err) - if path == SlashSeparator { + if objPath == SlashSeparator { return &minioFileInfo{ p: SlashSeparator, isDir: true, }, nil } - bucket, object := path2BucketObject(path) + bucket, object := path2BucketObject(objPath) if bucket == "" { return nil, errors.New("bucket name cannot be empty") } @@ -182,9 +198,9 @@ func (driver 
*ftpDriver) Stat(ctx *ftp.Context, path string) (fi os.FileInfo, er } // ListDir implements ftpDriver -func (driver *ftpDriver) ListDir(ctx *ftp.Context, path string, callback func(os.FileInfo) error) (err error) { - stopFn := globalFtpMetrics.log(ctx, path) - defer stopFn(err) +func (driver *ftpDriver) ListDir(ctx *ftp.Context, objPath string, callback func(os.FileInfo) error) (err error) { + stopFn := globalFtpMetrics.log(ctx, objPath) + defer stopFn(0, err) clnt, err := driver.getMinIOClient(ctx) if err != nil { @@ -194,7 +210,7 @@ func (driver *ftpDriver) ListDir(ctx *ftp.Context, path string, callback func(os cctx, cancel := context.WithCancel(context.Background()) defer cancel() - bucket, prefix := path2BucketObject(path) + bucket, prefix := path2BucketObject(objPath) if bucket == "" { buckets, err := clnt.ListBuckets(cctx) if err != nil { @@ -246,7 +262,7 @@ func (driver *ftpDriver) ListDir(ctx *ftp.Context, path string, callback func(os func (driver *ftpDriver) CheckPasswd(c *ftp.Context, username, password string) (ok bool, err error) { stopFn := globalFtpMetrics.log(c, username) - defer stopFn(err) + defer stopFn(0, err) if globalIAMSys.LDAPConfig.Enabled() { sa, _, err := globalIAMSys.getServiceAccount(context.Background(), username) @@ -254,11 +270,11 @@ func (driver *ftpDriver) CheckPasswd(c *ftp.Context, username, password string) return false, err } if errors.Is(err, errNoSuchServiceAccount) { - ldapUserDN, groupDistNames, err := globalIAMSys.LDAPConfig.Bind(username, password) + lookupRes, groupDistNames, err := globalIAMSys.LDAPConfig.Bind(username, password) if err != nil { return false, err } - ldapPolicies, _ := globalIAMSys.PolicyDBGet(ldapUserDN, groupDistNames...) + ldapPolicies, _ := globalIAMSys.PolicyDBGet(lookupRes.NormDN, groupDistNames...) return len(ldapPolicies) > 0, nil } return subtle.ConstantTimeCompare([]byte(sa.Credentials.SecretKey), []byte(password)) == 1, nil @@ -272,6 +288,10 @@ func (driver *ftpDriver) CheckPasswd(c *ftp.Context, username, password string) } func (driver *ftpDriver) getMinIOClient(ctx *ftp.Context) (*minio.Client, error) { + tr := http.RoundTripper(globalRemoteFTPClientTransport) + if host, _, err := net.SplitHostPort(ctx.Sess.RemoteAddr().String()); err == nil { + tr = forwardForTransport{tr: tr, fwd: host} + } ui, ok := globalIAMSys.GetUser(context.Background(), ctx.Sess.LoginUser()) if !ok && !globalIAMSys.LDAPConfig.Enabled() { return nil, errNoSuchUser @@ -284,11 +304,11 @@ func (driver *ftpDriver) getMinIOClient(ctx *ftp.Context) (*minio.Client, error) var mcreds *credentials.Credentials if errors.Is(err, errNoSuchServiceAccount) { - targetUser, targetGroups, err := globalIAMSys.LDAPConfig.LookupUserDN(ctx.Sess.LoginUser()) + lookupResult, targetGroups, err := globalIAMSys.LDAPConfig.LookupUserDN(ctx.Sess.LoginUser()) if err != nil { return nil, err } - ldapPolicies, _ := globalIAMSys.PolicyDBGet(targetUser, targetGroups...) + ldapPolicies, _ := globalIAMSys.PolicyDBGet(lookupResult.NormDN, targetGroups...) 
if len(ldapPolicies) == 0 { return nil, errAuthentication } @@ -296,11 +316,18 @@ func (driver *ftpDriver) getMinIOClient(ctx *ftp.Context) (*minio.Client, error) if err != nil { return nil, err } - claims := make(map[string]interface{}) + claims := make(map[string]any) claims[expClaim] = UTCNow().Add(expiryDur).Unix() - claims[ldapUser] = targetUser + + claims[ldapUser] = lookupResult.NormDN + claims[ldapActualUser] = lookupResult.ActualDN claims[ldapUserN] = ctx.Sess.LoginUser() + // Add LDAP attributes that were looked up into the claims. + for attribKey, attribValue := range lookupResult.Attributes { + claims[ldapAttribPrefix+attribKey] = attribValue + } + cred, err := auth.GetNewCredentialsWithMetadata(claims, globalActiveCred.SecretKey) if err != nil { return nil, err @@ -308,7 +335,7 @@ func (driver *ftpDriver) getMinIOClient(ctx *ftp.Context) (*minio.Client, error) // Set the parent of the temporary access key, this is useful // in obtaining service accounts by this cred. - cred.ParentUser = targetUser + cred.ParentUser = lookupResult.NormDN // Set this value to LDAP groups, LDAP user can be part // of large number of groups @@ -323,7 +350,7 @@ func (driver *ftpDriver) getMinIOClient(ctx *ftp.Context) (*minio.Client, error) } // Call hook for site replication. - logger.LogIf(context.Background(), globalSiteReplicationSys.IAMChangeHook(context.Background(), madmin.SRIAMItem{ + replLogIf(context.Background(), globalSiteReplicationSys.IAMChangeHook(context.Background(), madmin.SRIAMItem{ Type: madmin.SRIAMItemSTSAcc, STSCredential: &madmin.SRSTSCredential{ AccessKey: cred.AccessKey, @@ -340,9 +367,10 @@ func (driver *ftpDriver) getMinIOClient(ctx *ftp.Context) (*minio.Client, error) } return minio.New(driver.endpoint, &minio.Options{ - Creds: mcreds, - Secure: globalIsTLS, - Transport: globalRemoteFTPClientTransport, + Creds: mcreds, + Secure: globalIsTLS, + Transport: tr, + TrailingHeaders: true, }) } @@ -354,18 +382,19 @@ func (driver *ftpDriver) getMinIOClient(ctx *ftp.Context) (*minio.Client, error) } return minio.New(driver.endpoint, &minio.Options{ - Creds: credentials.NewStaticV4(ui.Credentials.AccessKey, ui.Credentials.SecretKey, ""), - Secure: globalIsTLS, - Transport: globalRemoteFTPClientTransport, + Creds: credentials.NewStaticV4(ui.Credentials.AccessKey, ui.Credentials.SecretKey, ""), + Secure: globalIsTLS, + Transport: tr, + TrailingHeaders: true, }) } // DeleteDir implements ftpDriver -func (driver *ftpDriver) DeleteDir(ctx *ftp.Context, path string) (err error) { - stopFn := globalFtpMetrics.log(ctx, path) - defer stopFn(err) +func (driver *ftpDriver) DeleteDir(ctx *ftp.Context, objPath string) (err error) { + stopFn := globalFtpMetrics.log(ctx, objPath) + defer stopFn(0, err) - bucket, prefix := path2BucketObject(path) + bucket, prefix := path2BucketObject(objPath) if bucket == "" { return errors.New("deleting all buckets not allowed") } @@ -411,11 +440,11 @@ func (driver *ftpDriver) DeleteDir(ctx *ftp.Context, path string) (err error) { } // DeleteFile implements ftpDriver -func (driver *ftpDriver) DeleteFile(ctx *ftp.Context, path string) (err error) { - stopFn := globalFtpMetrics.log(ctx, path) - defer stopFn(err) +func (driver *ftpDriver) DeleteFile(ctx *ftp.Context, objPath string) (err error) { + stopFn := globalFtpMetrics.log(ctx, objPath) + defer stopFn(0, err) - bucket, object := path2BucketObject(path) + bucket, object := path2BucketObject(objPath) if bucket == "" { return errors.New("bucket name cannot be empty") } @@ -429,19 +458,19 @@ func (driver 
*ftpDriver) DeleteFile(ctx *ftp.Context, path string) (err error) { } // Rename implements ftpDriver -func (driver *ftpDriver) Rename(ctx *ftp.Context, fromPath string, toPath string) (err error) { - stopFn := globalFtpMetrics.log(ctx, fromPath, toPath) - defer stopFn(err) +func (driver *ftpDriver) Rename(ctx *ftp.Context, fromObjPath string, toObjPath string) (err error) { + stopFn := globalFtpMetrics.log(ctx, fromObjPath, toObjPath) + defer stopFn(0, err) return NotImplemented{} } // MakeDir implements ftpDriver -func (driver *ftpDriver) MakeDir(ctx *ftp.Context, path string) (err error) { - stopFn := globalFtpMetrics.log(ctx, path) - defer stopFn(err) +func (driver *ftpDriver) MakeDir(ctx *ftp.Context, objPath string) (err error) { + stopFn := globalFtpMetrics.log(ctx, objPath) + defer stopFn(0, err) - bucket, prefix := path2BucketObject(path) + bucket, prefix := path2BucketObject(objPath) if bucket == "" { return errors.New("bucket name cannot be empty") } @@ -452,26 +481,23 @@ func (driver *ftpDriver) MakeDir(ctx *ftp.Context, path string) (err error) { } if prefix == "" { - return clnt.MakeBucket(context.Background(), bucket, minio.MakeBucketOptions{Region: globalSite.Region}) + return clnt.MakeBucket(context.Background(), bucket, minio.MakeBucketOptions{Region: globalSite.Region()}) } dirPath := buildMinioDir(prefix) - _, err = clnt.PutObject(context.Background(), bucket, dirPath, bytes.NewReader([]byte("")), 0, - // Always send Content-MD5 to succeed with bucket with - // locking enabled. There is no performance hit since - // this is always an empty object - minio.PutObjectOptions{SendContentMd5: true}, - ) + _, err = clnt.PutObject(context.Background(), bucket, dirPath, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{ + DisableContentSha256: true, + }) return err } // GetFile implements ftpDriver -func (driver *ftpDriver) GetFile(ctx *ftp.Context, path string, offset int64) (n int64, rc io.ReadCloser, err error) { - stopFn := globalFtpMetrics.log(ctx, path) - defer stopFn(err) +func (driver *ftpDriver) GetFile(ctx *ftp.Context, objPath string, offset int64) (n int64, rc io.ReadCloser, err error) { + stopFn := globalFtpMetrics.log(ctx, objPath) + defer stopFn(n, err) - bucket, object := path2BucketObject(path) + bucket, object := path2BucketObject(objPath) if bucket == "" { return 0, nil, errors.New("bucket name cannot be empty") } @@ -501,16 +527,16 @@ func (driver *ftpDriver) GetFile(ctx *ftp.Context, path string, offset int64) (n if err != nil { return 0, nil, err } - - return info.Size - offset, obj, nil + n = info.Size - offset + return n, obj, nil } // PutFile implements ftpDriver -func (driver *ftpDriver) PutFile(ctx *ftp.Context, path string, data io.Reader, offset int64) (n int64, err error) { - stopFn := globalFtpMetrics.log(ctx, path) - defer stopFn(err) +func (driver *ftpDriver) PutFile(ctx *ftp.Context, objPath string, data io.Reader, offset int64) (n int64, err error) { + stopFn := globalFtpMetrics.log(ctx, objPath) + defer stopFn(n, err) - bucket, object := path2BucketObject(path) + bucket, object := path2BucketObject(objPath) if bucket == "" { return 0, errors.New("bucket name cannot be empty") } @@ -526,8 +552,10 @@ func (driver *ftpDriver) PutFile(ctx *ftp.Context, path string, data io.Reader, } info, err := clnt.PutObject(context.Background(), bucket, object, data, -1, minio.PutObjectOptions{ - ContentType: "application/octet-stream", - SendContentMd5: true, + ContentType: mimedb.TypeByExtension(path.Ext(object)), + DisableContentSha256: true, + Checksum: 
minio.ChecksumFullObjectCRC32C, }) - return info.Size, err + n = info.Size + return n, err } diff --git a/cmd/ftp-server.go b/cmd/ftp-server.go index 179ce64c4c560..562336e33ada6 100644 --- a/cmd/ftp-server.go +++ b/cmd/ftp-server.go @@ -33,14 +33,14 @@ var globalRemoteFTPClientTransport = NewRemoteTargetHTTPTransport(true)() type minioLogger struct{} // Print implement Logger -func (log *minioLogger) Print(sessionID string, message interface{}) { +func (log *minioLogger) Print(sessionID string, message any) { if serverDebugLog { fmt.Printf("%s %s\n", sessionID, message) } } // Printf implement Logger -func (log *minioLogger) Printf(sessionID string, format string, v ...interface{}) { +func (log *minioLogger) Printf(sessionID string, format string, v ...any) { if serverDebugLog { if sessionID != "" { fmt.Printf("%s %s\n", sessionID, fmt.Sprintf(format, v...)) @@ -75,6 +75,7 @@ func startFTPServer(args []string) { portRange string tlsPrivateKey string tlsPublicCert string + forceTLS bool ) var err error @@ -103,6 +104,11 @@ func startFTPServer(args []string) { tlsPrivateKey = tokens[1] case "tls-public-cert": tlsPublicCert = tokens[1] + case "force-tls": + forceTLS, err = strconv.ParseBool(tokens[1]) + if err != nil { + logger.Fatal(fmt.Errorf("invalid arguments passed to --ftp=%s (%v)", arg, err), "unable to start FTP server") + } } } @@ -129,6 +135,10 @@ func startFTPServer(args []string) { tls := tlsPrivateKey != "" && tlsPublicCert != "" + if forceTLS && !tls { + logger.Fatal(fmt.Errorf("invalid TLS arguments provided. force-tls, but missing private key --ftp=\"tls-private-key=path/to/private.key\""), "unable to start FTP server") + } + name := "MinIO FTP Server" if tls { name = "MinIO FTP(Secure) Server" @@ -147,6 +157,7 @@ func startFTPServer(args []string) { Logger: &minioLogger{}, PassivePorts: portRange, PublicIP: publicIP, + ForceTLS: forceTLS, }) if err != nil { logger.Fatal(err, "unable to initialize FTP server") diff --git a/cmd/generic-handlers.go b/cmd/generic-handlers.go index fad099a0b68e5..88a111668df83 100644 --- a/cmd/generic-handlers.go +++ b/cmd/generic-handlers.go @@ -22,17 +22,18 @@ import ( "net" "net/http" "path" - "path/filepath" "runtime/debug" + "slices" "strings" "sync/atomic" "time" + "unicode" "github.com/dustin/go-humanize" "github.com/minio/minio-go/v7/pkg/s3utils" "github.com/minio/minio-go/v7/pkg/set" "github.com/minio/minio/internal/grid" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" "github.com/minio/minio/internal/amztime" "github.com/minio/minio/internal/config/dns" @@ -73,6 +74,9 @@ const ( // and must not set by clients func containsReservedMetadata(header http.Header) bool { for key := range header { + if _, ok := validSSEReplicationHeaders[key]; ok { + return false + } if stringsHasPrefixFold(key, ReservedMetadataPrefix) { return true } @@ -233,7 +237,8 @@ func guessIsMetricsReq(req *http.Request) bool { req.URL.Path == minioReservedBucketPath+prometheusMetricsV2ClusterPath || req.URL.Path == minioReservedBucketPath+prometheusMetricsV2NodePath || req.URL.Path == minioReservedBucketPath+prometheusMetricsV2BucketPath || - req.URL.Path == minioReservedBucketPath+prometheusMetricsV2ResourcePath + req.URL.Path == minioReservedBucketPath+prometheusMetricsV2ResourcePath || + strings.HasPrefix(req.URL.Path, minioReservedBucketPath+metricsV3Path) } // guessIsRPCReq - returns true if the request is for an RPC endpoint. 
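containsReservedMetadata above now checks the replication SSE headers before applying the reserved-prefix rejection, so a replicating peer can ship sealed-key material that ordinary clients still cannot set. A rough, self-contained sketch of that policy, assuming the allow-list corresponds to the validSSEReplicationHeaders map added later in this patch (note the patched function returns false for the whole header set as soon as one allowed key is seen; this sketch simply skips it):

package sketch

import (
	"net/http"
	"strings"
)

// Allowed internal SSE headers during replication (subset, for illustration).
var allowedReplicationSSE = map[string]struct{}{
	"X-Minio-Internal-Server-Side-Encryption-Sealed-Key": {},
	"X-Minio-Internal-Server-Side-Encryption-Iv":         {},
}

// containsReserved rejects client-supplied metadata under the reserved
// internal prefix unless the key is on the replication allow-list.
func containsReserved(h http.Header) bool {
	for key := range h {
		if _, ok := allowedReplicationSSE[key]; ok {
			continue // explicitly allowed replication header
		}
		if strings.HasPrefix(strings.ToLower(key), "x-minio-internal-") {
			return true // reserved metadata must not come from clients
		}
	}
	return false
}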
@@ -241,11 +246,14 @@ func guessIsRPCReq(req *http.Request) bool { if req == nil { return false } - if req.Method == http.MethodGet && req.URL != nil && req.URL.Path == grid.RoutePath { - return true + if req.Method == http.MethodGet && req.URL != nil { + switch req.URL.Path { + case grid.RoutePath, grid.RouteLockPath: + return true + } } - return req.Method == http.MethodPost && + return (req.Method == http.MethodPost || req.Method == http.MethodGet) && strings.HasPrefix(req.URL.Path, minioReservedBucketPath+SlashSeparator) } @@ -286,12 +294,6 @@ func parseAmzDateHeader(req *http.Request) (time.Time, APIErrorCode) { return time.Time{}, ErrMissingDateHeader } -// Bad path components to be rejected by the path validity handler. -const ( - dotdotComponent = ".." - dotComponent = "." -) - func hasBadHost(host string) error { if globalIsCICD && strings.TrimSpace(host) == "" { // under CI/CD test setups ignore empty hosts as invalid hosts @@ -304,14 +306,41 @@ func hasBadHost(host string) error { // Check if the incoming path has bad path components, // such as ".." and "." func hasBadPathComponent(path string) bool { - path = filepath.ToSlash(strings.TrimSpace(path)) // For windows '\' must be converted to '/' - for _, p := range strings.Split(path, SlashSeparator) { - switch strings.TrimSpace(p) { - case dotdotComponent: + n := len(path) + if n > 32<<10 { + // At 32K we are beyond reasonable. + return true + } + i := 0 + // Skip leading slashes (for sake of Windows \ is included as well) + for i < n && (path[i] == SlashSeparatorChar || path[i] == '\\') { + i++ + } + + for i < n { + // Find the next segment + start := i + for i < n && path[i] != SlashSeparatorChar && path[i] != '\\' { + i++ + } + + // Trim whitespace of segment + segmentStart, segmentEnd := start, i + for segmentStart < segmentEnd && unicode.IsSpace(rune(path[segmentStart])) { + segmentStart++ + } + for segmentEnd > segmentStart && unicode.IsSpace(rune(path[segmentEnd-1])) { + segmentEnd-- + } + + // Check for ".." or "." + switch { + case segmentEnd-segmentStart == 2 && path[segmentStart] == '.' 
&& path[segmentStart+1] == '.': return true - case dotComponent: + case segmentEnd-segmentStart == 1 && path[segmentStart] == '.': return true } + i++ } return false } @@ -368,18 +397,16 @@ func setRequestValidityMiddleware(h http.Handler) http.Handler { if k == "delimiter" { // delimiters are allowed to have `.` or `..` continue } - for _, v := range vv { - if hasBadPathComponent(v) { - if ok { - tc.FuncName = "handler.ValidRequest" - tc.ResponseRecorder.LogErrBody = true - } - - defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r)) - writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL) - atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1) - return + if slices.ContainsFunc(vv, hasBadPathComponent) { + if ok { + tc.FuncName = "handler.ValidRequest" + tc.ResponseRecorder.LogErrBody = true } + + defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r)) + writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL) + atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1) + return } } if hasMultipleAuth(r) { @@ -456,7 +483,7 @@ func setBucketForwardingMiddleware(h http.Handler) http.Handler { } if globalDNSConfig == nil || !globalBucketFederation || guessIsHealthCheckReq(r) || guessIsMetricsReq(r) || - guessIsRPCReq(r) || guessIsLoginSTSReq(r) || isAdminReq(r) { + guessIsRPCReq(r) || guessIsLoginSTSReq(r) || isAdminReq(r) || isKMSReq(r) { h.ServeHTTP(w, r) return } @@ -588,13 +615,6 @@ func setUploadForwardingMiddleware(h http.Handler) http.Handler { h.ServeHTTP(w, r) return } - // forward request to peer handling this upload - if globalBucketTargetSys.isOffline(remote.EndpointURL) { - defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r)) - writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrReplicationRemoteConnectionError), r.URL) - return - } - r.URL.Scheme = remote.EndpointURL.Scheme r.URL.Host = remote.EndpointURL.Host // Make sure we remove any existing headers before diff --git a/cmd/generic-handlers_test.go b/cmd/generic-handlers_test.go index b76ec29105819..c2d71ae9d5fe0 100644 --- a/cmd/generic-handlers_test.go +++ b/cmd/generic-handlers_test.go @@ -22,6 +22,7 @@ import ( "net/http/httptest" "net/url" "strconv" + "strings" "testing" "github.com/minio/minio/internal/crypto" @@ -51,9 +52,10 @@ func TestGuessIsRPC(t *testing.T) { r = &http.Request{ Proto: "HTTP/1.1", Method: http.MethodGet, + URL: u, } - if guessIsRPCReq(r) { - t.Fatal("Test shouldn't report as net/rpc for a non net/rpc request.") + if !guessIsRPCReq(r) { + t.Fatal("Test shouldn't fail for a possible net/rpc request.") } r = &http.Request{ Proto: "HTTP/1.1", @@ -63,6 +65,14 @@ func TestGuessIsRPC(t *testing.T) { if !guessIsRPCReq(r) { t.Fatal("Grid RPC path not detected") } + r = &http.Request{ + Proto: "HTTP/1.1", + Method: http.MethodGet, + URL: &url.URL{Path: grid.RouteLockPath}, + } + if !guessIsRPCReq(r) { + t.Fatal("Grid RPC path not detected") + } } var isHTTPHeaderSizeTooLargeTests = []struct { @@ -80,7 +90,7 @@ var isHTTPHeaderSizeTooLargeTests = []struct { func generateHeader(size, usersize int) http.Header { header := http.Header{} - for i := 0; i < size; i++ { + for i := range size { header.Set(strconv.Itoa(i), "") } userlength := 0 @@ -108,15 +118,15 @@ var containsReservedMetadataTests = []struct { }, { header: http.Header{crypto.MetaIV: []string{"iv"}}, - shouldFail: true, + shouldFail: false, }, { header: http.Header{crypto.MetaAlgorithm: []string{crypto.InsecureSealAlgorithm}}, - 
shouldFail: true, + shouldFail: false, }, { header: http.Header{crypto.MetaSealedKeySSEC: []string{"mac"}}, - shouldFail: true, + shouldFail: false, }, { header: http.Header{ReservedMetadataPrefix + "Key": []string{"value"}}, @@ -126,7 +136,6 @@ var containsReservedMetadataTests = []struct { func TestContainsReservedMetadata(t *testing.T) { for _, test := range containsReservedMetadataTests { - test := test t.Run("", func(t *testing.T) { contains := containsReservedMetadata(test.header) if contains && !test.shouldFail { @@ -175,3 +184,27 @@ func TestSSETLSHandler(t *testing.T) { } } } + +func Benchmark_hasBadPathComponent(t *testing.B) { + tests := []struct { + name string + input string + want bool + }{ + {name: "empty", input: "", want: false}, + {name: "backslashes", input: `\a\a\ \\ \\\\\\\`, want: false}, + {name: "long", input: strings.Repeat("a/", 2000), want: false}, + {name: "long-fail", input: strings.Repeat("a/", 2000) + "../..", want: true}, + } + for _, tt := range tests { + t.Run(tt.name, func(b *testing.B) { + b.SetBytes(int64(len(tt.input))) + b.ReportAllocs() + for b.Loop() { + if got := hasBadPathComponent(tt.input); got != tt.want { + t.Fatalf("hasBadPathComponent() = %v, want %v", got, tt.want) + } + } + }) + } +} diff --git a/cmd/global-heal.go b/cmd/global-heal.go index ebec7e2a5f5a5..57cce16ee7890 100644 --- a/cmd/global-heal.go +++ b/cmd/global-heal.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2022 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. // // This file is part of MinIO Object Storage stack // @@ -19,20 +19,25 @@ package cmd import ( "context" + "errors" "fmt" + "math/rand" "runtime" "sort" "time" "github.com/dustin/go-humanize" "github.com/minio/madmin-go/v3" + "github.com/minio/minio/internal/bucket/lifecycle" + objectlock "github.com/minio/minio/internal/bucket/object/lock" + "github.com/minio/minio/internal/bucket/replication" + "github.com/minio/minio/internal/bucket/versioning" "github.com/minio/minio/internal/color" "github.com/minio/minio/internal/config/storageclass" - xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/console" - "github.com/minio/pkg/v2/wildcard" - "github.com/minio/pkg/v2/workers" + "github.com/minio/pkg/v3/console" + "github.com/minio/pkg/v3/wildcard" + "github.com/minio/pkg/v3/workers" ) const ( @@ -65,7 +70,7 @@ func newBgHealSequence() *healSequence { reportProgress: false, scannedItemsMap: make(map[madmin.HealItemType]int64), healedItemsMap: make(map[madmin.HealItemType]int64), - healFailedItemsMap: make(map[string]int64), + healFailedItemsMap: make(map[madmin.HealItemType]int64), } } @@ -135,8 +140,21 @@ func getLocalBackgroundHealStatus(ctx context.Context, o ObjectLayer) (madmin.Bg return status, true } +type healEntryResult struct { + bytes uint64 + success bool + skipped bool + entryDone bool + name string +} + // healErasureSet lists and heals all objects in a specific erasure set func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, tracker *healingTracker) error { + bgSeq, found := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID) + if !found { + return errors.New("no local healing sequence initialized, unable to heal the drive") + } + scanMode := madmin.HealNormalScan // Make sure to copy since `buckets slice` @@ -149,11 +167,23 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, return errServerNotInitialized } + started := tracker.Started + if started.IsZero() || 
started.Equal(timeSentinel) { + healingLogIf(ctx, fmt.Errorf("unexpected tracker healing start time found: %v", started)) + started = time.Time{} + } + + // Final tracer update before quitting + defer func() { + tracker.setObject("") + tracker.setBucket("") + healingLogIf(ctx, tracker.update(ctx)) + }() + for _, bucket := range healBuckets { - _, err := objAPI.HealBucket(ctx, bucket, madmin.HealOpts{ScanMode: scanMode}) - if err != nil { + if err := bgSeq.healBucket(objAPI, bucket, true); err != nil { // Log bucket healing error if any, we shall retry again. - logger.LogIf(ctx, err) + healingLogIf(ctx, err) } } @@ -177,95 +207,151 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, numHealers = uint64(v) } - logger.Event(ctx, fmt.Sprintf("Healing drive '%s' - use %d parallel workers.", tracker.disk.String(), numHealers)) + healingLogEvent(ctx, "Healing drive '%s' - use %d parallel workers.", tracker.disk.String(), numHealers) jt, _ := workers.New(int(numHealers)) + healEntryDone := func(name string) healEntryResult { + return healEntryResult{ + entryDone: true, + name: name, + } + } + + healEntrySuccess := func(sz uint64) healEntryResult { + return healEntryResult{ + bytes: sz, + success: true, + } + } + + healEntryFailure := func(sz uint64) healEntryResult { + return healEntryResult{ + bytes: sz, + } + } + + healEntrySkipped := func(sz uint64) healEntryResult { + return healEntryResult{ + bytes: sz, + skipped: true, + } + } + + // Collect updates to tracker from concurrent healEntry calls + results := make(chan healEntryResult, 1000) + quitting := make(chan struct{}) + defer func() { + close(results) + <-quitting + }() + + go func() { + for res := range results { + if res.entryDone { + tracker.setObject(res.name) + if time.Since(tracker.getLastUpdate()) > time.Minute { + healingLogIf(ctx, tracker.update(ctx)) + } + continue + } + + tracker.updateProgress(res.success, res.skipped, res.bytes) + } + + healingLogIf(ctx, tracker.update(ctx)) + close(quitting) + }() + var retErr error + // Heal all buckets with all objects for _, bucket := range healBuckets { if tracker.isHealed(bucket) { continue } + var forwardTo string // If we resume to the same bucket, forward to last known item. - if b := tracker.getBucket(); b != "" { - if b == bucket { - forwardTo = tracker.getObject() - } else { - // Reset to where last bucket ended if resuming. - tracker.resume() - } + b := tracker.getBucket() + if b == bucket { + forwardTo = tracker.getObject() + } + if b != "" { + // Reset to where last bucket ended if resuming. + tracker.resume() } tracker.setObject("") tracker.setBucket(bucket) // Heal current bucket again in case if it is failed // in the beginning of erasure set healing - if _, err := objAPI.HealBucket(ctx, bucket, madmin.HealOpts{ - ScanMode: scanMode, - }); err != nil { - logger.LogIf(ctx, err) + if err := bgSeq.healBucket(objAPI, bucket, true); err != nil { + // Set this such that when we return this function + // we let the caller retry this disk again for the + // buckets that failed healing. + retErr = err + healingLogIf(ctx, err) continue } - vc, _ := globalBucketVersioningSys.Get(bucket) - - // Check if the current bucket has a configured lifecycle policy - lc, _ := globalLifecycleSys.Get(bucket) + var ( + vc *versioning.Versioning + lc *lifecycle.Lifecycle + lr objectlock.Retention + rcfg *replication.Config + ) - // Check if bucket is object locked. 
- lr, _ := globalBucketObjectLockSys.Get(bucket) - rcfg, _ := getReplicationConfig(ctx, bucket) + if !isMinioMetaBucketName(bucket) { + vc, err = globalBucketVersioningSys.Get(bucket) + if err != nil { + retErr = err + healingLogIf(ctx, err) + continue + } + // Check if the current bucket has a configured lifecycle policy + lc, err = globalLifecycleSys.Get(bucket) + if err != nil && !errors.Is(err, BucketLifecycleNotFound{Bucket: bucket}) { + retErr = err + healingLogIf(ctx, err) + continue + } + // Check if bucket is object locked. + lr, err = globalBucketObjectLockSys.Get(bucket) + if err != nil { + retErr = err + healingLogIf(ctx, err) + continue + } + rcfg, err = getReplicationConfig(ctx, bucket) + if err != nil { + retErr = err + healingLogIf(ctx, err) + continue + } + } if serverDebugLog { console.Debugf(color.Green("healDrive:")+" healing bucket %s content on %s erasure set\n", bucket, humanize.Ordinal(er.setIndex+1)) } - disks, _ := er.getOnlineDisksWithHealing(false) - if len(disks) == 0 { - // No object healing necessary - tracker.bucketDone(bucket) - logger.LogIf(ctx, tracker.update(ctx)) - continue + disks, _, healing := er.getOnlineDisksWithHealingAndInfo(true) + if len(disks) == healing { + // All drives in this erasure set were reformatted for some reasons, abort healing and mark it as successful + healingLogIf(ctx, errors.New("all drives are in healing state, aborting..")) + return nil } - // Limit listing to 3 drives. - if len(disks) > 3 { - disks = disks[:3] - } + disks = disks[:len(disks)-healing] // healing drives are always at the end of the list - type healEntryResult struct { - bytes uint64 - success bool - skipped bool - entryDone bool - name string - } - healEntryDone := func(name string) healEntryResult { - return healEntryResult{ - entryDone: true, - name: name, - } - } - healEntrySuccess := func(sz uint64) healEntryResult { - return healEntryResult{ - bytes: sz, - success: true, - } - } - healEntryFailure := func(sz uint64) healEntryResult { - return healEntryResult{ - bytes: sz, - } - } - healEntrySkipped := func(sz uint64) healEntryResult { - return healEntryResult{ - bytes: sz, - skipped: true, - } + if len(disks) < er.setDriveCount/2 { + return fmt.Errorf("not enough drives (found=%d, healing=%d, total=%d) are available to heal `%s`", len(disks), healing, er.setDriveCount, tracker.disk.String()) } + rand.Shuffle(len(disks), func(i, j int) { + disks[i], disks[j] = disks[j], disks[i] + }) + filterLifecycle := func(bucket, object string, fi FileInfo) bool { if lc == nil { return false @@ -286,30 +372,15 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, } } - // Collect updates to tracker from concurrent healEntry calls - results := make(chan healEntryResult, 1000) - go func() { - for res := range results { - if res.entryDone { - tracker.setObject(res.name) - if time.Since(tracker.getLastUpdate()) > time.Minute { - logger.LogIf(ctx, tracker.update(ctx)) - } - continue - } - - tracker.updateProgress(res.success, res.skipped, res.bytes) - } - }() - send := func(result healEntryResult) bool { select { case <-ctx.Done(): if !contextCanceled(ctx) { - logger.LogIf(ctx, ctx.Err()) + healingLogIf(ctx, ctx.Err()) } return false case results <- result: + bgSeq.countScanned(madmin.HealItemObject) return true } } @@ -348,7 +419,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, var result healEntryResult fivs, err := entry.fileInfoVersions(bucket) if err != nil { - _, err := er.HealObject(ctx, bucket, 
encodedEntryName, "", + res, err := er.HealObject(ctx, bucket, encodedEntryName, "", madmin.HealOpts{ ScanMode: scanMode, Remove: healDeleteDangling, @@ -360,9 +431,11 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, return } result = healEntryFailure(0) - logger.LogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", bucket, entry.name, err)) + bgSeq.countFailed(madmin.HealItemObject) + healingLogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", bucket, entry.name, err)) } else { - result = healEntrySuccess(0) + bgSeq.countHealed(madmin.HealItemObject) + result = healEntrySuccess(uint64(res.ObjectSize)) } send(result) @@ -371,13 +444,10 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, var versionNotFound int for _, version := range fivs.Versions { - // Ignore a version with a modtime newer than healing start time. - if version.ModTime.After(tracker.Started) { - continue - } - - // Apply lifecycle rules on the objects that are expired. - if filterLifecycle(bucket, version.Name, version) { + // Ignore healing a version if: + // - It is uploaded after the drive healing is started + // - An object that is already expired by ILM rule. + if !started.IsZero() && version.ModTime.After(started) || filterLifecycle(bucket, version.Name, version) { versionNotFound++ if !send(healEntrySkipped(uint64(version.Size))) { return @@ -385,66 +455,73 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, continue } - if _, err := er.HealObject(ctx, bucket, encodedEntryName, + res, err := er.HealObject(ctx, bucket, encodedEntryName, version.VersionID, madmin.HealOpts{ ScanMode: scanMode, Remove: healDeleteDangling, - }); err != nil { + }) + if err != nil { if isErrObjectNotFound(err) || isErrVersionNotFound(err) { // queueing happens across namespace, ignore // objects that are not found. versionNotFound++ continue } - // If not deleted, assume they failed. + } else { + // Look for the healing results + if res.After.Drives[tracker.DiskIndex].State != madmin.DriveStateOk { + err = fmt.Errorf("unexpected after heal state: %s", res.After.Drives[tracker.DiskIndex].State) + } + } + + if err == nil { + bgSeq.countHealed(madmin.HealItemObject) + result = healEntrySuccess(uint64(version.Size)) + } else { + bgSeq.countFailed(madmin.HealItemObject) result = healEntryFailure(uint64(version.Size)) if version.VersionID != "" { - logger.LogIf(ctx, fmt.Errorf("unable to heal object %s/%s-v(%s): %w", bucket, version.Name, version.VersionID, err)) + healingLogIf(ctx, fmt.Errorf("unable to heal object %s/%s (version-id=%s): %w", + bucket, version.Name, version.VersionID, err)) } else { - logger.LogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", bucket, version.Name, err)) + healingLogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", + bucket, version.Name, err)) } - } else { - result = healEntrySuccess(uint64(version.Size)) } if !send(result) { return } } + // All versions resulted in 'ObjectNotFound/VersionNotFound' if versionNotFound == len(fivs.Versions) { return } - select { - case <-ctx.Done(): - return - case results <- healEntryDone(entry.name): - } + + send(healEntryDone(entry.name)) // Wait and proceed if there are active requests waitForLowHTTPReq() } - actualBucket, prefix := path2BucketObject(bucket) - // How to resolve partial results. 
resolver := metadataResolutionParams{ dirQuorum: 1, objQuorum: 1, - bucket: actualBucket, + bucket: bucket, } - err := listPathRaw(ctx, listPathRawOptions{ + err = listPathRaw(ctx, listPathRawOptions{ disks: disks, - bucket: actualBucket, - path: prefix, + bucket: bucket, recursive: true, forwardTo: forwardTo, minDisks: 1, reportNotFound: false, agreed: func(entry metaCacheEntry) { jt.Take() - go healEntry(actualBucket, entry) + go healEntry(bucket, entry) }, partial: func(entries metaCacheEntries, _ []error) { entry, ok := entries.resolve(&resolver) @@ -454,18 +531,25 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, entry, _ = entries.firstFound() } jt.Take() - go healEntry(actualBucket, *entry) + go healEntry(bucket, *entry) + }, + finished: func(errs []error) { + success := countErrs(errs, nil) + if success < len(disks)/2+1 { + retErr = fmt.Errorf("one or more errors reported during listing: %v", errors.Join(errs...)) + } }, - finished: nil, }) jt.Wait() // synchronize all the concurrent heal jobs - xioutil.SafeClose(results) if err != nil { // Set this such that when we return this function // we let the caller retry this disk again for the // buckets it failed to list. retErr = err - logger.LogIf(ctx, err) + } + + if retErr != nil { + healingLogIf(ctx, fmt.Errorf("listing failed with: %v on bucket: %v", retErr, bucket)) continue } @@ -475,14 +559,19 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, return ctx.Err() default: tracker.bucketDone(bucket) - logger.LogIf(ctx, tracker.update(ctx)) + healingLogIf(ctx, tracker.update(ctx)) } } + if retErr != nil { + return retErr + } - tracker.setObject("") - tracker.setBucket("") + // Last sanity check + if len(tracker.QueuedBuckets) > 0 { + return fmt.Errorf("not all buckets were healed: %v", tracker.QueuedBuckets) + } - return retErr + return nil } func healBucket(bucket string, scan madmin.HealScanMode) error { @@ -499,16 +588,7 @@ func healObject(bucket, object, versionID string, scan madmin.HealScanMode) erro // Get background heal sequence to send elements to heal bgSeq, ok := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID) if ok { - return bgSeq.queueHealTask(healSource{ - bucket: bucket, - object: object, - versionID: versionID, - noWait: true, // do not block callers. - opts: &madmin.HealOpts{ - Remove: healDeleteDangling, // if found dangling purge it. - ScanMode: scan, - }, - }, madmin.HealItemObject) + return bgSeq.healObject(bucket, object, versionID, scan) } return nil } diff --git a/cmd/globals.go b/cmd/globals.go index 1f2f68faadf7a..734e6abe22027 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -41,7 +41,6 @@ import ( "github.com/dustin/go-humanize" "github.com/minio/minio/internal/auth" - "github.com/minio/minio/internal/config/cache" "github.com/minio/minio/internal/config/callhome" "github.com/minio/minio/internal/config/compress" "github.com/minio/minio/internal/config/dns" @@ -56,9 +55,9 @@ import ( levent "github.com/minio/minio/internal/config/lambda/event" "github.com/minio/minio/internal/event" "github.com/minio/minio/internal/pubsub" - "github.com/minio/pkg/v2/certs" - "github.com/minio/pkg/v2/env" - xnet "github.com/minio/pkg/v2/net" + "github.com/minio/pkg/v3/certs" + "github.com/minio/pkg/v3/env" + xnet "github.com/minio/pkg/v3/net" ) // minio configuration related constants. 
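healErasureSet above funnels every per-object outcome through a buffered results channel into a single goroutine that owns all tracker updates, and on exit closes the channel and waits on quitting so no queued update is dropped. A stripped-down sketch of that pattern with generic names (not the MinIO types):

package sketch

import "fmt"

type healResult struct {
	bytes   uint64
	success bool
}

// runHealSketch shows the single-consumer progress pattern used by the healing loop.
func runHealSketch() {
	var healedBytes uint64

	results := make(chan healResult, 1000) // buffered so workers rarely block
	quitting := make(chan struct{})

	// One consumer owns the counters, so the senders never need a lock.
	go func() {
		for res := range results {
			if res.success {
				healedBytes += res.bytes
			}
		}
		close(quitting) // every queued result has been applied
	}()

	// Workers only send; a plain loop stands in for the heal goroutines here.
	for i := 0; i < 10; i++ {
		results <- healResult{bytes: 1 << 20, success: i%2 == 0}
	}

	// Same shutdown order as the patch: close results, then wait for the
	// consumer before reading the final state (the channel operations give
	// the necessary happens-before edge).
	close(results)
	<-quitting
	fmt.Println("healed bytes:", healedBytes)
}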
@@ -160,17 +159,15 @@ type serverCtxt struct { FTP []string SFTP []string - UserTimeout time.Duration - ConnReadDeadline time.Duration - ConnWriteDeadline time.Duration - ConnClientReadDeadline time.Duration - ConnClientWriteDeadline time.Duration + MemLimit uint64 - ShutdownTimeout time.Duration + UserTimeout time.Duration IdleTimeout time.Duration ReadHeaderTimeout time.Duration MaxIdleConnsPerHost int + SendBufSize, RecvBufSize int + CrossDomainXML string // The layout of disks as interpreted Layout disksLayout } @@ -207,9 +204,8 @@ var ( // This flag is set to 'true' when MINIO_UPDATE env is set to 'off'. Default is false. globalInplaceUpdateDisabled = false - globalSite = config.Site{ - Region: globalMinioDefaultRegion, - } + // Captures site name and region + globalSite config.Site // MinIO local server address (in `host:port` format) globalMinioAddr = "" @@ -240,7 +236,7 @@ var ( globalBucketMonitor *bandwidth.Monitor globalPolicySys *PolicySys globalIAMSys *IAMSys - globalBytePoolCap *bpool.BytePoolCap + globalBytePoolCap atomic.Pointer[bpool.BytePoolCap] globalLifecycleSys *LifecycleSys globalBucketSSEConfigSys *BucketSSEConfigSys @@ -296,9 +292,6 @@ var ( // The global drive config globalDriveConfig drive.Config - // The global cache config - globalCacheConfig cache.Config - // Global server's network statistics globalConnStats = newConnStats() @@ -313,6 +306,7 @@ var ( globalBootTime = UTCNow() globalActiveCred auth.Credentials + globalNodeAuthToken string globalSiteReplicatorCred siteReplicatorCred // Captures if root credentials are set via ENV. @@ -347,7 +341,7 @@ var ( globalDNSConfig dns.Store // GlobalKMS initialized KMS configuration - GlobalKMS kms.KMS + GlobalKMS *kms.KMS // Common lock for various subsystems performing the leader tasks globalLeaderLock *sharedLock @@ -386,9 +380,7 @@ var ( globalBackgroundHealRoutine = newHealRoutine() globalBackgroundHealState = newHealState(GlobalContext, false) - globalMRFState = mrfState{ - opCh: make(chan partialOperation, mrfOpsQueueSize), - } + globalMRFState = newMRFState() // If writes to FS backend should be O_SYNC. globalFSOSync bool @@ -397,12 +389,8 @@ var ( globalInternodeTransport http.RoundTripper - globalProxyTransport http.RoundTripper - globalRemoteTargetTransport http.RoundTripper - globalHealthChkTransport http.RoundTripper - globalDNSCache = &dnscache.Resolver{ Timeout: 5 * time.Second, } @@ -420,10 +408,10 @@ var ( globalServiceFreezeCnt int32 globalServiceFreezeMu sync.Mutex // Updates. - // List of local drives to this node, this is only set during server startup, - // and is only mutated by HealFormat. Hold globalLocalDrivesMu to access. - globalLocalDrives []StorageAPI - globalLocalDrivesMu sync.RWMutex + // Map of local drives to this node, this is set during server startup, + // disk reconnect and mutated by HealFormat. Hold globalLocalDrivesMu to access. 
+ globalLocalDrivesMap map[string]StorageAPI + globalLocalDrivesMu sync.RWMutex globalDriveMonitoring = env.Get("_MINIO_DRIVE_ACTIVE_MONITORING", config.EnableOn) == config.EnableOn @@ -449,14 +437,14 @@ var ( subnetAdminPublicKey = []byte("-----BEGIN PUBLIC KEY-----\nMIIBCgKCAQEAyC+ol5v0FP+QcsR6d1KypR/063FInmNEFsFzbEwlHQyEQN3O7kNI\nwVDN1vqp1wDmJYmv4VZGRGzfFw1q+QV7K1TnysrEjrqpVxfxzDQCoUadAp8IxLLc\ns2fjyDNxnZjoC6fTID9C0khKnEa5fPZZc3Ihci9SiCGkPmyUyCGVSxWXIKqL2Lrj\nyDc0pGeEhWeEPqw6q8X2jvTC246tlzqpDeNsPbcv2KblXRcKniQNbBrizT37CKHQ\nM6hc9kugrZbFuo8U5/4RQvZPJnx/DVjLDyoKo2uzuVQs4s+iBrA5sSSLp8rPED/3\n6DgWw3e244Dxtrg972dIT1IOqgn7KUJzVQIDAQAB\n-----END PUBLIC KEY-----") subnetAdminPublicKeyDev = []byte("-----BEGIN PUBLIC KEY-----\nMIIBCgKCAQEArhQYXQd6zI4uagtVfthAPOt6i4AYHnEWCoNeAovM4MNl42I9uQFh\n3VHkbWj9Gpx9ghf6PgRgK+8FcFvy+StmGcXpDCiFywXX24uNhcZjscX1C4Esk0BW\nidfI2eXYkOlymD4lcK70SVgJvC693Qa7Z3FE1KU8Nfv2bkxEE4bzOkojX9t6a3+J\nR8X6Z2U8EMlH1qxJPgiPogELhWP0qf2Lq7GwSAflo1Tj/ytxvD12WrnE0Rrj/8yP\nSnp7TbYm91KocKMExlmvx3l2XPLxeU8nf9U0U+KOmorejD3MDMEPF+tlk9LB3JWP\nZqYYe38rfALVTn4RVJriUcNOoEpEyC0WEwIDAQAB\n-----END PUBLIC KEY-----") - globalConnReadDeadline time.Duration - globalConnWriteDeadline time.Duration + // dynamic sleeper to avoid thundering herd for trash folder expunge routine + deleteCleanupSleeper = newDynamicSleeper(5, 25*time.Millisecond, false) - // Controller for deleted file sweeper. - deletedCleanupSleeper = newDynamicSleeper(5, 25*time.Millisecond, false) + // dynamic sleeper for multipart expiration routine + deleteMultipartCleanupSleeper = newDynamicSleeper(5, 25*time.Millisecond, false) - // Is _MINIO_DISABLE_API_FREEZE_ON_BOOT set? - globalDisableFreezeOnBoot bool + // Is MINIO_SYNC_BOOT set? + globalEnableSyncBoot bool // Contains NIC interface name used for internode communication globalInternodeInterface string @@ -470,6 +458,7 @@ var ( // Indicates if server was started as `--address ":0"` globalDynamicAPIPort bool + // Add new variable global values here. ) diff --git a/cmd/grid.go b/cmd/grid.go index e347e994dc7dd..0b442267cadc1 100644 --- a/cmd/grid.go +++ b/cmd/grid.go @@ -22,7 +22,7 @@ import ( "crypto/tls" "sync/atomic" - "github.com/minio/minio/internal/fips" + "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/grid" xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/rest" @@ -31,35 +31,77 @@ import ( // globalGrid is the global grid manager. var globalGrid atomic.Pointer[grid.Manager] +// globalLockGrid is the global lock grid manager. +var globalLockGrid atomic.Pointer[grid.Manager] + // globalGridStart is a channel that will block startup of grid connections until closed. var globalGridStart = make(chan struct{}) +// globalLockGridStart is a channel that will block startup of lock grid connections until closed. +var globalLockGridStart = make(chan struct{}) + func initGlobalGrid(ctx context.Context, eps EndpointServerPools) error { + hosts, local := eps.GridHosts() lookupHost := globalDNSCache.LookupHost - if IsKubernetes() || IsDocker() { - lookupHost = nil + g, err := grid.NewManager(ctx, grid.ManagerOptions{ + // Pass Dialer for websocket grid, make sure we do not + // provide any DriveOPTimeout() function, as that is not + // useful over persistent connections. 
+ Dialer: grid.ConnectWS( + grid.ContextDialer(xhttp.DialContextWithLookupHost(lookupHost, xhttp.NewInternodeDialContext(rest.DefaultTimeout, globalTCPOptions.ForWebsocket()))), + newCachedAuthToken(), + &tls.Config{ + RootCAs: globalRootCAs, + CipherSuites: crypto.TLSCiphers(), + CurvePreferences: crypto.TLSCurveIDs(), + }), + Local: local, + Hosts: hosts, + AuthToken: validateStorageRequestToken, + AuthFn: newCachedAuthToken(), + BlockConnect: globalGridStart, + // Record incoming and outgoing bytes. + Incoming: globalConnStats.incInternodeInputBytes, + Outgoing: globalConnStats.incInternodeOutputBytes, + TraceTo: globalTrace, + RoutePath: grid.RoutePath, + }) + if err != nil { + return err } + globalGrid.Store(g) + return nil +} + +func initGlobalLockGrid(ctx context.Context, eps EndpointServerPools) error { hosts, local := eps.GridHosts() + lookupHost := globalDNSCache.LookupHost g, err := grid.NewManager(ctx, grid.ManagerOptions{ - Dialer: grid.ContextDialer(xhttp.DialContextWithLookupHost(lookupHost, xhttp.NewInternodeDialContext(rest.DefaultTimeout, globalTCPOptions))), + // Pass Dialer for websocket grid, make sure we do not + // provide any DriveOPTimeout() function, as that is not + // useful over persistent connections. + Dialer: grid.ConnectWSWithRoutePath( + grid.ContextDialer(xhttp.DialContextWithLookupHost(lookupHost, xhttp.NewInternodeDialContext(rest.DefaultTimeout, globalTCPOptions.ForWebsocket()))), + newCachedAuthToken(), + &tls.Config{ + RootCAs: globalRootCAs, + CipherSuites: crypto.TLSCiphers(), + CurvePreferences: crypto.TLSCurveIDs(), + }, grid.RouteLockPath), Local: local, Hosts: hosts, - AddAuth: newCachedAuthToken(), - AuthRequest: storageServerRequestValidate, + AuthToken: validateStorageRequestToken, + AuthFn: newCachedAuthToken(), BlockConnect: globalGridStart, - TLSConfig: &tls.Config{ - RootCAs: globalRootCAs, - CipherSuites: fips.TLSCiphers(), - CurvePreferences: fips.TLSCurveIDs(), - }, // Record incoming and outgoing bytes. - Incoming: globalConnStats.incInternodeInputBytes, - Outgoing: globalConnStats.incInternodeOutputBytes, - TraceTo: globalTrace, + Incoming: globalConnStats.incInternodeInputBytes, + Outgoing: globalConnStats.incInternodeOutputBytes, + TraceTo: globalTrace, + RoutePath: grid.RouteLockPath, }) if err != nil { return err } - globalGrid.Store(g) + globalLockGrid.Store(g) return nil } diff --git a/cmd/handler-api.go b/cmd/handler-api.go index dab5f9da71906..09790a60aef6b 100644 --- a/cmd/handler-api.go +++ b/cmd/handler-api.go @@ -18,14 +18,17 @@ package cmd import ( + "math" "net/http" "os" "runtime" + "slices" "strconv" "strings" "sync" "time" + "github.com/dustin/go-humanize" "github.com/shirou/gopsutil/v3/mem" "github.com/minio/minio/internal/config/api" @@ -37,16 +40,14 @@ import ( type apiConfig struct { mu sync.RWMutex - requestsDeadline time.Duration - requestsPool chan struct{} - clusterDeadline time.Duration - listQuorum string - corsAllowOrigins []string - // total drives per erasure set across pools. 
- totalDriveCount int - replicationPriority string - replicationMaxWorkers int - transitionWorkers int + requestsPool chan struct{} + clusterDeadline time.Duration + listQuorum string + corsAllowOrigins []string + replicationPriority string + replicationMaxWorkers int + replicationMaxLWorkers int + transitionWorkers int staleUploadsExpiry time.Duration staleUploadsCleanupInterval time.Duration @@ -55,12 +56,12 @@ type apiConfig struct { gzipObjects bool rootAccess bool syncEvents bool + objectMaxVersions int64 } const ( cgroupV1MemLimitFile = "/sys/fs/cgroup/memory/memory.limit_in_bytes" cgroupV2MemLimitFile = "/sys/fs/cgroup/memory.max" - cgroupMemNoLimit = 9223372036854771712 ) func cgroupMemLimit() (limit uint64) { @@ -77,10 +78,8 @@ func cgroupMemLimit() (limit uint64) { // but still, no need to interpret more return 0 } - if limit == cgroupMemNoLimit { - // No limit set, It's the highest positive signed 64-bit - // integer (2^63-1), rounded down to multiples of 4096 (2^12), - // the most common page size on x86 systems - for cgroup_limits. + if limit >= 100*humanize.TiByte { + // No limit set, or unreasonably high. Ignore return 0 } return limit @@ -90,25 +89,26 @@ func availableMemory() (available uint64) { available = 2048 * blockSizeV2 * 2 // Default to 4 GiB when we can't find the limits. if runtime.GOOS == "linux" { - // Useful in container mode + // Honor cgroup limits if set. limit := cgroupMemLimit() if limit > 0 { - // A valid value is found, return its 75% - available = (limit * 3) / 4 - return + // A valid value is found, return its 90% + available = (limit * 9) / 10 + return available } } // for all other platforms limits are based on virtual memory. memStats, err := mem.VirtualMemory() if err != nil { - return + return available } - // A valid value is available return its 75% - available = (memStats.Available * 3) / 4 - return + + // A valid value is available return its 90% + available = (memStats.Available * 9) / 10 + return available } -func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) { +func (t *apiConfig) init(cfg api.Config, setDriveCounts []int, legacy bool) { t.mu.Lock() defer t.mu.Unlock() @@ -123,27 +123,24 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) { } t.corsAllowOrigins = corsAllowOrigin - maxSetDrives := 0 - for _, setDriveCount := range setDriveCounts { - t.totalDriveCount += setDriveCount - if setDriveCount > maxSetDrives { - maxSetDrives = setDriveCount - } - } - var apiRequestsMaxPerNode int if cfg.RequestsMax <= 0 { + maxSetDrives := slices.Max(setDriveCounts) + // Returns 75% of max memory allowed - maxMem := availableMemory() + maxMem := globalServerCtxt.MemLimit // max requests per node is calculated as // total_ram / ram_per_request - // ram_per_request is (2MiB+128KiB) * driveCount \ - // + 2 * 10MiB (default erasure block size v1) + 2 * 1MiB (default erasure block size v2) - blockSize := xioutil.BlockSizeLarge + xioutil.BlockSizeSmall - apiRequestsMaxPerNode = int(maxMem / uint64(maxSetDrives*blockSize+int(blockSizeV1*2+blockSizeV2*2))) - if globalIsDistErasure { - logger.Info("Automatically configured API requests per node based on available memory on the system: %d", apiRequestsMaxPerNode) + blockSize := xioutil.LargeBlock + xioutil.SmallBlock + if legacy { + // ram_per_request is (1MiB+32KiB) * driveCount \ + // + 2 * 10MiB (default erasure block size v1) + 2 * 1MiB (default erasure block size v2) + apiRequestsMaxPerNode = int(maxMem / uint64(maxSetDrives*blockSize+int(blockSizeV1*2+blockSizeV2*2))) + } else 
{ + // ram_per_request is (1MiB+32KiB) * driveCount \ + // + 2 * 1MiB (default erasure block size v2) + apiRequestsMaxPerNode = int(maxMem / uint64(maxSetDrives*blockSize+int(blockSizeV2*2))) } } else { apiRequestsMaxPerNode = cfg.RequestsMax @@ -152,6 +149,10 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) { } } + if globalIsDistErasure { + logger.Info("Configured max API requests per node based on available memory: %d", apiRequestsMaxPerNode) + } + if cap(t.requestsPool) != apiRequestsMaxPerNode { // Only replace if needed. // Existing requests will use the previous limit, @@ -160,18 +161,18 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) { // but this shouldn't last long. t.requestsPool = make(chan struct{}, apiRequestsMaxPerNode) } - t.requestsDeadline = cfg.RequestsDeadline listQuorum := cfg.ListQuorum if listQuorum == "" { listQuorum = "strict" } t.listQuorum = listQuorum - if globalReplicationPool != nil && - (cfg.ReplicationPriority != t.replicationPriority || cfg.ReplicationMaxWorkers != t.replicationMaxWorkers) { - globalReplicationPool.ResizeWorkerPriority(cfg.ReplicationPriority, cfg.ReplicationMaxWorkers) + if r := globalReplicationPool.GetNonBlocking(); r != nil && + (cfg.ReplicationPriority != t.replicationPriority || cfg.ReplicationMaxWorkers != t.replicationMaxWorkers || cfg.ReplicationMaxLWorkers != t.replicationMaxLWorkers) { + r.ResizeWorkerPriority(cfg.ReplicationPriority, cfg.ReplicationMaxWorkers, cfg.ReplicationMaxLWorkers) } t.replicationPriority = cfg.ReplicationPriority t.replicationMaxWorkers = cfg.ReplicationMaxWorkers + t.replicationMaxLWorkers = cfg.ReplicationMaxLWorkers // N B api.transition_workers will be deprecated if globalTransitionState != nil { @@ -180,12 +181,22 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) { t.transitionWorkers = cfg.TransitionWorkers t.staleUploadsExpiry = cfg.StaleUploadsExpiry - t.staleUploadsCleanupInterval = cfg.StaleUploadsCleanupInterval t.deleteCleanupInterval = cfg.DeleteCleanupInterval t.enableODirect = cfg.EnableODirect t.gzipObjects = cfg.GzipObjects t.rootAccess = cfg.RootAccess t.syncEvents = cfg.SyncEvents + t.objectMaxVersions = cfg.ObjectMaxVersions + + if t.staleUploadsCleanupInterval != cfg.StaleUploadsCleanupInterval { + t.staleUploadsCleanupInterval = cfg.StaleUploadsCleanupInterval + + // signal that cleanup interval has changed + select { + case staleUploadsCleanupIntervalChangedCh <- struct{}{}: + default: // in case the channel is blocked... 
+ } + } } func (t *apiConfig) odirectEnabled() bool { @@ -284,15 +295,15 @@ func (t *apiConfig) getRequestsPoolCapacity() int { return cap(t.requestsPool) } -func (t *apiConfig) getRequestsPool() (chan struct{}, time.Duration) { +func (t *apiConfig) getRequestsPool() chan struct{} { t.mu.RLock() defer t.mu.RUnlock() if t.requestsPool == nil { - return nil, time.Duration(0) + return nil } - return t.requestsPool, t.requestsDeadline + return t.requestsPool } // maxClients throttles the S3 API calls @@ -314,40 +325,47 @@ func maxClients(f http.HandlerFunc) http.HandlerFunc { } } - pool, deadline := globalAPIConfig.getRequestsPool() + globalHTTPStats.addRequestsInQueue(1) + pool := globalAPIConfig.getRequestsPool() if pool == nil { + globalHTTPStats.addRequestsInQueue(-1) f.ServeHTTP(w, r) return } - globalHTTPStats.addRequestsInQueue(1) - if tc, ok := r.Context().Value(mcontext.ContextTraceKey).(*mcontext.TraceCtxt); ok { tc.FuncName = "s3.MaxClients" } - deadlineTimer := time.NewTimer(deadline) - defer deadlineTimer.Stop() + w.Header().Set("X-RateLimit-Limit", strconv.Itoa(cap(pool))) + w.Header().Set("X-RateLimit-Remaining", strconv.Itoa(cap(pool)-len(pool))) + ctx := r.Context() select { case pool <- struct{}{}: defer func() { <-pool }() globalHTTPStats.addRequestsInQueue(-1) + if contextCanceled(ctx) { + w.WriteHeader(499) + return + } f.ServeHTTP(w, r) - case <-deadlineTimer.C: - // Send a http timeout message - writeErrorResponse(r.Context(), w, - errorCodes.ToAPIErr(ErrTooManyRequests), - r.URL) - globalHTTPStats.addRequestsInQueue(-1) - return case <-r.Context().Done(): + globalHTTPStats.addRequestsInQueue(-1) // When the client disconnects before getting the S3 handler // status code response, set the status code to 499 so this request // will be properly audited and traced. w.WriteHeader(499) + default: globalHTTPStats.addRequestsInQueue(-1) - return + if contextCanceled(ctx) { + w.WriteHeader(499) + return + } + // Send a http timeout message + writeErrorResponse(ctx, w, + errorCodes.ToAPIErr(ErrTooManyRequests), + r.URL) } } } @@ -358,14 +376,16 @@ func (t *apiConfig) getReplicationOpts() replicationPoolOpts { if t.replicationPriority == "" { return replicationPoolOpts{ - Priority: "auto", - MaxWorkers: WorkerMaxLimit, + Priority: "auto", + MaxWorkers: WorkerMaxLimit, + MaxLWorkers: LargeWorkerCount, } } return replicationPoolOpts{ - Priority: t.replicationPriority, - MaxWorkers: t.replicationMaxWorkers, + Priority: t.replicationPriority, + MaxWorkers: t.replicationMaxWorkers, + MaxLWorkers: t.replicationMaxLWorkers, } } @@ -386,3 +406,15 @@ func (t *apiConfig) isSyncEventsEnabled() bool { return t.syncEvents } + +func (t *apiConfig) getObjectMaxVersions() int64 { + t.mu.RLock() + defer t.mu.RUnlock() + + if t.objectMaxVersions <= 0 { + // defaults to 'IntMax' when unset. 
+ return math.MaxInt64 + } + + return t.objectMaxVersions +} diff --git a/cmd/handler-utils.go b/cmd/handler-utils.go index 0b44fb6110258..7752cfece2db3 100644 --- a/cmd/handler-utils.go +++ b/cmd/handler-utils.go @@ -25,6 +25,7 @@ import ( "net/textproto" "regexp" "strings" + "sync/atomic" "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/auth" @@ -32,7 +33,7 @@ import ( xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/mcontext" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" ) const ( @@ -49,13 +50,13 @@ func parseLocationConstraint(r *http.Request) (location string, s3Error APIError locationConstraint := createBucketLocationConfiguration{} err := xmlDecoder(r.Body, &locationConstraint, r.ContentLength) if err != nil && r.ContentLength != 0 { - logger.LogOnceIf(GlobalContext, err, "location-constraint-xml-parsing") + internalLogOnceIf(GlobalContext, err, "location-constraint-xml-parsing") // Treat all other failures as XML parsing errors. return "", ErrMalformedXML } // else for both err as nil or io.EOF location = locationConstraint.Location if location == "" { - location = globalSite.Region + location = globalSite.Region() } if !isValidLocation(location) { return location, ErrInvalidRegion @@ -67,7 +68,8 @@ func parseLocationConstraint(r *http.Request) (location string, s3Error APIError // Validates input location is same as configured region // of MinIO server. func isValidLocation(location string) bool { - return globalSite.Region == "" || globalSite.Region == location + region := globalSite.Region() + return region == "" || region == location } // Supported headers that needs to be extracted. @@ -82,6 +84,33 @@ var supportedHeaders = []string{ xhttp.AmzObjectTagging, "expires", xhttp.AmzBucketReplicationStatus, + "X-Minio-Replication-Server-Side-Encryption-Sealed-Key", + "X-Minio-Replication-Server-Side-Encryption-Seal-Algorithm", + "X-Minio-Replication-Server-Side-Encryption-Iv", + "X-Minio-Replication-Encrypted-Multipart", + "X-Minio-Replication-Actual-Object-Size", + ReplicationSsecChecksumHeader, + // Add more supported headers here. +} + +// mapping of internal headers to allowed replication headers +var validSSEReplicationHeaders = map[string]string{ + "X-Minio-Internal-Server-Side-Encryption-Sealed-Key": "X-Minio-Replication-Server-Side-Encryption-Sealed-Key", + "X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "X-Minio-Replication-Server-Side-Encryption-Seal-Algorithm", + "X-Minio-Internal-Server-Side-Encryption-Iv": "X-Minio-Replication-Server-Side-Encryption-Iv", + "X-Minio-Internal-Encrypted-Multipart": "X-Minio-Replication-Encrypted-Multipart", + "X-Minio-Internal-Actual-Object-Size": "X-Minio-Replication-Actual-Object-Size", + // Add more supported headers here. 
+} + +// mapping of replication headers to internal headers +var replicationToInternalHeaders = map[string]string{ + "X-Minio-Replication-Server-Side-Encryption-Sealed-Key": "X-Minio-Internal-Server-Side-Encryption-Sealed-Key", + "X-Minio-Replication-Server-Side-Encryption-Seal-Algorithm": "X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm", + "X-Minio-Replication-Server-Side-Encryption-Iv": "X-Minio-Internal-Server-Side-Encryption-Iv", + "X-Minio-Replication-Encrypted-Multipart": "X-Minio-Internal-Encrypted-Multipart", + "X-Minio-Replication-Actual-Object-Size": "X-Minio-Internal-Actual-Object-Size", + ReplicationSsecChecksumHeader: ReplicationSsecChecksumHeader, // Add more supported headers here. } @@ -164,7 +193,7 @@ func extractMetadata(ctx context.Context, mimesHeader ...textproto.MIMEHeader) ( // extractMetadata extracts metadata from map values. func extractMetadataFromMime(ctx context.Context, v textproto.MIMEHeader, m map[string]string) error { if v == nil { - logger.LogIf(ctx, errInvalidArgument) + bugLogIf(ctx, errInvalidArgument) return errInvalidArgument } @@ -178,7 +207,11 @@ func extractMetadataFromMime(ctx context.Context, v textproto.MIMEHeader, m map[ for _, supportedHeader := range supportedHeaders { value, ok := nv[http.CanonicalHeaderKey(supportedHeader)] if ok { - m[supportedHeader] = strings.Join(value, ",") + if v, ok := replicationToInternalHeaders[supportedHeader]; ok { + m[v] = strings.Join(value, ",") + } else { + m[supportedHeader] = strings.Join(value, ",") + } } } @@ -212,7 +245,7 @@ func extractReqParams(r *http.Request) map[string]string { return nil } - region := globalSite.Region + region := globalSite.Region() cred := getReqAccessCred(r, region) principalID := cred.AccessKey @@ -259,7 +292,7 @@ func trimAwsChunkedContentEncoding(contentEnc string) (trimmedContentEnc string) return contentEnc } var newEncs []string - for _, enc := range strings.Split(contentEnc, ",") { + for enc := range strings.SplitSeq(contentEnc, ",") { if enc != streamingContentEncoding { newEncs = append(newEncs, enc) } @@ -296,8 +329,8 @@ func collectAPIStats(api string, f http.HandlerFunc) http.HandlerFunc { bucket, _ := path2BucketObject(resource) - _, err = globalBucketMetadataSys.Get(bucket) // check if this bucket exists. - countBktStat := bucket != "" && bucket != minioReservedBucket && err == nil + meta, err := globalBucketMetadataSys.Get(bucket) // check if this bucket exists. + countBktStat := bucket != "" && bucket != minioReservedBucket && err == nil && !meta.Created.IsZero() if countBktStat { globalBucketHTTPStats.updateHTTPStats(bucket, api, nil) } @@ -395,9 +428,31 @@ func errorResponseHandler(w http.ResponseWriter, r *http.Request) { HTTPStatusCode: http.StatusUpgradeRequired, }, r.URL) default: + defer logger.AuditLog(r.Context(), w, r, mustGetClaimsFromToken(r)) + defer atomic.AddUint64(&globalHTTPStats.rejectedRequestsInvalid, 1) + + // When we are not running in S3 Express mode, generate appropriate error + // for x-amz-write-offset HEADER specified. 
+ if _, ok := r.Header[xhttp.AmzWriteOffsetBytes]; ok { + tc, ok := r.Context().Value(mcontext.ContextTraceKey).(*mcontext.TraceCtxt) + if ok { + tc.FuncName = "s3.AppendObject" + tc.ResponseRecorder.LogErrBody = true + } + + writeErrorResponse(r.Context(), w, getAPIError(ErrNotImplemented), r.URL) + return + } + + tc, ok := r.Context().Value(mcontext.ContextTraceKey).(*mcontext.TraceCtxt) + if ok { + tc.FuncName = "s3.ValidRequest" + tc.ResponseRecorder.LogErrBody = true + } + writeErrorResponse(r.Context(), w, APIError{ Code: "BadRequest", - Description: fmt.Sprintf("An error occurred when parsing the HTTP request %s at '%s'", + Description: fmt.Sprintf("An unsupported API call for method: %s at '%s'", r.Method, r.URL.Path), HTTPStatusCode: http.StatusBadRequest, }, r.URL) @@ -411,11 +466,11 @@ func getHostName(r *http.Request) (hostName string) { } else { hostName = r.Host } - return + return hostName } // Proxy any request to an endpoint. -func proxyRequest(ctx context.Context, w http.ResponseWriter, r *http.Request, ep ProxyEndpoint) (success bool) { +func proxyRequest(ctx context.Context, w http.ResponseWriter, r *http.Request, ep ProxyEndpoint, returnErr bool) (success bool) { success = true // Make sure we remove any existing headers before @@ -430,7 +485,10 @@ func proxyRequest(ctx context.Context, w http.ResponseWriter, r *http.Request, e ErrorHandler: func(w http.ResponseWriter, r *http.Request, err error) { success = false if err != nil && !errors.Is(err, context.Canceled) { - logger.LogIf(GlobalContext, err) + proxyLogIf(GlobalContext, err) + } + if returnErr { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) } }, }) @@ -442,5 +500,5 @@ func proxyRequest(ctx context.Context, w http.ResponseWriter, r *http.Request, e r.URL.Host = ep.Host f.ServeHTTP(w, r) - return + return success } diff --git a/cmd/handler-utils_test.go b/cmd/handler-utils_test.go index f3ad27121a804..517f93fcc9fbd 100644 --- a/cmd/handler-utils_test.go +++ b/cmd/handler-utils_test.go @@ -33,7 +33,7 @@ import ( // Tests validate bucket LocationConstraint. func TestIsValidLocationConstraint(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() obj, fsDir, err := prepareFS(ctx) @@ -163,7 +163,7 @@ func TestExtractMetadataHeaders(t *testing.T) { // Validate if the extracting headers. 
for i, testCase := range testCases { metadata := make(map[string]string) - err := extractMetadataFromMime(context.Background(), textproto.MIMEHeader(testCase.header), metadata) + err := extractMetadataFromMime(t.Context(), textproto.MIMEHeader(testCase.header), metadata) if err != nil && !testCase.shouldFail { t.Fatalf("Test %d failed to extract metadata: %v", i+1, err) } diff --git a/cmd/healthcheck-handler.go b/cmd/healthcheck-handler.go index 19741228b5855..12368d1daf3a1 100644 --- a/cmd/healthcheck-handler.go +++ b/cmd/healthcheck-handler.go @@ -24,18 +24,40 @@ import ( "time" xhttp "github.com/minio/minio/internal/http" + "github.com/minio/minio/internal/kms" ) const unavailable = "offline" +func checkHealth(w http.ResponseWriter) ObjectLayer { + objLayer := newObjectLayerFn() + if objLayer == nil { + w.Header().Set(xhttp.MinIOServerStatus, unavailable) + writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone) + return nil + } + + if !globalBucketMetadataSys.Initialized() { + w.Header().Set(xhttp.MinIOServerStatus, "bucket-metadata-offline") + writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone) + return nil + } + + if !globalIAMSys.Initialized() { + w.Header().Set(xhttp.MinIOServerStatus, "iam-offline") + writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone) + return nil + } + + return objLayer +} + // ClusterCheckHandler returns if the server is ready for requests. func ClusterCheckHandler(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, w, "ClusterCheckHandler") - objLayer := newObjectLayerFn() + objLayer := checkHealth(w) if objLayer == nil { - w.Header().Set(xhttp.MinIOServerStatus, unavailable) - writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone) return } @@ -71,10 +93,8 @@ func ClusterCheckHandler(w http.ResponseWriter, r *http.Request) { func ClusterReadCheckHandler(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, w, "ClusterReadCheckHandler") - objLayer := newObjectLayerFn() + objLayer := checkHealth(w) if objLayer == nil { - w.Header().Set(xhttp.MinIOServerStatus, unavailable) - writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone) return } @@ -134,7 +154,7 @@ func ReadinessCheckHandler(w http.ResponseWriter, r *http.Request) { ctx, cancel := context.WithTimeout(r.Context(), time.Minute) defer cancel() - if _, err := GlobalKMS.Stat(ctx); err != nil { + if _, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{AssociatedData: kms.Context{"healthcheck": ""}}); err != nil { switch r.Method { case http.MethodHead: apiErr := toAPIError(r.Context(), err) diff --git a/cmd/http-stats.go b/cmd/http-stats.go index 1393636f6401e..18a24e589e575 100644 --- a/cmd/http-stats.go +++ b/cmd/http-stats.go @@ -133,7 +133,7 @@ func (bh *bucketHTTPStats) updateHTTPStats(bucket, api string, w *xhttp.Response bucketHTTPRequestsDuration.With(prometheus.Labels{ "api": api, "bucket": bucket, - }).Observe(w.TimeToFirstByte.Seconds()) + }).Observe(w.TTFB().Seconds()) } bh.Lock() @@ -269,6 +269,28 @@ func (s *bucketConnStats) getS3InOutBytes() map[string]inOutBytes { return bucketStats } +// Return S3 total input/output bytes for each +func (s *bucketConnStats) getBucketS3InOutBytes(buckets []string) map[string]inOutBytes { + s.RLock() + defer s.RUnlock() + + if len(s.stats) == 0 || len(buckets) == 0 { + return nil + } + + bucketStats := make(map[string]inOutBytes, len(buckets)) + for _, bucket := range buckets { + if stats, ok := s.stats[bucket]; ok { + bucketStats[bucket] = inOutBytes{ + In: stats.s3InputBytes, + Out: 
stats.s3OutputBytes, + } + } + } + + return bucketStats +} + // delete metrics once bucket is deleted. func (s *bucketConnStats) delete(bucket string) { s.Lock() @@ -411,7 +433,7 @@ func (st *HTTPStats) updateStats(api string, w *xhttp.ResponseRecorder) { st.totalS3Requests.Inc(api) // Increment the prometheus http request response histogram with appropriate label - httpRequestsDuration.With(prometheus.Labels{"api": api}).Observe(w.TimeToFirstByte.Seconds()) + httpRequestsDuration.With(prometheus.Labels{"api": api}).Observe(w.TTFB().Seconds()) code := w.StatusCode diff --git a/cmd/http-tracer.go b/cmd/http-tracer.go index 439549edd9660..e7ad7462d4780 100644 --- a/cmd/http-tracer.go +++ b/cmd/http-tracer.go @@ -142,6 +142,7 @@ func httpTracerMiddleware(h http.Handler) http.Handler { Time: reqStartTime, Duration: reqEndTime.Sub(respRecorder.StartTime), Path: reqPath, + Bytes: int64(inputBytes + respRecorder.Size()), HTTP: &madmin.TraceHTTPStats{ ReqInfo: madmin.TraceRequestInfo{ Time: reqStartTime, @@ -163,7 +164,7 @@ func httpTracerMiddleware(h http.Handler) http.Handler { Latency: reqEndTime.Sub(respRecorder.StartTime), InputBytes: inputBytes, OutputBytes: respRecorder.Size(), - TimeToFirstByte: respRecorder.TimeToFirstByte, + TimeToFirstByte: respRecorder.TTFB(), }, }, } diff --git a/cmd/http-tracer_test.go b/cmd/http-tracer_test.go index 4979afea71598..a05e1de71a899 100644 --- a/cmd/http-tracer_test.go +++ b/cmd/http-tracer_test.go @@ -18,7 +18,11 @@ package cmd import ( + "sync" "testing" + "time" + + xhttp "github.com/minio/minio/internal/http" ) // Test redactLDAPPwd() @@ -52,3 +56,129 @@ func TestRedactLDAPPwd(t *testing.T) { } } } + +// TestHTTPStatsRaceCondition tests the race condition fix for HTTPStats. +// This test specifically addresses the race between: +// - Write operations via updateStats. +// - Read operations via toServerHTTPStats(false). +func TestHTTPStatsRaceCondition(t *testing.T) { + httpStats := newHTTPStats() + // Simulate the concurrent scenario from the original race condition: + // Multiple HTTP request handlers updating stats concurrently, + // while background processes are reading the stats for persistence. + const numWriters = 100 // Simulate many HTTP request handlers. + const numReaders = 50 // Simulate background stats readers. + const opsPerGoroutine = 100 + + var wg sync.WaitGroup + for i := range numWriters { + wg.Add(1) + go func(writerID int) { + defer wg.Done() + for j := 0; j < opsPerGoroutine; j++ { + switch j % 4 { + case 0: + httpStats.updateStats("GetObject", &xhttp.ResponseRecorder{}) + case 1: + httpStats.totalS3Requests.Inc("PutObject") + case 2: + httpStats.totalS3Errors.Inc("DeleteObject") + case 3: + httpStats.currentS3Requests.Inc("ListObjects") + } + } + }(i) + } + + for i := range numReaders { + wg.Add(1) + go func(readerID int) { + defer wg.Done() + for range opsPerGoroutine { + _ = httpStats.toServerHTTPStats(false) + _ = httpStats.totalS3Requests.Load(false) + _ = httpStats.currentS3Requests.Load(false) + time.Sleep(1 * time.Microsecond) + } + }(i) + } + wg.Wait() + + finalStats := httpStats.toServerHTTPStats(false) + totalRequests := 0 + for _, v := range finalStats.TotalS3Requests.APIStats { + totalRequests += v + } + if totalRequests == 0 { + t.Error("Expected some total requests to be recorded, but got zero") + } + t.Logf("Total requests recorded: %d", totalRequests) + t.Logf("Race condition test passed - no races detected") +} + +// TestHTTPAPIStatsRaceCondition tests concurrent access to HTTPAPIStats specifically.
+func TestHTTPAPIStatsRaceCondition(t *testing.T) { + stats := &HTTPAPIStats{} + const numGoroutines = 50 + const opsPerGoroutine = 1000 + + var wg sync.WaitGroup + for i := range numGoroutines { + wg.Add(1) + go func(id int) { + defer wg.Done() + for j := 0; j < opsPerGoroutine; j++ { + stats.Inc("TestAPI") + } + }(i) + } + + for i := range numGoroutines / 2 { + wg.Add(1) + go func(id int) { + defer wg.Done() + for range opsPerGoroutine / 2 { + _ = stats.Load(false) + } + }(i) + } + wg.Wait() + + finalStats := stats.Load(false) + expected := numGoroutines * opsPerGoroutine + actual := finalStats["TestAPI"] + if actual != expected { + t.Errorf("Race condition detected: expected %d, got %d (lost %d increments)", + expected, actual, expected-actual) + } +} + +// TestBucketHTTPStatsRaceCondition tests concurrent access to bucket-level HTTP stats. +func TestBucketHTTPStatsRaceCondition(t *testing.T) { + bucketStats := newBucketHTTPStats() + const numGoroutines = 50 + const opsPerGoroutine = 100 + + var wg sync.WaitGroup + for i := range numGoroutines { + wg.Add(1) + go func(id int) { + defer wg.Done() + bucketName := "test-bucket" + + for range opsPerGoroutine { + bucketStats.updateHTTPStats(bucketName, "GetObject", nil) + recorder := &xhttp.ResponseRecorder{} + bucketStats.updateHTTPStats(bucketName, "GetObject", recorder) + _ = bucketStats.load(bucketName) + } + }(i) + } + wg.Wait() + + stats := bucketStats.load("test-bucket") + if stats.totalS3Requests == nil { + t.Error("Expected bucket stats to be initialized") + } + t.Logf("Bucket HTTP stats race test passed") +} diff --git a/cmd/httprange.go b/cmd/httprange.go index db22299054242..80e64f769e98f 100644 --- a/cmd/httprange.go +++ b/cmd/httprange.go @@ -54,13 +54,14 @@ func (h *HTTPRangeSpec) GetLength(resourceSize int64) (rangeLength int64, err er case h.IsSuffixLength: specifiedLen := -h.Start - rangeLength = specifiedLen - if specifiedLen > resourceSize { - rangeLength = resourceSize - } + rangeLength = min(specifiedLen, resourceSize) case h.Start >= resourceSize: - return 0, errInvalidRange + return 0, InvalidRange{ + OffsetBegin: h.Start, + OffsetEnd: h.End, + ResourceSize: resourceSize, + } case h.End > -1: end := h.End @@ -94,10 +95,7 @@ func (h *HTTPRangeSpec) GetOffsetLength(resourceSize int64) (start, length int64 start = h.Start if h.IsSuffixLength { - start = resourceSize + h.Start - if start < 0 { - start = 0 - } + start = max(resourceSize+h.Start, 0) } return start, length, nil } @@ -193,7 +191,7 @@ func (h *HTTPRangeSpec) ToHeader() (string, error) { case h.Start > -1: end = "" default: - return "", fmt.Errorf("does not have valid range value") + return "", errors.New("does not have valid range value") } return fmt.Sprintf("bytes=%s-%s", start, end), nil } diff --git a/cmd/httprange_test.go b/cmd/httprange_test.go index 2ce9c4e9f2497..ea13a3800ccb4 100644 --- a/cmd/httprange_test.go +++ b/cmd/httprange_test.go @@ -72,7 +72,7 @@ func TestHTTPRequestRangeSpec(t *testing.T) { if err == nil { t.Errorf("Case %d: Did not get an expected error - got %v", i, rs) } - if err == errInvalidRange { + if isErrInvalidRange(err) { t.Errorf("Case %d: Got invalid range error instead of a parse error", i) } if rs != nil { @@ -95,7 +95,7 @@ func TestHTTPRequestRangeSpec(t *testing.T) { if err1 == nil { o, l, err2 = rs.GetOffsetLength(resourceSize) } - if err1 == errInvalidRange || (err1 == nil && err2 == errInvalidRange) { + if isErrInvalidRange(err1) || (err1 == nil && isErrInvalidRange(err2)) { continue } t.Errorf("Case %d: Expected 
errInvalidRange but: %v %v %d %d %v", i, rs, err1, o, l, err2) diff --git a/cmd/iam-etcd-store.go b/cmd/iam-etcd-store.go index 22e7f6ca6c6d1..850606fd514e3 100644 --- a/cmd/iam-etcd-store.go +++ b/cmd/iam-etcd-store.go @@ -30,7 +30,7 @@ import ( "github.com/minio/minio-go/v7/pkg/set" "github.com/minio/minio/internal/config" "github.com/minio/minio/internal/kms" - "github.com/minio/minio/internal/logger" + "github.com/puzpuzpuz/xsync/v3" "go.etcd.io/etcd/api/v3/mvccpb" etcd "go.etcd.io/etcd/client/v3" ) @@ -98,7 +98,7 @@ func (ies *IAMEtcdStore) getUsersSysType() UsersSysType { return ies.usersSysType } -func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item interface{}, itemPath string, opts ...options) error { +func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item any, itemPath string, opts ...options) error { data, err := json.Marshal(item) if err != nil { return err @@ -114,7 +114,7 @@ func (ies *IAMEtcdStore) saveIAMConfig(ctx context.Context, item interface{}, it return saveKeyEtcd(ctx, ies.client, itemPath, data, opts...) } -func getIAMConfig(item interface{}, data []byte, itemPath string) error { +func getIAMConfig(item any, data []byte, itemPath string) error { data, err := decryptData(data, itemPath) if err != nil { return err @@ -123,7 +123,7 @@ func getIAMConfig(item interface{}, data []byte, itemPath string) error { return json.Unmarshal(data, item) } -func (ies *IAMEtcdStore) loadIAMConfig(ctx context.Context, item interface{}, path string) error { +func (ies *IAMEtcdStore) loadIAMConfig(ctx context.Context, item any, path string) error { data, err := readKeyEtcd(ctx, ies.client, path) if err != nil { return err @@ -236,9 +236,8 @@ func (ies *IAMEtcdStore) addUser(ctx context.Context, user string, userType IAMU // for the expiring credentials. 
deleteKeyEtcd(ctx, ies.client, getUserIdentityPath(user, userType)) deleteKeyEtcd(ctx, ies.client, getMappedPolicyPath(user, userType, false)) - return nil } - return err + return nil } u.Credentials.Claims = jwtClaims.Map() } @@ -250,6 +249,18 @@ func (ies *IAMEtcdStore) addUser(ctx context.Context, user string, userType IAMU return nil } +func (ies *IAMEtcdStore) loadSecretKey(ctx context.Context, user string, userType IAMUserType) (string, error) { + var u UserIdentity + err := ies.loadIAMConfig(ctx, &u, getUserIdentityPath(user, userType)) + if err != nil { + if errors.Is(err, errConfigNotFound) { + return "", errNoSuchUser + } + return "", err + } + return u.Credentials.SecretKey, nil +} + func (ies *IAMEtcdStore) loadUser(ctx context.Context, user string, userType IAMUserType, m map[string]UserIdentity) error { var u UserIdentity err := ies.loadIAMConfig(ctx, &u, getUserIdentityPath(user, userType)) @@ -325,11 +336,11 @@ func (ies *IAMEtcdStore) loadGroups(ctx context.Context, m map[string]GroupInfo) return nil } -func (ies *IAMEtcdStore) loadMappedPolicyWithRetry(ctx context.Context, name string, userType IAMUserType, isGroup bool, m map[string]MappedPolicy, _ int) error { +func (ies *IAMEtcdStore) loadMappedPolicyWithRetry(ctx context.Context, name string, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy], retries int) error { return ies.loadMappedPolicy(ctx, name, userType, isGroup, m) } -func (ies *IAMEtcdStore) loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error { +func (ies *IAMEtcdStore) loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy]) error { var p MappedPolicy err := ies.loadIAMConfig(ctx, &p, getMappedPolicyPath(name, userType, isGroup)) if err != nil { @@ -338,11 +349,11 @@ func (ies *IAMEtcdStore) loadMappedPolicy(ctx context.Context, name string, user } return err } - m[name] = p + m.Store(name, p) return nil } -func getMappedPolicy(ctx context.Context, kv *mvccpb.KeyValue, userType IAMUserType, isGroup bool, m map[string]MappedPolicy, basePrefix string) error { +func getMappedPolicy(kv *mvccpb.KeyValue, m *xsync.MapOf[string, MappedPolicy], basePrefix string) error { var p MappedPolicy err := getIAMConfig(&p, kv.Value, string(kv.Key)) if err != nil { @@ -352,11 +363,11 @@ func getMappedPolicy(ctx context.Context, kv *mvccpb.KeyValue, userType IAMUserT return err } name := extractPathPrefixAndSuffix(string(kv.Key), basePrefix, ".json") - m[name] = p + m.Store(name, p) return nil } -func (ies *IAMEtcdStore) loadMappedPolicies(ctx context.Context, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error { +func (ies *IAMEtcdStore) loadMappedPolicies(ctx context.Context, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy]) error { cctx, cancel := context.WithTimeout(ctx, defaultContextTimeout) defer cancel() var basePrefix string @@ -381,7 +392,7 @@ func (ies *IAMEtcdStore) loadMappedPolicies(ctx context.Context, userType IAMUse // Parse all policies mapping to create the proper data model for _, kv := range r.Kvs { - if err = getMappedPolicy(ctx, kv, userType, isGroup, m, basePrefix); err != nil && !errors.Is(err, errNoSuchPolicy) { + if err = getMappedPolicy(kv, m, basePrefix); err != nil && !errors.Is(err, errNoSuchPolicy) { return err } } @@ -459,7 +470,7 @@ func (ies *IAMEtcdStore) watch(ctx context.Context, keyPath string) <-chan iamWa goto outerLoop } if err := 
watchResp.Err(); err != nil { - logger.LogIf(ctx, err) + iamLogIf(ctx, err) // log and retry. time.Sleep(1 * time.Second) // Upon an error on watch channel @@ -482,7 +493,6 @@ func (ies *IAMEtcdStore) watch(ctx context.Context, keyPath string) <-chan iamWa keyPath: string(event.Kv.Key), } } - } } } diff --git a/cmd/iam-object-store.go b/cmd/iam-object-store.go index 4c44a0115899c..89931148d5761 100644 --- a/cmd/iam-object-store.go +++ b/cmd/iam-object-store.go @@ -22,6 +22,7 @@ import ( "context" "errors" "fmt" + "maps" "path" "strings" "sync" @@ -30,11 +31,12 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/minio/madmin-go/v3" - "github.com/minio/minio-go/v7/pkg/set" "github.com/minio/minio/internal/config" xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/kms" "github.com/minio/minio/internal/logger" + "github.com/minio/pkg/v3/sync/errgroup" + "github.com/puzpuzpuz/xsync/v3" ) // IAMObjectStore implements IAMStorageAPI @@ -79,7 +81,7 @@ func (iamOS *IAMObjectStore) getUsersSysType() UsersSysType { return iamOS.usersSysType } -func (iamOS *IAMObjectStore) saveIAMConfig(ctx context.Context, item interface{}, objPath string, opts ...options) error { +func (iamOS *IAMObjectStore) saveIAMConfig(ctx context.Context, item any, objPath string, opts ...options) error { json := jsoniter.ConfigCompatibleWithStandardLibrary data, err := json.Marshal(item) if err != nil { @@ -134,7 +136,7 @@ func (iamOS *IAMObjectStore) loadIAMConfigBytesWithMetadata(ctx context.Context, return data, meta, nil } -func (iamOS *IAMObjectStore) loadIAMConfig(ctx context.Context, item interface{}, objPath string) error { +func (iamOS *IAMObjectStore) loadIAMConfig(ctx context.Context, item any, objPath string) error { data, _, err := iamOS.loadIAMConfigBytesWithMetadata(ctx, objPath) if err != nil { return err @@ -182,19 +184,20 @@ func (iamOS *IAMObjectStore) loadPolicyDocWithRetry(ctx context.Context, policy } } -func (iamOS *IAMObjectStore) loadPolicyDoc(ctx context.Context, policy string, m map[string]PolicyDoc) error { +func (iamOS *IAMObjectStore) loadPolicy(ctx context.Context, policy string) (PolicyDoc, error) { + var p PolicyDoc + data, objInfo, err := iamOS.loadIAMConfigBytesWithMetadata(ctx, getPolicyDocPath(policy)) if err != nil { if err == errConfigNotFound { - return errNoSuchPolicy + return p, errNoSuchPolicy } - return err + return p, err } - var p PolicyDoc err = p.parseJSON(data) if err != nil { - return err + return p, err } if p.Version == 0 { @@ -205,6 +208,14 @@ func (iamOS *IAMObjectStore) loadPolicyDoc(ctx context.Context, policy string, m p.UpdateDate = objInfo.ModTime } + return p, nil +} + +func (iamOS *IAMObjectStore) loadPolicyDoc(ctx context.Context, policy string, m map[string]PolicyDoc) error { + p, err := iamOS.loadPolicy(ctx, policy) + if err != nil { + return err + } m[policy] = p return nil } @@ -225,21 +236,33 @@ func (iamOS *IAMObjectStore) loadPolicyDocs(ctx context.Context, m map[string]Po return nil } -func (iamOS *IAMObjectStore) loadUser(ctx context.Context, user string, userType IAMUserType, m map[string]UserIdentity) error { +func (iamOS *IAMObjectStore) loadSecretKey(ctx context.Context, user string, userType IAMUserType) (string, error) { + var u UserIdentity + err := iamOS.loadIAMConfig(ctx, &u, getUserIdentityPath(user, userType)) + if err != nil { + if errors.Is(err, errConfigNotFound) { + return "", errNoSuchUser + } + return "", err + } + return u.Credentials.SecretKey, nil +} + +func (iamOS *IAMObjectStore) 
loadUserIdentity(ctx context.Context, user string, userType IAMUserType) (UserIdentity, error) { var u UserIdentity err := iamOS.loadIAMConfig(ctx, &u, getUserIdentityPath(user, userType)) if err != nil { if err == errConfigNotFound { - return errNoSuchUser + return u, errNoSuchUser } - return err + return u, err } if u.Credentials.IsExpired() { // Delete expired identity - ignoring errors here. iamOS.deleteIAMConfig(ctx, getUserIdentityPath(user, userType)) iamOS.deleteIAMConfig(ctx, getMappedPolicyPath(user, userType, false)) - return nil + return u, errNoSuchUser } if u.Credentials.AccessKey == "" { @@ -254,10 +277,8 @@ func (iamOS *IAMObjectStore) loadUser(ctx context.Context, user string, userType // for the expiring credentials. iamOS.deleteIAMConfig(ctx, getUserIdentityPath(user, userType)) iamOS.deleteIAMConfig(ctx, getMappedPolicyPath(user, userType, false)) - return nil } - return err - + return u, errNoSuchUser } u.Credentials.Claims = jwtClaims.Map() } @@ -266,6 +287,34 @@ func (iamOS *IAMObjectStore) loadUser(ctx context.Context, user string, userType u.Credentials.Description = u.Credentials.Comment } + return u, nil +} + +func (iamOS *IAMObjectStore) loadUserConcurrent(ctx context.Context, userType IAMUserType, users ...string) ([]UserIdentity, error) { + userIdentities := make([]UserIdentity, len(users)) + g := errgroup.WithNErrs(len(users)) + + for index := range users { + g.Go(func() error { + userName := path.Dir(users[index]) + user, err := iamOS.loadUserIdentity(ctx, userName, userType) + if err != nil && !errors.Is(err, errNoSuchUser) { + return fmt.Errorf("unable to load the user `%s`: %w", userName, err) + } + userIdentities[index] = user + return nil + }, index) + } + + err := errors.Join(g.Wait()...) + return userIdentities, err +} + +func (iamOS *IAMObjectStore) loadUser(ctx context.Context, user string, userType IAMUserType, m map[string]UserIdentity) error { + u, err := iamOS.loadUserIdentity(ctx, user, userType) + if err != nil { + return err + } m[user] = u return nil } @@ -325,9 +374,7 @@ func (iamOS *IAMObjectStore) loadGroups(ctx context.Context, m map[string]GroupI return nil } -func (iamOS *IAMObjectStore) loadMappedPolicyWithRetry(ctx context.Context, name string, userType IAMUserType, isGroup bool, - m map[string]MappedPolicy, retries int, -) error { +func (iamOS *IAMObjectStore) loadMappedPolicyWithRetry(ctx context.Context, name string, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy], retries int) error { for { retry: var p MappedPolicy @@ -344,28 +391,53 @@ func (iamOS *IAMObjectStore) loadMappedPolicyWithRetry(ctx context.Context, name goto retry } - m[name] = p + m.Store(name, p) return nil } } -func (iamOS *IAMObjectStore) loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, - m map[string]MappedPolicy, -) error { +func (iamOS *IAMObjectStore) loadMappedPolicyInternal(ctx context.Context, name string, userType IAMUserType, isGroup bool) (MappedPolicy, error) { var p MappedPolicy err := iamOS.loadIAMConfig(ctx, &p, getMappedPolicyPath(name, userType, isGroup)) if err != nil { if err == errConfigNotFound { - return errNoSuchPolicy + return p, errNoSuchPolicy } - return err + return p, err } + return p, nil +} + +func (iamOS *IAMObjectStore) loadMappedPolicyConcurrent(ctx context.Context, userType IAMUserType, isGroup bool, users ...string) ([]MappedPolicy, error) { + mappedPolicies := make([]MappedPolicy, len(users)) + g := errgroup.WithNErrs(len(users)) + + for index := range users { + 
g.Go(func() error { + userName := strings.TrimSuffix(users[index], ".json") + userMP, err := iamOS.loadMappedPolicyInternal(ctx, userName, userType, isGroup) + if err != nil && !errors.Is(err, errNoSuchPolicy) { + return fmt.Errorf("unable to load the user policy map `%s`: %w", userName, err) + } + mappedPolicies[index] = userMP + return nil + }, index) + } + + err := errors.Join(g.Wait()...) + return mappedPolicies, err +} - m[name] = p +func (iamOS *IAMObjectStore) loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy]) error { + p, err := iamOS.loadMappedPolicyInternal(ctx, name, userType, isGroup) + if err != nil { + return err + } + m.Store(name, p) return nil } -func (iamOS *IAMObjectStore) loadMappedPolicies(ctx context.Context, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error { +func (iamOS *IAMObjectStore) loadMappedPolicies(ctx context.Context, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy]) error { var basePath string if isGroup { basePath = iamConfigPolicyDBGroupsPrefix @@ -396,158 +468,289 @@ func (iamOS *IAMObjectStore) loadMappedPolicies(ctx context.Context, userType IA } var ( - usersListKey = "users/" - svcAccListKey = "service-accounts/" - groupsListKey = "groups/" - policiesListKey = "policies/" - stsListKey = "sts/" - policyDBUsersListKey = "policydb/users/" - policyDBSTSUsersListKey = "policydb/sts-users/" - policyDBServiceAccountsListKey = "policydb/service-accounts/" - policyDBGroupsListKey = "policydb/groups/" - - // List of directories from which to read iam data into memory. - allListKeys = []string{ - usersListKey, - svcAccListKey, - groupsListKey, - policiesListKey, - stsListKey, - policyDBUsersListKey, - policyDBSTSUsersListKey, - policyDBServiceAccountsListKey, - policyDBGroupsListKey, - } - - // List of directories to skip: we do not read STS directories for better - // performance. STS credentials would be stored in memory when they are - // first used. - iamLoadSkipListKeySet = set.CreateStringSet( - stsListKey, - policyDBSTSUsersListKey, - ) + usersListKey = "users/" + svcAccListKey = "service-accounts/" + groupsListKey = "groups/" + policiesListKey = "policies/" + stsListKey = "sts/" + policyDBPrefix = "policydb/" + policyDBUsersListKey = "policydb/users/" + policyDBSTSUsersListKey = "policydb/sts-users/" + policyDBGroupsListKey = "policydb/groups/" ) -func (iamOS *IAMObjectStore) listAllIAMConfigItems(ctx context.Context) (map[string][]string, error) { - res := make(map[string][]string) +func findSecondIndex(s string, substr string) int { + first := strings.Index(s, substr) + if first == -1 { + return -1 + } + second := strings.Index(s[first+1:], substr) + if second == -1 { + return -1 + } + return first + second + 1 +} + +// splitPath splits a path into a top-level directory and a child item. The +// parent directory retains the trailing slash. +func splitPath(s string, secondIndex bool) (string, string) { + var i int + if secondIndex { + i = findSecondIndex(s, "/") + } else { + i = strings.Index(s, "/") + } + if i == -1 { + return s, "" + } + // Include the trailing slash in the parent directory. 
+ return s[:i+1], s[i+1:] +} + +func (iamOS *IAMObjectStore) listAllIAMConfigItems(ctx context.Context) (res map[string][]string, err error) { + res = make(map[string][]string) ctx, cancel := context.WithCancel(ctx) defer cancel() - for _, listKey := range allListKeys { - if iamLoadSkipListKeySet.Contains(listKey) { - continue + for item := range listIAMConfigItems(ctx, iamOS.objAPI, iamConfigPrefix+SlashSeparator) { + if item.Err != nil { + return nil, item.Err } - for item := range listIAMConfigItems(ctx, iamOS.objAPI, iamConfigPrefix+SlashSeparator+listKey) { - if item.Err != nil { - return nil, item.Err - } - res[listKey] = append(res[listKey], item.Item) + + secondIndex := strings.HasPrefix(item.Item, policyDBPrefix) + listKey, trimmedItem := splitPath(item.Item, secondIndex) + if listKey == iamFormatFile { + continue } + + res[listKey] = append(res[listKey], trimmedItem) } + return res, nil } -// PurgeExpiredSTS - purge expired STS credentials from object store. -func (iamOS *IAMObjectStore) PurgeExpiredSTS(ctx context.Context) error { - if iamOS.objAPI == nil { - return errServerNotInitialized - } +const ( + maxIAMLoadOpTime = 5 * time.Second +) - bootstrapTraceMsg("purging expired STS credentials") - // Scan STS users on disk and purge expired ones. We do not need to hold a - // lock with store.lock() here. - for item := range listIAMConfigItems(ctx, iamOS.objAPI, iamConfigPrefix+SlashSeparator+stsListKey) { - if item.Err != nil { - return item.Err - } - userName := path.Dir(item.Item) - // loadUser() will delete expired user during the load - we do not need - // to keep the loaded user around in memory, so we reinitialize the map - // each time. - m := map[string]UserIdentity{} - if err := iamOS.loadUser(ctx, userName, stsUser, m); err != nil && err != errNoSuchUser { - logger.LogIf(GlobalContext, fmt.Errorf("unable to load user during STS purge: %w (%s)", err, item.Item)) - } +func (iamOS *IAMObjectStore) loadPolicyDocConcurrent(ctx context.Context, policies ...string) ([]PolicyDoc, error) { + policyDocs := make([]PolicyDoc, len(policies)) + g := errgroup.WithNErrs(len(policies)) + for index := range policies { + g.Go(func() error { + policyName := path.Dir(policies[index]) + policyDoc, err := iamOS.loadPolicy(ctx, policyName) + if err != nil && !errors.Is(err, errNoSuchPolicy) { + return fmt.Errorf("unable to load the policy doc `%s`: %w", policyName, err) + } + policyDocs[index] = policyDoc + return nil + }, index) } - return nil + + err := errors.Join(g.Wait()...) + return policyDocs, err } // Assumes cache is locked by caller. 
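loadPolicyDocConcurrent above fans out one goroutine per policy via errgroup.WithNErrs, and loadAllFromObjStore below feeds it the listing in fixed-size chunks (count = 32 parallel loaders). A condensed sketch of that chunked fan-out; loadOne is a hypothetical stand-in for loadPolicy/loadUserIdentity:

package example

import (
    "errors"

    "github.com/minio/pkg/v3/sync/errgroup"
)

// loadInBatches walks items in fixed-size chunks and loads each chunk
// concurrently, one goroutine per item, mirroring the pattern used by
// loadAllFromObjStore above. Requires Go 1.22+ per-iteration loop variables.
func loadInBatches(items []string, batch int, loadOne func(string) error) error {
    for len(items) > 0 {
        n := min(batch, len(items))
        chunk := items[:n]
        g := errgroup.WithNErrs(len(chunk))
        for index := range chunk {
            g.Go(func() error {
                return loadOne(chunk[index])
            }, index)
        }
        if err := errors.Join(g.Wait()...); err != nil {
            return err
        }
        items = items[n:]
    }
    return nil
}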
-func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iamCache) error { +func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iamCache, firstTime bool) error { + bootstrapTraceMsgFirstTime := func(s string) { + if firstTime { + bootstrapTraceMsg(s) + } + } + if iamOS.objAPI == nil { return errServerNotInitialized } - bootstrapTraceMsg("loading all IAM items") + bootstrapTraceMsgFirstTime("loading all IAM items") + + setDefaultCannedPolicies(cache.iamPolicyDocsMap) + listStartTime := UTCNow() listedConfigItems, err := iamOS.listAllIAMConfigItems(ctx) if err != nil { return fmt.Errorf("unable to list IAM data: %w", err) } + if took := time.Since(listStartTime); took > maxIAMLoadOpTime { + var s strings.Builder + for k, v := range listedConfigItems { + s.WriteString(fmt.Sprintf(" %s: %d items\n", k, len(v))) + } + logger.Info("listAllIAMConfigItems took %.2fs with contents:\n%s", took.Seconds(), s.String()) + } // Loads things in the same order as `LoadIAMCache()` - bootstrapTraceMsg("loading policy documents") + bootstrapTraceMsgFirstTime("loading policy documents") + policyLoadStartTime := UTCNow() policiesList := listedConfigItems[policiesListKey] - for _, item := range policiesList { - policyName := path.Dir(item) - if err := iamOS.loadPolicyDoc(ctx, policyName, cache.iamPolicyDocsMap); err != nil && !errors.Is(err, errNoSuchPolicy) { - return fmt.Errorf("unable to load the policy doc `%s`: %w", policyName, err) + count := 32 // number of parallel IAM loaders + for { + if len(policiesList) < count { + policyDocs, err := iamOS.loadPolicyDocConcurrent(ctx, policiesList...) + if err != nil { + return err + } + for index := range policiesList { + if policyDocs[index].Policy.Version != "" { + policyName := path.Dir(policiesList[index]) + cache.iamPolicyDocsMap[policyName] = policyDocs[index] + } + } + break + } + + policyDocs, err := iamOS.loadPolicyDocConcurrent(ctx, policiesList[:count]...) + if err != nil { + return err + } + + for index := range policiesList[:count] { + if policyDocs[index].Policy.Version != "" { + policyName := path.Dir(policiesList[index]) + cache.iamPolicyDocsMap[policyName] = policyDocs[index] + } } + + policiesList = policiesList[count:] + } + + if took := time.Since(policyLoadStartTime); took > maxIAMLoadOpTime { + logger.Info("Policy docs load took %.2fs (for %d items)", took.Seconds(), len(policiesList)) } - setDefaultCannedPolicies(cache.iamPolicyDocsMap) if iamOS.usersSysType == MinIOUsersSysType { - bootstrapTraceMsg("loading regular IAM users") + bootstrapTraceMsgFirstTime("loading regular IAM users") + regUsersLoadStartTime := UTCNow() regUsersList := listedConfigItems[usersListKey] - for _, item := range regUsersList { - userName := path.Dir(item) - if err := iamOS.loadUser(ctx, userName, regUser, cache.iamUsersMap); err != nil && err != errNoSuchUser { - return fmt.Errorf("unable to load the user `%s`: %w", userName, err) + + for { + if len(regUsersList) < count { + users, err := iamOS.loadUserConcurrent(ctx, regUser, regUsersList...) + if err != nil { + return err + } + for index := range regUsersList { + if users[index].Credentials.AccessKey != "" { + userName := path.Dir(regUsersList[index]) + cache.iamUsersMap[userName] = users[index] + } + } + break } + + users, err := iamOS.loadUserConcurrent(ctx, regUser, regUsersList[:count]...) 
+ if err != nil { + return err + } + + for index := range regUsersList[:count] { + if users[index].Credentials.AccessKey != "" { + userName := path.Dir(regUsersList[index]) + cache.iamUsersMap[userName] = users[index] + } + } + + regUsersList = regUsersList[count:] } - bootstrapTraceMsg("loading regular IAM groups") + if took := time.Since(regUsersLoadStartTime); took > maxIAMLoadOpTime { + actualLoaded := len(cache.iamUsersMap) + logger.Info("Reg. users load took %.2fs (for %d items with %d expired items)", took.Seconds(), + len(regUsersList), len(regUsersList)-actualLoaded) + } + + bootstrapTraceMsgFirstTime("loading regular IAM groups") + groupsLoadStartTime := UTCNow() groupsList := listedConfigItems[groupsListKey] for _, item := range groupsList { group := path.Dir(item) if err := iamOS.loadGroup(ctx, group, cache.iamGroupsMap); err != nil && err != errNoSuchGroup { - return fmt.Errorf("unable to load the group `%s`: %w", group, err) + return fmt.Errorf("unable to load the group: %w", err) } } + if took := time.Since(groupsLoadStartTime); took > maxIAMLoadOpTime { + logger.Info("Groups load took %.2fs (for %d items)", took.Seconds(), len(groupsList)) + } } - bootstrapTraceMsg("loading user policy mapping") + bootstrapTraceMsgFirstTime("loading user policy mapping") + userPolicyMappingLoadStartTime := UTCNow() userPolicyMappingsList := listedConfigItems[policyDBUsersListKey] - for _, item := range userPolicyMappingsList { - userName := strings.TrimSuffix(item, ".json") - if err := iamOS.loadMappedPolicy(ctx, userName, regUser, false, cache.iamUserPolicyMap); err != nil && !errors.Is(err, errNoSuchPolicy) { - return fmt.Errorf("unable to load the policy mapping for the user `%s`: %w", userName, err) + for { + if len(userPolicyMappingsList) < count { + mappedPolicies, err := iamOS.loadMappedPolicyConcurrent(ctx, regUser, false, userPolicyMappingsList...) + if err != nil { + return err + } + + for index := range userPolicyMappingsList { + if mappedPolicies[index].Policies != "" { + userName := strings.TrimSuffix(userPolicyMappingsList[index], ".json") + cache.iamUserPolicyMap.Store(userName, mappedPolicies[index]) + } + } + + break + } + + mappedPolicies, err := iamOS.loadMappedPolicyConcurrent(ctx, regUser, false, userPolicyMappingsList[:count]...) 
+ if err != nil { + return err } + + for index := range userPolicyMappingsList[:count] { + if mappedPolicies[index].Policies != "" { + userName := strings.TrimSuffix(userPolicyMappingsList[index], ".json") + cache.iamUserPolicyMap.Store(userName, mappedPolicies[index]) + } + } + + userPolicyMappingsList = userPolicyMappingsList[count:] } - bootstrapTraceMsg("loading group policy mapping") + if took := time.Since(userPolicyMappingLoadStartTime); took > maxIAMLoadOpTime { + logger.Info("User policy mappings load took %.2fs (for %d items)", took.Seconds(), len(userPolicyMappingsList)) + } + + bootstrapTraceMsgFirstTime("loading group policy mapping") + groupPolicyMappingLoadStartTime := UTCNow() groupPolicyMappingsList := listedConfigItems[policyDBGroupsListKey] for _, item := range groupPolicyMappingsList { groupName := strings.TrimSuffix(item, ".json") if err := iamOS.loadMappedPolicy(ctx, groupName, regUser, true, cache.iamGroupPolicyMap); err != nil && !errors.Is(err, errNoSuchPolicy) { - return fmt.Errorf("unable to load the policy mapping for the group `%s`: %w", groupName, err) + return fmt.Errorf("unable to load the policy mapping for the group: %w", err) } } + if took := time.Since(groupPolicyMappingLoadStartTime); took > maxIAMLoadOpTime { + logger.Info("Group policy mappings load took %.2fs (for %d items)", took.Seconds(), len(groupPolicyMappingsList)) + } - bootstrapTraceMsg("loading service accounts") + bootstrapTraceMsgFirstTime("loading service accounts") + svcAccLoadStartTime := UTCNow() svcAccList := listedConfigItems[svcAccListKey] svcUsersMap := make(map[string]UserIdentity, len(svcAccList)) for _, item := range svcAccList { userName := path.Dir(item) if err := iamOS.loadUser(ctx, userName, svcUser, svcUsersMap); err != nil && err != errNoSuchUser { - return fmt.Errorf("unable to load the service account `%s`: %w", userName, err) + return fmt.Errorf("unable to load the service account: %w", err) } } + if took := time.Since(svcAccLoadStartTime); took > maxIAMLoadOpTime { + logger.Info("Service accounts load took %.2fs (for %d items with %d expired items)", took.Seconds(), + len(svcAccList), len(svcAccList)-len(svcUsersMap)) + } + + bootstrapTraceMsg("loading STS account policy mapping") + stsPolicyMappingLoadStartTime := UTCNow() + var stsPolicyMappingsCount int for _, svcAcc := range svcUsersMap { svcParent := svcAcc.Credentials.ParentUser if _, ok := cache.iamUsersMap[svcParent]; !ok { + stsPolicyMappingsCount++ // If a service account's parent user is not in iamUsersMap, the // parent is an STS account. Such accounts may have a policy mapped // on the parent user, so we load them. This is not needed for the @@ -562,16 +765,62 @@ func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iam // OIDC/AssumeRoleWithCustomToken/AssumeRoleWithCertificate). 
err := iamOS.loadMappedPolicy(ctx, svcParent, stsUser, false, cache.iamSTSPolicyMap) if err != nil && !errors.Is(err, errNoSuchPolicy) { - return fmt.Errorf("unable to load the policy mapping for the STS user `%s`: %w", svcParent, err) + return fmt.Errorf("unable to load the policy mapping for the STS user: %w", err) } } } - // Copy svcUsersMap to cache.iamUsersMap - for k, v := range svcUsersMap { - cache.iamUsersMap[k] = v + if took := time.Since(stsPolicyMappingLoadStartTime); took > maxIAMLoadOpTime { + logger.Info("STS policy mappings load took %.2fs (for %d items)", took.Seconds(), stsPolicyMappingsCount) } + // Copy svcUsersMap to cache.iamUsersMap + maps.Copy(cache.iamUsersMap, svcUsersMap) + cache.buildUserGroupMemberships() + + purgeStart := time.Now() + + // Purge expired STS credentials. + + // Scan STS users on disk and purge expired ones. + stsAccountsFromStore := map[string]UserIdentity{} + stsAccPoliciesFromStore := xsync.NewMapOf[string, MappedPolicy]() + for _, item := range listedConfigItems[stsListKey] { + userName := path.Dir(item) + // loadUser() will delete expired user during the load. + err := iamOS.loadUser(ctx, userName, stsUser, stsAccountsFromStore) + if err != nil && !errors.Is(err, errNoSuchUser) { + iamLogIf(ctx, err) + } + // No need to return errors for failed expiration of STS users + } + + // Loading the STS policy mappings from disk ensures that stale entries + // (removed during loadUser() in the loop above) are removed from memory. + for _, item := range listedConfigItems[policyDBSTSUsersListKey] { + stsName := strings.TrimSuffix(item, ".json") + err := iamOS.loadMappedPolicy(ctx, stsName, stsUser, false, stsAccPoliciesFromStore) + if err != nil && !errors.Is(err, errNoSuchPolicy) { + iamLogIf(ctx, err) + } + // No need to return errors for failed expiration of STS users + } + + took := time.Since(purgeStart).Seconds() + if took > maxDurationSecondsForLog { + // Log if we took a lot of time to load. + logger.Info("IAM expired STS purge took %.2fs", took) + } + + // Store the newly populated map in the iam cache. This takes care of + // removing stale entries from the existing map. + cache.iamSTSAccountsMap = stsAccountsFromStore + + stsAccPoliciesFromStore.Range(func(k string, v MappedPolicy) bool { + cache.iamSTSPolicyMap.Store(k, v) + return true + }) + return nil } @@ -623,38 +872,37 @@ func (iamOS *IAMObjectStore) deleteGroupInfo(ctx context.Context, name string) e return err } -// helper type for listIAMConfigItems -type itemOrErr struct { - Item string - Err error -} - -// Lists files or dirs in the minioMetaBucket at the given path -// prefix. If dirs is true, only directories are listed, otherwise -// only objects are listed. All returned items have the pathPrefix -// removed from their names. -func listIAMConfigItems(ctx context.Context, objAPI ObjectLayer, pathPrefix string) <-chan itemOrErr { - ch := make(chan itemOrErr) +// Lists objects in the minioMetaBucket at the given path prefix. All returned +// items have the pathPrefix removed from their names. +func listIAMConfigItems(ctx context.Context, objAPI ObjectLayer, pathPrefix string) <-chan itemOrErr[string] { + ch := make(chan itemOrErr[string]) go func() { defer xioutil.SafeClose(ch) // Allocate new results channel to receive ObjectInfo. 
- objInfoCh := make(chan ObjectInfo) + objInfoCh := make(chan itemOrErr[ObjectInfo]) if err := objAPI.Walk(ctx, minioMetaBucket, pathPrefix, objInfoCh, WalkOptions{}); err != nil { select { - case ch <- itemOrErr{Err: err}: + case ch <- itemOrErr[string]{Err: err}: case <-ctx.Done(): } return } for obj := range objInfoCh { - item := strings.TrimPrefix(obj.Name, pathPrefix) + if obj.Err != nil { + select { + case ch <- itemOrErr[string]{Err: obj.Err}: + case <-ctx.Done(): + return + } + } + item := strings.TrimPrefix(obj.Item.Name, pathPrefix) item = strings.TrimSuffix(item, SlashSeparator) select { - case ch <- itemOrErr{Item: item}: + case ch <- itemOrErr[string]{Item: item}: case <-ctx.Done(): return } diff --git a/cmd/iam-object-store_test.go b/cmd/iam-object-store_test.go new file mode 100644 index 0000000000000..dc0ce1326c15e --- /dev/null +++ b/cmd/iam-object-store_test.go @@ -0,0 +1,53 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "testing" +) + +func TestSplitPath(t *testing.T) { + cases := []struct { + path string + secondIndex bool + expectedListKey, expectedItem string + }{ + {"format.json", false, "format.json", ""}, + {"users/tester.json", false, "users/", "tester.json"}, + {"groups/test/group.json", false, "groups/", "test/group.json"}, + {"policydb/groups/testgroup.json", true, "policydb/groups/", "testgroup.json"}, + { + "policydb/sts-users/uid=slash/user,ou=people,ou=swengg,dc=min,dc=io.json", true, + "policydb/sts-users/", "uid=slash/user,ou=people,ou=swengg,dc=min,dc=io.json", + }, + { + "policydb/sts-users/uid=slash/user/twice,ou=people,ou=swengg,dc=min,dc=io.json", true, + "policydb/sts-users/", "uid=slash/user/twice,ou=people,ou=swengg,dc=min,dc=io.json", + }, + { + "policydb/groups/cn=project/d,ou=groups,ou=swengg,dc=min,dc=io.json", true, + "policydb/groups/", "cn=project/d,ou=groups,ou=swengg,dc=min,dc=io.json", + }, + } + for i, test := range cases { + listKey, item := splitPath(test.path, test.secondIndex) + if listKey != test.expectedListKey || item != test.expectedItem { + t.Errorf("unexpected result on test[%v]: expected[%s, %s] but got [%s, %s]", i, test.expectedListKey, test.expectedItem, listKey, item) + } + } +} diff --git a/cmd/iam-store.go b/cmd/iam-store.go index 565425231e1f0..46465e00bb821 100644 --- a/cmd/iam-store.go +++ b/cmd/iam-store.go @@ -23,18 +23,25 @@ import ( "encoding/json" "errors" "fmt" + "maps" + "path" "sort" "strings" + "sync" "time" jsoniter "github.com/json-iterator/go" "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7/pkg/set" "github.com/minio/minio/internal/auth" + "github.com/minio/minio/internal/config" "github.com/minio/minio/internal/config/identity/openid" "github.com/minio/minio/internal/jwt" - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/policy" + 
"github.com/minio/pkg/v3/env" + "github.com/minio/pkg/v3/policy" + "github.com/minio/pkg/v3/sync/errgroup" + "github.com/puzpuzpuz/xsync/v3" + "golang.org/x/sync/singleflight" ) const ( @@ -153,7 +160,7 @@ func getMappedPolicyPath(name string, userType IAMUserType, isGroup bool) string type UserIdentity struct { Version int `json:"version"` Credentials auth.Credentials `json:"credentials"` - UpdatedAt time.Time `json:"updatedAt,omitempty"` + UpdatedAt time.Time `json:"updatedAt"` } func newUserIdentity(cred auth.Credentials) UserIdentity { @@ -165,7 +172,7 @@ type GroupInfo struct { Version int `json:"version"` Status string `json:"status"` Members []string `json:"members"` - UpdatedAt time.Time `json:"updatedAt,omitempty"` + UpdatedAt time.Time `json:"updatedAt"` } func newGroupInfo(members []string) GroupInfo { @@ -176,13 +183,23 @@ func newGroupInfo(members []string) GroupInfo { type MappedPolicy struct { Version int `json:"version"` Policies string `json:"policy"` - UpdatedAt time.Time `json:"updatedAt,omitempty"` + UpdatedAt time.Time `json:"updatedAt"` +} + +// mappedPoliciesToMap copies the map of mapped policies to a regular map. +func mappedPoliciesToMap(m *xsync.MapOf[string, MappedPolicy]) map[string]MappedPolicy { + policies := make(map[string]MappedPolicy, m.Size()) + m.Range(func(k string, v MappedPolicy) bool { + policies[k] = v + return true + }) + return policies } // converts a mapped policy into a slice of distinct policies func (mp MappedPolicy) toSlice() []string { var policies []string - for _, policy := range strings.Split(mp.Policies, ",") { + for policy := range strings.SplitSeq(mp.Policies, ",") { if strings.TrimSpace(policy) == "" { continue } @@ -203,8 +220,8 @@ func newMappedPolicy(policy string) MappedPolicy { type PolicyDoc struct { Version int `json:",omitempty"` Policy policy.Policy - CreateDate time.Time `json:",omitempty"` - UpdateDate time.Time `json:",omitempty"` + CreateDate time.Time + UpdateDate time.Time } func newPolicyDoc(p policy.Policy) PolicyDoc { @@ -277,32 +294,32 @@ type iamCache struct { // map of regular username to credentials iamUsersMap map[string]UserIdentity // map of regular username to policy names - iamUserPolicyMap map[string]MappedPolicy + iamUserPolicyMap *xsync.MapOf[string, MappedPolicy] // STS accounts are loaded on demand and not via the periodic IAM reload. 
// map of STS access key to credentials iamSTSAccountsMap map[string]UserIdentity // map of STS access key to policy names - iamSTSPolicyMap map[string]MappedPolicy + iamSTSPolicyMap *xsync.MapOf[string, MappedPolicy] // map of group names to group info iamGroupsMap map[string]GroupInfo // map of user names to groups they are a member of iamUserGroupMemberships map[string]set.StringSet // map of group names to policy names - iamGroupPolicyMap map[string]MappedPolicy + iamGroupPolicyMap *xsync.MapOf[string, MappedPolicy] } func newIamCache() *iamCache { return &iamCache{ iamPolicyDocsMap: map[string]PolicyDoc{}, iamUsersMap: map[string]UserIdentity{}, - iamUserPolicyMap: map[string]MappedPolicy{}, + iamUserPolicyMap: xsync.NewMapOf[string, MappedPolicy](), iamSTSAccountsMap: map[string]UserIdentity{}, - iamSTSPolicyMap: map[string]MappedPolicy{}, + iamSTSPolicyMap: xsync.NewMapOf[string, MappedPolicy](), iamGroupsMap: map[string]GroupInfo{}, iamUserGroupMemberships: map[string]set.StringSet{}, - iamGroupPolicyMap: map[string]MappedPolicy{}, + iamGroupPolicyMap: xsync.NewMapOf[string, MappedPolicy](), } } @@ -343,6 +360,67 @@ func (c *iamCache) removeGroupFromMembershipsMap(group string) { } } +func (c *iamCache) policyDBGetGroups(store *IAMStoreSys, userPolicyPresent bool, groups ...string) ([]string, error) { + var policies []string + for _, group := range groups { + if store.getUsersSysType() == MinIOUsersSysType { + g, ok := c.iamGroupsMap[group] + if !ok { + continue + } + + // Group is disabled, so we return no policy - this + // ensures the request is denied. + if g.Status == statusDisabled { + continue + } + } + + policy, ok := c.iamGroupPolicyMap.Load(group) + if !ok { + continue + } + + policies = append(policies, policy.toSlice()...) + } + + found := len(policies) > 0 + if found { + return policies, nil + } + + if userPolicyPresent { + // if user mapping present and no group policies found + // rely on user policy for access, instead of fallback. + return nil, nil + } + + var mu sync.Mutex + + // no mappings found, fallback for all groups. + g := errgroup.WithNErrs(len(groups)).WithConcurrency(10) // load like 10 groups at a time. + + for index := range groups { + g.Go(func() error { + err := store.loadMappedPolicy(context.TODO(), groups[index], regUser, true, c.iamGroupPolicyMap) + if err != nil && !errors.Is(err, errNoSuchPolicy) { + return err + } + if errors.Is(err, errNoSuchPolicy) { + return nil + } + policy, _ := c.iamGroupPolicyMap.Load(groups[index]) + mu.Lock() + policies = append(policies, policy.toSlice()...) + mu.Unlock() + return nil + }, index) + } + + err := errors.Join(g.Wait()...) + return policies, err +} + // policyDBGet - lower-level helper; does not take locks. // // If a group is passed, it returns policies associated with the group. @@ -354,12 +432,18 @@ func (c *iamCache) removeGroupFromMembershipsMap(group string) { // information in IAM (i.e sys.iam*Map) - this info is stored only in the STS // generated credentials. Thus we skip looking up group memberships, user map, // and group map and check the appropriate policy maps directly. 
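policyDBGet below now consults the xsync.MapOf caches first and only falls back to loading a missing mapping from the backing store, instead of assuming every mapping was preloaded. A minimal sketch of that cache-then-load pattern; policyCache and loadFromStore are illustrative stand-ins, not MinIO types:

package example

import (
    "github.com/puzpuzpuz/xsync/v3"
)

// policyCache wraps a concurrent map of name -> policy string.
type policyCache struct {
    policies *xsync.MapOf[string, string]
}

func newPolicyCache() *policyCache {
    return &policyCache{policies: xsync.NewMapOf[string, string]()}
}

// get returns the cached policy for name, lazily loading and caching it on a
// miss. loadFromStore plays the role of loadMappedPolicy above.
func (c *policyCache) get(name string, loadFromStore func(string) (string, error)) (string, error) {
    if p, ok := c.policies.Load(name); ok {
        return p, nil
    }
    p, err := loadFromStore(name)
    if err != nil {
        return "", err
    }
    c.policies.Store(name, p)
    return p, nil
}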
-func (c *iamCache) policyDBGet(store *IAMStoreSys, name string, isGroup bool) ([]string, time.Time, error) { +func (c *iamCache) policyDBGet(store *IAMStoreSys, name string, isGroup bool, policyPresent bool) ([]string, time.Time, error) { if isGroup { if store.getUsersSysType() == MinIOUsersSysType { g, ok := c.iamGroupsMap[name] if !ok { - return nil, time.Time{}, errNoSuchGroup + if err := store.loadGroup(context.Background(), name, c.iamGroupsMap); err != nil { + return nil, time.Time{}, err + } + g, ok = c.iamGroupsMap[name] + if !ok { + return nil, time.Time{}, errNoSuchGroup + } } // Group is disabled, so we return no policy - this @@ -369,47 +453,122 @@ func (c *iamCache) policyDBGet(store *IAMStoreSys, name string, isGroup bool) ([ } } - return c.iamGroupPolicyMap[name].toSlice(), c.iamGroupPolicyMap[name].UpdatedAt, nil - } - - // When looking for a user's policies, we also check if the user - // and the groups they are member of are enabled. - u, ok := c.iamUsersMap[name] - if ok { - if !u.Credentials.IsValid() { - return nil, time.Time{}, nil + policy, ok := c.iamGroupPolicyMap.Load(name) + if ok { + return policy.toSlice(), policy.UpdatedAt, nil } + if !policyPresent { + if err := store.loadMappedPolicy(context.TODO(), name, regUser, true, c.iamGroupPolicyMap); err != nil && !errors.Is(err, errNoSuchPolicy) { + return nil, time.Time{}, err + } + policy, _ = c.iamGroupPolicyMap.Load(name) + return policy.toSlice(), policy.UpdatedAt, nil + } + return nil, time.Time{}, nil } - // For internal IDP regular/service account user accounts, the policy - // mapping is iamUserPolicyMap. For STS accounts, the parent user would be - // passed here and we lookup the mapping in iamSTSPolicyMap. - mp, ok := c.iamUserPolicyMap[name] - if !ok { - // Since user "name" could be a parent user of an STS account, we lookup - // mappings for those too. - mp, ok = c.iamSTSPolicyMap[name] + // returned policy could be empty, we use set to de-duplicate. + var policies set.StringSet + var updatedAt time.Time + + if store.getUsersSysType() == LDAPUsersSysType { + // For LDAP policy mapping is part of STS users, we only need to lookup + // those mappings. + mp, ok := c.iamSTSPolicyMap.Load(name) if !ok { // Attempt to load parent user mapping for STS accounts - store.loadMappedPolicy(context.TODO(), name, stsUser, false, c.iamSTSPolicyMap) - mp = c.iamSTSPolicyMap[name] + if err := store.loadMappedPolicy(context.TODO(), name, stsUser, false, c.iamSTSPolicyMap); err != nil && !errors.Is(err, errNoSuchPolicy) { + return nil, time.Time{}, err + } + mp, _ = c.iamSTSPolicyMap.Load(name) } - } + policies = set.CreateStringSet(mp.toSlice()...) + updatedAt = mp.UpdatedAt + } else { + // When looking for a user's policies, we also check if the user + // and the groups they are member of are enabled. + u, ok := c.iamUsersMap[name] + if ok { + if !u.Credentials.IsValid() { + return nil, time.Time{}, nil + } + } + + // For internal IDP regular/service account user accounts, the policy + // mapping is iamUserPolicyMap. For STS accounts, the parent user would be + // passed here and we lookup the mapping in iamSTSPolicyMap. + mp, ok := c.iamUserPolicyMap.Load(name) + if !ok { + if err := store.loadMappedPolicy(context.TODO(), name, regUser, false, c.iamUserPolicyMap); err != nil && !errors.Is(err, errNoSuchPolicy) { + return nil, time.Time{}, err + } + mp, ok = c.iamUserPolicyMap.Load(name) + if !ok { + // Since user "name" could be a parent user of an STS account, we look up + // mappings for those too. 
+ mp, ok = c.iamSTSPolicyMap.Load(name) + if !ok { + // Attempt to load parent user mapping for STS accounts + if err := store.loadMappedPolicy(context.TODO(), name, stsUser, false, c.iamSTSPolicyMap); err != nil && !errors.Is(err, errNoSuchPolicy) { + return nil, time.Time{}, err + } + mp, _ = c.iamSTSPolicyMap.Load(name) + } + } + } + policies = set.CreateStringSet(mp.toSlice()...) + + for _, group := range u.Credentials.Groups { + g, ok := c.iamGroupsMap[group] + if ok { + // Group is disabled, so we return no policy - this + // ensures the request is denied. + if g.Status == statusDisabled { + return nil, time.Time{}, nil + } + } + + policy, ok := c.iamGroupPolicyMap.Load(group) + if !ok { + if err := store.loadMappedPolicy(context.TODO(), group, regUser, true, c.iamGroupPolicyMap); err != nil && !errors.Is(err, errNoSuchPolicy) { + return nil, time.Time{}, err + } + policy, _ = c.iamGroupPolicyMap.Load(group) + } - // returned policy could be empty - policies := mp.toSlice() + for _, p := range policy.toSlice() { + policies.Add(p) + } + } + updatedAt = mp.UpdatedAt + } for _, group := range c.iamUserGroupMemberships[name].ToSlice() { - // Skip missing or disabled groups - gi, ok := c.iamGroupsMap[group] - if !ok || gi.Status == statusDisabled { - continue + if store.getUsersSysType() == MinIOUsersSysType { + g, ok := c.iamGroupsMap[group] + if ok { + // Group is disabled, so we return no policy - this + // ensures the request is denied. + if g.Status == statusDisabled { + return nil, time.Time{}, nil + } + } + } + + policy, ok := c.iamGroupPolicyMap.Load(group) + if !ok { + if err := store.loadMappedPolicy(context.TODO(), group, regUser, true, c.iamGroupPolicyMap); err != nil && !errors.Is(err, errNoSuchPolicy) { + return nil, time.Time{}, err + } + policy, _ = c.iamGroupPolicyMap.Load(group) } - policies = append(policies, c.iamGroupPolicyMap[group].toSlice()...) 
+ for _, p := range policy.toSlice() { + policies.Add(p) + } } - return policies, mp.UpdatedAt, nil + return policies.ToSlice(), updatedAt, nil } func (c *iamCache) updateUserWithClaims(key string, u UserIdentity) error { @@ -444,14 +603,15 @@ type IAMStorageAPI interface { loadPolicyDocWithRetry(ctx context.Context, policy string, m map[string]PolicyDoc, retries int) error loadPolicyDocs(ctx context.Context, m map[string]PolicyDoc) error loadUser(ctx context.Context, user string, userType IAMUserType, m map[string]UserIdentity) error + loadSecretKey(ctx context.Context, user string, userType IAMUserType) (string, error) loadUsers(ctx context.Context, userType IAMUserType, m map[string]UserIdentity) error loadGroup(ctx context.Context, group string, m map[string]GroupInfo) error loadGroups(ctx context.Context, m map[string]GroupInfo) error - loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error - loadMappedPolicyWithRetry(ctx context.Context, name string, userType IAMUserType, isGroup bool, m map[string]MappedPolicy, retries int) error - loadMappedPolicies(ctx context.Context, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error - saveIAMConfig(ctx context.Context, item interface{}, path string, opts ...options) error - loadIAMConfig(ctx context.Context, item interface{}, path string) error + loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy]) error + loadMappedPolicyWithRetry(ctx context.Context, name string, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy], retries int) error + loadMappedPolicies(ctx context.Context, userType IAMUserType, isGroup bool, m *xsync.MapOf[string, MappedPolicy]) error + saveIAMConfig(ctx context.Context, item any, path string, opts ...options) error + loadIAMConfig(ctx context.Context, item any, path string) error deleteIAMConfig(ctx context.Context, path string) error savePolicyDoc(ctx context.Context, policyName string, p PolicyDoc) error saveMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, mp MappedPolicy, opts ...options) error @@ -478,38 +638,28 @@ func setDefaultCannedPolicies(policies map[string]PolicyDoc) { } } -// PurgeExpiredSTS - purges expired STS credentials. -func (store *IAMStoreSys) PurgeExpiredSTS(ctx context.Context) error { - iamOS, ok := store.IAMStorageAPI.(*IAMObjectStore) - if !ok { - // No purging is done for non-object storage. - return nil - } - return iamOS.PurgeExpiredSTS(ctx) -} - // LoadIAMCache reads all IAM items and populates a new iamCache object and // replaces the in-memory cache object. func (store *IAMStoreSys) LoadIAMCache(ctx context.Context, firstTime bool) error { - bootstrapTraceMsg := func(s string) { + bootstrapTraceMsgFirstTime := func(s string) { if firstTime { bootstrapTraceMsg(s) } } - bootstrapTraceMsg("loading IAM data") + bootstrapTraceMsgFirstTime("loading IAM data") newCache := newIamCache() loadedAt := time.Now() if iamOS, ok := store.IAMStorageAPI.(*IAMObjectStore); ok { - err := iamOS.loadAllFromObjStore(ctx, newCache) + err := iamOS.loadAllFromObjStore(ctx, newCache, firstTime) if err != nil { return err } } else { - - bootstrapTraceMsg("loading policy documents") + // Only non-object IAM store (i.e. only etcd backend). 
+ bootstrapTraceMsgFirstTime("loading policy documents") if err := store.loadPolicyDocs(ctx, newCache.iamPolicyDocsMap); err != nil { return err } @@ -518,29 +668,29 @@ func (store *IAMStoreSys) LoadIAMCache(ctx context.Context, firstTime bool) erro setDefaultCannedPolicies(newCache.iamPolicyDocsMap) if store.getUsersSysType() == MinIOUsersSysType { - bootstrapTraceMsg("loading regular users") + bootstrapTraceMsgFirstTime("loading regular users") if err := store.loadUsers(ctx, regUser, newCache.iamUsersMap); err != nil { return err } - bootstrapTraceMsg("loading regular groups") + bootstrapTraceMsgFirstTime("loading regular groups") if err := store.loadGroups(ctx, newCache.iamGroupsMap); err != nil { return err } } - bootstrapTraceMsg("loading user policy mapping") + bootstrapTraceMsgFirstTime("loading user policy mapping") // load polices mapped to users if err := store.loadMappedPolicies(ctx, regUser, false, newCache.iamUserPolicyMap); err != nil { return err } - bootstrapTraceMsg("loading group policy mapping") + bootstrapTraceMsgFirstTime("loading group policy mapping") // load policies mapped to groups if err := store.loadMappedPolicies(ctx, regUser, true, newCache.iamGroupPolicyMap); err != nil { return err } - bootstrapTraceMsg("loading service accounts") + bootstrapTraceMsgFirstTime("loading service accounts") // load service accounts if err := store.loadUsers(ctx, svcUser, newCache.iamUsersMap); err != nil { return err @@ -560,7 +710,7 @@ func (store *IAMStoreSys) LoadIAMCache(ctx context.Context, firstTime bool) erro // An in-memory cache must be replaced only if we know for sure that the // values loaded from disk are not stale. They might be stale if the // cached.updatedAt is more recent than the refresh cycle began. - if cache.updatedAt.Before(loadedAt) { + if cache.updatedAt.Before(loadedAt) || firstTime { // No one has updated anything since the config was loaded, // so we just replace whatever is on the disk into memory. cache.iamGroupPolicyMap = newCache.iamGroupPolicyMap @@ -574,9 +724,10 @@ func (store *IAMStoreSys) LoadIAMCache(ctx context.Context, firstTime bool) erro // here is to account for STS policy mapping changes that should apply // for service accounts derived from such STS accounts (i.e. LDAP STS // accounts). - for k, v := range newCache.iamSTSPolicyMap { - cache.iamSTSPolicyMap[k] = v - } + newCache.iamSTSPolicyMap.Range(func(k string, v MappedPolicy) bool { + cache.iamSTSPolicyMap.Store(k, v) + return true + }) cache.updatedAt = time.Now() } @@ -588,6 +739,9 @@ func (store *IAMStoreSys) LoadIAMCache(ctx context.Context, firstTime bool) erro // layer. type IAMStoreSys struct { IAMStorageAPI + + group *singleflight.Group + policy *singleflight.Group } // HasWatcher - returns if the storage system has a watcher. @@ -615,12 +769,10 @@ func (store *IAMStoreSys) GetMappedPolicy(name string, isGroup bool) (MappedPoli defer store.runlock() if isGroup { - v, ok := cache.iamGroupPolicyMap[name] + v, ok := cache.iamGroupPolicyMap.Load(name) return v, ok } - - v, ok := cache.iamUserPolicyMap[name] - return v, ok + return cache.iamUserPolicyMap.Load(name) } // GroupNotificationHandler - updates in-memory cache on notification of @@ -639,7 +791,7 @@ func (store *IAMStoreSys) GroupNotificationHandler(ctx context.Context, group st // group does not exist - so remove from memory. 
cache.removeGroupFromMembershipsMap(group) delete(cache.iamGroupsMap, group) - delete(cache.iamGroupPolicyMap, group) + cache.iamGroupPolicyMap.Delete(group) cache.updatedAt = time.Now() return nil @@ -670,20 +822,36 @@ func (store *IAMStoreSys) PolicyDBGet(name string, groups ...string) ([]string, cache := store.rlock() defer store.runlock() - policies, _, err := cache.policyDBGet(store, name, false) - if err != nil { - return nil, err - } + getPolicies := func() ([]string, error) { + policies, _, err := cache.policyDBGet(store, name, false, false) + if err != nil { + return nil, err + } - for _, group := range groups { - ps, _, err := cache.policyDBGet(store, group, true) + userPolicyPresent := len(policies) > 0 + + groupPolicies, err := cache.policyDBGetGroups(store, userPolicyPresent, groups...) if err != nil { return nil, err } - policies = append(policies, ps...) - } - return policies, nil + policies = append(policies, groupPolicies...) + return policies, nil + } + if store.policy != nil { + val, err, _ := store.policy.Do(name, func() (any, error) { + return getPolicies() + }) + if err != nil { + return nil, err + } + res, ok := val.([]string) + if !ok { + return nil, errors.New("unexpected policy type") + } + return res, nil + } + return getPolicies() } // AddUsersToGroup - adds users to group, creating the group if needed. @@ -818,7 +986,7 @@ func (store *IAMStoreSys) RemoveUsersFromGroup(ctx context.Context, group string // Delete from server memory delete(cache.iamGroupsMap, group) - delete(cache.iamGroupPolicyMap, group) + cache.iamGroupPolicyMap.Delete(group) cache.updatedAt = time.Now() return cache.updatedAt, nil } @@ -861,7 +1029,7 @@ func (store *IAMStoreSys) GetGroupDescription(group string) (gd madmin.GroupDesc cache := store.rlock() defer store.runlock() - ps, updatedAt, err := cache.policyDBGet(store, group, true) + ps, updatedAt, err := cache.policyDBGet(store, group, true, false) if err != nil { return gd, err } @@ -890,39 +1058,57 @@ func (store *IAMStoreSys) GetGroupDescription(group string) (gd madmin.GroupDesc }, nil } -// ListGroups - lists groups. Since this is not going to be a frequent -// operation, we fetch this info from storage, and refresh the cache as well. -func (store *IAMStoreSys) ListGroups(ctx context.Context) (res []string, err error) { - cache := store.lock() - defer store.unlock() - - if store.getUsersSysType() == MinIOUsersSysType { - m := map[string]GroupInfo{} - err = store.loadGroups(ctx, m) +// updateGroups updates the group from the persistent store, and also related policy mapping if any. 
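Aside: PolicyDBGet above funnels concurrent lookups for the same name through store.policy.Do, one of the new singleflight groups on IAMStoreSys, so a single goroutine computes the policy list while the others wait for and share its result. A minimal sketch of that deduplication pattern with golang.org/x/sync/singleflight, which this diff adds to the cmd/iam.go imports; slowPolicyLookup and the key name are illustrative only, not MinIO functions:

package main

import (
	"fmt"
	"sync"
	"time"

	"golang.org/x/sync/singleflight"
)

// slowPolicyLookup stands in for a lookup that has to hit storage; it is a
// hypothetical helper, not part of the patch.
func slowPolicyLookup(name string) ([]string, error) {
	time.Sleep(100 * time.Millisecond)
	return []string{"readwrite"}, nil
}

func main() {
	var g singleflight.Group
	var wg sync.WaitGroup

	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// Calls with the same key that arrive while one is in flight wait
			// for and share its result; shared reports the deduplication.
			v, err, shared := g.Do("mypolicy", func() (interface{}, error) {
				return slowPolicyLookup("mypolicy")
			})
			if err == nil {
				fmt.Println(v.([]string), "shared:", shared)
			}
		}()
	}
	wg.Wait()
}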
+func (store *IAMStoreSys) updateGroups(ctx context.Context, cache *iamCache) (res []string, err error) { + groupSet := set.NewStringSet() + if iamOS, ok := store.IAMStorageAPI.(*IAMObjectStore); ok { + listedConfigItems, err := iamOS.listAllIAMConfigItems(ctx) if err != nil { - return + return nil, err } - cache.iamGroupsMap = m - cache.updatedAt = time.Now() - for k := range cache.iamGroupsMap { - res = append(res, k) + if store.getUsersSysType() == MinIOUsersSysType { + groupsList := listedConfigItems[groupsListKey] + for _, item := range groupsList { + group := path.Dir(item) + if err = iamOS.loadGroup(ctx, group, cache.iamGroupsMap); err != nil && !errors.Is(err, errNoSuchGroup) { + return nil, fmt.Errorf("unable to load the group: %w", err) + } + groupSet.Add(group) + } } - } - if store.getUsersSysType() == LDAPUsersSysType { - m := map[string]MappedPolicy{} - err = store.loadMappedPolicies(ctx, stsUser, true, m) - if err != nil { - return - } - cache.iamGroupPolicyMap = m - cache.updatedAt = time.Now() - for k := range cache.iamGroupPolicyMap { - res = append(res, k) + groupPolicyMappingsList := listedConfigItems[policyDBGroupsListKey] + for _, item := range groupPolicyMappingsList { + group := strings.TrimSuffix(item, ".json") + if err = iamOS.loadMappedPolicy(ctx, group, regUser, true, cache.iamGroupPolicyMap); err != nil && !errors.Is(err, errNoSuchPolicy) { + return nil, fmt.Errorf("unable to load the policy mapping for the group: %w", err) + } + groupSet.Add(group) } + + return groupSet.ToSlice(), nil + } + + // For etcd just return from cache. + for k := range cache.iamGroupsMap { + groupSet.Add(k) } - return + cache.iamGroupPolicyMap.Range(func(k string, v MappedPolicy) bool { + groupSet.Add(k) + return true + }) + + return groupSet.ToSlice(), nil +} + +// ListGroups - lists groups. Since this is not going to be a frequent +// operation, we fetch this info from storage, and refresh the cache as well. 
+func (store *IAMStoreSys) ListGroups(ctx context.Context) (res []string, err error) { + cache := store.lock() + defer store.unlock() + + return store.updateGroups(ctx, cache) } // listGroups - lists groups - fetch groups from cache @@ -937,11 +1123,12 @@ func (store *IAMStoreSys) listGroups(ctx context.Context) (res []string, err err } if store.getUsersSysType() == LDAPUsersSysType { - for k := range cache.iamGroupPolicyMap { + cache.iamGroupPolicyMap.Range(func(k string, _ MappedPolicy) bool { res = append(res, k) - } + return true + }) } - return + return res, err } // PolicyDBUpdate - adds or removes given policies to/from the user or group's @@ -952,7 +1139,7 @@ func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGro ) { if name == "" { err = errInvalidArgument - return + return updatedAt, addedOrRemoved, effectivePolicies, err } cache := store.lock() @@ -962,29 +1149,29 @@ func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGro var mp MappedPolicy if !isGroup { if userType == stsUser { - stsMap := map[string]MappedPolicy{} + stsMap := xsync.NewMapOf[string, MappedPolicy]() // Attempt to load parent user mapping for STS accounts store.loadMappedPolicy(context.TODO(), name, stsUser, false, stsMap) - mp = stsMap[name] + mp, _ = stsMap.Load(name) } else { - mp = cache.iamUserPolicyMap[name] + mp, _ = cache.iamUserPolicyMap.Load(name) } } else { if store.getUsersSysType() == MinIOUsersSysType { g, ok := cache.iamGroupsMap[name] if !ok { err = errNoSuchGroup - return + return updatedAt, addedOrRemoved, effectivePolicies, err } if g.Status == statusDisabled { err = errGroupDisabled - return + return updatedAt, addedOrRemoved, effectivePolicies, err } } - mp = cache.iamGroupPolicyMap[name] + mp, _ = cache.iamGroupPolicyMap.Load(name) } // Compute net policy change effect and updated policy mapping @@ -999,7 +1186,7 @@ func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGro for _, p := range policiesToUpdate.ToSlice() { if _, found := cache.iamPolicyDocsMap[p]; !found { err = errNoSuchPolicy - return + return updatedAt, addedOrRemoved, effectivePolicies, err } } newPolicySet = existingPolicySet.Union(policiesToUpdate) @@ -1011,7 +1198,7 @@ func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGro // We return an error if the requested policy update will have no effect. if policiesToUpdate.IsEmpty() { err = errNoPolicyToAttachOrDetach - return + return updatedAt, addedOrRemoved, effectivePolicies, err } newPolicies := newPolicySet.ToSlice() @@ -1023,30 +1210,29 @@ func (store *IAMStoreSys) PolicyDBUpdate(ctx context.Context, name string, isGro // in this case, we delete the mapping from the store. 
if len(newPolicies) == 0 { if err = store.deleteMappedPolicy(ctx, name, userType, isGroup); err != nil && !errors.Is(err, errNoSuchPolicy) { - return + return updatedAt, addedOrRemoved, effectivePolicies, err } if !isGroup { if userType == stsUser { - delete(cache.iamSTSPolicyMap, name) + cache.iamSTSPolicyMap.Delete(name) } else { - delete(cache.iamUserPolicyMap, name) + cache.iamUserPolicyMap.Delete(name) } } else { - delete(cache.iamGroupPolicyMap, name) + cache.iamGroupPolicyMap.Delete(name) } } else { - if err = store.saveMappedPolicy(ctx, name, userType, isGroup, newPolicyMapping); err != nil { - return + return updatedAt, addedOrRemoved, effectivePolicies, err } if !isGroup { if userType == stsUser { - cache.iamSTSPolicyMap[name] = newPolicyMapping + cache.iamSTSPolicyMap.Store(name, newPolicyMapping) } else { - cache.iamUserPolicyMap[name] = newPolicyMapping + cache.iamUserPolicyMap.Store(name, newPolicyMapping) } } else { - cache.iamGroupPolicyMap[name] = newPolicyMapping + cache.iamGroupPolicyMap.Store(name, newPolicyMapping) } } @@ -1081,12 +1267,12 @@ func (store *IAMStoreSys) PolicyDBSet(ctx context.Context, name, policy string, } if !isGroup { if userType == stsUser { - delete(cache.iamSTSPolicyMap, name) + cache.iamSTSPolicyMap.Delete(name) } else { - delete(cache.iamUserPolicyMap, name) + cache.iamUserPolicyMap.Delete(name) } } else { - delete(cache.iamGroupPolicyMap, name) + cache.iamGroupPolicyMap.Delete(name) } cache.updatedAt = time.Now() return cache.updatedAt, nil @@ -1105,12 +1291,12 @@ func (store *IAMStoreSys) PolicyDBSet(ctx context.Context, name, policy string, } if !isGroup { if userType == stsUser { - cache.iamSTSPolicyMap[name] = mp + cache.iamSTSPolicyMap.Store(name, mp) } else { - cache.iamUserPolicyMap[name] = mp + cache.iamUserPolicyMap.Store(name, mp) } } else { - cache.iamGroupPolicyMap[name] = mp + cache.iamGroupPolicyMap.Store(name, mp) } cache.updatedAt = time.Now() return mp.UpdatedAt, nil @@ -1134,33 +1320,35 @@ func (store *IAMStoreSys) PolicyNotificationHandler(ctx context.Context, policy delete(cache.iamPolicyDocsMap, policy) // update user policy map - for u, mp := range cache.iamUserPolicyMap { + cache.iamUserPolicyMap.Range(func(u string, mp MappedPolicy) bool { pset := mp.policySet() if !pset.Contains(policy) { - continue + return true } if store.getUsersSysType() == MinIOUsersSysType { _, ok := cache.iamUsersMap[u] if !ok { // happens when account is deleted or // expired. - delete(cache.iamUserPolicyMap, u) - continue + cache.iamUserPolicyMap.Delete(u) + return true } } pset.Remove(policy) - cache.iamUserPolicyMap[u] = newMappedPolicy(strings.Join(pset.ToSlice(), ",")) - } + cache.iamUserPolicyMap.Store(u, newMappedPolicy(strings.Join(pset.ToSlice(), ","))) + return true + }) // update group policy map - for g, mp := range cache.iamGroupPolicyMap { + cache.iamGroupPolicyMap.Range(func(g string, mp MappedPolicy) bool { pset := mp.policySet() if !pset.Contains(policy) { - continue + return true } pset.Remove(policy) - cache.iamGroupPolicyMap[g] = newMappedPolicy(strings.Join(pset.ToSlice(), ",")) - } + cache.iamGroupPolicyMap.Store(g, newMappedPolicy(strings.Join(pset.ToSlice(), ","))) + return true + }) cache.updatedAt = time.Now() return nil @@ -1186,26 +1374,28 @@ func (store *IAMStoreSys) DeletePolicy(ctx context.Context, policy string, isFro // we do allow deletion. 
users := []string{} groups := []string{} - for u, mp := range cache.iamUserPolicyMap { + cache.iamUserPolicyMap.Range(func(u string, mp MappedPolicy) bool { pset := mp.policySet() if store.getUsersSysType() == MinIOUsersSysType { if _, ok := cache.iamUsersMap[u]; !ok { // This case can happen when a temporary account is // deleted or expired - remove it from userPolicyMap. - delete(cache.iamUserPolicyMap, u) - continue + cache.iamUserPolicyMap.Delete(u) + return true } } if pset.Contains(policy) { users = append(users, u) } - } - for g, mp := range cache.iamGroupPolicyMap { + return true + }) + cache.iamGroupPolicyMap.Range(func(g string, mp MappedPolicy) bool { pset := mp.policySet() if pset.Contains(policy) { groups = append(groups, g) } - } + return true + }) if len(users) != 0 || len(groups) != 0 { return errPolicyInUse } @@ -1392,16 +1582,48 @@ func filterPolicies(cache *iamCache, policyName string, bucketName string) (stri return strings.Join(policies, ","), policy.MergePolicies(toMerge...) } -// FilterPolicies - accepts a comma separated list of policy names as a string -// and bucket and returns only policies that currently exist in MinIO. If -// bucketName is non-empty, additionally filters policies matching the bucket. -// The first returned value is the list of currently existing policies, and the -// second is their combined policy definition. -func (store *IAMStoreSys) FilterPolicies(policyName string, bucketName string) (string, policy.Policy) { +// MergePolicies - accepts a comma separated list of policy names as a string +// and returns only policies that currently exist in MinIO. It includes hot loading +// of policies if not in the memory +func (store *IAMStoreSys) MergePolicies(policyName string) (string, policy.Policy) { + var policies []string + var missingPolicies []string + var toMerge []policy.Policy + cache := store.rlock() - defer store.runlock() + for _, policy := range newMappedPolicy(policyName).toSlice() { + if policy == "" { + continue + } + p, found := cache.iamPolicyDocsMap[policy] + if !found { + missingPolicies = append(missingPolicies, policy) + continue + } + policies = append(policies, policy) + toMerge = append(toMerge, p.Policy) + } + store.runlock() + + if len(missingPolicies) > 0 { + m := make(map[string]PolicyDoc) + for _, policy := range missingPolicies { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + _ = store.loadPolicyDoc(ctx, policy, m) + cancel() + } + + cache := store.lock() + maps.Copy(cache.iamPolicyDocsMap, m) + store.unlock() + + for policy, p := range m { + policies = append(policies, policy) + toMerge = append(toMerge, p.Policy) + } + } - return filterPolicies(cache, policyName, bucketName) + return strings.Join(policies, ","), policy.MergePolicies(toMerge...) 
} // GetBucketUsers - returns users (not STS or service accounts) that have access @@ -1422,11 +1644,11 @@ func (store *IAMStoreSys) GetBucketUsers(bucket string) (map[string]madmin.UserI continue } var policies []string - mp, ok := cache.iamUserPolicyMap[k] + mp, ok := cache.iamUserPolicyMap.Load(k) if ok { policies = append(policies, mp.Policies) for _, group := range cache.iamUserGroupMemberships[k].ToSlice() { - if nmp, ok := cache.iamGroupPolicyMap[group]; ok { + if nmp, ok := cache.iamGroupPolicyMap.Load(group); ok { policies = append(policies, nmp.Policies) } } @@ -1461,8 +1683,9 @@ func (store *IAMStoreSys) GetUsers() map[string]madmin.UserInfo { if v.IsTemp() || v.IsServiceAccount() { continue } + pl, _ := cache.iamUserPolicyMap.Load(k) result[k] = madmin.UserInfo{ - PolicyName: cache.iamUserPolicyMap[k].Policies, + PolicyName: pl.Policies, Status: func() madmin.AccountStatus { if v.IsValid() { return madmin.AccountEnabled @@ -1470,7 +1693,7 @@ func (store *IAMStoreSys) GetUsers() map[string]madmin.UserInfo { return madmin.AccountDisabled }(), MemberOf: cache.iamUserGroupMemberships[k].ToSlice(), - UpdatedAt: cache.iamUserPolicyMap[k].UpdatedAt, + UpdatedAt: pl.UpdatedAt, } } @@ -1483,12 +1706,14 @@ func (store *IAMStoreSys) GetUsersWithMappedPolicies() map[string]string { defer store.runlock() result := make(map[string]string) - for k, v := range cache.iamUserPolicyMap { + cache.iamUserPolicyMap.Range(func(k string, v MappedPolicy) bool { result[k] = v.Policies - } - for k, v := range cache.iamSTSPolicyMap { + return true + }) + cache.iamSTSPolicyMap.Range(func(k string, v MappedPolicy) bool { result[k] = v.Policies - } + return true + }) return result } @@ -1517,14 +1742,14 @@ func (store *IAMStoreSys) GetUserInfo(name string) (u madmin.UserInfo, err error break } } - mappedPolicy, ok := cache.iamUserPolicyMap[name] + mappedPolicy, ok := cache.iamUserPolicyMap.Load(name) if !ok { - mappedPolicy, ok = cache.iamSTSPolicyMap[name] + mappedPolicy, ok = cache.iamSTSPolicyMap.Load(name) } if !ok { // Attempt to load parent user mapping for STS accounts store.loadMappedPolicy(context.TODO(), name, stsUser, false, cache.iamSTSPolicyMap) - mappedPolicy, ok = cache.iamSTSPolicyMap[name] + mappedPolicy, ok = cache.iamSTSPolicyMap.Load(name) if !ok { return u, errNoSuchUser } @@ -1545,9 +1770,9 @@ func (store *IAMStoreSys) GetUserInfo(name string) (u madmin.UserInfo, err error if cred.IsTemp() || cred.IsServiceAccount() { return u, errIAMActionNotAllowed } - + pl, _ := cache.iamUserPolicyMap.Load(name) return madmin.UserInfo{ - PolicyName: cache.iamUserPolicyMap[name].Policies, + PolicyName: pl.Policies, Status: func() madmin.AccountStatus { if cred.IsValid() { return madmin.AccountEnabled @@ -1555,7 +1780,7 @@ func (store *IAMStoreSys) GetUserInfo(name string) (u madmin.UserInfo, err error return madmin.AccountDisabled }(), MemberOf: cache.iamUserGroupMemberships[name].ToSlice(), - UpdatedAt: cache.iamUserPolicyMap[name].UpdatedAt, + UpdatedAt: pl.UpdatedAt, }, nil } @@ -1568,10 +1793,12 @@ func (store *IAMStoreSys) PolicyMappingNotificationHandler(ctx context.Context, cache := store.lock() defer store.unlock() - var m map[string]MappedPolicy + var m *xsync.MapOf[string, MappedPolicy] switch { case isGroup: m = cache.iamGroupPolicyMap + case userType == stsUser: + m = cache.iamSTSPolicyMap default: m = cache.iamUserPolicyMap } @@ -1579,7 +1806,7 @@ func (store *IAMStoreSys) PolicyMappingNotificationHandler(ctx context.Context, if errors.Is(err, errNoSuchPolicy) { // This means that the 
policy mapping was deleted, so we update // the cache. - delete(m, userOrGroup) + m.Delete(userOrGroup) cache.updatedAt = time.Now() err = nil @@ -1644,7 +1871,7 @@ func (store *IAMStoreSys) UserNotificationHandler(ctx context.Context, accessKey } // 3. Delete any mapped policy - delete(cache.iamUserPolicyMap, accessKey) + cache.iamUserPolicyMap.Delete(accessKey) return nil } @@ -1730,22 +1957,32 @@ func (store *IAMStoreSys) DeleteUser(ctx context.Context, accessKey string, user delete(cache.iamUsersMap, u.AccessKey) case u.IsTemp(): _ = store.deleteUserIdentity(ctx, u.AccessKey, stsUser) + delete(cache.iamSTSAccountsMap, u.AccessKey) delete(cache.iamUsersMap, u.AccessKey) } + if store.group != nil { + store.group.Forget(u.AccessKey) + } } } } // It is ok to ignore deletion error on the mapped policy store.deleteMappedPolicy(ctx, accessKey, userType, false) - delete(cache.iamUserPolicyMap, accessKey) + cache.iamUserPolicyMap.Delete(accessKey) err := store.deleteUserIdentity(ctx, accessKey, userType) if err == errNoSuchUser { // ignore if user is already deleted. err = nil } + if userType == stsUser { + delete(cache.iamSTSAccountsMap, accessKey) + } delete(cache.iamUsersMap, accessKey) + if store.group != nil { + store.group.Forget(accessKey) + } cache.updatedAt = time.Now() @@ -1778,7 +2015,7 @@ func (store *IAMStoreSys) SetTempUser(ctx context.Context, accessKey string, cre return time.Time{}, err } - cache.iamSTSPolicyMap[cred.ParentUser] = mp + cache.iamSTSPolicyMap.Store(cred.ParentUser, mp) } u := newUserIdentity(cred) @@ -1793,6 +2030,50 @@ func (store *IAMStoreSys) SetTempUser(ctx context.Context, accessKey string, cre return u.UpdatedAt, nil } +// RevokeTokens - revokes all temporary credentials, or those with matching type, +// associated with the parent user. +func (store *IAMStoreSys) RevokeTokens(ctx context.Context, parentUser string, tokenRevokeType string) error { + if parentUser == "" { + return errInvalidArgument + } + + cache := store.lock() + defer store.unlock() + + secret, err := getTokenSigningKey() + if err != nil { + return err + } + + var revoked bool + for _, ui := range cache.iamSTSAccountsMap { + if ui.Credentials.ParentUser != parentUser { + continue + } + if tokenRevokeType != "" { + claims, err := getClaimsFromTokenWithSecret(ui.Credentials.SessionToken, secret) + if err != nil { + continue // skip if token is invalid + } + // skip if token type is given and does not match + if v, _ := claims.Lookup(tokenRevokeTypeClaim); v != tokenRevokeType { + continue + } + } + if err := store.deleteUserIdentity(ctx, ui.Credentials.AccessKey, stsUser); err != nil { + return err + } + delete(cache.iamSTSAccountsMap, ui.Credentials.AccessKey) + revoked = true + } + + if revoked { + cache.updatedAt = time.Now() + } + + return nil +} + // DeleteUsers - given a set of users or access keys, deletes them along with // any derived credentials (STS or service accounts) and any associated policy // mappings. @@ -1815,12 +2096,18 @@ func (store *IAMStoreSys) DeleteUsers(ctx context.Context, users []string) error if usersToDelete.Contains(user) || usersToDelete.Contains(cred.ParentUser) { // Delete this user account and its policy mapping store.deleteMappedPolicy(ctx, user, userType, false) - delete(cache.iamUserPolicyMap, user) + cache.iamUserPolicyMap.Delete(user) // we are only logging errors, not handling them. 
err := store.deleteUserIdentity(ctx, user, userType) - logger.LogIf(GlobalContext, err) + iamLogIf(GlobalContext, err) + if userType == stsUser { + delete(cache.iamSTSAccountsMap, user) + } delete(cache.iamUsersMap, user) + if store.group != nil { + store.group.Forget(user) + } deleted = true } @@ -1842,24 +2129,30 @@ type ParentUserInfo struct { // GetAllParentUsers - returns all distinct "parent-users" associated with STS // or service credentials, mapped to all distinct roleARNs associated with the // parent user. The dummy role ARN is associated with parent users from -// policy-claim based OpenID providers. +// policy-claim based OpenID providers. The root credential as a parent +// user is not included in the result. func (store *IAMStoreSys) GetAllParentUsers() map[string]ParentUserInfo { cache := store.rlock() defer store.runlock() + return store.getParentUsers(cache) +} + +// assumes store is locked by caller. +func (store *IAMStoreSys) getParentUsers(cache *iamCache) map[string]ParentUserInfo { res := map[string]ParentUserInfo{} for _, ui := range cache.iamUsersMap { cred := ui.Credentials // Only consider service account or STS credentials with // non-empty session tokens. - if !(cred.IsServiceAccount() || cred.IsTemp()) || + if (!cred.IsServiceAccount() && !cred.IsTemp()) || cred.SessionToken == "" { continue } var ( err error - claims map[string]interface{} = cred.Claims + claims *jwt.MapClaims ) if cred.IsServiceAccount() { @@ -1876,23 +2169,22 @@ func (store *IAMStoreSys) GetAllParentUsers() map[string]ParentUserInfo { if err != nil { continue } - if cred.ParentUser == "" { + if cred.ParentUser == "" || cred.ParentUser == globalActiveCred.AccessKey { continue } subClaimValue := cred.ParentUser - if v, ok := claims[subClaim]; ok { - subFromToken, ok := v.(string) - if ok { - subClaimValue = subFromToken - } + if v, ok := claims.Lookup(subClaim); ok { + subClaimValue = v + } + if v, ok := claims.Lookup(ldapActualUser); ok { + subClaimValue = v } roleArn := openid.DummyRoleARN.String() - s, ok := claims[roleArnClaim] - val, ok2 := s.(string) - if ok && ok2 { - roleArn = val + s, ok := claims.Lookup(roleArnClaim) + if ok { + roleArn = s } v, ok := res[cred.ParentUser] if ok { @@ -1911,46 +2203,102 @@ func (store *IAMStoreSys) GetAllParentUsers() map[string]ParentUserInfo { return res } -// Assumes store is locked by caller. If users is empty, returns all user mappings. -func (store *IAMStoreSys) listUserPolicyMappings(cache *iamCache, users []string, - userPredicate func(string) bool, -) []madmin.UserPolicyEntities { - var r []madmin.UserPolicyEntities - usersSet := set.CreateStringSet(users...) - for user, mappedPolicy := range cache.iamUserPolicyMap { +// GetAllSTSUserMappings - Loads all STS user policy mappings from storage and +// returns them. 
Also gets any STS users that do not have policy mappings but have +// Service Accounts or STS keys (This is useful if the user is part of a group) +func (store *IAMStoreSys) GetAllSTSUserMappings(userPredicate func(string) bool) (map[string]string, error) { + cache := store.rlock() + defer store.runlock() + + stsMap := make(map[string]string) + m := xsync.NewMapOf[string, MappedPolicy]() + if err := store.loadMappedPolicies(context.Background(), stsUser, false, m); err != nil { + return nil, err + } + + m.Range(func(user string, mappedPolicy MappedPolicy) bool { if userPredicate != nil && !userPredicate(user) { - continue + return true } + stsMap[user] = mappedPolicy.Policies + return true + }) - if !usersSet.IsEmpty() && !usersSet.Contains(user) { - continue + for user := range store.getParentUsers(cache) { + if _, ok := stsMap[user]; !ok { + if userPredicate != nil && !userPredicate(user) { + continue + } + stsMap[user] = "" } - - ps := mappedPolicy.toSlice() - sort.Strings(ps) - r = append(r, madmin.UserPolicyEntities{ - User: user, - Policies: ps, - }) } + return stsMap, nil +} + +// Assumes store is locked by caller. If userMap is empty, returns all user mappings. +func (store *IAMStoreSys) listUserPolicyMappings(cache *iamCache, userMap map[string]set.StringSet, + userPredicate func(string) bool, decodeFunc func(string) string, +) []madmin.UserPolicyEntities { + stsMap := xsync.NewMapOf[string, MappedPolicy]() + resMap := make(map[string]madmin.UserPolicyEntities, len(userMap)) - stsMap := map[string]MappedPolicy{} - for _, user := range users { + for user, groupSet := range userMap { // Attempt to load parent user mapping for STS accounts store.loadMappedPolicy(context.TODO(), user, stsUser, false, stsMap) + decodeUser := user + if decodeFunc != nil { + decodeUser = decodeFunc(user) + } + blankEntities := madmin.UserPolicyEntities{User: decodeUser} + if !groupSet.IsEmpty() { + blankEntities.MemberOfMappings = store.listGroupPolicyMappings(cache, groupSet, nil, decodeFunc) + } + resMap[user] = blankEntities } - for user, mappedPolicy := range stsMap { + var r []madmin.UserPolicyEntities + cache.iamUserPolicyMap.Range(func(user string, mappedPolicy MappedPolicy) bool { if userPredicate != nil && !userPredicate(user) { - continue + return true + } + + entitiesWithMemberOf, ok := resMap[user] + if !ok { + if len(userMap) > 0 { + return true + } + decodeUser := user + if decodeFunc != nil { + decodeUser = decodeFunc(user) + } + entitiesWithMemberOf = madmin.UserPolicyEntities{User: decodeUser} } ps := mappedPolicy.toSlice() sort.Strings(ps) - r = append(r, madmin.UserPolicyEntities{ - User: user, - Policies: ps, - }) + entitiesWithMemberOf.Policies = ps + resMap[user] = entitiesWithMemberOf + return true + }) + + stsMap.Range(func(user string, mappedPolicy MappedPolicy) bool { + if userPredicate != nil && !userPredicate(user) { + return true + } + + entitiesWithMemberOf := resMap[user] + + ps := mappedPolicy.toSlice() + sort.Strings(ps) + entitiesWithMemberOf.Policies = ps + resMap[user] = entitiesWithMemberOf + return true + }) + + for _, v := range resMap { + if v.Policies != nil || v.MemberOfMappings != nil { + r = append(r, v) + } } sort.Slice(r, func(i, j int) bool { @@ -1961,27 +2309,33 @@ func (store *IAMStoreSys) listUserPolicyMappings(cache *iamCache, users []string } // Assumes store is locked by caller. If groups is empty, returns all group mappings. 
-func (store *IAMStoreSys) listGroupPolicyMappings(cache *iamCache, groups []string, - groupPredicate func(string) bool, +func (store *IAMStoreSys) listGroupPolicyMappings(cache *iamCache, groupsSet set.StringSet, + groupPredicate func(string) bool, decodeFunc func(string) string, ) []madmin.GroupPolicyEntities { var r []madmin.GroupPolicyEntities - groupsSet := set.CreateStringSet(groups...) - for group, mappedPolicy := range cache.iamGroupPolicyMap { + + cache.iamGroupPolicyMap.Range(func(group string, mappedPolicy MappedPolicy) bool { if groupPredicate != nil && !groupPredicate(group) { - continue + return true } if !groupsSet.IsEmpty() && !groupsSet.Contains(group) { - continue + return true + } + + decodeGroup := group + if decodeFunc != nil { + decodeGroup = decodeFunc(group) } ps := mappedPolicy.toSlice() sort.Strings(ps) r = append(r, madmin.GroupPolicyEntities{ - Group: group, + Group: decodeGroup, Policies: ps, }) - } + return true + }) sort.Slice(r, func(i, j int) bool { return r[i].Group < r[j].Group @@ -1991,15 +2345,18 @@ func (store *IAMStoreSys) listGroupPolicyMappings(cache *iamCache, groups []stri } // Assumes store is locked by caller. If policies is empty, returns all policy mappings. -func (store *IAMStoreSys) listPolicyMappings(cache *iamCache, policies []string, - userPredicate, groupPredicate func(string) bool, +func (store *IAMStoreSys) listPolicyMappings(cache *iamCache, queryPolSet set.StringSet, + userPredicate, groupPredicate func(string) bool, decodeFunc func(string) string, ) []madmin.PolicyEntities { - queryPolSet := set.CreateStringSet(policies...) - policyToUsersMap := make(map[string]set.StringSet) - for user, mappedPolicy := range cache.iamUserPolicyMap { + cache.iamUserPolicyMap.Range(func(user string, mappedPolicy MappedPolicy) bool { if userPredicate != nil && !userPredicate(user) { - continue + return true + } + + decodeUser := user + if decodeFunc != nil { + decodeUser = decodeFunc(user) } commonPolicySet := mappedPolicy.policySet() @@ -2009,13 +2366,14 @@ func (store *IAMStoreSys) listPolicyMappings(cache *iamCache, policies []string, for _, policy := range commonPolicySet.ToSlice() { s, ok := policyToUsersMap[policy] if !ok { - policyToUsersMap[policy] = set.CreateStringSet(user) + policyToUsersMap[policy] = set.CreateStringSet(decodeUser) } else { - s.Add(user) + s.Add(decodeUser) policyToUsersMap[policy] = s } } - } + return true + }) if iamOS, ok := store.IAMStorageAPI.(*IAMObjectStore); ok { for item := range listIAMConfigItems(context.Background(), iamOS.objAPI, iamConfigPrefix+SlashSeparator+policyDBSTSUsersListKey) { @@ -2024,6 +2382,11 @@ func (store *IAMStoreSys) listPolicyMappings(cache *iamCache, policies []string, continue } + decodeUser := user + if decodeFunc != nil { + decodeUser = decodeFunc(user) + } + var mappedPolicy MappedPolicy store.loadIAMConfig(context.Background(), &mappedPolicy, getMappedPolicyPath(user, stsUser, false)) @@ -2034,19 +2397,55 @@ func (store *IAMStoreSys) listPolicyMappings(cache *iamCache, policies []string, for _, policy := range commonPolicySet.ToSlice() { s, ok := policyToUsersMap[policy] if !ok { - policyToUsersMap[policy] = set.CreateStringSet(user) + policyToUsersMap[policy] = set.CreateStringSet(decodeUser) } else { - s.Add(user) + s.Add(decodeUser) policyToUsersMap[policy] = s } } } } + if iamOS, ok := store.IAMStorageAPI.(*IAMEtcdStore); ok { + m := xsync.NewMapOf[string, MappedPolicy]() + err := iamOS.loadMappedPolicies(context.Background(), stsUser, false, m) + if err == nil { + 
m.Range(func(user string, mappedPolicy MappedPolicy) bool { + if userPredicate != nil && !userPredicate(user) { + return true + } + + decodeUser := user + if decodeFunc != nil { + decodeUser = decodeFunc(user) + } + + commonPolicySet := mappedPolicy.policySet() + if !queryPolSet.IsEmpty() { + commonPolicySet = commonPolicySet.Intersection(queryPolSet) + } + for _, policy := range commonPolicySet.ToSlice() { + s, ok := policyToUsersMap[policy] + if !ok { + policyToUsersMap[policy] = set.CreateStringSet(decodeUser) + } else { + s.Add(decodeUser) + policyToUsersMap[policy] = s + } + } + return true + }) + } + } policyToGroupsMap := make(map[string]set.StringSet) - for group, mappedPolicy := range cache.iamGroupPolicyMap { + cache.iamGroupPolicyMap.Range(func(group string, mappedPolicy MappedPolicy) bool { if groupPredicate != nil && !groupPredicate(group) { - continue + return true + } + + decodeGroup := group + if decodeFunc != nil { + decodeGroup = decodeFunc(group) } commonPolicySet := mappedPolicy.policySet() @@ -2056,13 +2455,14 @@ func (store *IAMStoreSys) listPolicyMappings(cache *iamCache, policies []string, for _, policy := range commonPolicySet.ToSlice() { s, ok := policyToGroupsMap[policy] if !ok { - policyToGroupsMap[policy] = set.CreateStringSet(group) + policyToGroupsMap[policy] = set.CreateStringSet(decodeGroup) } else { - s.Add(group) + s.Add(decodeGroup) policyToGroupsMap[policy] = s } } - } + return true + }) m := make(map[string]madmin.PolicyEntities, len(policyToGroupsMap)) for policy, groups := range policyToGroupsMap { @@ -2097,24 +2497,24 @@ func (store *IAMStoreSys) listPolicyMappings(cache *iamCache, policies []string, } // ListPolicyMappings - return users/groups mapped to policies. -func (store *IAMStoreSys) ListPolicyMappings(q madmin.PolicyEntitiesQuery, - userPredicate, groupPredicate func(string) bool, +func (store *IAMStoreSys) ListPolicyMappings(q cleanEntitiesQuery, + userPredicate, groupPredicate func(string) bool, decodeFunc func(string) string, ) madmin.PolicyEntitiesResult { cache := store.rlock() defer store.runlock() var result madmin.PolicyEntitiesResult - isAllPoliciesQuery := len(q.Users) == 0 && len(q.Groups) == 0 && len(q.Policy) == 0 + isAllPoliciesQuery := len(q.Users) == 0 && len(q.Groups) == 0 && len(q.Policies) == 0 if len(q.Users) > 0 { - result.UserMappings = store.listUserPolicyMappings(cache, q.Users, userPredicate) + result.UserMappings = store.listUserPolicyMappings(cache, q.Users, userPredicate, decodeFunc) } if len(q.Groups) > 0 { - result.GroupMappings = store.listGroupPolicyMappings(cache, q.Groups, groupPredicate) + result.GroupMappings = store.listGroupPolicyMappings(cache, q.Groups, groupPredicate, decodeFunc) } - if len(q.Policy) > 0 || isAllPoliciesQuery { - result.PolicyMappings = store.listPolicyMappings(cache, q.Policy, userPredicate, groupPredicate) + if len(q.Policies) > 0 || isAllPoliciesQuery { + result.PolicyMappings = store.listPolicyMappings(cache, q.Policies, userPredicate, groupPredicate, decodeFunc) } return result } @@ -2251,13 +2651,15 @@ func (store *IAMStoreSys) UpdateServiceAccount(ctx context.Context, accessKey st // Extracted session policy name string can be removed as its not useful // at this point. - delete(m, sessionPolicyNameExtracted) + m.Delete(sessionPolicyNameExtracted) + + nosp := opts.sessionPolicy == nil || opts.sessionPolicy.Version == "" && len(opts.sessionPolicy.Statements) == 0 // sessionPolicy is nil and there is embedded policy attached we remove // embedded policy at that point. 
- if _, ok := m[policy.SessionPolicyName]; ok && opts.sessionPolicy == nil { - delete(m, policy.SessionPolicyName) - m[iamPolicyClaimNameSA()] = inheritedPolicyType + if _, ok := m.Lookup(policy.SessionPolicyName); ok && nosp { + m.Delete(policy.SessionPolicyName) + m.Set(iamPolicyClaimNameSA(), inheritedPolicyType) } if opts.sessionPolicy != nil { // session policies is being updated @@ -2265,21 +2667,23 @@ func (store *IAMStoreSys) UpdateServiceAccount(ctx context.Context, accessKey st return updatedAt, err } - policyBuf, err := json.Marshal(opts.sessionPolicy) - if err != nil { - return updatedAt, err - } + if opts.sessionPolicy.Version != "" && len(opts.sessionPolicy.Statements) > 0 { + policyBuf, err := json.Marshal(opts.sessionPolicy) + if err != nil { + return updatedAt, err + } - if len(policyBuf) > 2048 { - return updatedAt, errSessionPolicyTooLarge - } + if len(policyBuf) > maxSVCSessionPolicySize { + return updatedAt, errSessionPolicyTooLarge + } - // Overwrite session policy claims. - m[policy.SessionPolicyName] = base64.StdEncoding.EncodeToString(policyBuf) - m[iamPolicyClaimNameSA()] = embeddedPolicyType + // Overwrite session policy claims. + m.Set(policy.SessionPolicyName, base64.StdEncoding.EncodeToString(policyBuf)) + m.Set(iamPolicyClaimNameSA(), embeddedPolicyType) + } } - cr.SessionToken, err = auth.JWTSignWithAccessKey(accessKey, m, cr.SecretKey) + cr.SessionToken, err = auth.JWTSignWithAccessKey(accessKey, m.Map(), cr.SecretKey) if err != nil { return updatedAt, err } @@ -2371,6 +2775,31 @@ func (store *IAMStoreSys) ListSTSAccounts(ctx context.Context, accessKey string) return stsAccounts, nil } +// ListAccessKeys - lists all access keys (sts/service accounts) +func (store *IAMStoreSys) ListAccessKeys(ctx context.Context) ([]auth.Credentials, error) { + cache := store.rlock() + defer store.runlock() + + accessKeys := store.getSTSAndServiceAccounts(cache) + for i, accessKey := range accessKeys { + accessKeys[i].SecretKey = "" + if accessKey.IsTemp() { + secret, err := getTokenSigningKey() + if err != nil { + return nil, err + } + claims, err := getClaimsFromTokenWithSecret(accessKey.SessionToken, secret) + if err != nil { + continue // ignore invalid session tokens + } + accessKeys[i].Claims = claims.MapClaims + } + accessKeys[i].SessionToken = "" + } + + return accessKeys, nil +} + // AddUser - adds/updates long term user account to storage. func (store *IAMStoreSys) AddUser(ctx context.Context, accessKey string, ureq madmin.AddOrUpdateUserReq) (updatedAt time.Time, err error) { cache := store.lock() @@ -2433,6 +2862,10 @@ func (store *IAMStoreSys) GetSTSAndServiceAccounts() []auth.Credentials { cache := store.rlock() defer store.runlock() + return store.getSTSAndServiceAccounts(cache) +} + +func (store *IAMStoreSys) getSTSAndServiceAccounts(cache *iamCache) []auth.Credentials { var res []auth.Credentials for _, u := range cache.iamUsersMap { cred := u.Credentials @@ -2471,83 +2904,155 @@ func (store *IAMStoreSys) UpdateUserIdentity(ctx context.Context, cred auth.Cred } // LoadUser - attempts to load user info from storage and updates cache. 
-func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) { - cache := store.lock() - defer store.unlock() - - cache.updatedAt = time.Now() +func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error { + groupLoad := env.Get("_MINIO_IAM_GROUP_REFRESH", config.EnableOff) == config.EnableOn - _, found := cache.iamUsersMap[accessKey] + newCachePopulate := func() (val any, err error) { + newCache := newIamCache() - // Check for regular user access key - if !found { - store.loadUser(ctx, accessKey, regUser, cache.iamUsersMap) - if _, found = cache.iamUsersMap[accessKey]; found { - // load mapped policies - store.loadMappedPolicyWithRetry(ctx, accessKey, regUser, false, cache.iamUserPolicyMap, 3) - } - } + // Check for service account first + store.loadUser(ctx, accessKey, svcUser, newCache.iamUsersMap) - // Check for service account - if !found { - store.loadUser(ctx, accessKey, svcUser, cache.iamUsersMap) - var svc UserIdentity - svc, found = cache.iamUsersMap[accessKey] + svc, found := newCache.iamUsersMap[accessKey] if found { // Load parent user and mapped policies. if store.getUsersSysType() == MinIOUsersSysType { - store.loadUser(ctx, svc.Credentials.ParentUser, regUser, cache.iamUsersMap) - store.loadMappedPolicyWithRetry(ctx, svc.Credentials.ParentUser, regUser, false, cache.iamUserPolicyMap, 3) + err = store.loadUser(ctx, svc.Credentials.ParentUser, regUser, newCache.iamUsersMap) + // NOTE: we are not worried about loading errors from policies. + store.loadMappedPolicyWithRetry(ctx, svc.Credentials.ParentUser, regUser, false, newCache.iamUserPolicyMap, 3) } else { - // In case of LDAP the parent user's policy mapping needs to be - // loaded into sts map - store.loadMappedPolicyWithRetry(ctx, svc.Credentials.ParentUser, stsUser, false, cache.iamSTSPolicyMap, 3) + // In case of LDAP the parent user's policy mapping needs to be loaded into sts map + // NOTE: we are not worried about loading errors from policies. + store.loadMappedPolicyWithRetry(ctx, svc.Credentials.ParentUser, stsUser, false, newCache.iamSTSPolicyMap, 3) } } - } - // Check for STS account - stsAccountFound := false - var stsUserCred UserIdentity - if !found { - store.loadUser(ctx, accessKey, stsUser, cache.iamSTSAccountsMap) - if stsUserCred, found = cache.iamSTSAccountsMap[accessKey]; found { - // Load mapped policy - store.loadMappedPolicyWithRetry(ctx, stsUserCred.Credentials.ParentUser, stsUser, false, cache.iamSTSPolicyMap, 3) - stsAccountFound = true + if !found { + err = store.loadUser(ctx, accessKey, regUser, newCache.iamUsersMap) + if _, found = newCache.iamUsersMap[accessKey]; found { + // NOTE: we are not worried about loading errors from policies. + store.loadMappedPolicyWithRetry(ctx, accessKey, regUser, false, newCache.iamUserPolicyMap, 3) + } } - } - // Load any associated policy definitions - if !stsAccountFound { - for _, policy := range cache.iamUserPolicyMap[accessKey].toSlice() { - if _, found = cache.iamPolicyDocsMap[policy]; !found { - store.loadPolicyDocWithRetry(ctx, policy, cache.iamPolicyDocsMap, 3) + // Check for STS account + var stsUserCred UserIdentity + if !found { + err = store.loadUser(ctx, accessKey, stsUser, newCache.iamSTSAccountsMap) + if stsUserCred, found = newCache.iamSTSAccountsMap[accessKey]; found { + // Load mapped policy + // NOTE: we are not worried about loading errors from policies. 
+ store.loadMappedPolicyWithRetry(ctx, stsUserCred.Credentials.ParentUser, stsUser, false, newCache.iamSTSPolicyMap, 3) } } - } else { - for _, policy := range cache.iamSTSPolicyMap[stsUserCred.Credentials.AccessKey].toSlice() { - if _, found = cache.iamPolicyDocsMap[policy]; !found { - store.loadPolicyDocWithRetry(ctx, policy, cache.iamPolicyDocsMap, 3) + + // Load any associated policy definitions + pols, _ := newCache.iamUserPolicyMap.Load(accessKey) + for _, policy := range pols.toSlice() { + if _, found = newCache.iamPolicyDocsMap[policy]; !found { + // NOTE: we are not worried about loading errors from policies. + store.loadPolicyDocWithRetry(ctx, policy, newCache.iamPolicyDocsMap, 3) + } + } + + pols, _ = newCache.iamSTSPolicyMap.Load(stsUserCred.Credentials.AccessKey) + for _, policy := range pols.toSlice() { + if _, found = newCache.iamPolicyDocsMap[policy]; !found { + // NOTE: we are not worried about loading errors from policies. + store.loadPolicyDocWithRetry(ctx, policy, newCache.iamPolicyDocsMap, 3) } } + + if groupLoad { + // NOTE: we are not worried about loading errors from groups. + store.updateGroups(ctx, newCache) + newCache.buildUserGroupMemberships() + } + + return newCache, err + } + + var ( + val any + err error + ) + if store.group != nil { + val, err, _ = store.group.Do(accessKey, newCachePopulate) + } else { + val, err = newCachePopulate() } -} -func extractJWTClaims(u UserIdentity) (*jwt.MapClaims, error) { - jwtClaims, err := auth.ExtractClaims(u.Credentials.SessionToken, u.Credentials.SecretKey) + // Return error right away if any. if err != nil { + if errors.Is(err, errNoSuchUser) || errors.Is(err, errConfigNotFound) { + return nil + } + return err + } + + newCache, ok := val.(*iamCache) + if !ok { + return nil + } + + cache := store.lock() + defer store.unlock() + + // We need to merge the new cache with the existing cache because the + // periodic IAM reload is partial. The periodic load here is to account. 
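Aside: the merge below folds the freshly loaded single-user cache into the long-lived cache, using Range/Store on the xsync maps and maps.Copy (standard-library maps package, added to the imports in this diff) on the plain maps. maps.Copy overwrites duplicate keys, so entries just loaded for this user win while unrelated cached entries are left alone. A tiny sketch with hypothetical data:

package main

import (
	"fmt"
	"maps"
)

func main() {
	// Hypothetical data: existing holds what the periodic refresh loaded,
	// fresh holds what was just fetched for one user.
	existing := map[string]string{"alice": "old-policy", "bob": "readonly"}
	fresh := map[string]string{"alice": "new-policy"}

	// maps.Copy copies every key of fresh into existing, overwriting
	// duplicates, so the freshly loaded entry wins and unrelated keys stay.
	maps.Copy(existing, fresh)

	fmt.Println(existing["alice"], existing["bob"]) // new-policy readonly
}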
+ newCache.iamGroupPolicyMap.Range(func(k string, v MappedPolicy) bool { + cache.iamGroupPolicyMap.Store(k, v) + return true + }) + + maps.Copy(cache.iamGroupsMap, newCache.iamGroupsMap) + + maps.Copy(cache.iamPolicyDocsMap, newCache.iamPolicyDocsMap) + + maps.Copy(cache.iamUserGroupMemberships, newCache.iamUserGroupMemberships) + + newCache.iamUserPolicyMap.Range(func(k string, v MappedPolicy) bool { + cache.iamUserPolicyMap.Store(k, v) + return true + }) + + maps.Copy(cache.iamUsersMap, newCache.iamUsersMap) + + maps.Copy(cache.iamSTSAccountsMap, newCache.iamSTSAccountsMap) + + newCache.iamSTSPolicyMap.Range(func(k string, v MappedPolicy) bool { + cache.iamSTSPolicyMap.Store(k, v) + return true + }) + + cache.updatedAt = time.Now() + + return nil +} + +func extractJWTClaims(u UserIdentity) (jwtClaims *jwt.MapClaims, err error) { + keys := make([]string, 0, 3) + + // Append credentials secret key itself + keys = append(keys, u.Credentials.SecretKey) + + // Use site-replication credentials if found + if globalSiteReplicationSys.isEnabled() { secretKey, err := getTokenSigningKey() if err != nil { return nil, err } - // Session tokens for STS creds will be generated with root secret or site-replicator-0 secret - jwtClaims, err = auth.ExtractClaims(u.Credentials.SessionToken, secretKey) - if err != nil { - return nil, err + keys = append(keys, secretKey) + } + + // Iterate over all keys and return with the first successful claim extraction + for _, key := range keys { + jwtClaims, err = getClaimsFromTokenWithSecret(u.Credentials.SessionToken, key) + if err == nil { + break } } - return jwtClaims, nil + return jwtClaims, err } func validateSvcExpirationInUTC(expirationInUTC time.Time) error { diff --git a/cmd/iam.go b/cmd/iam.go index 23343ebde06dd..18cdc1483963b 100644 --- a/cmd/iam.go +++ b/cmd/iam.go @@ -24,6 +24,7 @@ import ( "encoding/json" "errors" "fmt" + "maps" "math/rand" "path" "sort" @@ -45,11 +46,13 @@ import ( "github.com/minio/minio/internal/config/policy/opa" polplugin "github.com/minio/minio/internal/config/policy/plugin" xhttp "github.com/minio/minio/internal/http" - xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/jwt" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/env" + "github.com/minio/pkg/v3/ldap" + "github.com/minio/pkg/v3/policy" etcd "go.etcd.io/etcd/client/v3" + "golang.org/x/sync/singleflight" ) // UsersSysType - defines the type of users and groups system that is @@ -76,6 +79,10 @@ const ( inheritedPolicyType = "inherited-policy" ) +const ( + maxSVCSessionPolicySize = 4096 +) + // IAMSys - config system. 
type IAMSys struct { // Need to keep them here to keep alignment - ref: https://golang.org/pkg/sync/atomic/#pkg-note-BUG @@ -171,9 +178,21 @@ func (sys *IAMSys) initStore(objAPI ObjectLayer, etcdClient *etcd.Client) { } if etcdClient == nil { - sys.store = &IAMStoreSys{newIAMObjectStore(objAPI, sys.usersSysType)} + var ( + group *singleflight.Group + policy *singleflight.Group + ) + if env.Get("_MINIO_IAM_SINGLE_FLIGHT", config.EnableOn) == config.EnableOn { + group = &singleflight.Group{} + policy = &singleflight.Group{} + } + sys.store = &IAMStoreSys{ + IAMStorageAPI: newIAMObjectStore(objAPI, sys.usersSysType), + group: group, + policy: policy, + } } else { - sys.store = &IAMStoreSys{newIAMEtcdStore(etcdClient, sys.usersSysType)} + sys.store = &IAMStoreSys{IAMStorageAPI: newIAMEtcdStore(etcdClient, sys.usersSysType)} } } @@ -204,18 +223,21 @@ func (sys *IAMSys) Load(ctx context.Context, firstTime bool) error { if !globalSiteReplicatorCred.IsValid() { sa, _, err := sys.getServiceAccount(ctx, siteReplicatorSvcAcc) if err == nil { - globalSiteReplicatorCred.Set(sa.Credentials) + globalSiteReplicatorCred.Set(sa.Credentials.SecretKey) } } if firstTime { bootstrapTraceMsg(fmt.Sprintf("globalIAMSys.Load(): (duration: %s)", loadDuration)) + if globalIsDistErasure { + logger.Info("IAM load(startup) finished. (duration: %s)", loadDuration) + } } select { case <-sys.configLoaded: default: - xioutil.SafeClose(sys.configLoaded) + close(sys.configLoaded) } return nil } @@ -227,62 +249,134 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc s := globalServerConfig globalServerConfigMu.RUnlock() - openidConfig, err := openid.LookupConfig(s, - NewHTTPTransport(), xhttp.DrainBody, globalSite.Region) - if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize OpenID: %w", err)) - } + sys.Lock() + sys.iamRefreshInterval = iamRefreshInterval + sys.Unlock() - // Initialize if LDAP is enabled - ldapConfig, err := xldap.Lookup(s, globalRootCAs) - if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to parse LDAP configuration: %w", err)) - } + r := rand.New(rand.NewSource(time.Now().UnixNano())) + + var ( + openidInit bool + ldapInit bool + authNInit bool + authZInit bool + ) stsTLSConfig, err := xtls.Lookup(s[config.IdentityTLSSubSys][config.Default]) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize X.509/TLS STS API: %w", err)) + iamLogIf(ctx, fmt.Errorf("Unable to initialize X.509/TLS STS API: %w", err), logger.WarningKind) + } else { + if stsTLSConfig.InsecureSkipVerify { + iamLogIf(ctx, fmt.Errorf("Enabling %s is not recommended in a production environment", xtls.EnvIdentityTLSSkipVerify), logger.WarningKind) + } + sys.Lock() + sys.STSTLSConfig = stsTLSConfig + sys.Unlock() } - if stsTLSConfig.InsecureSkipVerify { - logger.LogIf(ctx, fmt.Errorf("CRITICAL: enabling %s is not recommended in a production environment", xtls.EnvIdentityTLSSkipVerify)) - } + for { + if !openidInit { + openidConfig, err := openid.LookupConfig(s, + xhttp.WithUserAgent(NewHTTPTransport(), func() string { + return getUserAgent(getMinioMode()) + }), xhttp.DrainBody, globalSite.Region()) + if err != nil { + iamLogIf(ctx, fmt.Errorf("Unable to initialize OpenID: %w", err), logger.WarningKind) + } else { + openidInit = true + sys.Lock() + sys.OpenIDConfig = openidConfig + sys.Unlock() + } + } - authNPluginCfg, err := idplugin.LookupConfig(s[config.IdentityPluginSubSys][config.Default], - NewHTTPTransport(), xhttp.DrainBody, globalSite.Region) - if err != nil { - 
logger.LogIf(ctx, fmt.Errorf("Unable to initialize AuthNPlugin: %w", err)) - } + if !ldapInit { + // Initialize if LDAP is enabled + ldapConfig, err := xldap.Lookup(s, globalRootCAs) + if err != nil { + iamLogIf(ctx, fmt.Errorf("Unable to load LDAP configuration (LDAP configuration will be disabled!): %w", err), logger.WarningKind) + } else { + ldapInit = true + sys.Lock() + sys.LDAPConfig = ldapConfig + sys.Unlock() + } + } - setGlobalAuthNPlugin(idplugin.New(GlobalContext, authNPluginCfg)) + if !authNInit { + authNPluginCfg, err := idplugin.LookupConfig(s[config.IdentityPluginSubSys][config.Default], + NewHTTPTransport(), xhttp.DrainBody, globalSite.Region()) + if err != nil { + iamLogIf(ctx, fmt.Errorf("Unable to initialize AuthNPlugin: %w", err), logger.WarningKind) + } else { + authNInit = true + setGlobalAuthNPlugin(idplugin.New(GlobalContext, authNPluginCfg)) + } + } - authZPluginCfg, err := polplugin.LookupConfig(s, GetDefaultConnSettings(), xhttp.DrainBody) - if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize AuthZPlugin: %w", err)) - } + if !authZInit { + authZPluginCfg, err := polplugin.LookupConfig(s, GetDefaultConnSettings(), xhttp.DrainBody) + if err != nil { + iamLogIf(ctx, fmt.Errorf("Unable to initialize AuthZPlugin: %w", err), logger.WarningKind) + } else { + authZInit = true + } + if authZPluginCfg.URL == nil { + opaCfg, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default], + NewHTTPTransport(), xhttp.DrainBody) + if err != nil { + iamLogIf(ctx, fmt.Errorf("Unable to initialize AuthZPlugin from legacy OPA config: %w", err)) + } else { + authZPluginCfg.URL = opaCfg.URL + authZPluginCfg.AuthToken = opaCfg.AuthToken + authZPluginCfg.Transport = opaCfg.Transport + authZPluginCfg.CloseRespFn = opaCfg.CloseRespFn + authZInit = true + } + } + if authZInit { + setGlobalAuthZPlugin(polplugin.New(authZPluginCfg)) + } + } - if authZPluginCfg.URL == nil { - opaCfg, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default], - NewHTTPTransport(), xhttp.DrainBody) - if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize AuthZPlugin from legacy OPA config: %w", err)) - } else { - authZPluginCfg.URL = opaCfg.URL - authZPluginCfg.AuthToken = opaCfg.AuthToken - authZPluginCfg.Transport = opaCfg.Transport - authZPluginCfg.CloseRespFn = opaCfg.CloseRespFn + if !openidInit || !ldapInit || !authNInit || !authZInit { + retryInterval := time.Duration(r.Float64() * float64(3*time.Second)) + if !openidInit { + logger.Info("Waiting for OpenID to be initialized.. (retrying in %s)", retryInterval) + } + if !ldapInit { + logger.Info("Waiting for LDAP to be initialized.. (retrying in %s)", retryInterval) + } + if !authNInit { + logger.Info("Waiting for AuthN to be initialized.. (retrying in %s)", retryInterval) + } + if !authZInit { + logger.Info("Waiting for AuthZ to be initialized.. (retrying in %s)", retryInterval) + } + time.Sleep(retryInterval) + continue } - } - setGlobalAuthZPlugin(polplugin.New(authZPluginCfg)) + break + } - sys.Lock() - sys.LDAPConfig = ldapConfig - sys.OpenIDConfig = openidConfig - sys.STSTLSConfig = stsTLSConfig - sys.iamRefreshInterval = iamRefreshInterval // Initialize IAM store + sys.Lock() + sys.initStore(objAPI, etcdClient) + + // Initialize RoleARNs + sys.rolesMap = make(map[arn.ARN]string) + + // From OpenID + maps.Copy(sys.rolesMap, sys.OpenIDConfig.GetRoleInfo()) + + // From AuthN plugin if enabled. 
+ if authn := newGlobalAuthNPluginFn(); authn != nil { + maps.Copy(sys.rolesMap, authn.GetRoleInfo()) + } + + sys.printIAMRoles() sys.Unlock() retryCtx, cancel := context.WithCancel(ctx) @@ -290,62 +384,51 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc // Indicate to our routine to exit cleanly upon return. defer cancel() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - // Migrate storage format if needed. for { // Migrate IAM configuration, if necessary. if err := saveIAMFormat(retryCtx, sys.store); err != nil { if configRetriableErrors(err) { - logger.Info("Waiting for all MinIO IAM sub-system to be initialized.. possible cause (%v)", err) - time.Sleep(time.Duration(r.Float64() * float64(time.Second))) + retryInterval := time.Duration(r.Float64() * float64(time.Second)) + logger.Info("Waiting for all MinIO IAM sub-system to be initialized.. possible cause (%v) (retrying in %s)", err, retryInterval) + time.Sleep(retryInterval) continue } - logger.LogIf(ctx, fmt.Errorf("IAM sub-system is partially initialized, unable to write the IAM format: %w", err)) + iamLogIf(ctx, fmt.Errorf("IAM sub-system is partially initialized, unable to write the IAM format: %w", err), logger.WarningKind) return } break } + cache := sys.store.lock() + setDefaultCannedPolicies(cache.iamPolicyDocsMap) + sys.store.unlock() + // Load IAM data from storage. for { if err := sys.Load(retryCtx, true); err != nil { if configRetriableErrors(err) { - logger.Info("Waiting for all MinIO IAM sub-system to be initialized.. possible cause (%v)", err) - time.Sleep(time.Duration(r.Float64() * float64(time.Second))) + retryInterval := time.Duration(r.Float64() * float64(time.Second)) + logger.Info("Waiting for all MinIO IAM sub-system to be initialized.. possible cause (%v) (retrying in %s)", err, retryInterval) + time.Sleep(retryInterval) continue } if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize IAM sub-system, some users may not be available: %w", err)) + iamLogIf(ctx, fmt.Errorf("Unable to initialize IAM sub-system, some users may not be available: %w", err), logger.WarningKind) } } break } refreshInterval := sys.iamRefreshInterval - go sys.periodicRoutines(ctx, refreshInterval) - // Load RoleARNs - sys.rolesMap = make(map[arn.ARN]string) - - // From OpenID - if riMap := sys.OpenIDConfig.GetRoleInfo(); riMap != nil { - sys.validateAndAddRolePolicyMappings(ctx, riMap) - } - - // From AuthN plugin if enabled. - if authn := newGlobalAuthNPluginFn(); authn != nil { - riMap := authn.GetRoleInfo() - sys.validateAndAddRolePolicyMappings(ctx, riMap) - } - - sys.printIAMRoles() - bootstrapTraceMsg("finishing IAM loading") } +const maxDurationSecondsForLog = 5 + func (sys *IAMSys) periodicRoutines(ctx context.Context, baseInterval time.Duration) { // Watch for IAM config changes for iamStorageWatcher. watcher, isWatcher := sys.store.IAMStorageAPI.(iamStorageWatcher) @@ -355,52 +438,51 @@ func (sys *IAMSys) periodicRoutines(ctx context.Context, baseInterval time.Durat for event := range ch { if err := sys.loadWatchedEvent(ctx, event); err != nil { // we simply log errors - logger.LogIf(ctx, fmt.Errorf("Failure in loading watch event: %v", err)) + iamLogIf(ctx, fmt.Errorf("Failure in loading watch event: %v", err), logger.WarningKind) } } }() } r := rand.New(rand.NewSource(time.Now().UnixNano())) - // Add a random interval of up to 20% of the base interval. 
- randInterval := func() time.Duration { - return time.Duration(r.Float64() * float64(baseInterval) * 0.2) + + // Calculate the waitInterval between periodic refreshes so that each server + // independently picks a (uniformly distributed) random time in an interval + // of size = baseInterval. + // + // For example: + // + // - if baseInterval=10s, then 5s <= waitInterval() < 15s + // + // - if baseInterval=10m, then 5m <= waitInterval() < 15m + waitInterval := func() time.Duration { + // Calculate a random value such that 0 <= value < baseInterval + randAmt := time.Duration(r.Float64() * float64(baseInterval)) + return baseInterval/2 + randAmt } - var maxDurationSecondsForLog float64 = 5 - timer := time.NewTimer(baseInterval + randInterval()) + timer := time.NewTimer(waitInterval()) defer timer.Stop() + lastPurgeHour := -1 for { select { case <-timer.C: // Load all IAM items (except STS creds) periodically. refreshStart := time.Now() if err := sys.Load(ctx, false); err != nil { - logger.LogIf(ctx, fmt.Errorf("Failure in periodic refresh for IAM (took %.2fs): %v", time.Since(refreshStart).Seconds(), err)) + iamLogIf(ctx, fmt.Errorf("Failure in periodic refresh for IAM (duration: %s): %v", time.Since(refreshStart), err), logger.WarningKind) } else { took := time.Since(refreshStart).Seconds() if took > maxDurationSecondsForLog { // Log if we took a lot of time to load. - logger.Info("IAM refresh took %.2fs", took) + logger.Info("IAM refresh took (duration: %.2fs)", took) } } - // The following actions are performed about once in 4 times that - // IAM is refreshed: - if r.Intn(4) == 0 { - // Purge expired STS credentials. - purgeStart := time.Now() - if err := sys.store.PurgeExpiredSTS(ctx); err != nil { - logger.LogIf(ctx, fmt.Errorf("Failure in periodic STS purge for IAM (took %.2fs): %v", time.Since(purgeStart).Seconds(), err)) - } else { - took := time.Since(purgeStart).Seconds() - if took > maxDurationSecondsForLog { - // Log if we took a lot of time to load. - logger.Info("IAM expired STS purge took %.2fs", took) - } - } - + // Run purge routines once in each hour. + if refreshStart.Hour() != lastPurgeHour { + lastPurgeHour = refreshStart.Hour() // Poll and remove accounts for those users who were removed // from LDAP/OpenID. if sys.LDAPConfig.Enabled() { @@ -412,40 +494,13 @@ func (sys *IAMSys) periodicRoutines(ctx context.Context, baseInterval time.Durat } } - timer.Reset(baseInterval + randInterval()) + timer.Reset(waitInterval()) case <-ctx.Done(): return } } } -func (sys *IAMSys) validateAndAddRolePolicyMappings(ctx context.Context, m map[arn.ARN]string) { - // Validate that policies associated with roles are defined. If - // authZ plugin is set, role policies are just claims sent to - // the plugin and they need not exist. - // - // If some mapped policies do not exist, we print some error - // messages but continue any way - they can be fixed in the - // running server by creating the policies after start up. - for arn, rolePolicies := range m { - specifiedPoliciesSet := newMappedPolicy(rolePolicies).policySet() - validPolicies, _ := sys.store.FilterPolicies(rolePolicies, "") - knownPoliciesSet := newMappedPolicy(validPolicies).policySet() - unknownPoliciesSet := specifiedPoliciesSet.Difference(knownPoliciesSet) - if len(unknownPoliciesSet) > 0 { - authz := newGlobalAuthZPluginFn() - if authz == nil { - // Print a warning that some policies mapped to a role are not defined. 
- errMsg := fmt.Errorf( - "The policies \"%s\" mapped to role ARN %s are not defined - this role may not work as expected.", - unknownPoliciesSet.ToSlice(), arn.String()) - logger.LogIf(ctx, errMsg) - } - } - sys.rolesMap[arn] = rolePolicies - } -} - // Prints IAM role ARNs. func (sys *IAMSys) printIAMRoles() { if len(sys.rolesMap) == 0 { @@ -559,10 +614,10 @@ func (sys *IAMSys) DeletePolicy(ctx context.Context, policyName string, notifyPe } // Notify all other MinIO peers to delete policy - for _, nerr := range globalNotificationSys.DeletePolicy(policyName) { + for _, nerr := range globalNotificationSys.DeletePolicy(ctx, policyName) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } @@ -624,16 +679,26 @@ func (sys *IAMSys) SetPolicy(ctx context.Context, policyName string, p policy.Po if !sys.HasWatcher() { // Notify all other MinIO peers to reload policy - for _, nerr := range globalNotificationSys.LoadPolicy(policyName) { + for _, nerr := range globalNotificationSys.LoadPolicy(ctx, policyName) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } } return updatedAt, nil } +// RevokeTokens - revokes all STS tokens, or those of specified type, for a user +// If `tokenRevokeType` is empty, all tokens are revoked. +func (sys *IAMSys) RevokeTokens(ctx context.Context, accessKey, tokenRevokeType string) error { + if !sys.Initialized() { + return errServerNotInitialized + } + + return sys.store.RevokeTokens(ctx, accessKey, tokenRevokeType) +} + // DeleteUser - delete user (only for long-term users not STS users). func (sys *IAMSys) DeleteUser(ctx context.Context, accessKey string, notifyPeers bool) error { if !sys.Initialized() { @@ -646,10 +711,10 @@ func (sys *IAMSys) DeleteUser(ctx context.Context, accessKey string, notifyPeers // Notify all other MinIO peers to delete user. if notifyPeers && !sys.HasWatcher() { - for _, nerr := range globalNotificationSys.DeleteUser(accessKey) { + for _, nerr := range globalNotificationSys.DeleteUser(ctx, accessKey) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } } @@ -665,17 +730,17 @@ func (sys *IAMSys) CurrentPolicies(policyName string) string { return "" } - policies, _ := sys.store.FilterPolicies(policyName, "") + policies, _ := sys.store.MergePolicies(policyName) return policies } func (sys *IAMSys) notifyForUser(ctx context.Context, accessKey string, isTemp bool) { // Notify all other MinIO peers to reload user. 
if !sys.HasWatcher() { - for _, nerr := range globalNotificationSys.LoadUser(accessKey, isTemp) { + for _, nerr := range globalNotificationSys.LoadUser(ctx, accessKey, isTemp) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } } @@ -779,11 +844,15 @@ func (sys *IAMSys) ListLDAPUsers(ctx context.Context) (map[string]madmin.UserInf select { case <-sys.configLoaded: - ldapUsers := make(map[string]madmin.UserInfo) - for user, policy := range sys.store.GetUsersWithMappedPolicies() { + stsMap, err := sys.store.GetAllSTSUserMappings(sys.LDAPConfig.IsLDAPUserDN) + if err != nil { + return nil, err + } + ldapUsers := make(map[string]madmin.UserInfo, len(stsMap)) + for user, policy := range stsMap { ldapUsers[user] = madmin.UserInfo{ PolicyName: policy, - Status: madmin.AccountEnabled, + Status: statusEnabled, } } return ldapUsers, nil @@ -792,6 +861,57 @@ func (sys *IAMSys) ListLDAPUsers(ctx context.Context) (map[string]madmin.UserInf } } +type cleanEntitiesQuery struct { + Users map[string]set.StringSet + Groups set.StringSet + Policies set.StringSet +} + +// createCleanEntitiesQuery - maps users to their groups and normalizes user or group DNs if ldap. +func (sys *IAMSys) createCleanEntitiesQuery(q madmin.PolicyEntitiesQuery, ldap bool) cleanEntitiesQuery { + cleanQ := cleanEntitiesQuery{ + Users: make(map[string]set.StringSet), + Groups: set.CreateStringSet(q.Groups...), + Policies: set.CreateStringSet(q.Policy...), + } + + if ldap { + // Validate and normalize users, then fetch and normalize their groups + // Also include unvalidated users for backward compatibility. + for _, user := range q.Users { + lookupRes, actualGroups, _ := sys.LDAPConfig.GetValidatedDNWithGroups(user) + if lookupRes != nil { + groupSet := set.CreateStringSet(actualGroups...) + + // duplicates can be overwritten, fetched groups should be identical. + cleanQ.Users[lookupRes.NormDN] = groupSet + } + // Search for non-normalized DN as well for backward compatibility. + if _, ok := cleanQ.Users[user]; !ok { + cleanQ.Users[user] = nil + } + } + + // Validate and normalize groups. + for _, group := range q.Groups { + lookupRes, underDN, _ := sys.LDAPConfig.GetValidatedGroupDN(nil, group) + if lookupRes != nil && underDN { + cleanQ.Groups.Add(lookupRes.NormDN) + } + } + } else { + for _, user := range q.Users { + info, err := sys.store.GetUserInfo(user) + var groupSet set.StringSet + if err == nil { + groupSet = set.CreateStringSet(info.MemberOf...) + } + cleanQ.Users[user] = groupSet + } + } + return cleanQ +} + // QueryLDAPPolicyEntities - queries policy associations for LDAP users/groups/policies. 
func (sys *IAMSys) QueryLDAPPolicyEntities(ctx context.Context, q madmin.PolicyEntitiesQuery) (*madmin.PolicyEntitiesResult, error) { if !sys.Initialized() { @@ -804,7 +924,8 @@ func (sys *IAMSys) QueryLDAPPolicyEntities(ctx context.Context, q madmin.PolicyE select { case <-sys.configLoaded: - pe := sys.store.ListPolicyMappings(q, sys.LDAPConfig.IsLDAPUserDN, sys.LDAPConfig.IsLDAPGroupDN) + cleanQuery := sys.createCleanEntitiesQuery(q, true) + pe := sys.store.ListPolicyMappings(cleanQuery, sys.LDAPConfig.IsLDAPUserDN, sys.LDAPConfig.IsLDAPGroupDN, sys.LDAPConfig.DecodeDN) pe.Timestamp = UTCNow() return &pe, nil case <-ctx.Done(): @@ -878,6 +999,7 @@ func (sys *IAMSys) QueryPolicyEntities(ctx context.Context, q madmin.PolicyEntit select { case <-sys.configLoaded: + cleanQuery := sys.createCleanEntitiesQuery(q, false) var userPredicate, groupPredicate func(string) bool if sys.LDAPConfig.Enabled() { userPredicate = func(s string) bool { @@ -887,7 +1009,7 @@ func (sys *IAMSys) QueryPolicyEntities(ctx context.Context, q madmin.PolicyEntit return !sys.LDAPConfig.IsLDAPGroupDN(s) } } - pe := sys.store.ListPolicyMappings(q, userPredicate, groupPredicate) + pe := sys.store.ListPolicyMappings(cleanQuery, userPredicate, groupPredicate, nil) pe.Timestamp = UTCNow() return &pe, nil case <-ctx.Done(): @@ -907,7 +1029,7 @@ func (sys *IAMSys) SetUserStatus(ctx context.Context, accessKey string, status m updatedAt, err = sys.store.SetUserStatus(ctx, accessKey, status) if err != nil { - return + return updatedAt, err } sys.notifyForUser(ctx, accessKey, false) @@ -917,10 +1039,10 @@ func (sys *IAMSys) SetUserStatus(ctx context.Context, accessKey string, status m func (sys *IAMSys) notifyForServiceAccount(ctx context.Context, accessKey string) { // Notify all other Minio peers to reload the service account if !sys.HasWatcher() { - for _, nerr := range globalNotificationSys.LoadServiceAccount(accessKey) { + for _, nerr := range globalNotificationSys.LoadServiceAccount(ctx, accessKey) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } } @@ -934,7 +1056,7 @@ type newServiceAccountOpts struct { expiration *time.Time allowSiteReplicatorAccount bool // allow creating internal service account for site-replication. 
- claims map[string]interface{} + claims map[string]any } // NewServiceAccount - create a new service account @@ -964,7 +1086,7 @@ func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, gro if err != nil { return auth.Credentials{}, time.Time{}, err } - if len(policyBuf) > 2048 { + if len(policyBuf) > maxSVCSessionPolicySize { return auth.Credentials{}, time.Time{}, errSessionPolicyTooLarge } } @@ -977,7 +1099,7 @@ func (sys *IAMSys) NewServiceAccount(ctx context.Context, parentUser string, gro if siteReplicatorSvcAcc == opts.accessKey && !opts.allowSiteReplicatorAccount { return auth.Credentials{}, time.Time{}, errIAMActionNotAllowed } - m := make(map[string]interface{}) + m := make(map[string]any) m[parentClaim] = parentUser if len(policyBuf) > 0 { @@ -1097,6 +1219,20 @@ func (sys *IAMSys) ListSTSAccounts(ctx context.Context, accessKey string) ([]aut } } +// ListAllAccessKeys - lists all access keys (sts/service accounts) +func (sys *IAMSys) ListAllAccessKeys(ctx context.Context) ([]auth.Credentials, error) { + if !sys.Initialized() { + return nil, errServerNotInitialized + } + + select { + case <-sys.configLoaded: + return sys.store.ListAccessKeys(ctx) + case <-ctx.Done(): + return nil, ctx.Err() + } +} + // GetServiceAccount - wrapper method to get information about a service account func (sys *IAMSys) GetServiceAccount(ctx context.Context, accessKey string) (auth.Credentials, *policy.Policy, error) { sa, embeddedPolicy, err := sys.getServiceAccount(ctx, accessKey) @@ -1141,9 +1277,18 @@ func (sys *IAMSys) getServiceAccount(ctx context.Context, accessKey string) (Use // GetTemporaryAccount - wrapper method to get information about a temporary account func (sys *IAMSys) GetTemporaryAccount(ctx context.Context, accessKey string) (auth.Credentials, *policy.Policy, error) { + if !sys.Initialized() { + return auth.Credentials{}, nil, errServerNotInitialized + } tmpAcc, embeddedPolicy, err := sys.getTempAccount(ctx, accessKey) if err != nil { - return auth.Credentials{}, nil, err + if err == errNoSuchTempAccount { + sys.store.LoadUser(ctx, accessKey) + tmpAcc, embeddedPolicy, err = sys.getTempAccount(ctx, accessKey) + } + if err != nil { + return auth.Credentials{}, nil, err + } } // Hide secret & session keys tmpAcc.Credentials.SecretKey = "" @@ -1200,15 +1345,11 @@ func (sys *IAMSys) getAccountWithClaims(ctx context.Context, accessKey string) ( } // GetClaimsForSvcAcc - gets the claims associated with the service account. 
-func (sys *IAMSys) GetClaimsForSvcAcc(ctx context.Context, accessKey string) (map[string]interface{}, error) { +func (sys *IAMSys) GetClaimsForSvcAcc(ctx context.Context, accessKey string) (map[string]any, error) { if !sys.Initialized() { return nil, errServerNotInitialized } - if sys.usersSysType != LDAPUsersSysType { - return nil, nil - } - sa, ok := sys.store.GetUser(accessKey) if !ok || !sa.Credentials.IsServiceAccount() { return nil, errNoSuchServiceAccount @@ -1238,10 +1379,10 @@ func (sys *IAMSys) DeleteServiceAccount(ctx context.Context, accessKey string, n } if notifyPeers && !sys.HasWatcher() { - for _, nerr := range globalNotificationSys.DeleteServiceAccount(accessKey) { + for _, nerr := range globalNotificationSys.DeleteServiceAccount(ctx, accessKey) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } } @@ -1256,14 +1397,14 @@ func (sys *IAMSys) CreateUser(ctx context.Context, accessKey string, ureq madmin return updatedAt, errServerNotInitialized } - if sys.usersSysType != MinIOUsersSysType { - return updatedAt, errIAMActionNotAllowed - } - if !auth.IsAccessKeyValid(accessKey) { return updatedAt, auth.ErrInvalidAccessKeyLength } + if auth.ContainsReservedChars(accessKey) { + return updatedAt, auth.ErrContainsReservedChars + } + if !auth.IsSecretKeyValid(ureq.SecretKey) { return updatedAt, auth.ErrInvalidSecretKeyLength } @@ -1316,14 +1457,14 @@ func (sys *IAMSys) purgeExpiredCredentialsForExternalSSO(ctx context.Context) { roleArns := puInfo.roleArns.ToSlice() var roleArn string if len(roleArns) == 0 { - logger.LogIf(GlobalContext, + iamLogIf(GlobalContext, fmt.Errorf("parentUser: %s had no roleArns mapped!", parentUser)) continue } roleArn = roleArns[0] u, err := sys.OpenIDConfig.LookupUser(roleArn, puInfo.subClaimValue) if err != nil { - logger.LogIf(GlobalContext, err) + iamLogIf(GlobalContext, err) continue } // If user is set to "disabled", we will remove them @@ -1342,18 +1483,23 @@ func (sys *IAMSys) purgeExpiredCredentialsForExternalSSO(ctx context.Context) { func (sys *IAMSys) purgeExpiredCredentialsForLDAP(ctx context.Context) { parentUsers := sys.store.GetAllParentUsers() var allDistNames []string - for parentUser := range parentUsers { + for parentUser, info := range parentUsers { if !sys.LDAPConfig.IsLDAPUserDN(parentUser) { continue } - allDistNames = append(allDistNames, parentUser) + if info.subClaimValue != "" { + // we need to ask LDAP about the actual user DN not normalized DN. + allDistNames = append(allDistNames, info.subClaimValue) + } else { + allDistNames = append(allDistNames, parentUser) + } } expiredUsers, err := sys.LDAPConfig.GetNonEligibleUserDistNames(allDistNames) if err != nil { // Log and return on error - perhaps it'll work the next time. - logger.LogIf(GlobalContext, err) + iamLogIf(GlobalContext, err) return } @@ -1366,11 +1512,13 @@ func (sys *IAMSys) updateGroupMembershipsForLDAP(ctx context.Context) { // 1. Collect all LDAP users with active creds. 
allCreds := sys.store.GetSTSAndServiceAccounts() // List of unique LDAP (parent) user DNs that have active creds - var parentUsers []string - // Map of LDAP user to list of active credential objects + var parentUserActualDNList []string + // Map of LDAP user (internal representation) to list of active credential objects parentUserToCredsMap := make(map[string][]auth.Credentials) // DN to ldap username mapping for each LDAP user - parentUserToLDAPUsernameMap := make(map[string]string) + actualDNToLDAPUsernameMap := make(map[string]string) + // External (actual) LDAP DN to internal normalized representation + actualDNToParentUserMap := make(map[string]string) for _, cred := range allCreds { // Expired credentials don't need parent user updates. if cred.IsExpired() { @@ -1413,34 +1561,38 @@ func (sys *IAMSys) updateGroupMembershipsForLDAP(ctx context.Context) { continue } - ldapUsername, ok := jwtClaims.Lookup(ldapUserN) - if !ok { + ldapUsername, okUserN := jwtClaims.Lookup(ldapUserN) + ldapActualDN, okDN := jwtClaims.Lookup(ldapActualUser) + if !okUserN || !okDN { // skip this cred - we dont have the // username info needed continue } // Collect each new cred.ParentUser into parentUsers - parentUsers = append(parentUsers, cred.ParentUser) + parentUserActualDNList = append(parentUserActualDNList, ldapActualDN) // Update the ldapUsernameMap - parentUserToLDAPUsernameMap[cred.ParentUser] = ldapUsername + actualDNToLDAPUsernameMap[ldapActualDN] = ldapUsername + + // Update the actualDNToParentUserMap + actualDNToParentUserMap[ldapActualDN] = cred.ParentUser } parentUserToCredsMap[cred.ParentUser] = append(parentUserToCredsMap[cred.ParentUser], cred) - } // 2. Query LDAP server for groups of the LDAP users collected. - updatedGroups, err := sys.LDAPConfig.LookupGroupMemberships(parentUsers, parentUserToLDAPUsernameMap) + updatedGroups, err := sys.LDAPConfig.LookupGroupMemberships(parentUserActualDNList, actualDNToLDAPUsernameMap) if err != nil { // Log and return on error - perhaps it'll work the next time. - logger.LogIf(GlobalContext, err) + iamLogIf(GlobalContext, err) return } // 3. Update creds for those users whose groups are changed - for _, parentUser := range parentUsers { - currGroupsSet := updatedGroups[parentUser] + for _, parentActualDN := range parentUserActualDNList { + currGroupsSet := updatedGroups[parentActualDN] + parentUser := actualDNToParentUserMap[parentActualDN] currGroups := currGroupsSet.ToSlice() for _, cred := range parentUserToCredsMap[parentUser] { gSet := set.CreateStringSet(cred.Groups...) @@ -1458,46 +1610,284 @@ func (sys *IAMSys) updateGroupMembershipsForLDAP(ctx context.Context) { cred.Groups = currGroups if err := sys.store.UpdateUserIdentity(ctx, cred); err != nil { // Log and continue error - perhaps it'll work the next time. - logger.LogIf(GlobalContext, err) + iamLogIf(GlobalContext, err) } } } } -// GetUser - get user credentials -func (sys *IAMSys) GetUser(ctx context.Context, accessKey string) (u UserIdentity, ok bool) { +// NormalizeLDAPAccessKeypairs - normalize the access key pairs (service +// accounts) for LDAP users. This normalizes the parent user and the group names +// whenever the parent user parses validly as a DN. 
+func (sys *IAMSys) NormalizeLDAPAccessKeypairs(ctx context.Context, accessKeyMap map[string]madmin.SRSvcAccCreate,
+) (skippedAccessKeys []string, err error) {
+	conn, err := sys.LDAPConfig.LDAP.Connect()
+	if err != nil {
+		return skippedAccessKeys, err
+	}
+	defer conn.Close()
+
+	// Bind to the lookup user account
+	if err = sys.LDAPConfig.LDAP.LookupBind(conn); err != nil {
+		return skippedAccessKeys, err
+	}
+
+	var collectedErrors []error
+	updatedKeysMap := make(map[string]madmin.SRSvcAccCreate)
+	for ak, createReq := range accessKeyMap {
+		parent := createReq.Parent
+		groups := createReq.Groups
+
+		_, err := ldap.NormalizeDN(parent)
+		if err != nil {
+			// not a valid DN, ignore.
+			continue
+		}
+
+		hasDiff := false
+
+		// For the parent value, we require that the parent exists in the LDAP
+		// server and is under a configured base DN.
+		validatedParent, isUnderBaseDN, err := sys.LDAPConfig.GetValidatedUserDN(conn, parent)
+		if err != nil {
+			collectedErrors = append(collectedErrors, fmt.Errorf("could not validate parent exists in LDAP directory: %w", err))
+			continue
+		}
+		if validatedParent == nil || !isUnderBaseDN {
+			skippedAccessKeys = append(skippedAccessKeys, ak)
+			continue
+		}
+
+		if validatedParent.NormDN != parent {
+			hasDiff = true
+		}
+
+		var normalizedGroups []string
+		for _, group := range groups {
+			// For a group, we store the normalized DN even if it is not under a
+			// configured base DN.
+			validatedGroup, _, err := sys.LDAPConfig.GetValidatedGroupDN(conn, group)
+			if err != nil {
+				collectedErrors = append(collectedErrors, fmt.Errorf("could not validate group exists in LDAP directory: %w", err))
+				continue
+			}
+			if validatedGroup == nil {
+				// DN group was not found in the LDAP directory for access-key
+				continue
+			}
+
+			if validatedGroup.NormDN != group {
+				hasDiff = true
+			}
+			normalizedGroups = append(normalizedGroups, validatedGroup.NormDN)
+		}
+
+		if hasDiff {
+			updatedCreateReq := createReq
+			updatedCreateReq.Parent = validatedParent.NormDN
+			updatedCreateReq.Groups = normalizedGroups
+
+			updatedKeysMap[ak] = updatedCreateReq
+		}
+	}
+
+	// if there are any errors, return a collected error.
+	if len(collectedErrors) > 0 {
+		return skippedAccessKeys, fmt.Errorf("errors validating LDAP DN: %w", errors.Join(collectedErrors...))
+	}
+
+	// Replace the map values with the updated ones
+	maps.Copy(accessKeyMap, updatedKeysMap)
+
+	return skippedAccessKeys, nil
+}
+
+func (sys *IAMSys) getStoredLDAPPolicyMappingKeys(ctx context.Context, isGroup bool) set.StringSet {
+	entityKeysInStorage := set.NewStringSet()
+	cache := sys.store.rlock()
+	defer sys.store.runlock()
+	cachedPolicyMap := cache.iamSTSPolicyMap
+	if isGroup {
+		cachedPolicyMap = cache.iamGroupPolicyMap
+	}
+	cachedPolicyMap.Range(func(k string, v MappedPolicy) bool {
+		entityKeysInStorage.Add(k)
+		return true
+	})
+
+	return entityKeysInStorage
+}
+
+// NormalizeLDAPMappingImport - validates the LDAP policy mappings. Keys in the
+// given map may not correspond to LDAP DNs - these keys are ignored.
+//
+// For validated mappings, it updates the key in the given map to be in
+// normalized form.
+func (sys *IAMSys) NormalizeLDAPMappingImport(ctx context.Context, isGroup bool, + policyMap map[string]MappedPolicy, +) ([]string, error) { + conn, err := sys.LDAPConfig.LDAP.Connect() + if err != nil { + return []string{}, err + } + defer conn.Close() + + // Bind to the lookup user account + if err = sys.LDAPConfig.LDAP.LookupBind(conn); err != nil { + return []string{}, err + } + + // We map keys that correspond to LDAP DNs and validate that they exist in + // the LDAP server. + dnValidator := sys.LDAPConfig.GetValidatedUserDN + if isGroup { + dnValidator = sys.LDAPConfig.GetValidatedGroupDN + } + + // map of normalized DN keys to original keys. + normalizedDNKeysMap := make(map[string][]string) + var collectedErrors []error + var skipped []string + for k := range policyMap { + _, err := ldap.NormalizeDN(k) + if err != nil { + // not a valid DN, ignore. + continue + } + validatedDN, underBaseDN, err := dnValidator(conn, k) + if err != nil { + collectedErrors = append(collectedErrors, fmt.Errorf("could not validate `%s` exists in LDAP directory: %w", k, err)) + continue + } + if validatedDN == nil || !underBaseDN { + skipped = append(skipped, k) + continue + } + + if validatedDN.NormDN != k { + normalizedDNKeysMap[validatedDN.NormDN] = append(normalizedDNKeysMap[validatedDN.NormDN], k) + } + } + + // if there are any errors, return a collected error. + if len(collectedErrors) > 0 { + return []string{}, fmt.Errorf("errors validating LDAP DN: %w", errors.Join(collectedErrors...)) + } + + entityKeysInStorage := sys.getStoredLDAPPolicyMappingKeys(ctx, isGroup) + + for normKey, origKeys := range normalizedDNKeysMap { + if len(origKeys) > 1 { + // If there are multiple DN keys that normalize to the same value, + // check if the policy mappings are equal, if they are we don't need + // to return an error. + policiesDiffer := false + firstMappedPolicies := policyMap[origKeys[0]].policySet() + for i := 1; i < len(origKeys); i++ { + otherMappedPolicies := policyMap[origKeys[i]].policySet() + if !firstMappedPolicies.Equals(otherMappedPolicies) { + policiesDiffer = true + break + } + } + + if policiesDiffer { + return []string{}, fmt.Errorf("multiple DNs map to the same LDAP DN[%s]: %v; please remove DNs that are not needed", + normKey, origKeys) + } + + if len(origKeys[1:]) > 0 { + // Log that extra DN mappings will not be imported. + iamLogEvent(ctx, "import-ldap-normalize: extraneous DN mappings found for LDAP DN[%s]: %v will not be imported", origKeys[0], origKeys[1:]) + } + + // Policies mapped to the DN's are the same, so we remove the extra + // ones from the map. + for i := 1; i < len(origKeys); i++ { + delete(policyMap, origKeys[i]) + + // Remove the mapping from storage by setting the policy to "". + if entityKeysInStorage.Contains(origKeys[i]) { + // Ignore any deletion error. + _, delErr := sys.PolicyDBSet(ctx, origKeys[i], "", stsUser, isGroup) + if delErr != nil { + logErr := fmt.Errorf("failed to delete extraneous LDAP DN mapping for `%s`: %w", origKeys[i], delErr) + iamLogIf(ctx, logErr) + } + } + } + } + + // Replacing origKeys[0] with normKey in the policyMap + + // len(origKeys) is always > 0, so here len(origKeys) == 1 + mappingValue := policyMap[origKeys[0]] + delete(policyMap, origKeys[0]) + policyMap[normKey] = mappingValue + iamLogEvent(ctx, "import-ldap-normalize: normalized LDAP DN mapping from `%s` to `%s`", origKeys[0], normKey) + + // Remove the mapping from storage by setting the policy to "". 
+ if entityKeysInStorage.Contains(origKeys[0]) { + // Ignore any deletion error. + _, delErr := sys.PolicyDBSet(ctx, origKeys[0], "", stsUser, isGroup) + if delErr != nil { + logErr := fmt.Errorf("failed to delete extraneous LDAP DN mapping for `%s`: %w", origKeys[0], delErr) + iamLogIf(ctx, logErr) + } + } + } + return skipped, nil +} + +// CheckKey validates the incoming accessKey +func (sys *IAMSys) CheckKey(ctx context.Context, accessKey string) (u UserIdentity, ok bool, err error) { if !sys.Initialized() { - return u, false + return u, false, nil } if accessKey == globalActiveCred.AccessKey { - return newUserIdentity(globalActiveCred), true + return newUserIdentity(globalActiveCred), true, nil } loadUserCalled := false select { case <-sys.configLoaded: default: - sys.store.LoadUser(ctx, accessKey) + err = sys.store.LoadUser(ctx, accessKey) loadUserCalled = true } u, ok = sys.store.GetUser(accessKey) if !ok && !loadUserCalled { - sys.store.LoadUser(ctx, accessKey) + err = sys.store.LoadUser(ctx, accessKey) + loadUserCalled = true + u, ok = sys.store.GetUser(accessKey) } - return u, ok && u.Credentials.IsValid() + if !ok && loadUserCalled && err != nil { + iamLogOnceIf(ctx, err, accessKey) + + // return 503 to application + return u, false, errIAMNotInitialized + } + + return u, ok && u.Credentials.IsValid(), nil +} + +// GetUser - get user credentials +func (sys *IAMSys) GetUser(ctx context.Context, accessKey string) (u UserIdentity, ok bool) { + u, ok, _ = sys.CheckKey(ctx, accessKey) + return u, ok } // Notify all other MinIO peers to load group. func (sys *IAMSys) notifyForGroup(ctx context.Context, group string) { if !sys.HasWatcher() { - for _, nerr := range globalNotificationSys.LoadGroup(group) { + for _, nerr := range globalNotificationSys.LoadGroup(ctx, group) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } } @@ -1510,8 +1900,8 @@ func (sys *IAMSys) AddUsersToGroup(ctx context.Context, group string, members [] return updatedAt, errServerNotInitialized } - if sys.usersSysType != MinIOUsersSysType { - return updatedAt, errIAMActionNotAllowed + if auth.ContainsReservedChars(group) { + return updatedAt, errGroupNameContainsReservedChars } updatedAt, err = sys.store.AddUsersToGroup(ctx, group, members) @@ -1585,7 +1975,9 @@ func (sys *IAMSys) ListGroups(ctx context.Context) (r []string, err error) { } } -// PolicyDBSet - sets a policy for a user or group in the PolicyDB - the user doesn't have to exist since sometimes they are virtuals +// PolicyDBSet - sets a policy for a user or group in the PolicyDB. This does +// not validate if the user/group exists - that is the responsibility of the +// caller. 
func (sys *IAMSys) PolicyDBSet(ctx context.Context, name, policy string, userType IAMUserType, isGroup bool) (updatedAt time.Time, err error) { if !sys.Initialized() { return updatedAt, errServerNotInitialized @@ -1593,15 +1985,15 @@ func (sys *IAMSys) PolicyDBSet(ctx context.Context, name, policy string, userTyp updatedAt, err = sys.store.PolicyDBSet(ctx, name, policy, userType, isGroup) if err != nil { - return + return updatedAt, err } // Notify all other MinIO peers to reload policy if !sys.HasWatcher() { - for _, nerr := range globalNotificationSys.LoadPolicyMapping(name, userType, isGroup) { + for _, nerr := range globalNotificationSys.LoadPolicyMapping(ctx, name, userType, isGroup) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } } @@ -1616,7 +2008,7 @@ func (sys *IAMSys) PolicyDBUpdateBuiltin(ctx context.Context, isAttach bool, ) (updatedAt time.Time, addedOrRemoved, effectivePolicies []string, err error) { if !sys.Initialized() { err = errServerNotInitialized - return + return updatedAt, addedOrRemoved, effectivePolicies, err } userOrGroup := r.User @@ -1629,24 +2021,24 @@ func (sys *IAMSys) PolicyDBUpdateBuiltin(ctx context.Context, isAttach bool, if isGroup { _, err = sys.GetGroupDescription(userOrGroup) if err != nil { - return + return updatedAt, addedOrRemoved, effectivePolicies, err } } else { var isTemp bool isTemp, _, err = sys.IsTempUser(userOrGroup) if err != nil && err != errNoSuchUser { - return + return updatedAt, addedOrRemoved, effectivePolicies, err } if isTemp { err = errIAMActionNotAllowed - return + return updatedAt, addedOrRemoved, effectivePolicies, err } // When the user is root credential you are not allowed to // add policies for root user. if userOrGroup == globalActiveCred.AccessKey { err = errIAMActionNotAllowed - return + return updatedAt, addedOrRemoved, effectivePolicies, err } // Validate that user exists. 
@@ -1654,27 +2046,27 @@ func (sys *IAMSys) PolicyDBUpdateBuiltin(ctx context.Context, isAttach bool, _, userExists = sys.GetUser(ctx, userOrGroup) if !userExists { err = errNoSuchUser - return + return updatedAt, addedOrRemoved, effectivePolicies, err } } updatedAt, addedOrRemoved, effectivePolicies, err = sys.store.PolicyDBUpdate(ctx, userOrGroup, isGroup, regUser, r.Policies, isAttach) if err != nil { - return + return updatedAt, addedOrRemoved, effectivePolicies, err } // Notify all other MinIO peers to reload policy if !sys.HasWatcher() { - for _, nerr := range globalNotificationSys.LoadPolicyMapping(userOrGroup, regUser, isGroup) { + for _, nerr := range globalNotificationSys.LoadPolicyMapping(ctx, userOrGroup, regUser, isGroup) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } } - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemPolicyMapping, PolicyMapping: &madmin.SRPolicyMapping{ UserOrGroup: userOrGroup, @@ -1685,7 +2077,7 @@ func (sys *IAMSys) PolicyDBUpdateBuiltin(ctx context.Context, isAttach bool, UpdatedAt: updatedAt, })) - return + return updatedAt, addedOrRemoved, effectivePolicies, err } // PolicyDBUpdateLDAP - adds or removes policies from a user or a group verified @@ -1695,60 +2087,82 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool, ) (updatedAt time.Time, addedOrRemoved, effectivePolicies []string, err error) { if !sys.Initialized() { err = errServerNotInitialized - return + return updatedAt, addedOrRemoved, effectivePolicies, err } var dn string + var dnResult *ldap.DNSearchResult var isGroup bool if r.User != "" { - dn, err = sys.LDAPConfig.DoesUsernameExist(r.User) + dnResult, err = sys.LDAPConfig.GetValidatedDNForUsername(r.User) if err != nil { - logger.LogIf(ctx, err) - return + iamLogIf(ctx, err) + return updatedAt, addedOrRemoved, effectivePolicies, err } - if dn == "" { - // Still attempt to detach if provided user is a DN. + if dnResult == nil { + // dn not found - still attempt to detach if provided user is a DN. if !isAttach && sys.LDAPConfig.IsLDAPUserDN(r.User) { - dn = r.User + dn = sys.LDAPConfig.QuickNormalizeDN(r.User) } else { err = errNoSuchUser - return + return updatedAt, addedOrRemoved, effectivePolicies, err } + } else { + dn = dnResult.NormDN } isGroup = false } else { - if isAttach { - var exists bool - if exists, err = sys.LDAPConfig.DoesGroupDNExist(r.Group); err != nil { - logger.LogIf(ctx, err) - return - } else if !exists { + var underBaseDN bool + if dnResult, underBaseDN, err = sys.LDAPConfig.GetValidatedGroupDN(nil, r.Group); err != nil { + iamLogIf(ctx, err) + return updatedAt, addedOrRemoved, effectivePolicies, err + } + if dnResult == nil || !underBaseDN { + if !isAttach { + dn = sys.LDAPConfig.QuickNormalizeDN(r.Group) + } else { err = errNoSuchGroup - return + return updatedAt, addedOrRemoved, effectivePolicies, err } + } else { + // We use the group DN returned by the LDAP server (this may not + // equal the input group name, but we assume it is canonical). + dn = dnResult.NormDN } - dn = r.Group isGroup = true } + // Backward compatibility in detaching non-normalized DNs. 
+ if !isAttach { + var oldDN string + if isGroup { + oldDN = r.Group + } else { + oldDN = r.User + } + if oldDN != dn { + sys.store.PolicyDBUpdate(ctx, oldDN, isGroup, stsUser, r.Policies, isAttach) + } + } + userType := stsUser - updatedAt, addedOrRemoved, effectivePolicies, err = sys.store.PolicyDBUpdate(ctx, dn, isGroup, - userType, r.Policies, isAttach) + updatedAt, addedOrRemoved, effectivePolicies, err = sys.store.PolicyDBUpdate( + ctx, dn, isGroup, userType, r.Policies, isAttach) if err != nil { - return + return updatedAt, addedOrRemoved, effectivePolicies, err } // Notify all other MinIO peers to reload policy if !sys.HasWatcher() { - for _, nerr := range globalNotificationSys.LoadPolicyMapping(dn, userType, isGroup) { + for _, nerr := range globalNotificationSys.LoadPolicyMapping(ctx, dn, userType, isGroup) { if nerr.Err != nil { logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) - logger.LogIf(ctx, nerr.Err) + iamLogIf(ctx, nerr.Err) } } } - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemPolicyMapping, PolicyMapping: &madmin.SRPolicyMapping{ UserOrGroup: dn, @@ -1759,7 +2173,7 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool, UpdatedAt: updatedAt, })) - return + return updatedAt, addedOrRemoved, effectivePolicies, err } // PolicyDBGet - gets policy set on a user or group. If a list of groups is @@ -1811,16 +2225,15 @@ func (sys *IAMSys) IsAllowedServiceAccount(args policy.Args, parentUser string) case roleArn != "": arn, err := arn.Parse(roleArn) if err != nil { - logger.LogIf(GlobalContext, fmt.Errorf("error parsing role ARN %s: %v", roleArn, err)) + iamLogIf(GlobalContext, fmt.Errorf("error parsing role ARN %s: %v", roleArn, err)) return false } svcPolicies = newMappedPolicy(sys.rolesMap[arn]).toSlice() - default: // Check policy for parent user of service account. svcPolicies, err = sys.PolicyDBGet(parentUser, args.Groups...) if err != nil { - logger.LogIf(GlobalContext, err) + iamLogIf(GlobalContext, err) return false } @@ -1840,7 +2253,7 @@ func (sys *IAMSys) IsAllowedServiceAccount(args policy.Args, parentUser string) var combinedPolicy policy.Policy // Policies were found, evaluate all of them. if !isOwnerDerived { - availablePoliciesStr, c := sys.store.FilterPolicies(strings.Join(svcPolicies, ","), "") + availablePoliciesStr, c := sys.store.MergePolicies(strings.Join(svcPolicies, ",")) if availablePoliciesStr == "" { return false } @@ -1895,7 +2308,7 @@ func (sys *IAMSys) IsAllowedSTS(args policy.Args, parentUser string) bool { // If a roleARN is present, the role policy is applied. arn, err := arn.Parse(roleArn) if err != nil { - logger.LogIf(GlobalContext, fmt.Errorf("error parsing role ARN %s: %v", roleArn, err)) + iamLogIf(GlobalContext, fmt.Errorf("error parsing role ARN %s: %v", roleArn, err)) return false } policies = newMappedPolicy(sys.rolesMap[arn]).toSlice() @@ -1903,9 +2316,9 @@ func (sys *IAMSys) IsAllowedSTS(args policy.Args, parentUser string) bool { default: // Otherwise, inherit parent user's policy var err error - policies, err = sys.store.PolicyDBGet(parentUser, args.Groups...) + policies, err = sys.PolicyDBGet(parentUser, args.Groups...) 
if err != nil { - logger.LogIf(GlobalContext, fmt.Errorf("error fetching policies on %s: %v", parentUser, err)) + iamLogIf(GlobalContext, fmt.Errorf("error fetching policies on %s: %v", parentUser, err)) return false } @@ -1921,7 +2334,6 @@ func (sys *IAMSys) IsAllowedSTS(args policy.Args, parentUser string) bool { } policies = policySet.ToSlice() } - } // Defensive code: Do not allow any operation if no policy is found in the session token @@ -1932,22 +2344,16 @@ func (sys *IAMSys) IsAllowedSTS(args policy.Args, parentUser string) bool { // 2. Combine the mapped policies into a single combined policy. var combinedPolicy policy.Policy + // Policies were found, evaluate all of them. if !isOwnerDerived { - var err error - combinedPolicy, err = sys.store.GetPolicy(strings.Join(policies, ",")) - if errors.Is(err, errNoSuchPolicy) { - for _, pname := range policies { - _, err := sys.store.GetPolicy(pname) - if errors.Is(err, errNoSuchPolicy) { - // all policies presented in the claim should exist - logger.LogIf(GlobalContext, fmt.Errorf("expected policy (%s) missing from the JWT claim %s, rejecting the request", pname, iamPolicyClaimNameOpenID())) - return false - } - } - logger.LogIf(GlobalContext, fmt.Errorf("all policies were unexpectedly present!")) + availablePoliciesStr, c := sys.store.MergePolicies(strings.Join(policies, ",")) + if availablePoliciesStr == "" { + // all policies presented in the claim should exist + iamLogIf(GlobalContext, fmt.Errorf("expected policy (%s) missing from the JWT claim %s, rejecting the request", policies, iamPolicyClaimNameOpenID())) + return false } - + combinedPolicy = c } // 3. If an inline session-policy is present, evaluate it. @@ -1970,7 +2376,7 @@ func isAllowedBySessionPolicyForServiceAccount(args policy.Args) (hasSessionPoli // Now check if we have a sessionPolicy. spolicy, ok := args.Claims[sessionPolicyNameExtracted] if !ok { - return + return hasSessionPolicy, isAllowed } hasSessionPolicy = true @@ -1979,46 +2385,41 @@ func isAllowedBySessionPolicyForServiceAccount(args policy.Args) (hasSessionPoli if !ok { // Sub policy if set, should be a string reject // malformed/malicious requests. - return + return hasSessionPolicy, isAllowed } // Check if policy is parseable. subPolicy, err := policy.ParseConfig(bytes.NewReader([]byte(spolicyStr))) if err != nil { // Log any error in input session policy config. - logger.LogIf(GlobalContext, err) - return + iamLogIf(GlobalContext, err) + return hasSessionPolicy, isAllowed } // SPECIAL CASE: For service accounts, any valid JSON is allowed as a // policy, regardless of whether the number of statements is 0, this // includes `null`, `{}` and `{"Statement": null}`. In fact, MinIO Console // sends `null` when no policy is set and the intended behavior is that the - // service account should inherit parent policy. - // - // However, for a policy like `{"Statement":[]}`, the intention is to not - // provide any permissions via the session policy - i.e. the service account - // can do nothing (such a JSON could be generated by an external application - // as the policy for the service account). Inheriting the parent policy in - // such a case, is a security issue. Ideally, we should not allow such - // behavior, but for compatibility with the Console, we currently allow it. - // - // TODO: - // - // 1. fix console behavior and allow this inheritance for service accounts - // created before a certain (TBD) future date. - // - // 2. do not allow empty statement policies for service accounts. 
+ // service account should inherit parent policy. So when policy is empty in + // all fields we return hasSessionPolicy=false. if subPolicy.Version == "" && subPolicy.Statements == nil && subPolicy.ID == "" { hasSessionPolicy = false - return + return hasSessionPolicy, isAllowed } // As the session policy exists, even if the parent is the root account, it // must be restricted by it. So, we set `.IsOwner` to false here // unconditionally. + // + // We also set `DenyOnly` arg to false here - this is an IMPORTANT corner + // case: DenyOnly is used only for allowing an account to do actions related + // to its own account (like create service accounts for itself, among + // others). However when a session policy is present, we need to validate + // that the action is actually allowed, rather than checking if the action + // is only disallowed. sessionPolicyArgs := args sessionPolicyArgs.IsOwner = false + sessionPolicyArgs.DenyOnly = false // Sub policy is set and valid. return hasSessionPolicy, subPolicy.IsAllowed(sessionPolicyArgs) @@ -2031,7 +2432,7 @@ func isAllowedBySessionPolicy(args policy.Args) (hasSessionPolicy bool, isAllowe // Now check if we have a sessionPolicy. spolicy, ok := args.Claims[sessionPolicyNameExtracted] if !ok { - return + return hasSessionPolicy, isAllowed } hasSessionPolicy = true @@ -2040,27 +2441,35 @@ func isAllowedBySessionPolicy(args policy.Args) (hasSessionPolicy bool, isAllowe if !ok { // Sub policy if set, should be a string reject // malformed/malicious requests. - return + return hasSessionPolicy, isAllowed } // Check if policy is parseable. subPolicy, err := policy.ParseConfig(bytes.NewReader([]byte(spolicyStr))) if err != nil { // Log any error in input session policy config. - logger.LogIf(GlobalContext, err) - return + iamLogIf(GlobalContext, err) + return hasSessionPolicy, isAllowed } // Policy without Version string value reject it. if subPolicy.Version == "" { - return + return hasSessionPolicy, isAllowed } // As the session policy exists, even if the parent is the root account, it // must be restricted by it. So, we set `.IsOwner` to false here // unconditionally. + // + // We also set `DenyOnly` arg to false here - this is an IMPORTANT corner + // case: DenyOnly is used only for allowing an account to do actions related + // to its own account (like create service accounts for itself, among + // others). However when a session policy is present, we need to validate + // that the action is actually allowed, rather than checking if the action + // is only disallowed. sessionPolicyArgs := args sessionPolicyArgs.IsOwner = false + sessionPolicyArgs.DenyOnly = false // Sub policy is set and valid. 
return hasSessionPolicy, subPolicy.IsAllowed(sessionPolicyArgs) @@ -2068,7 +2477,7 @@ func isAllowedBySessionPolicy(args policy.Args) (hasSessionPolicy bool, isAllowe // GetCombinedPolicy returns a combined policy combining all policies func (sys *IAMSys) GetCombinedPolicy(policies ...string) policy.Policy { - _, policy := sys.store.FilterPolicies(strings.Join(policies, ","), "") + _, policy := sys.store.MergePolicies(strings.Join(policies, ",")) return policy } @@ -2085,7 +2494,7 @@ func (sys *IAMSys) IsAllowed(args policy.Args) bool { if authz := newGlobalAuthZPluginFn(); authz != nil { ok, err := authz.IsAllowed(args) if err != nil { - logger.LogIf(GlobalContext, err) + authZLogIf(GlobalContext, err) } return ok } diff --git a/cmd/is-dir-empty_linux.go b/cmd/is-dir-empty_linux.go index c205955f89ae5..99945d671689b 100644 --- a/cmd/is-dir-empty_linux.go +++ b/cmd/is-dir-empty_linux.go @@ -25,22 +25,19 @@ import ( ) // Returns true if no error and there is no object or prefix inside this directory -func isDirEmpty(dirname string) bool { - var stat syscall.Stat_t - if err := syscall.Stat(dirname, &stat); err != nil { - return false - } - if stat.Mode&syscall.S_IFMT == syscall.S_IFDIR && stat.Nlink == 2 { - return true - } - // On filesystems such as btrfs, nfs this is not true, so fallback - // to performing readdir() instead. - if stat.Mode&syscall.S_IFMT == syscall.S_IFDIR && stat.Nlink < 2 { +func isDirEmpty(dirname string, legacy bool) bool { + if legacy { + // On filesystems such as btrfs, nfs this is not true, so fallback + // to performing readdir() instead. entries, err := readDirN(dirname, 1) if err != nil { return false } return len(entries) == 0 } - return false + var stat syscall.Stat_t + if err := syscall.Stat(dirname, &stat); err != nil { + return false + } + return stat.Mode&syscall.S_IFMT == syscall.S_IFDIR && stat.Nlink == 2 } diff --git a/cmd/is-dir-empty_other.go b/cmd/is-dir-empty_other.go index e9ccdc0211642..ab7b2f7e8c5e4 100644 --- a/cmd/is-dir-empty_other.go +++ b/cmd/is-dir-empty_other.go @@ -21,7 +21,7 @@ package cmd // isDirEmpty - returns true if there is no error and no object and prefix inside this directory -func isDirEmpty(dirname string) bool { +func isDirEmpty(dirname string, _ bool) bool { entries, err := readDirN(dirname, 1) if err != nil { return false diff --git a/cmd/jwt.go b/cmd/jwt.go index 39740bb7053a8..c86b8f676ab60 100644 --- a/cmd/jwt.go +++ b/cmd/jwt.go @@ -19,16 +19,15 @@ package cmd import ( "errors" + "maps" "net/http" "time" jwtgo "github.com/golang-jwt/jwt/v4" jwtreq "github.com/golang-jwt/jwt/v4/request" - lru "github.com/hashicorp/golang-lru" "github.com/minio/minio/internal/auth" xjwt "github.com/minio/minio/internal/jwt" - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) const ( @@ -37,8 +36,8 @@ const ( // Default JWT token for web handlers is one day. defaultJWTExpiry = 24 * time.Hour - // Inter-node JWT token expiry is 15 minutes. - defaultInterNodeJWTExpiry = 15 * time.Minute + // Inter-node JWT token expiry is 100 years approx. + defaultInterNodeJWTExpiry = 100 * 365 * 24 * time.Hour ) var ( @@ -50,40 +49,10 @@ var ( errMalformedAuth = errors.New("Malformed authentication input") ) -// cachedAuthenticateNode will cache authenticateNode results for given values up to ttl. 
-func cachedAuthenticateNode(ttl time.Duration) func(accessKey, secretKey, audience string) (string, error) { - type key struct { - accessKey, secretKey, audience string - } - type value struct { - created time.Time - res string - err error - } - cache, err := lru.NewARC(100) - if err != nil { - logger.LogIf(GlobalContext, err) - return authenticateNode - } - return func(accessKey, secretKey, audience string) (string, error) { - k := key{accessKey: accessKey, secretKey: secretKey, audience: audience} - v, ok := cache.Get(k) - if ok { - if val, ok := v.(*value); ok && time.Since(val.created) < ttl { - return val.res, val.err - } - } - s, err := authenticateNode(accessKey, secretKey, audience) - cache.Add(k, &value{created: time.Now(), res: s, err: err}) - return s, err - } -} - -func authenticateNode(accessKey, secretKey, audience string) (string, error) { +func authenticateNode(accessKey, secretKey string) (string, error) { claims := xjwt.NewStandardClaims() claims.SetExpiry(UTCNow().Add(defaultInterNodeJWTExpiry)) claims.SetAccessKey(accessKey) - claims.SetAudience(audience) jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, claims) return jwt.SignedString([]byte(secretKey)) @@ -142,9 +111,7 @@ func metricsRequestAuthenticate(req *http.Request) (*xjwt.MapClaims, []string, b return nil, nil, false, errAuthentication } - for k, v := range eclaims { - claims.MapClaims[k] = v - } + maps.Copy(claims.MapClaims, eclaims) // if root access is disabled, disable all its service accounts and temporary credentials. if ucred.ParentUser == globalActiveCred.AccessKey && !globalAPIConfig.permitRootAccess() { @@ -164,14 +131,9 @@ func metricsRequestAuthenticate(req *http.Request) (*xjwt.MapClaims, []string, b return claims, groups, owner, nil } -// newCachedAuthToken returns a token that is cached up to 15 seconds. -// If globalActiveCred is updated it is reflected at once. -func newCachedAuthToken() func(audience string) string { - fn := cachedAuthenticateNode(15 * time.Second) - return func(audience string) string { - cred := globalActiveCred - token, err := fn(cred.AccessKey, cred.SecretKey, audience) - logger.CriticalIf(GlobalContext, err) - return token +// newCachedAuthToken returns the cached token. +func newCachedAuthToken() func() string { + return func() string { + return globalNodeAuthToken } } diff --git a/cmd/jwt_test.go b/cmd/jwt_test.go index 2e74547f0c6da..a45ce35cb334b 100644 --- a/cmd/jwt_test.go +++ b/cmd/jwt_test.go @@ -22,7 +22,6 @@ import ( "net/http" "os" "testing" - "time" jwtgo "github.com/golang-jwt/jwt/v4" xjwt "github.com/minio/minio/internal/jwt" @@ -38,7 +37,7 @@ func getTokenString(accessKey, secretKey string) (string, error) { // Tests web request authenticator. 
func TestWebRequestAuthenticate(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() obj, fsDir, err := prepareFS(ctx) @@ -95,7 +94,7 @@ func TestWebRequestAuthenticate(t *testing.T) { } func BenchmarkParseJWTStandardClaims(b *testing.B) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(b.Context()) defer cancel() obj, fsDir, err := prepareFS(ctx) @@ -108,7 +107,7 @@ func BenchmarkParseJWTStandardClaims(b *testing.B) { } creds := globalActiveCred - token, err := authenticateNode(creds.AccessKey, creds.SecretKey, "") + token, err := authenticateNode(creds.AccessKey, creds.SecretKey) if err != nil { b.Fatal(err) } @@ -126,7 +125,7 @@ func BenchmarkParseJWTStandardClaims(b *testing.B) { } func BenchmarkParseJWTMapClaims(b *testing.B) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(b.Context()) defer cancel() obj, fsDir, err := prepareFS(ctx) @@ -139,7 +138,7 @@ func BenchmarkParseJWTMapClaims(b *testing.B) { } creds := globalActiveCred - token, err := authenticateNode(creds.AccessKey, creds.SecretKey, "") + token, err := authenticateNode(creds.AccessKey, creds.SecretKey) if err != nil { b.Fatal(err) } @@ -159,7 +158,7 @@ func BenchmarkParseJWTMapClaims(b *testing.B) { } func BenchmarkAuthenticateNode(b *testing.B) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(b.Context()) defer cancel() obj, fsDir, err := prepareFS(ctx) @@ -176,16 +175,16 @@ func BenchmarkAuthenticateNode(b *testing.B) { fn := authenticateNode b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { - fn(creds.AccessKey, creds.SecretKey, "aud") + for b.Loop() { + fn(creds.AccessKey, creds.SecretKey) } }) b.Run("cached", func(b *testing.B) { - fn := cachedAuthenticateNode(time.Second) + fn := newCachedAuthToken() b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { - fn(creds.AccessKey, creds.SecretKey, "aud") + for b.Loop() { + fn() } }) } diff --git a/cmd/kms-handlers.go b/cmd/kms-handlers.go index e496b484a77d7..ce5017c1f2536 100644 --- a/cmd/kms-handlers.go +++ b/cmd/kms-handlers.go @@ -20,16 +20,13 @@ package cmd import ( "crypto/subtle" "encoding/json" - "io" "net/http" - "strings" - "time" - "github.com/minio/kms-go/kes" "github.com/minio/madmin-go/v3" + "github.com/minio/minio/internal/auth" "github.com/minio/minio/internal/kms" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) // KMSStatusHandler - GET /minio/kms/v1/status @@ -46,22 +43,12 @@ func (a kmsAPIHandlers) KMSStatusHandler(w http.ResponseWriter, r *http.Request) return } - stat, err := GlobalKMS.Stat(ctx) + stat, err := GlobalKMS.Status(ctx) if err != nil { writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL) return } - - status := madmin.KMSStatus{ - Name: stat.Name, - DefaultKeyID: stat.DefaultKey, - Endpoints: make(map[string]madmin.ItemState, len(stat.Endpoints)), - } - for _, endpoint := range stat.Endpoints { - status.Endpoints[endpoint] = madmin.ItemOnline // TODO(aead): Implement an online check for mTLS - } - - resp, err := json.Marshal(status) + resp, err := json.Marshal(stat) if err != nil { writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL) return @@ -69,7 +56,7 @@ func (a kmsAPIHandlers) KMSStatusHandler(w http.ResponseWriter, r *http.Request) 
writeSuccessResponseJSON(w, resp) } -// KMSMetricsHandler - POST /minio/kms/v1/metrics +// KMSMetricsHandler - GET /minio/kms/v1/metrics func (a kmsAPIHandlers) KMSMetricsHandler(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, w, "KMSMetrics") defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) @@ -84,11 +71,6 @@ func (a kmsAPIHandlers) KMSMetricsHandler(w http.ResponseWriter, r *http.Request return } - if _, ok := GlobalKMS.(kms.KeyManager); !ok { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - metrics, err := GlobalKMS.Metrics(ctx) if err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) @@ -101,7 +83,7 @@ func (a kmsAPIHandlers) KMSMetricsHandler(w http.ResponseWriter, r *http.Request } } -// KMSAPIsHandler - POST /minio/kms/v1/apis +// KMSAPIsHandler - GET /minio/kms/v1/apis func (a kmsAPIHandlers) KMSAPIsHandler(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, w, "KMSAPIs") defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) @@ -116,13 +98,7 @@ func (a kmsAPIHandlers) KMSAPIsHandler(w http.ResponseWriter, r *http.Request) { return } - manager, ok := GlobalKMS.(kms.StatusManager) - if !ok { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - - apis, err := manager.APIs(ctx) + apis, err := GlobalKMS.APIs(ctx) if err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return @@ -138,7 +114,7 @@ type versionResponse struct { Version string `json:"version"` } -// KMSVersionHandler - POST /minio/kms/v1/version +// KMSVersionHandler - GET /minio/kms/v1/version func (a kmsAPIHandlers) KMSVersionHandler(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, w, "KMSVersion") defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) @@ -153,13 +129,7 @@ func (a kmsAPIHandlers) KMSVersionHandler(w http.ResponseWriter, r *http.Request return } - manager, ok := GlobalKMS.(kms.StatusManager) - if !ok { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - - version, err := manager.Version(ctx) + version, err := GlobalKMS.Version(ctx) if err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return @@ -177,10 +147,6 @@ func (a kmsAPIHandlers) KMSVersionHandler(w http.ResponseWriter, r *http.Request func (a kmsAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Request) { // If env variable MINIO_KMS_SECRET_KEY is populated, prevent creation of new keys ctx := newContext(r, w, "KMSCreateKey") - if GlobalKMS != nil && GlobalKMS.IsLocal() { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSDefaultKeyAlreadyConfigured), r.URL) - return - } defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSCreateKeyAction) @@ -193,39 +159,20 @@ func (a kmsAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Reque return } - manager, ok := GlobalKMS.(kms.KeyManager) - if !ok { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } + keyID := r.Form.Get("key-id") - if err := manager.CreateKey(ctx, r.Form.Get("key-id")); err != nil { - writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + // Ensure policy allows the user to create this key name + cred, owner, s3Err := validateAdminSignature(ctx, r, "") + if s3Err != ErrNone { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL) return } - writeSuccessResponseHeadersOnly(w) 
-} - -// KMSDeleteKeyHandler - DELETE /minio/kms/v1/key/delete?key-id= -func (a kmsAPIHandlers) KMSDeleteKeyHandler(w http.ResponseWriter, r *http.Request) { - ctx := newContext(r, w, "KMSDeleteKey") - defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) - - objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSDeleteKeyAction) - if objectAPI == nil { + if !checkKMSActionAllowed(r, owner, cred, policy.KMSCreateKeyAction, keyID) { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL) return } - if GlobalKMS == nil { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL) - return - } - manager, ok := GlobalKMS.(kms.KeyManager) - if !ok { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - if err := manager.DeleteKey(ctx, r.Form.Get("key-id")); err != nil { + if err := GlobalKMS.CreateKey(ctx, &kms.CreateKeyRequest{Name: keyID}); err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } @@ -235,17 +182,11 @@ func (a kmsAPIHandlers) KMSDeleteKeyHandler(w http.ResponseWriter, r *http.Reque // KMSListKeysHandler - GET /minio/kms/v1/key/list?pattern= func (a kmsAPIHandlers) KMSListKeysHandler(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, w, "KMSListKeys") - if GlobalKMS != nil && GlobalKMS.IsLocal() { - res, err := json.Marshal(GlobalKMS.List()) - if err != nil { - writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL) - return - } - writeSuccessResponseJSON(w, res) - return - } defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) + // This only checks if the action (kms:ListKeys) is allowed, it does not check + // each key name against the policy's Resources. We check that below, once + // we have the list of key names from the KMS. objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSListKeysAction) if objectAPI == nil { return @@ -255,74 +196,39 @@ func (a kmsAPIHandlers) KMSListKeysHandler(w http.ResponseWriter, r *http.Reques writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL) return } - manager, ok := GlobalKMS.(kms.KeyManager) - if !ok { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL) - return - } - keys, err := manager.ListKeys(ctx) + allKeys, _, err := GlobalKMS.ListKeys(ctx, &kms.ListRequest{ + Prefix: r.Form.Get("pattern"), + }) if err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } - pattern := r.Form.Get("pattern") - if !strings.Contains(pattern, "*") { - pattern += "*" + // Get the cred and owner for checking authz below. + cred, owner, s3Err := validateAdminSignature(ctx, r, "") + if s3Err != ErrNone { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL) + return } - var values []kes.KeyInfo - for name, err := keys.SeekTo(ctx, pattern); err != io.EOF; name, err = keys.Next(ctx) { - if err != nil { - writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL) - return + // Now we have all the key names, for each of them, check whether the policy grants permission for + // the user to list it. Filter in place to leave only allowed keys. 
+ n := 0 + for _, k := range allKeys { + if checkKMSActionAllowed(r, owner, cred, policy.KMSListKeysAction, k.Name) { + allKeys[n] = k + n++ } - values = append(values, kes.KeyInfo{ - Name: name, - }) } - if res, err := json.Marshal(values); err != nil { + allKeys = allKeys[:n] + + if res, err := json.Marshal(allKeys); err != nil { writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL) } else { writeSuccessResponseJSON(w, res) } } -type importKeyRequest struct { - Bytes string -} - -// KMSImportKeyHandler - POST /minio/kms/v1/key/import?key-id= -func (a kmsAPIHandlers) KMSImportKeyHandler(w http.ResponseWriter, r *http.Request) { - ctx := newContext(r, w, "KMSImportKey") - defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) - - objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSImportKeyAction) - if objectAPI == nil { - return - } - - if GlobalKMS == nil { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL) - return - } - manager, ok := GlobalKMS.(kms.KeyManager) - if !ok { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - var request importKeyRequest - if err := json.NewDecoder(r.Body).Decode(&request); err != nil { - writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) - return - } - if err := manager.ImportKey(ctx, r.Form.Get("key-id"), []byte(request.Bytes)); err != nil { - writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) - return - } - writeSuccessResponseHeadersOnly(w) -} - // KMSKeyStatusHandler - GET /minio/kms/v1/key/status?key-id= func (a kmsAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, w, "KMSKeyStatus") @@ -338,23 +244,28 @@ func (a kmsAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Reque return } - stat, err := GlobalKMS.Stat(ctx) - if err != nil { - writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL) - return - } - keyID := r.Form.Get("key-id") if keyID == "" { - keyID = stat.DefaultKey + keyID = GlobalKMS.DefaultKey } response := madmin.KMSKeyStatus{ KeyID: keyID, } + // Ensure policy allows the user to get this key's status + cred, owner, s3Err := validateAdminSignature(ctx, r, "") + if s3Err != ErrNone { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL) + return + } + if !checkKMSActionAllowed(r, owner, cred, policy.KMSKeyStatusAction, keyID) { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL) + return + } + kmsContext := kms.Context{"MinIO admin API": "KMSKeyStatusHandler"} // Context for a test key operation // 1. Generate a new key using the KMS. - key, err := GlobalKMS.GenerateKey(ctx, keyID, kmsContext) + key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{Name: keyID, AssociatedData: kmsContext}) if err != nil { response.EncryptionErr = err.Error() resp, err := json.Marshal(response) @@ -367,7 +278,11 @@ func (a kmsAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Reque } // 2. 
Verify that we can indeed decrypt the (encrypted) key - decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext) + decryptedKey, err := GlobalKMS.Decrypt(ctx, &kms.DecryptRequest{ + Name: key.KeyID, + Ciphertext: key.Ciphertext, + AssociatedData: kmsContext, + }) if err != nil { response.DecryptionErr = err.Error() resp, err := json.Marshal(response) @@ -399,295 +314,15 @@ func (a kmsAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Reque writeSuccessResponseJSON(w, resp) } -// KMSDescribePolicyHandler - GET /minio/kms/v1/policy/describe?policy= -func (a kmsAPIHandlers) KMSDescribePolicyHandler(w http.ResponseWriter, r *http.Request) { - ctx := newContext(r, w, "KMSDescribePolicy") - defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) - - objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSDescribePolicyAction) - if objectAPI == nil { - return - } - - if GlobalKMS == nil { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL) - return - } - manager, ok := GlobalKMS.(kms.PolicyManager) - if !ok { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - policy, err := manager.DescribePolicy(ctx, r.Form.Get("policy")) - if err != nil { - writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) - return - } - p, err := json.Marshal(policy) - if err != nil { - writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL) - return - } - writeSuccessResponseJSON(w, p) -} - -// KMSAssignPolicyHandler - POST /minio/kms/v1/policy/assign?policy= -func (a kmsAPIHandlers) KMSAssignPolicyHandler(w http.ResponseWriter, r *http.Request) { - ctx := newContext(r, w, "KMSAssignPolicy") - defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) - - objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSAssignPolicyAction) - if objectAPI == nil { - return - } - - if GlobalKMS == nil { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL) - return - } - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return -} - -// KMSDeletePolicyHandler - DELETE /minio/kms/v1/policy/delete?policy= -func (a kmsAPIHandlers) KMSDeletePolicyHandler(w http.ResponseWriter, r *http.Request) { - ctx := newContext(r, w, "KMSDeletePolicy") - defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) - - objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSDeletePolicyAction) - if objectAPI == nil { - return - } - - if GlobalKMS == nil { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL) - return - } - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return -} - -// KMSListPoliciesHandler - GET /minio/kms/v1/policy/list?pattern= -func (a kmsAPIHandlers) KMSListPoliciesHandler(w http.ResponseWriter, r *http.Request) { - ctx := newContext(r, w, "KMSListPolicies") - defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) - - objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSListPoliciesAction) - if objectAPI == nil { - return - } - - if GlobalKMS == nil { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL) - return - } - manager, ok := GlobalKMS.(kms.PolicyManager) - if !ok { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - policies, err := manager.ListPolicies(ctx) - if err != nil { - writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) - return - } - - pattern := 
r.Form.Get("pattern") - if !strings.Contains(pattern, "*") { - pattern += "*" - } - - var values []kes.PolicyInfo - for name, err := policies.SeekTo(ctx, pattern); err != io.EOF; name, err = policies.Next(ctx) { - if err != nil { - writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL) - return - } - values = append(values, kes.PolicyInfo{ - Name: name, - }) - } - if res, err := json.Marshal(values); err != nil { - writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL) - } else { - writeSuccessResponseJSON(w, res) - } -} - -// KMSGetPolicyHandler - GET /minio/kms/v1/policy/get?policy= -func (a kmsAPIHandlers) KMSGetPolicyHandler(w http.ResponseWriter, r *http.Request) { - ctx := newContext(r, w, "KMSGetPolicy") - defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) - - objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSGetPolicyAction) - if objectAPI == nil { - return - } - - if GlobalKMS == nil { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL) - return - } - manager, ok := GlobalKMS.(kms.PolicyManager) - if !ok { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - policy, err := manager.GetPolicy(ctx, r.Form.Get("policy")) - if err != nil { - writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) - return - } - - if p, err := json.Marshal(policy); err != nil { - writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL) - } else { - writeSuccessResponseJSON(w, p) - } -} - -// KMSDescribeIdentityHandler - GET /minio/kms/v1/identity/describe?identity= -func (a kmsAPIHandlers) KMSDescribeIdentityHandler(w http.ResponseWriter, r *http.Request) { - ctx := newContext(r, w, "KMSDescribeIdentity") - defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) - - objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSDescribeIdentityAction) - if objectAPI == nil { - return - } - - if GlobalKMS == nil { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL) - return - } - manager, ok := GlobalKMS.(kms.IdentityManager) - if !ok { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - identity, err := manager.DescribeIdentity(ctx, r.Form.Get("identity")) - if err != nil { - writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) - return - } - i, err := json.Marshal(identity) - if err != nil { - writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL) - return - } - writeSuccessResponseJSON(w, i) -} - -type describeSelfIdentityResponse struct { - Policy *kes.Policy `json:"policy"` - PolicyName string `json:"policyName"` - Identity string `json:"identity"` - IsAdmin bool `json:"isAdmin"` - CreatedAt time.Time `json:"createdAt"` - CreatedBy string `json:"createdBy"` -} - -// KMSDescribeSelfIdentityHandler - GET /minio/kms/v1/identity/describe-self -func (a kmsAPIHandlers) KMSDescribeSelfIdentityHandler(w http.ResponseWriter, r *http.Request) { - ctx := newContext(r, w, "KMSDescribeSelfIdentity") - defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) - - objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSDescribeSelfIdentityAction) - if objectAPI == nil { - return - } - - if GlobalKMS == nil { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL) - return - } - manager, ok := GlobalKMS.(kms.IdentityManager) - if !ok { - 
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - identity, policy, err := manager.DescribeSelfIdentity(ctx) - if err != nil { - writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) - return - } - res := &describeSelfIdentityResponse{ - Policy: policy, - PolicyName: identity.Policy, - Identity: identity.Identity.String(), - IsAdmin: identity.IsAdmin, - CreatedAt: identity.CreatedAt, - CreatedBy: identity.CreatedBy.String(), - } - i, err := json.Marshal(res) - if err != nil { - writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL) - return - } - writeSuccessResponseJSON(w, i) -} - -// KMSDeleteIdentityHandler - DELETE /minio/kms/v1/identity/delete?identity= -func (a kmsAPIHandlers) KMSDeleteIdentityHandler(w http.ResponseWriter, r *http.Request) { - ctx := newContext(r, w, "KMSDeleteIdentity") - defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) - - objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSDeleteIdentityAction) - if objectAPI == nil { - return - } - - if GlobalKMS == nil { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL) - return - } - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return -} - -// KMSListIdentitiesHandler - GET /minio/kms/v1/identity/list?pattern= -func (a kmsAPIHandlers) KMSListIdentitiesHandler(w http.ResponseWriter, r *http.Request) { - ctx := newContext(r, w, "KMSListIdentities") - defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) - - objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSListIdentitiesAction) - if objectAPI == nil { - return - } - - if GlobalKMS == nil { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL) - return - } - manager, ok := GlobalKMS.(kms.IdentityManager) - if !ok { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - identities, err := manager.ListIdentities(ctx) - if err != nil { - writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) - return - } - - pattern := r.Form.Get("pattern") - if !strings.Contains(pattern, "*") { - pattern += "*" - } - - var values []kes.IdentityInfo - for name, err := identities.SeekTo(ctx, pattern); err != io.EOF; name, err = identities.Next(ctx) { - if err != nil { - writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL) - return - } - values = append(values, kes.IdentityInfo{ - Identity: name, - }) - } - if res, err := json.Marshal(values); err != nil { - writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL) - } else { - writeSuccessResponseJSON(w, res) - } +// checkKMSActionAllowed checks for authorization for a specific action on a resource. +func checkKMSActionAllowed(r *http.Request, owner bool, cred auth.Credentials, action policy.KMSAction, resource string) bool { + return globalIAMSys.IsAllowed(policy.Args{ + AccountName: cred.AccessKey, + Groups: cred.Groups, + Action: policy.Action(action), + ConditionValues: getConditionValues(r, "", cred), + IsOwner: owner, + Claims: cred.Claims, + BucketName: resource, // overloading BucketName as that's what the policy engine uses to assemble a Resource. + }) } diff --git a/cmd/kms-handlers_test.go b/cmd/kms-handlers_test.go new file mode 100644 index 0000000000000..4eccab4cd5184 --- /dev/null +++ b/cmd/kms-handlers_test.go @@ -0,0 +1,847 @@ +// Copyright (c) 2015-2024 MinIO, Inc. 
+// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + + "github.com/minio/madmin-go/v3" + "github.com/minio/minio/internal/kms" + "github.com/minio/pkg/v3/policy" +) + +const ( + // KMS API paths + // For example: /minio/kms/v1/key/list?pattern=* + kmsURL = kmsPathPrefix + kmsAPIVersionPrefix + kmsStatusPath = kmsURL + "/status" + kmsMetricsPath = kmsURL + "/metrics" + kmsAPIsPath = kmsURL + "/apis" + kmsVersionPath = kmsURL + "/version" + kmsKeyCreatePath = kmsURL + "/key/create" + kmsKeyListPath = kmsURL + "/key/list" + kmsKeyStatusPath = kmsURL + "/key/status" + + // Admin API paths + // For example: /minio/admin/v3/kms/status + adminURL = adminPathPrefix + adminAPIVersionPrefix + kmsAdminStatusPath = adminURL + "/kms/status" + kmsAdminKeyStatusPath = adminURL + "/kms/key/status" + kmsAdminKeyCreate = adminURL + "/kms/key/create" +) + +const ( + userAccessKey = "miniofakeuseraccesskey" + userSecretKey = "miniofakeusersecret" +) + +type kmsTestCase struct { + name string + method string + path string + query map[string]string + + // User credentials and policy for request + policy string + asRoot bool + + // Wanted in response. 
+ wantStatusCode int + wantKeyNames []string + wantResp []string +} + +func TestKMSHandlersCreateKey(t *testing.T) { + adminTestBed, tearDown := setupKMSTest(t, true) + defer tearDown() + + tests := []kmsTestCase{ + // Create key test + { + name: "create key as user with no policy want forbidden", + method: http.MethodPost, + path: kmsKeyCreatePath, + query: map[string]string{"key-id": "new-test-key"}, + asRoot: false, + + wantStatusCode: http.StatusForbidden, + wantResp: []string{"AccessDenied"}, + }, + { + name: "create key as user with no resources specified want success", + method: http.MethodPost, + path: kmsKeyCreatePath, + query: map[string]string{"key-id": "new-test-key"}, + asRoot: false, + + policy: `{"Effect": "Allow", + "Action": ["kms:CreateKey"] }`, + + wantStatusCode: http.StatusOK, + }, + { + name: "create key as user set policy to allow want success", + method: http.MethodPost, + path: kmsKeyCreatePath, + query: map[string]string{"key-id": "second-new-test-key"}, + asRoot: false, + + policy: `{"Effect": "Allow", + "Action": ["kms:CreateKey"], + "Resource": ["arn:minio:kms:::second-new-test-*"] }`, + + wantStatusCode: http.StatusOK, + }, + { + name: "create key as user set policy to non matching resource want forbidden", + method: http.MethodPost, + path: kmsKeyCreatePath, + query: map[string]string{"key-id": "third-new-test-key"}, + asRoot: false, + + policy: `{"Effect": "Allow", + "Action": ["kms:CreateKey"], + "Resource": ["arn:minio:kms:::non-matching-key-name"] }`, + + wantStatusCode: http.StatusForbidden, + wantResp: []string{"AccessDenied"}, + }, + } + for testNum, test := range tests { + t.Run(fmt.Sprintf("%d %s", testNum+1, test.name), func(t *testing.T) { + execKMSTest(t, test, adminTestBed) + }) + } +} + +func TestKMSHandlersKeyStatus(t *testing.T) { + adminTestBed, tearDown := setupKMSTest(t, true) + defer tearDown() + + tests := []kmsTestCase{ + { + name: "create a first key root user", + method: http.MethodPost, + path: kmsKeyCreatePath, + query: map[string]string{"key-id": "abc-test-key"}, + asRoot: true, + + wantStatusCode: http.StatusOK, + }, + { + name: "key status as root want success", + method: http.MethodGet, + path: kmsKeyStatusPath, + query: map[string]string{"key-id": "abc-test-key"}, + asRoot: true, + + wantStatusCode: http.StatusOK, + wantResp: []string{"abc-test-key"}, + }, + { + name: "key status as user no policy want forbidden", + method: http.MethodGet, + path: kmsKeyStatusPath, + query: map[string]string{"key-id": "abc-test-key"}, + asRoot: false, + + wantStatusCode: http.StatusForbidden, + wantResp: []string{"AccessDenied"}, + }, + { + name: "key status as user legacy no resources specified want success", + method: http.MethodGet, + path: kmsKeyStatusPath, + query: map[string]string{"key-id": "abc-test-key"}, + asRoot: false, + + policy: `{"Effect": "Allow", + "Action": ["kms:KeyStatus"] }`, + + wantStatusCode: http.StatusOK, + wantResp: []string{"abc-test-key"}, + }, + { + name: "key status as user set policy to allow only one key", + method: http.MethodGet, + path: kmsKeyStatusPath, + query: map[string]string{"key-id": "abc-test-key"}, + asRoot: false, + + policy: `{"Effect": "Allow", + "Action": ["kms:KeyStatus"], + "Resource": ["arn:minio:kms:::abc-test-*"] }`, + + wantStatusCode: http.StatusOK, + wantResp: []string{"abc-test-key"}, + }, + { + name: "key status as user set policy to allow non-matching key", + method: http.MethodGet, + path: kmsKeyStatusPath, + query: map[string]string{"key-id": "abc-test-key"}, + asRoot: false, + + 
policy: `{"Effect": "Allow", + "Action": ["kms:KeyStatus"], + "Resource": ["arn:minio:kms:::xyz-test-key"] }`, + + wantStatusCode: http.StatusForbidden, + wantResp: []string{"AccessDenied"}, + }, + } + for testNum, test := range tests { + t.Run(fmt.Sprintf("%d %s", testNum+1, test.name), func(t *testing.T) { + execKMSTest(t, test, adminTestBed) + }) + } +} + +func TestKMSHandlersAPIs(t *testing.T) { + adminTestBed, tearDown := setupKMSTest(t, true) + defer tearDown() + + tests := []kmsTestCase{ + // Version test + { + name: "version as root want success", + method: http.MethodGet, + path: kmsVersionPath, + asRoot: true, + + wantStatusCode: http.StatusOK, + wantResp: []string{"version"}, + }, + { + name: "version as user with no policy want forbidden", + method: http.MethodGet, + path: kmsVersionPath, + asRoot: false, + + wantStatusCode: http.StatusForbidden, + wantResp: []string{"AccessDenied"}, + }, + { + name: "version as user with policy ignores resource want success", + method: http.MethodGet, + path: kmsVersionPath, + asRoot: false, + + policy: `{"Effect": "Allow", + "Action": ["kms:Version"], + "Resource": ["arn:minio:kms:::does-not-matter-it-is-ignored"] }`, + + wantStatusCode: http.StatusOK, + wantResp: []string{"version"}, + }, + + // APIs test + { + name: "apis as root want success", + method: http.MethodGet, + path: kmsAPIsPath, + asRoot: true, + + wantStatusCode: http.StatusOK, + wantResp: []string{"stub/path"}, + }, + { + name: "apis as user with no policy want forbidden", + method: http.MethodGet, + path: kmsAPIsPath, + asRoot: false, + + wantStatusCode: http.StatusForbidden, + wantResp: []string{"AccessDenied"}, + }, + { + name: "apis as user with policy ignores resource want success", + method: http.MethodGet, + path: kmsAPIsPath, + asRoot: false, + + policy: `{"Effect": "Allow", + "Action": ["kms:API"], + "Resource": ["arn:minio:kms:::does-not-matter-it-is-ignored"] }`, + + wantStatusCode: http.StatusOK, + wantResp: []string{"stub/path"}, + }, + + // Metrics test + { + name: "metrics as root want success", + method: http.MethodGet, + path: kmsMetricsPath, + asRoot: true, + + wantStatusCode: http.StatusOK, + wantResp: []string{"kms"}, + }, + { + name: "metrics as user with no policy want forbidden", + method: http.MethodGet, + path: kmsMetricsPath, + asRoot: false, + + wantStatusCode: http.StatusForbidden, + wantResp: []string{"AccessDenied"}, + }, + { + name: "metrics as user with policy ignores resource want success", + method: http.MethodGet, + path: kmsMetricsPath, + asRoot: false, + + policy: `{"Effect": "Allow", + "Action": ["kms:Metrics"], + "Resource": ["arn:minio:kms:::does-not-matter-it-is-ignored"] }`, + + wantStatusCode: http.StatusOK, + wantResp: []string{"kms"}, + }, + + // Status tests + { + name: "status as root want success", + method: http.MethodGet, + path: kmsStatusPath, + asRoot: true, + + wantStatusCode: http.StatusOK, + wantResp: []string{"MinIO builtin"}, + }, + { + name: "status as user with no policy want forbidden", + method: http.MethodGet, + path: kmsStatusPath, + asRoot: false, + + wantStatusCode: http.StatusForbidden, + wantResp: []string{"AccessDenied"}, + }, + { + name: "status as user with policy ignores resource want success", + method: http.MethodGet, + path: kmsStatusPath, + asRoot: false, + + policy: `{"Effect": "Allow", + "Action": ["kms:Status"], + "Resource": ["arn:minio:kms:::does-not-matter-it-is-ignored"]}`, + + wantStatusCode: http.StatusOK, + wantResp: []string{"MinIO builtin"}, + }, + } + for testNum, test := range tests { + 
t.Run(fmt.Sprintf("%d %s", testNum+1, test.name), func(t *testing.T) { + execKMSTest(t, test, adminTestBed) + }) + } +} + +func TestKMSHandlersListKeys(t *testing.T) { + adminTestBed, tearDown := setupKMSTest(t, true) + defer tearDown() + + tests := []kmsTestCase{ + { + name: "create a first key root user", + method: http.MethodPost, + path: kmsKeyCreatePath, + query: map[string]string{"key-id": "abc-test-key"}, + asRoot: true, + + wantStatusCode: http.StatusOK, + }, + { + name: "create a second key root user", + method: http.MethodPost, + path: kmsKeyCreatePath, + query: map[string]string{"key-id": "xyz-test-key"}, + asRoot: true, + + wantStatusCode: http.StatusOK, + }, + + // List keys tests + { + name: "list keys as root want all to be returned", + method: http.MethodGet, + path: kmsKeyListPath, + query: map[string]string{"pattern": "*"}, + asRoot: true, + + wantStatusCode: http.StatusOK, + wantKeyNames: []string{"default-test-key", "abc-test-key", "xyz-test-key"}, + }, + { + name: "list keys as user with no policy want forbidden", + method: http.MethodGet, + path: kmsKeyListPath, + query: map[string]string{"pattern": "*"}, + asRoot: false, + + wantStatusCode: http.StatusForbidden, + wantResp: []string{"AccessDenied"}, + }, + { + name: "list keys as user with no resources specified want success", + method: http.MethodGet, + path: kmsKeyListPath, + query: map[string]string{"pattern": "*"}, + asRoot: false, + + policy: `{"Effect": "Allow", + "Action": ["kms:ListKeys"] + }`, + + wantStatusCode: http.StatusOK, + wantKeyNames: []string{"default-test-key", "abc-test-key", "xyz-test-key"}, + }, + { + name: "list keys as user set policy resource to allow only one key", + method: http.MethodGet, + path: kmsKeyListPath, + query: map[string]string{"pattern": "*"}, + asRoot: false, + + policy: `{"Effect": "Allow", + "Action": ["kms:ListKeys"], + "Resource": ["arn:minio:kms:::abc*"]}`, + + wantStatusCode: http.StatusOK, + wantKeyNames: []string{"abc-test-key"}, + }, + { + name: "list keys as user set policy to allow only one key, use pattern that includes correct key", + method: http.MethodGet, + path: kmsKeyListPath, + query: map[string]string{"pattern": "abc*"}, + + policy: `{"Effect": "Allow", + "Action": ["kms:ListKeys"], + "Resource": ["arn:minio:kms:::abc*"]}`, + + wantStatusCode: http.StatusOK, + wantKeyNames: []string{"abc-test-key"}, + }, + { + name: "list keys as user set policy to allow only one key, use pattern that excludes correct key", + method: http.MethodGet, + path: kmsKeyListPath, + query: map[string]string{"pattern": "xyz*"}, + asRoot: false, + + policy: `{"Effect": "Allow", + "Action": ["kms:ListKeys"], + "Resource": ["arn:minio:kms:::abc*"]}`, + + wantStatusCode: http.StatusOK, + wantKeyNames: []string{}, + }, + { + name: "list keys as user set policy that has no matching key resources", + method: http.MethodGet, + path: kmsKeyListPath, + query: map[string]string{"pattern": "*"}, + asRoot: false, + + policy: `{"Effect": "Allow", + "Action": ["kms:ListKeys"], + "Resource": ["arn:minio:kms:::nonematch*"]}`, + + wantStatusCode: http.StatusOK, + wantKeyNames: []string{}, + }, + { + name: "list keys as user set policy that allows listing but denies specific keys", + method: http.MethodGet, + path: kmsKeyListPath, + query: map[string]string{"pattern": "*"}, + asRoot: false, + + // It looks like this should allow listing any key that isn't "default-test-key", however + // the policy engine matches all Deny statements first, without regard to Resources (for KMS). 
+ // This is for backwards compatibility where historically KMS statements ignored Resources. + policy: `{ + "Effect": "Allow", + "Action": ["kms:ListKeys"] + },{ + "Effect": "Deny", + "Action": ["kms:ListKeys"], + "Resource": ["arn:minio:kms:::default-test-key"] + }`, + + wantStatusCode: http.StatusForbidden, + wantResp: []string{"AccessDenied"}, + }, + } + + for testNum, test := range tests { + t.Run(fmt.Sprintf("%d %s", testNum+1, test.name), func(t *testing.T) { + execKMSTest(t, test, adminTestBed) + }) + } +} + +func TestKMSHandlerAdminAPI(t *testing.T) { + adminTestBed, tearDown := setupKMSTest(t, true) + defer tearDown() + + tests := []kmsTestCase{ + // Create key tests + { + name: "create a key root user", + method: http.MethodPost, + path: kmsAdminKeyCreate, + query: map[string]string{"key-id": "abc-test-key"}, + asRoot: true, + + wantStatusCode: http.StatusOK, + }, + { + name: "create key as user with no policy want forbidden", + method: http.MethodPost, + path: kmsAdminKeyCreate, + query: map[string]string{"key-id": "new-test-key"}, + asRoot: false, + + wantStatusCode: http.StatusForbidden, + wantResp: []string{"AccessDenied"}, + }, + { + name: "create key as user with no resources specified want success", + method: http.MethodPost, + path: kmsAdminKeyCreate, + query: map[string]string{"key-id": "new-test-key"}, + asRoot: false, + + policy: `{"Effect": "Allow", + "Action": ["admin:KMSCreateKey"] }`, + + wantStatusCode: http.StatusOK, + }, + { + name: "create key as user set policy to non matching resource want success", + method: http.MethodPost, + path: kmsAdminKeyCreate, + query: map[string]string{"key-id": "third-new-test-key"}, + asRoot: false, + + // Admin actions ignore Resources + policy: `{"Effect": "Allow", + "Action": ["admin:KMSCreateKey"], + "Resource": ["arn:minio:kms:::this-is-disregarded"] }`, + + wantStatusCode: http.StatusOK, + }, + + // Status tests + { + name: "status as root want success", + method: http.MethodPost, + path: kmsAdminStatusPath, + asRoot: true, + + wantStatusCode: http.StatusOK, + wantResp: []string{"MinIO builtin"}, + }, + { + name: "status as user with no policy want forbidden", + method: http.MethodPost, + path: kmsAdminStatusPath, + asRoot: false, + + wantStatusCode: http.StatusForbidden, + wantResp: []string{"AccessDenied"}, + }, + { + name: "status as user with policy ignores resource want success", + method: http.MethodPost, + path: kmsAdminStatusPath, + asRoot: false, + + policy: `{"Effect": "Allow", + "Action": ["admin:KMSKeyStatus"], + "Resource": ["arn:minio:kms:::does-not-matter-it-is-ignored"] }`, + + wantStatusCode: http.StatusOK, + wantResp: []string{"MinIO builtin"}, + }, + + // Key status tests + { + name: "key status as root want success", + method: http.MethodGet, + path: kmsAdminKeyStatusPath, + asRoot: true, + + wantStatusCode: http.StatusOK, + wantResp: []string{"key-id"}, + }, + { + name: "key status as user with no policy want forbidden", + method: http.MethodGet, + path: kmsAdminKeyStatusPath, + asRoot: false, + + wantStatusCode: http.StatusForbidden, + wantResp: []string{"AccessDenied"}, + }, + { + name: "key status as user with policy ignores resource want success", + method: http.MethodGet, + path: kmsAdminKeyStatusPath, + asRoot: false, + + policy: `{"Effect": "Allow", + "Action": ["admin:KMSKeyStatus"], + "Resource": ["arn:minio:kms:::does-not-matter-it-is-ignored"] }`, + + wantStatusCode: http.StatusOK, + wantResp: []string{"key-id"}, + }, + } + + for testNum, test := range tests { + t.Run(fmt.Sprintf("%d %s", 
testNum+1, test.name), func(t *testing.T) { + execKMSTest(t, test, adminTestBed) + }) + } +} + +// execKMSTest runs a single test case for KMS handlers +func execKMSTest(t *testing.T, test kmsTestCase, adminTestBed *adminErasureTestBed) { + var accessKey, secretKey string + if test.asRoot { + accessKey, secretKey = globalActiveCred.AccessKey, globalActiveCred.SecretKey + } else { + setupKMSUser(t, userAccessKey, userSecretKey, test.policy) + accessKey = userAccessKey + secretKey = userSecretKey + } + + req := buildKMSRequest(t, test.method, test.path, accessKey, secretKey, test.query) + rec := httptest.NewRecorder() + adminTestBed.router.ServeHTTP(rec, req) + + t.Logf("HTTP req: %s, resp code: %d, resp body: %s", req.URL.String(), rec.Code, rec.Body.String()) + + // Check status code + if rec.Code != test.wantStatusCode { + t.Errorf("want status code %d, got %d", test.wantStatusCode, rec.Code) + } + + // Check returned key list is correct + if test.wantKeyNames != nil { + keys := []madmin.KMSKeyInfo{} + err := json.Unmarshal(rec.Body.Bytes(), &keys) + if err != nil { + t.Fatal(err) + } + if len(keys) != len(test.wantKeyNames) { + t.Fatalf("want %d keys, got %d", len(test.wantKeyNames), len(keys)) + } + + for i, want := range keys { + if want.CreatedBy != kms.StubCreatedBy { + t.Fatalf("want key created by %s, got %s", kms.StubCreatedBy, want.CreatedBy) + } + if want.CreatedAt != kms.StubCreatedAt { + t.Fatalf("want key created at %s, got %s", kms.StubCreatedAt, want.CreatedAt) + } + if test.wantKeyNames[i] != want.Name { + t.Fatalf("want key name %s, got %s", test.wantKeyNames[i], want.Name) + } + } + } + + // Check generic text in the response + if test.wantResp != nil { + for _, want := range test.wantResp { + if !strings.Contains(rec.Body.String(), want) { + t.Fatalf("want response to contain %s, got %s", want, rec.Body.String()) + } + } + } +} + +// TestKMSHandlerNotConfiguredOrInvalidCreds tests KMS handlers for situations where KMS is not configured +// or invalid credentials are provided. 
+func TestKMSHandlerNotConfiguredOrInvalidCreds(t *testing.T) { + adminTestBed, tearDown := setupKMSTest(t, false) + defer tearDown() + + tests := []struct { + name string + method string + path string + query map[string]string + }{ + { + name: "GET status", + method: http.MethodGet, + path: kmsStatusPath, + }, + { + name: "GET metrics", + method: http.MethodGet, + path: kmsMetricsPath, + }, + { + name: "GET apis", + method: http.MethodGet, + path: kmsAPIsPath, + }, + { + name: "GET version", + method: http.MethodGet, + path: kmsVersionPath, + }, + { + name: "POST key create", + method: http.MethodPost, + path: kmsKeyCreatePath, + query: map[string]string{"key-id": "master-key-id"}, + }, + { + name: "GET key list", + method: http.MethodGet, + path: kmsKeyListPath, + query: map[string]string{"pattern": "*"}, + }, + { + name: "GET key status", + method: http.MethodGet, + path: kmsKeyStatusPath, + query: map[string]string{"key-id": "master-key-id"}, + }, + } + + // Test when the GlobalKMS is not configured + for _, test := range tests { + t.Run(test.name+" not configured", func(t *testing.T) { + req := buildKMSRequest(t, test.method, test.path, "", "", test.query) + rec := httptest.NewRecorder() + adminTestBed.router.ServeHTTP(rec, req) + if rec.Code != http.StatusNotImplemented { + t.Errorf("want status code %d, got %d", http.StatusNotImplemented, rec.Code) + } + }) + } + + // Test when the GlobalKMS is configured but the credentials are invalid + GlobalKMS = kms.NewStub("default-test-key") + for _, test := range tests { + t.Run(test.name+" invalid credentials", func(t *testing.T) { + req := buildKMSRequest(t, test.method, test.path, userAccessKey, userSecretKey, test.query) + rec := httptest.NewRecorder() + adminTestBed.router.ServeHTTP(rec, req) + if rec.Code != http.StatusForbidden { + t.Errorf("want status code %d, got %d", http.StatusForbidden, rec.Code) + } + }) + } +} + +func setupKMSTest(t *testing.T, enableKMS bool) (*adminErasureTestBed, func()) { + adminTestBed, err := prepareAdminErasureTestBed(t.Context()) + if err != nil { + t.Fatal(err) + } + registerKMSRouter(adminTestBed.router) + + if enableKMS { + GlobalKMS = kms.NewStub("default-test-key") + } + + tearDown := func() { + adminTestBed.TearDown() + GlobalKMS = nil + } + return adminTestBed, tearDown +} + +func buildKMSRequest(t *testing.T, method, path, accessKey, secretKey string, query map[string]string) *http.Request { + if len(query) > 0 { + queryVal := url.Values{} + for k, v := range query { + queryVal.Add(k, v) + } + path = path + "?" + queryVal.Encode() + } + + if accessKey == "" && secretKey == "" { + accessKey = globalActiveCred.AccessKey + secretKey = globalActiveCred.SecretKey + } + + req, err := newTestSignedRequestV4(method, path, 0, nil, accessKey, secretKey, nil) + if err != nil { + t.Fatal(err) + } + return req +} + +// setupKMSUser is a test helper that creates a new user with the provided access key and secret key +// and applies the given policy to the user. 
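// setupKMSUser (below) wraps each test case's bare statement in a complete
// policy document before parsing it. As a point of reference, the
// "kms:ListKeys" statement used in the list-keys tests above would end up as
// the following document (shown as a Go raw string purely for illustration):
const exampleKMSTestPolicy = `{
	"Version": "2012-10-17",
	"Statement": [{
		"Effect": "Allow",
		"Action": ["kms:ListKeys"],
		"Resource": ["arn:minio:kms:::abc*"]
	}]
}`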
+func setupKMSUser(t *testing.T, accessKey, secretKey, p string) { + ctx := t.Context() + createUserParams := madmin.AddOrUpdateUserReq{ + SecretKey: secretKey, + Status: madmin.AccountEnabled, + } + _, err := globalIAMSys.CreateUser(ctx, accessKey, createUserParams) + if err != nil { + t.Fatal(err) + } + + testKMSPolicyName := "testKMSPolicy" + if p != "" { + p = `{"Version":"2012-10-17","Statement":[` + p + `]}` + policyData, err := policy.ParseConfig(strings.NewReader(p)) + if err != nil { + t.Fatal(err) + } + _, err = globalIAMSys.SetPolicy(ctx, testKMSPolicyName, *policyData) + if err != nil { + t.Fatal(err) + } + _, err = globalIAMSys.PolicyDBSet(ctx, accessKey, testKMSPolicyName, regUser, false) + if err != nil { + t.Fatal(err) + } + } else { + err = globalIAMSys.DeletePolicy(ctx, testKMSPolicyName, false) + if err != nil { + t.Fatal(err) + } + _, err = globalIAMSys.PolicyDBSet(ctx, accessKey, "", regUser, false) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/cmd/kms-router.go b/cmd/kms-router.go index 98c6c55c10a98..2428f4c1ef9de 100644 --- a/cmd/kms-router.go +++ b/cmd/kms-router.go @@ -57,23 +57,8 @@ func registerKMSRouter(router *mux.Router) { kmsRouter.Methods(http.MethodGet).Path(version + "/version").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSVersionHandler))) // KMS Key APIs kmsRouter.Methods(http.MethodPost).Path(version+"/key/create").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSCreateKeyHandler))).Queries("key-id", "{key-id:.*}") - kmsRouter.Methods(http.MethodPost).Path(version+"/key/import").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSImportKeyHandler))).Queries("key-id", "{key-id:.*}") - kmsRouter.Methods(http.MethodDelete).Path(version+"/key/delete").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSDeleteKeyHandler))).Queries("key-id", "{key-id:.*}") kmsRouter.Methods(http.MethodGet).Path(version+"/key/list").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSListKeysHandler))).Queries("pattern", "{pattern:.*}") kmsRouter.Methods(http.MethodGet).Path(version + "/key/status").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSKeyStatusHandler))) - - // KMS Policy APIs - kmsRouter.Methods(http.MethodPost).Path(version+"/policy/assign").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSAssignPolicyHandler))).Queries("policy", "{policy:.*}") - kmsRouter.Methods(http.MethodGet).Path(version+"/policy/describe").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSDescribePolicyHandler))).Queries("policy", "{policy:.*}") - kmsRouter.Methods(http.MethodGet).Path(version+"/policy/get").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSGetPolicyHandler))).Queries("policy", "{policy:.*}") - kmsRouter.Methods(http.MethodDelete).Path(version+"/policy/delete").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSDeletePolicyHandler))).Queries("policy", "{policy:.*}") - kmsRouter.Methods(http.MethodGet).Path(version+"/policy/list").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSListPoliciesHandler))).Queries("pattern", "{pattern:.*}") - - // KMS Identity APIs - kmsRouter.Methods(http.MethodGet).Path(version+"/identity/describe").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSDescribeIdentityHandler))).Queries("identity", "{identity:.*}") - kmsRouter.Methods(http.MethodGet).Path(version + "/identity/describe-self").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSDescribeSelfIdentityHandler))) - kmsRouter.Methods(http.MethodDelete).Path(version+"/identity/delete").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSDeleteIdentityHandler))).Queries("identity", "{identity:.*}") - kmsRouter.Methods(http.MethodGet).Path(version+"/identity/list").HandlerFunc(gz(httpTraceAll(kmsAPI.KMSListIdentitiesHandler))).Queries("pattern", 
"{pattern:.*}") } // If none of the routes match add default error handler routes diff --git a/cmd/last-minute_gen.go b/cmd/last-minute_gen.go index 5a78826bb0538..4e0d66613eac8 100644 --- a/cmd/last-minute_gen.go +++ b/cmd/last-minute_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) diff --git a/cmd/last-minute_gen_test.go b/cmd/last-minute_gen_test.go index 39e004647c5ab..de6845515382e 100644 --- a/cmd/last-minute_gen_test.go +++ b/cmd/last-minute_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "bytes" "testing" diff --git a/cmd/leak-detect_test.go b/cmd/leak-detect_test.go index bc37414fda2e7..95b98c4e9549f 100644 --- a/cmd/leak-detect_test.go +++ b/cmd/leak-detect_test.go @@ -139,7 +139,7 @@ func pickRelevantGoroutines() (gs []string) { // get runtime stack buffer. buf := debug.Stack() // runtime stack of go routines will be listed with 2 blank spaces between each of them, so split on "\n\n" . - for _, g := range strings.Split(string(buf), "\n\n") { + for g := range strings.SplitSeq(string(buf), "\n\n") { // Again split on a new line, the first line of the second half contains the info about the go routine. sl := strings.SplitN(g, "\n", 2) if len(sl) != 2 { @@ -159,5 +159,5 @@ func pickRelevantGoroutines() (gs []string) { gs = append(gs, g) } sort.Strings(gs) - return + return gs } diff --git a/cmd/license-update.go b/cmd/license-update.go deleted file mode 100644 index 3cd4c2b2300db..0000000000000 --- a/cmd/license-update.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright (c) 2015-2023 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package cmd - -import ( - "context" - "fmt" - "math/rand" - "time" - - "github.com/minio/minio/internal/logger" - "github.com/tidwall/gjson" -) - -const ( - licUpdateCycle = 24 * time.Hour * 30 - licRenewPath = "/api/cluster/renew-license" -) - -// initlicenseUpdateJob start the periodic license update job in the background. -func initLicenseUpdateJob(ctx context.Context, objAPI ObjectLayer) { - go func() { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - // Leader node (that successfully acquires the lock inside licenceUpdaterLoop) - // will keep performing the license update. If the leader goes down for some - // reason, the lock will be released and another node will acquire it and - // take over because of this loop. - for { - licenceUpdaterLoop(ctx, objAPI) - - // license update stopped for some reason. - // sleep for some time and try again. - duration := time.Duration(r.Float64() * float64(time.Hour)) - if duration < time.Second { - // Make sure to sleep at least a second to avoid high CPU ticks. 
- duration = time.Second - } - time.Sleep(duration) - } - }() -} - -func licenceUpdaterLoop(ctx context.Context, objAPI ObjectLayer) { - ctx, cancel := globalLeaderLock.GetLock(ctx) - defer cancel() - - licenseUpdateTimer := time.NewTimer(licUpdateCycle) - defer licenseUpdateTimer.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-licenseUpdateTimer.C: - - if globalSubnetConfig.Registered() { - performLicenseUpdate(ctx, objAPI) - } - - // Reset the timer for next cycle. - licenseUpdateTimer.Reset(licUpdateCycle) - } - } -} - -func performLicenseUpdate(ctx context.Context, objectAPI ObjectLayer) { - // the subnet license renewal api renews the license only - // if required e.g. when it is expiring soon - url := globalSubnetConfig.BaseURL + licRenewPath - - resp, err := globalSubnetConfig.Post(url, nil) - if err != nil { - logger.LogIf(ctx, fmt.Errorf("error from %s: %w", url, err)) - return - } - - r := gjson.Parse(resp).Get("license_v2") - if r.Index == 0 { - logger.LogIf(ctx, fmt.Errorf("license not found in response from %s", url)) - return - } - - lic := r.String() - if lic == globalSubnetConfig.License { - // license hasn't changed. - return - } - - kv := "subnet license=" + lic - result, err := setConfigKV(ctx, objectAPI, []byte(kv)) - if err != nil { - logger.LogIf(ctx, fmt.Errorf("error setting subnet license config: %w", err)) - return - } - - if result.Dynamic { - if err := applyDynamicConfigForSubSys(GlobalContext, objectAPI, result.Cfg, result.SubSys); err != nil { - logger.LogIf(ctx, fmt.Errorf("error applying subnet dynamic config: %w", err)) - return - } - globalNotificationSys.SignalConfigReload(result.SubSys) - } -} diff --git a/cmd/listen-notification-handlers.go b/cmd/listen-notification-handlers.go index c8b72836d8d80..9f3210daf44a7 100644 --- a/cmd/listen-notification-handlers.go +++ b/cmd/listen-notification-handlers.go @@ -26,10 +26,11 @@ import ( "github.com/minio/minio/internal/event" "github.com/minio/minio/internal/grid" + xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/pubsub" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) func (api objectAPIHandlers) ListenNotificationHandler(w http.ResponseWriter, r *http.Request) { @@ -132,7 +133,7 @@ func (api objectAPIHandlers) ListenNotificationHandler(w http.ResponseWriter, r buf.Reset() tmpEvt.Records[0] = ev if err := enc.Encode(tmpEvt); err != nil { - logger.LogOnceIf(ctx, err, "event: Encode failed") + bugLogIf(ctx, err, "event: Encode failed") continue } mergeCh <- append(grid.GetByteBuffer()[:0], buf.Bytes()...) @@ -200,19 +201,19 @@ func (api objectAPIHandlers) ListenNotificationHandler(w http.ResponseWriter, r } if len(mergeCh) == 0 { // Flush if nothing is queued - w.(http.Flusher).Flush() + xhttp.Flush(w) } grid.PutByteBuffer(ev) case <-emptyEventTicker: if err := enc.Encode(struct{ Records []event.Event }{}); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) case <-keepAliveTicker: if _, err := w.Write([]byte(" ")); err != nil { return } - w.(http.Flusher).Flush() + xhttp.Flush(w) case <-ctx.Done(): return } diff --git a/cmd/local-locker.go b/cmd/local-locker.go index a53de1de69524..3fd630b9c6835 100644 --- a/cmd/local-locker.go +++ b/cmd/local-locker.go @@ -24,23 +24,32 @@ import ( "fmt" "strconv" "sync" + "sync/atomic" "time" "github.com/minio/minio/internal/dsync" ) +// Reject new lock requests immediately when this many are queued +// for the local lock mutex. 
+// We do not block unlocking or maintenance, but they add to the count. +// The limit is set to allow for bursty behavior, +// but prevent requests to overload the server completely. +// Rejected clients are expected to retry. +const lockMutexWaitLimit = 1000 + // lockRequesterInfo stores various info from the client for each lock that is requested. type lockRequesterInfo struct { - Name string // name of the resource lock was requested for - Writer bool // Bool whether write or read lock. - UID string // UID to uniquely identify request of client. - Timestamp time.Time // Timestamp set at the time of initialization. - TimeLastRefresh time.Time // Timestamp for last lock refresh. - Source string // Contains line, function and filename requesting the lock. - Group bool // indicates if it was a group lock. - Owner string // Owner represents the UUID of the owner who originally requested the lock. - Quorum int // Quorum represents the quorum required for this lock to be active. - idx int `msg:"-"` // index of the lock in the lockMap. + Name string // name of the resource lock was requested for + Writer bool // Bool whether write or read lock. + UID string // UID to uniquely identify request of client. + Timestamp int64 // Timestamp set at the time of initialization. + TimeLastRefresh int64 // Timestamp for last lock refresh. + Source string // Contains line, function and filename requesting the lock. + Group bool // indicates if it was a group lock. + Owner string // Owner represents the UUID of the owner who originally requested the lock. + Quorum int // Quorum represents the quorum required for this lock to be active. + idx int `msg:"-"` // index of the lock in the lockMap. } // isWriteLock returns whether the lock is a write or read lock. @@ -52,9 +61,25 @@ func isWriteLock(lri []lockRequesterInfo) bool { // //msgp:ignore localLocker type localLocker struct { - mutex sync.Mutex - lockMap map[string][]lockRequesterInfo - lockUID map[string]string // UUID -> resource map. + mutex sync.Mutex + waitMutex atomic.Int32 + lockMap map[string][]lockRequesterInfo + lockUID map[string]string // UUID -> resource map. + + // the following are updated on every cleanup defined in lockValidityDuration + readers atomic.Int32 + writers atomic.Int32 + lastCleanup atomic.Pointer[time.Time] + locksOverloaded atomic.Int64 +} + +// getMutex will lock the mutex. +// Call the returned function to unlock. +func (l *localLocker) getMutex() func() { + l.waitMutex.Add(1) + l.mutex.Lock() + l.waitMutex.Add(-1) + return l.mutex.Unlock } func (l *localLocker) String() string { @@ -76,9 +101,16 @@ func (l *localLocker) Lock(ctx context.Context, args dsync.LockArgs) (reply bool return false, fmt.Errorf("internal error: localLocker.Lock called with more than %d resources", maxDeleteList) } - l.mutex.Lock() - defer l.mutex.Unlock() - + // If we have too many waiting, reject this at once. + if l.waitMutex.Load() > lockMutexWaitLimit { + l.locksOverloaded.Add(1) + return false, nil + } + // Wait for mutex + defer l.getMutex()() + if ctx.Err() != nil { + return false, ctx.Err() + } if !l.canTakeLock(args.Resources...) { // Not all locks can be taken on resources, // reject it completely. @@ -87,6 +119,7 @@ func (l *localLocker) Lock(ctx context.Context, args dsync.LockArgs) (reply bool // No locks held on the all resources, so claim write // lock on all resources at once. 
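// The admission-control pattern introduced above (count would-be waiters with
// an atomic counter, shed new lock requests once the queue is too deep, and
// otherwise block on the mutex) can be sketched in isolation as below. The
// type and its names are illustrative only and assume the sync and sync/atomic
// imports; rejected callers are expected to retry, matching the comment on
// lockMutexWaitLimit.
type boundedMutex struct {
	mu      sync.Mutex
	waiting atomic.Int32
	limit   int32
}

// acquire returns an unlock func, or ok=false when too many callers are
// already queued for the mutex, so the caller can fail fast instead of
// adding another blocked goroutine.
func (b *boundedMutex) acquire() (unlock func(), ok bool) {
	if b.waiting.Load() > b.limit {
		return nil, false // shed load instead of queueing
	}
	b.waiting.Add(1)
	b.mu.Lock()
	b.waiting.Add(-1)
	return b.mu.Unlock, true
}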
+ now := UTCNow() for i, resource := range args.Resources { l.lockMap[resource] = []lockRequesterInfo{ { @@ -95,10 +128,10 @@ func (l *localLocker) Lock(ctx context.Context, args dsync.LockArgs) (reply bool Source: args.Source, Owner: args.Owner, UID: args.UID, - Timestamp: UTCNow(), - TimeLastRefresh: UTCNow(), + Timestamp: now.UnixNano(), + TimeLastRefresh: now.UnixNano(), Group: len(args.Resources) > 1, - Quorum: args.Quorum, + Quorum: *args.Quorum, idx: i, }, } @@ -108,7 +141,7 @@ func (l *localLocker) Lock(ctx context.Context, args dsync.LockArgs) (reply bool } func formatUUID(s string, idx int) string { - return s + strconv.Itoa(idx) + return concat(s, strconv.Itoa(idx)) } func (l *localLocker) Unlock(_ context.Context, args dsync.LockArgs) (reply bool, err error) { @@ -116,9 +149,7 @@ func (l *localLocker) Unlock(_ context.Context, args dsync.LockArgs) (reply bool return false, fmt.Errorf("internal error: localLocker.Unlock called with more than %d resources", maxDeleteList) } - l.mutex.Lock() - defer l.mutex.Unlock() - err = nil + defer l.getMutex()() for _, resource := range args.Resources { lri, ok := l.lockMap[resource] @@ -131,7 +162,7 @@ func (l *localLocker) Unlock(_ context.Context, args dsync.LockArgs) (reply bool reply = l.removeEntry(resource, args, &lri) || reply } } - return + return reply, err } // removeEntry based on the uid of the lock message, removes a single entry from the @@ -163,21 +194,31 @@ func (l *localLocker) RLock(ctx context.Context, args dsync.LockArgs) (reply boo if len(args.Resources) != 1 { return false, fmt.Errorf("internal error: localLocker.RLock called with more than one resource") } + // If we have too many waiting, reject this at once. + if l.waitMutex.Load() > lockMutexWaitLimit { + l.locksOverloaded.Add(1) + return false, nil + } - l.mutex.Lock() - defer l.mutex.Unlock() + // Wait for mutex + defer l.getMutex()() + if ctx.Err() != nil { + return false, ctx.Err() + } resource := args.Resources[0] + now := UTCNow() lrInfo := lockRequesterInfo{ Name: resource, Writer: false, Source: args.Source, Owner: args.Owner, UID: args.UID, - Timestamp: UTCNow(), - TimeLastRefresh: UTCNow(), - Quorum: args.Quorum, + Timestamp: now.UnixNano(), + TimeLastRefresh: now.UnixNano(), + Quorum: *args.Quorum, } - if lri, ok := l.lockMap[resource]; ok { + lri, ok := l.lockMap[resource] + if ok { if reply = !isWriteLock(lri); reply { // Unless there is a write lock l.lockMap[resource] = append(l.lockMap[resource], lrInfo) @@ -197,8 +238,7 @@ func (l *localLocker) RUnlock(_ context.Context, args dsync.LockArgs) (reply boo return false, fmt.Errorf("internal error: localLocker.RUnlock called with more than one resource") } - l.mutex.Lock() - defer l.mutex.Unlock() + defer l.getMutex()() var lri []lockRequesterInfo resource := args.Resources[0] @@ -215,35 +255,28 @@ func (l *localLocker) RUnlock(_ context.Context, args dsync.LockArgs) (reply boo } type lockStats struct { - Total int - Writes int - Reads int + Total int + Writes int + Reads int + LockQueue int + LocksAbandoned int + LastCleanup *time.Time } func (l *localLocker) stats() lockStats { - l.mutex.Lock() - defer l.mutex.Unlock() - - st := lockStats{Total: len(l.lockMap)} - for _, v := range l.lockMap { - if len(v) == 0 { - continue - } - entry := v[0] - if entry.Writer { - st.Writes++ - } else { - st.Reads += len(v) - } + return lockStats{ + Total: len(l.lockMap), + Reads: int(l.readers.Load()), + Writes: int(l.writers.Load()), + LockQueue: int(l.waitMutex.Load()), + LastCleanup: l.lastCleanup.Load(), } - return 
st } type localLockMap map[string][]lockRequesterInfo func (l *localLocker) DupLockMap() localLockMap { - l.mutex.Lock() - defer l.mutex.Unlock() + defer l.getMutex()() lockCopy := make(map[string][]lockRequesterInfo, len(l.lockMap)) for k, v := range l.lockMap { @@ -271,112 +304,115 @@ func (l *localLocker) IsLocal() bool { } func (l *localLocker) ForceUnlock(ctx context.Context, args dsync.LockArgs) (reply bool, err error) { - select { - case <-ctx.Done(): + if ctx.Err() != nil { + return false, ctx.Err() + } + + defer l.getMutex()() + if ctx.Err() != nil { return false, ctx.Err() - default: - l.mutex.Lock() - defer l.mutex.Unlock() - if len(args.UID) == 0 { - for _, resource := range args.Resources { + } + if len(args.UID) == 0 { + for _, resource := range args.Resources { + lris, ok := l.lockMap[resource] + if !ok { + continue + } + // Collect uids, so we don't mutate while we delete + uids := make([]string, 0, len(lris)) + for _, lri := range lris { + uids = append(uids, lri.UID) + } + + // Delete collected uids: + for _, uid := range uids { lris, ok := l.lockMap[resource] if !ok { - continue - } - // Collect uids, so we don't mutate while we delete - uids := make([]string, 0, len(lris)) - for _, lri := range lris { - uids = append(uids, lri.UID) - } - - // Delete collected uids: - for _, uid := range uids { - lris, ok := l.lockMap[resource] - if !ok { - // Just to be safe, delete uuids. - for idx := 0; idx < maxDeleteList; idx++ { - mapID := formatUUID(uid, idx) - if _, ok := l.lockUID[mapID]; !ok { - break - } - delete(l.lockUID, mapID) + // Just to be safe, delete uuids. + for idx := range maxDeleteList { + mapID := formatUUID(uid, idx) + if _, ok := l.lockUID[mapID]; !ok { + break } - continue + delete(l.lockUID, mapID) } - l.removeEntry(resource, dsync.LockArgs{UID: uid}, &lris) + continue } + l.removeEntry(resource, dsync.LockArgs{UID: uid}, &lris) } - return true, nil } + return true, nil + } - idx := 0 - for { - mapID := formatUUID(args.UID, idx) - resource, ok := l.lockUID[mapID] - if !ok { - return idx > 0, nil - } - lris, ok := l.lockMap[resource] - if !ok { - // Unexpected inconsistency, delete. - delete(l.lockUID, mapID) - idx++ - continue - } - reply = true - l.removeEntry(resource, dsync.LockArgs{UID: args.UID}, &lris) + idx := 0 + for { + mapID := formatUUID(args.UID, idx) + resource, ok := l.lockUID[mapID] + if !ok { + return idx > 0, nil + } + lris, ok := l.lockMap[resource] + if !ok { + // Unexpected inconsistency, delete. + delete(l.lockUID, mapID) idx++ + continue } + reply = true + l.removeEntry(resource, dsync.LockArgs{UID: args.UID}, &lris) + idx++ } } func (l *localLocker) Refresh(ctx context.Context, args dsync.LockArgs) (refreshed bool, err error) { - select { - case <-ctx.Done(): + if ctx.Err() != nil { return false, ctx.Err() - default: - l.mutex.Lock() - defer l.mutex.Unlock() + } - // Check whether uid is still active. - resource, ok := l.lockUID[formatUUID(args.UID, 0)] + defer l.getMutex()() + if ctx.Err() != nil { + return false, ctx.Err() + } + + // Check whether uid is still active. + resource, ok := l.lockUID[formatUUID(args.UID, 0)] + if !ok { + return false, nil + } + idx := 0 + for { + lris, ok := l.lockMap[resource] if !ok { - return false, nil + // Inconsistent. Delete UID. + delete(l.lockUID, formatUUID(args.UID, idx)) + return idx > 0, nil } - idx := 0 - for { - lris, ok := l.lockMap[resource] - if !ok { - // Inconsistent. Delete UID. 
- delete(l.lockUID, formatUUID(args.UID, idx)) - return idx > 0, nil - } - for i := range lris { - if lris[i].UID == args.UID { - lris[i].TimeLastRefresh = UTCNow() - } - } - idx++ - resource, ok = l.lockUID[formatUUID(args.UID, idx)] - if !ok { - // No more resources for UID, but we did update at least one. - return true, nil + now := UTCNow() + for i := range lris { + if lris[i].UID == args.UID { + lris[i].TimeLastRefresh = now.UnixNano() } } + idx++ + resource, ok = l.lockUID[formatUUID(args.UID, idx)] + if !ok { + // No more resources for UID, but we did update at least one. + return true, nil + } } } // Similar to removeEntry but only removes an entry only if the lock entry exists in map. // Caller must hold 'l.mutex' lock. func (l *localLocker) expireOldLocks(interval time.Duration) { - l.mutex.Lock() - defer l.mutex.Unlock() + defer l.getMutex()() + var readers, writers int32 for k, lris := range l.lockMap { modified := false for i := 0; i < len(lris); { lri := &lris[i] - if time.Since(lri.TimeLastRefresh) > interval { + if time.Since(time.Unix(0, lri.TimeLastRefresh)) > interval { delete(l.lockUID, formatUUID(lri.UID, lri.idx)) if len(lris) == 1 { // Remove the write lock. @@ -389,6 +425,11 @@ func (l *localLocker) expireOldLocks(interval time.Duration) { lris = append(lris[:i], lris[i+1:]...) // Check same i } else { + if lri.Writer { + writers++ + } else { + readers++ + } // Move to next i++ } @@ -397,6 +438,10 @@ func (l *localLocker) expireOldLocks(interval time.Duration) { l.lockMap[k] = lris } } + t := time.Now() + l.lastCleanup.Store(&t) + l.readers.Store(readers) + l.writers.Store(writers) } func newLocker() *localLocker { diff --git a/cmd/local-locker_gen.go b/cmd/local-locker_gen.go index 6ba4a893e45f2..41bb92543cd34 100644 --- a/cmd/local-locker_gen.go +++ b/cmd/local-locker_gen.go @@ -1,8 +1,10 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package cmd + import ( + "time" + "github.com/tinylib/msgp/msgp" ) @@ -17,19 +19,19 @@ func (z *localLockMap) DecodeMsg(dc *msgp.Reader) (err error) { if (*z) == nil { (*z) = make(localLockMap, zb0004) } else if len((*z)) > 0 { - for key := range *z { - delete((*z), key) - } + clear((*z)) } + var field []byte + _ = field for zb0004 > 0 { zb0004-- var zb0001 string - var zb0002 []lockRequesterInfo zb0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err) return } + var zb0002 []lockRequesterInfo var zb0005 uint32 zb0005, err = dc.ReadArrayHeader() if err != nil { @@ -111,14 +113,14 @@ func (z *localLockMap) UnmarshalMsg(bts []byte) (o []byte, err error) { if (*z) == nil { (*z) = make(localLockMap, zb0004) } else if len((*z)) > 0 { - for key := range *z { - delete((*z), key) - } + clear((*z)) } + var field []byte + _ = field for zb0004 > 0 { - var zb0001 string var zb0002 []lockRequesterInfo zb0004-- + var zb0001 string zb0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err) @@ -200,13 +202,13 @@ func (z *lockRequesterInfo) DecodeMsg(dc *msgp.Reader) (err error) { return } case "Timestamp": - z.Timestamp, err = dc.ReadTime() + z.Timestamp, err = dc.ReadInt64() if err != nil { err = msgp.WrapError(err, "Timestamp") return } case "TimeLastRefresh": - z.TimeLastRefresh, err = dc.ReadTime() + z.TimeLastRefresh, err = dc.ReadInt64() if err != nil { err = msgp.WrapError(err, "TimeLastRefresh") return @@ -284,7 +286,7 @@ func (z *lockRequesterInfo) EncodeMsg(en *msgp.Writer) (err error) { if err != nil { return } - err = en.WriteTime(z.Timestamp) + err = en.WriteInt64(z.Timestamp) if err != nil { err = msgp.WrapError(err, "Timestamp") return @@ -294,7 +296,7 @@ func (z *lockRequesterInfo) EncodeMsg(en *msgp.Writer) (err error) { if err != nil { return } - err = en.WriteTime(z.TimeLastRefresh) + err = en.WriteInt64(z.TimeLastRefresh) if err != nil { err = msgp.WrapError(err, "TimeLastRefresh") return @@ -357,10 +359,10 @@ func (z *lockRequesterInfo) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.AppendString(o, z.UID) // string "Timestamp" o = append(o, 0xa9, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70) - o = msgp.AppendTime(o, z.Timestamp) + o = msgp.AppendInt64(o, z.Timestamp) // string "TimeLastRefresh" o = append(o, 0xaf, 0x54, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68) - o = msgp.AppendTime(o, z.TimeLastRefresh) + o = msgp.AppendInt64(o, z.TimeLastRefresh) // string "Source" o = append(o, 0xa6, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65) o = msgp.AppendString(o, z.Source) @@ -413,13 +415,13 @@ func (z *lockRequesterInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { return } case "Timestamp": - z.Timestamp, bts, err = msgp.ReadTimeBytes(bts) + z.Timestamp, bts, err = msgp.ReadInt64Bytes(bts) if err != nil { err = msgp.WrapError(err, "Timestamp") return } case "TimeLastRefresh": - z.TimeLastRefresh, bts, err = msgp.ReadTimeBytes(bts) + z.TimeLastRefresh, bts, err = msgp.ReadInt64Bytes(bts) if err != nil { err = msgp.WrapError(err, "TimeLastRefresh") return @@ -462,7 +464,7 @@ func (z *lockRequesterInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *lockRequesterInfo) Msgsize() (s int) { - s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 7 + msgp.BoolSize + 4 + msgp.StringPrefixSize + len(z.UID) + 10 + msgp.TimeSize + 16 + msgp.TimeSize + 7 + msgp.StringPrefixSize + len(z.Source) + 6 + 
msgp.BoolSize + 6 + msgp.StringPrefixSize + len(z.Owner) + 7 + msgp.IntSize + s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 7 + msgp.BoolSize + 4 + msgp.StringPrefixSize + len(z.UID) + 10 + msgp.Int64Size + 16 + msgp.Int64Size + 7 + msgp.StringPrefixSize + len(z.Source) + 6 + msgp.BoolSize + 6 + msgp.StringPrefixSize + len(z.Owner) + 7 + msgp.IntSize return } @@ -502,6 +504,36 @@ func (z *lockStats) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "Reads") return } + case "LockQueue": + z.LockQueue, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "LockQueue") + return + } + case "LocksAbandoned": + z.LocksAbandoned, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "LocksAbandoned") + return + } + case "LastCleanup": + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "LastCleanup") + return + } + z.LastCleanup = nil + } else { + if z.LastCleanup == nil { + z.LastCleanup = new(time.Time) + } + *z.LastCleanup, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "LastCleanup") + return + } + } default: err = dc.Skip() if err != nil { @@ -514,10 +546,10 @@ func (z *lockStats) DecodeMsg(dc *msgp.Reader) (err error) { } // EncodeMsg implements msgp.Encodable -func (z lockStats) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 3 +func (z *lockStats) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 6 // write "Total" - err = en.Append(0x83, 0xa5, 0x54, 0x6f, 0x74, 0x61, 0x6c) + err = en.Append(0x86, 0xa5, 0x54, 0x6f, 0x74, 0x61, 0x6c) if err != nil { return } @@ -546,15 +578,52 @@ func (z lockStats) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "Reads") return } + // write "LockQueue" + err = en.Append(0xa9, 0x4c, 0x6f, 0x63, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65) + if err != nil { + return + } + err = en.WriteInt(z.LockQueue) + if err != nil { + err = msgp.WrapError(err, "LockQueue") + return + } + // write "LocksAbandoned" + err = en.Append(0xae, 0x4c, 0x6f, 0x63, 0x6b, 0x73, 0x41, 0x62, 0x61, 0x6e, 0x64, 0x6f, 0x6e, 0x65, 0x64) + if err != nil { + return + } + err = en.WriteInt(z.LocksAbandoned) + if err != nil { + err = msgp.WrapError(err, "LocksAbandoned") + return + } + // write "LastCleanup" + err = en.Append(0xab, 0x4c, 0x61, 0x73, 0x74, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70) + if err != nil { + return + } + if z.LastCleanup == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = en.WriteTime(*z.LastCleanup) + if err != nil { + err = msgp.WrapError(err, "LastCleanup") + return + } + } return } // MarshalMsg implements msgp.Marshaler -func (z lockStats) MarshalMsg(b []byte) (o []byte, err error) { +func (z *lockStats) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 3 + // map header, size 6 // string "Total" - o = append(o, 0x83, 0xa5, 0x54, 0x6f, 0x74, 0x61, 0x6c) + o = append(o, 0x86, 0xa5, 0x54, 0x6f, 0x74, 0x61, 0x6c) o = msgp.AppendInt(o, z.Total) // string "Writes" o = append(o, 0xa6, 0x57, 0x72, 0x69, 0x74, 0x65, 0x73) @@ -562,6 +631,19 @@ func (z lockStats) MarshalMsg(b []byte) (o []byte, err error) { // string "Reads" o = append(o, 0xa5, 0x52, 0x65, 0x61, 0x64, 0x73) o = msgp.AppendInt(o, z.Reads) + // string "LockQueue" + o = append(o, 0xa9, 0x4c, 0x6f, 0x63, 0x6b, 0x51, 0x75, 0x65, 0x75, 0x65) + o = msgp.AppendInt(o, z.LockQueue) + // string "LocksAbandoned" + o = append(o, 0xae, 0x4c, 0x6f, 0x63, 0x6b, 0x73, 0x41, 0x62, 0x61, 0x6e, 0x64, 0x6f, 0x6e, 0x65, 
0x64) + o = msgp.AppendInt(o, z.LocksAbandoned) + // string "LastCleanup" + o = append(o, 0xab, 0x4c, 0x61, 0x73, 0x74, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x75, 0x70) + if z.LastCleanup == nil { + o = msgp.AppendNil(o) + } else { + o = msgp.AppendTime(o, *z.LastCleanup) + } return } @@ -601,6 +683,35 @@ func (z *lockStats) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "Reads") return } + case "LockQueue": + z.LockQueue, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "LockQueue") + return + } + case "LocksAbandoned": + z.LocksAbandoned, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "LocksAbandoned") + return + } + case "LastCleanup": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.LastCleanup = nil + } else { + if z.LastCleanup == nil { + z.LastCleanup = new(time.Time) + } + *z.LastCleanup, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "LastCleanup") + return + } + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -614,7 +725,12 @@ func (z *lockStats) UnmarshalMsg(bts []byte) (o []byte, err error) { } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z lockStats) Msgsize() (s int) { - s = 1 + 6 + msgp.IntSize + 7 + msgp.IntSize + 6 + msgp.IntSize +func (z *lockStats) Msgsize() (s int) { + s = 1 + 6 + msgp.IntSize + 7 + msgp.IntSize + 6 + msgp.IntSize + 10 + msgp.IntSize + 15 + msgp.IntSize + 12 + if z.LastCleanup == nil { + s += msgp.NilSize + } else { + s += msgp.TimeSize + } return } diff --git a/cmd/local-locker_gen_test.go b/cmd/local-locker_gen_test.go index 983b33bcc457b..fe0fee2d5792d 100644 --- a/cmd/local-locker_gen_test.go +++ b/cmd/local-locker_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package cmd + import ( "bytes" "testing" diff --git a/cmd/local-locker_test.go b/cmd/local-locker_test.go index 06d7d68643cad..674cf07b63263 100644 --- a/cmd/local-locker_test.go +++ b/cmd/local-locker_test.go @@ -18,7 +18,6 @@ package cmd import ( - "context" "encoding/hex" "fmt" "math/rand" @@ -32,15 +31,16 @@ import ( func TestLocalLockerExpire(t *testing.T) { wResources := make([]string, 1000) rResources := make([]string, 1000) + quorum := 0 l := newLocker() - ctx := context.Background() + ctx := t.Context() for i := range wResources { arg := dsync.LockArgs{ UID: mustGetUUID(), Resources: []string{mustGetUUID()}, Source: t.Name(), Owner: "owner", - Quorum: 0, + Quorum: &quorum, } ok, err := l.Lock(ctx, arg) if err != nil { @@ -58,14 +58,14 @@ func TestLocalLockerExpire(t *testing.T) { Resources: []string{name}, Source: t.Name(), Owner: "owner", - Quorum: 0, + Quorum: &quorum, } ok, err := l.RLock(ctx, arg) if err != nil { t.Fatal(err) } if !ok { - t.Fatal("did not get write lock") + t.Fatal("did not get read lock") } // RLock twice ok, err = l.RLock(ctx, arg) @@ -111,7 +111,8 @@ func TestLocalLockerUnlock(t *testing.T) { wUIDs := make([]string, n) rUIDs := make([]string, 0, n*2) l := newLocker() - ctx := context.Background() + ctx := t.Context() + quorum := 0 for i := range wResources { names := [m]string{} for j := range names { @@ -123,7 +124,7 @@ func TestLocalLockerUnlock(t *testing.T) { Resources: names[:], Source: t.Name(), Owner: "owner", - Quorum: 0, + Quorum: &quorum, } ok, err := l.Lock(ctx, arg) if err != nil { @@ -134,7 +135,6 @@ func TestLocalLockerUnlock(t *testing.T) { } wResources[i] = names wUIDs[i] = uid - } for i := range rResources { name := mustGetUUID() @@ -144,7 +144,7 @@ func TestLocalLockerUnlock(t *testing.T) { Resources: []string{name}, Source: t.Name(), Owner: "owner", - Quorum: 0, + Quorum: &quorum, } ok, err := l.RLock(ctx, arg) if err != nil { @@ -183,7 +183,7 @@ func TestLocalLockerUnlock(t *testing.T) { Resources: []string{name}, Source: t.Name(), Owner: "owner", - Quorum: 0, + Quorum: &quorum, } ok, err := l.RUnlock(ctx, arg) if err != nil { @@ -203,6 +203,7 @@ func TestLocalLockerUnlock(t *testing.T) { if len(l.lockUID) != len(rResources)+len(wResources)*m { t.Fatalf("lockUID len, got %d, want %d + %d", len(l.lockUID), len(rResources), len(wResources)*m) } + // RUnlock again, different uids for i, name := range rResources { arg := dsync.LockArgs{ @@ -210,7 +211,7 @@ func TestLocalLockerUnlock(t *testing.T) { Resources: []string{name}, Source: "minio", Owner: "owner", - Quorum: 0, + Quorum: &quorum, } ok, err := l.RUnlock(ctx, arg) if err != nil { @@ -238,7 +239,7 @@ func TestLocalLockerUnlock(t *testing.T) { Resources: names[:], Source: "minio", Owner: "owner", - Quorum: 0, + Quorum: &quorum, } ok, err := l.Unlock(ctx, arg) if err != nil { @@ -261,6 +262,7 @@ func TestLocalLockerUnlock(t *testing.T) { func Test_localLocker_expireOldLocksExpire(t *testing.T) { rng := rand.New(rand.NewSource(0)) + quorum := 0 // Numbers of unique locks for _, locks := range []int{100, 1000, 1e6} { if testing.Short() && locks > 100 { @@ -277,19 +279,19 @@ func Test_localLocker_expireOldLocksExpire(t *testing.T) { } t.Run(fmt.Sprintf("%d-read", readers), func(t *testing.T) { l := newLocker() - for i := 0; i < locks; i++ { + for range locks { var tmp [16]byte rng.Read(tmp[:]) res := []string{hex.EncodeToString(tmp[:])} - for i := 0; i < readers; i++ { + for range readers { rng.Read(tmp[:]) - ok, err := l.RLock(context.Background(), dsync.LockArgs{ + ok, err := 
l.RLock(t.Context(), dsync.LockArgs{ UID: uuid.NewString(), Resources: res, Source: hex.EncodeToString(tmp[:8]), Owner: hex.EncodeToString(tmp[8:]), - Quorum: 0, + Quorum: &quorum, }) if !ok || err != nil { t.Fatal("failed:", err, ok) @@ -311,7 +313,7 @@ func Test_localLocker_expireOldLocksExpire(t *testing.T) { for _, v := range l.lockMap { for i := range v { if rng.Intn(2) == 0 { - v[i].TimeLastRefresh = expired + v[i].TimeLastRefresh = expired.UnixNano() } } } @@ -347,6 +349,7 @@ func Test_localLocker_expireOldLocksExpire(t *testing.T) { func Test_localLocker_RUnlock(t *testing.T) { rng := rand.New(rand.NewSource(0)) + quorum := 0 // Numbers of unique locks for _, locks := range []int{1, 100, 1000, 1e6} { if testing.Short() && locks > 100 { @@ -363,19 +366,19 @@ func Test_localLocker_RUnlock(t *testing.T) { } t.Run(fmt.Sprintf("%d-read", readers), func(t *testing.T) { l := newLocker() - for i := 0; i < locks; i++ { + for range locks { var tmp [16]byte rng.Read(tmp[:]) res := []string{hex.EncodeToString(tmp[:])} - for i := 0; i < readers; i++ { + for range readers { rng.Read(tmp[:]) - ok, err := l.RLock(context.Background(), dsync.LockArgs{ + ok, err := l.RLock(t.Context(), dsync.LockArgs{ UID: uuid.NewString(), Resources: res, Source: hex.EncodeToString(tmp[:8]), Owner: hex.EncodeToString(tmp[8:]), - Quorum: 0, + Quorum: &quorum, }) if !ok || err != nil { t.Fatal("failed:", err, ok) @@ -394,7 +397,7 @@ func Test_localLocker_RUnlock(t *testing.T) { } start := time.Now() for _, lock := range toUnLock { - ok, err := l.ForceUnlock(context.Background(), lock) + ok, err := l.ForceUnlock(t.Context(), lock) if err != nil || !ok { t.Fatal(err) } @@ -419,7 +422,7 @@ func Test_localLocker_RUnlock(t *testing.T) { } start = time.Now() for _, lock := range toUnLock { - ok, err := l.RUnlock(context.TODO(), lock) + ok, err := l.RUnlock(t.Context(), lock) if err != nil || !ok { t.Fatal(err) } diff --git a/cmd/lock-rest-client.go b/cmd/lock-rest-client.go index bf162d124c485..ff8e2c35a4904 100644 --- a/cmd/lock-rest-client.go +++ b/cmd/lock-rest-client.go @@ -107,5 +107,5 @@ func newLockAPI(endpoint Endpoint) dsync.NetLocker { // Returns a lock rest client. func newlockRESTClient(ep Endpoint) *lockRESTClient { - return &lockRESTClient{globalGrid.Load().Connection(ep.GridHost())} + return &lockRESTClient{globalLockGrid.Load().Connection(ep.GridHost())} } diff --git a/cmd/lock-rest-client_test.go b/cmd/lock-rest-client_test.go index 10beb12a0328e..cdfadc6a244d1 100644 --- a/cmd/lock-rest-client_test.go +++ b/cmd/lock-rest-client_test.go @@ -37,9 +37,9 @@ func TestLockRESTlient(t *testing.T) { } endpointLocal.IsLocal = true - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() - err = initGlobalGrid(ctx, []PoolEndpoints{{Endpoints: Endpoints{endpoint, endpointLocal}}}) + err = initGlobalLockGrid(ctx, []PoolEndpoints{{Endpoints: Endpoints{endpoint, endpointLocal}}}) if err != nil { t.Fatal(err) } @@ -50,22 +50,22 @@ func TestLockRESTlient(t *testing.T) { } // Attempt all calls. 
- _, err = lkClient.RLock(context.Background(), dsync.LockArgs{}) + _, err = lkClient.RLock(t.Context(), dsync.LockArgs{}) if err == nil { t.Fatal("Expected for Rlock to fail") } - _, err = lkClient.Lock(context.Background(), dsync.LockArgs{}) + _, err = lkClient.Lock(t.Context(), dsync.LockArgs{}) if err == nil { t.Fatal("Expected for Lock to fail") } - _, err = lkClient.RUnlock(context.Background(), dsync.LockArgs{}) + _, err = lkClient.RUnlock(t.Context(), dsync.LockArgs{}) if err == nil { t.Fatal("Expected for RUnlock to fail") } - _, err = lkClient.Unlock(context.Background(), dsync.LockArgs{}) + _, err = lkClient.Unlock(t.Context(), dsync.LockArgs{}) if err == nil { t.Fatal("Expected for Unlock to fail") } diff --git a/cmd/lock-rest-server-common_test.go b/cmd/lock-rest-server-common_test.go index 1ef8845324aeb..21b7fdcd8ed46 100644 --- a/cmd/lock-rest-server-common_test.go +++ b/cmd/lock-rest-server-common_test.go @@ -44,7 +44,7 @@ func createLockTestServer(ctx context.Context, t *testing.T) (string, *lockRESTS }, } creds := globalActiveCred - token, err := authenticateNode(creds.AccessKey, creds.SecretKey, "") + token, err := authenticateNode(creds.AccessKey, creds.SecretKey) if err != nil { t.Fatal(err) } @@ -53,7 +53,7 @@ func createLockTestServer(ctx context.Context, t *testing.T) (string, *lockRESTS // Test function to remove lock entries from map based on name & uid combination func TestLockRpcServerRemoveEntry(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() testPath, locker, _ := createLockTestServer(ctx, t) @@ -63,15 +63,15 @@ func TestLockRpcServerRemoveEntry(t *testing.T) { Owner: "owner", Writer: true, UID: "0123-4567", - Timestamp: UTCNow(), - TimeLastRefresh: UTCNow(), + Timestamp: UTCNow().UnixNano(), + TimeLastRefresh: UTCNow().UnixNano(), } lockRequesterInfo2 := lockRequesterInfo{ Owner: "owner", Writer: true, UID: "89ab-cdef", - Timestamp: UTCNow(), - TimeLastRefresh: UTCNow(), + Timestamp: UTCNow().UnixNano(), + TimeLastRefresh: UTCNow().UnixNano(), } locker.ll.lockMap["name"] = []lockRequesterInfo{ diff --git a/cmd/lock-rest-server.go b/cmd/lock-rest-server.go index a0adea595bc4b..79e3dfdb93058 100644 --- a/cmd/lock-rest-server.go +++ b/cmd/lock-rest-server.go @@ -33,8 +33,12 @@ type lockRESTServer struct { // RefreshHandler - refresh the current lock func (l *lockRESTServer) RefreshHandler(args *dsync.LockArgs) (*dsync.LockResp, *grid.RemoteErr) { + // Add a timeout similar to what we expect upstream. + ctx, cancel := context.WithTimeout(context.Background(), dsync.DefaultTimeouts.RefreshCall) + defer cancel() + resp := lockRPCRefresh.NewResponse() - refreshed, err := l.ll.Refresh(context.Background(), *args) + refreshed, err := l.ll.Refresh(ctx, *args) if err != nil { return l.makeResp(resp, err) } @@ -46,8 +50,11 @@ func (l *lockRESTServer) RefreshHandler(args *dsync.LockArgs) (*dsync.LockResp, // LockHandler - Acquires a lock. func (l *lockRESTServer) LockHandler(args *dsync.LockArgs) (*dsync.LockResp, *grid.RemoteErr) { + // Add a timeout similar to what we expect upstream. 
+ ctx, cancel := context.WithTimeout(context.Background(), dsync.DefaultTimeouts.Acquire) + defer cancel() resp := lockRPCLock.NewResponse() - success, err := l.ll.Lock(context.Background(), *args) + success, err := l.ll.Lock(ctx, *args) if err == nil && !success { return l.makeResp(resp, errLockConflict) } @@ -65,8 +72,11 @@ func (l *lockRESTServer) UnlockHandler(args *dsync.LockArgs) (*dsync.LockResp, * // RLockHandler - Acquires an RLock. func (l *lockRESTServer) RLockHandler(args *dsync.LockArgs) (*dsync.LockResp, *grid.RemoteErr) { + // Add a timeout similar to what we expect upstream. + ctx, cancel := context.WithTimeout(context.Background(), dsync.DefaultTimeouts.Acquire) + defer cancel() resp := lockRPCRLock.NewResponse() - success, err := l.ll.RLock(context.Background(), *args) + success, err := l.ll.RLock(ctx, *args) if err == nil && !success { err = errLockConflict } @@ -111,17 +121,17 @@ func newLockHandler(h grid.HandlerID) *grid.SingleHandler[*dsync.LockArgs, *dsyn } // registerLockRESTHandlers - register lock rest router. -func registerLockRESTHandlers() { +func registerLockRESTHandlers(gm *grid.Manager) { lockServer := &lockRESTServer{ ll: newLocker(), } - logger.FatalIf(lockRPCForceUnlock.Register(globalGrid.Load(), lockServer.ForceUnlockHandler), "unable to register handler") - logger.FatalIf(lockRPCRefresh.Register(globalGrid.Load(), lockServer.RefreshHandler), "unable to register handler") - logger.FatalIf(lockRPCLock.Register(globalGrid.Load(), lockServer.LockHandler), "unable to register handler") - logger.FatalIf(lockRPCUnlock.Register(globalGrid.Load(), lockServer.UnlockHandler), "unable to register handler") - logger.FatalIf(lockRPCRLock.Register(globalGrid.Load(), lockServer.RLockHandler), "unable to register handler") - logger.FatalIf(lockRPCRUnlock.Register(globalGrid.Load(), lockServer.RUnlockHandler), "unable to register handler") + logger.FatalIf(lockRPCForceUnlock.Register(gm, lockServer.ForceUnlockHandler), "unable to register handler") + logger.FatalIf(lockRPCRefresh.Register(gm, lockServer.RefreshHandler), "unable to register handler") + logger.FatalIf(lockRPCLock.Register(gm, lockServer.LockHandler), "unable to register handler") + logger.FatalIf(lockRPCUnlock.Register(gm, lockServer.UnlockHandler), "unable to register handler") + logger.FatalIf(lockRPCRLock.Register(gm, lockServer.RLockHandler), "unable to register handler") + logger.FatalIf(lockRPCRUnlock.Register(gm, lockServer.RUnlockHandler), "unable to register handler") globalLockServer = lockServer.ll diff --git a/cmd/logging.go b/cmd/logging.go new file mode 100644 index 0000000000000..9c24ae22dfebf --- /dev/null +++ b/cmd/logging.go @@ -0,0 +1,230 @@ +package cmd + +import ( + "context" + "errors" + + "github.com/minio/minio/internal/grid" + "github.com/minio/minio/internal/logger" +) + +func proxyLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "proxy", err, errKind...) +} + +func replLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "replication", err, errKind...) +} + +func replLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceIf(ctx, "replication", err, id, errKind...) +} + +func iamLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceIf(ctx, "iam", err, id, errKind...) +} + +func iamLogIf(ctx context.Context, err error, errKind ...any) { + if !errors.Is(err, grid.ErrDisconnected) { + logger.LogIf(ctx, "iam", err, errKind...) 
+ } +} + +func iamLogEvent(ctx context.Context, msg string, args ...any) { + logger.Event(ctx, "iam", msg, args...) +} + +func rebalanceLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "rebalance", err, errKind...) +} + +func rebalanceLogEvent(ctx context.Context, msg string, args ...any) { + logger.Event(ctx, "rebalance", msg, args...) +} + +func adminLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "admin", err, errKind...) +} + +func authNLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "authN", err, errKind...) +} + +func authZLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "authZ", err, errKind...) +} + +func peersLogIf(ctx context.Context, err error, errKind ...any) { + if !errors.Is(err, grid.ErrDisconnected) { + logger.LogIf(ctx, "peers", err, errKind...) + } +} + +func peersLogAlwaysIf(ctx context.Context, err error, errKind ...any) { + if !errors.Is(err, grid.ErrDisconnected) { + logger.LogAlwaysIf(ctx, "peers", err, errKind...) + } +} + +func peersLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { + if !errors.Is(err, grid.ErrDisconnected) { + logger.LogOnceIf(ctx, "peers", err, id, errKind...) + } +} + +func bugLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "internal", err, errKind...) +} + +func healingLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "healing", err, errKind...) +} + +func healingLogEvent(ctx context.Context, msg string, args ...any) { + logger.Event(ctx, "healing", msg, args...) +} + +func healingLogOnceIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "healing", err, errKind...) +} + +func batchLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "batch", err, errKind...) +} + +func batchLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceIf(ctx, "batch", err, id, errKind...) +} + +func bootLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "bootstrap", err, errKind...) +} + +func bootLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceIf(ctx, "bootstrap", err, id, errKind...) +} + +func dnsLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "dns", err, errKind...) +} + +func internalLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "internal", err, errKind...) +} + +func internalLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceIf(ctx, "internal", err, id, errKind...) +} + +func transitionLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "transition", err, errKind...) +} + +func configLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "config", err, errKind...) +} + +func configLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceIf(ctx, "config", err, id, errKind...) +} + +func configLogOnceConsoleIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceConsoleIf(ctx, "config", err, id, errKind...) +} + +func scannerLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "scanner", err, errKind...) +} + +func scannerLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceIf(ctx, "scanner", err, id, errKind...) 
+} + +func ilmLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "ilm", err, errKind...) +} + +func ilmLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceIf(ctx, "ilm", err, id, errKind...) +} + +func encLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "encryption", err, errKind...) +} + +func encLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceIf(ctx, "encryption", err, id, errKind...) +} + +func storageLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "storage", err, errKind...) +} + +func storageLogAlwaysIf(ctx context.Context, err error, errKind ...any) { + logger.LogAlwaysIf(ctx, "storage", err, errKind...) +} + +func storageLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceIf(ctx, "storage", err, id, errKind...) +} + +func decomLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "decom", err, errKind...) +} + +func decomLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceIf(ctx, "decom", err, id, errKind...) +} + +func decomLogEvent(ctx context.Context, msg string, args ...any) { + logger.Event(ctx, "decom", msg, args...) +} + +func etcdLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "etcd", err, errKind...) +} + +func etcdLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceIf(ctx, "etcd", err, id, errKind...) +} + +func metricsLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "metrics", err, errKind...) +} + +func s3LogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "s3", err, errKind...) +} + +func sftpLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceIf(ctx, "sftp", err, id, errKind...) +} + +func shutdownLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "shutdown", err, errKind...) +} + +func stsLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "sts", err, errKind...) +} + +func tierLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "tier", err, errKind...) +} + +func kmsLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "kms", err, errKind...) +} + +// KMSLogger permits access to kms module specific logging +type KMSLogger struct{} + +// LogOnceIf is the implementation of LogOnceIf, accessible using the Logger interface +func (l KMSLogger) LogOnceIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceIf(ctx, "kms", err, id, errKind...) +} + +// LogIf is the implementation of LogIf, accessible using the Logger interface +func (l KMSLogger) LogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "kms", err, errKind...) +} diff --git a/cmd/main.go b/cmd/main.go index 06f24ff3916e7..53249af42e4d8 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -32,10 +32,10 @@ import ( "github.com/minio/cli" "github.com/minio/minio/internal/color" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/console" - "github.com/minio/pkg/v2/env" - "github.com/minio/pkg/v2/trie" - "github.com/minio/pkg/v2/words" + "github.com/minio/pkg/v3/console" + "github.com/minio/pkg/v3/env" + "github.com/minio/pkg/v3/trie" + "github.com/minio/pkg/v3/words" ) // GlobalFlags - global flags for minio. 
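Note (illustrative, not part of the patch): the new cmd/logging.go above introduces thin per-subsystem wrappers around the internal logger, and later hunks in this patch switch call sites from the untagged logger.LogIf form to these helpers. A minimal sketch of the pattern, using the "storage" subsystem as the example:

	// Each helper forwards to the internal logger with a fixed subsystem tag.
	func storageLogIf(ctx context.Context, err error, errKind ...any) {
		logger.LogIf(ctx, "storage", err, errKind...)
	}

	// Call sites then change from the untagged form to the subsystem-scoped one:
	//   before: logger.LogIf(ctx, err)
	//   after:  storageLogIf(ctx, err)
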
@@ -107,6 +107,11 @@ func newApp(name string) *cli.App { // registerCommand registers a cli command. registerCommand := func(command cli.Command) { + // avoid registering commands which are not being built (via + // go:build tags) + if command.Name == "" { + return + } commands = append(commands, command) commandsTree.Insert(command.Name) } @@ -134,7 +139,7 @@ func newApp(name string) *cli.App { // Register all commands. registerCommand(serverCmd) - registerCommand(gatewayCmd) // hidden kept for guiding users. + registerCommand(fmtGenCmd) // Set up app. cli.HelpFlag = cli.BoolFlag{ @@ -181,7 +186,7 @@ func versionBanner(c *cli.Context) io.Reader { banner := &strings.Builder{} fmt.Fprintln(banner, color.Bold("%s version %s (commit-id=%s)", c.App.Name, c.App.Version, CommitID)) fmt.Fprintln(banner, color.Blue("Runtime:")+color.Bold(" %s %s/%s", runtime.Version(), runtime.GOOS, runtime.GOARCH)) - fmt.Fprintln(banner, color.Blue("License:")+color.Bold(" GNU AGPLv3 ")) + fmt.Fprintln(banner, color.Blue("License:")+color.Bold(" GNU AGPLv3 - https://www.gnu.org/licenses/agpl-3.0.html")) fmt.Fprintln(banner, color.Blue("Copyright:")+color.Bold(" 2015-%s MinIO, Inc.", CopyrightYear)) return strings.NewReader(banner.String()) } @@ -190,12 +195,14 @@ func printMinIOVersion(c *cli.Context) { io.Copy(c.App.Writer, versionBanner(c)) } +var debugNoExit = env.Get("_MINIO_DEBUG_NO_EXIT", "") != "" + // Main main for minio server. func Main(args []string) { // Set the minio app name. appName := filepath.Base(args[0]) - if env.Get("_MINIO_DEBUG_NO_EXIT", "") != "" { + if debugNoExit { freeze := func(_ int) { // Infinite blocking op <-make(chan struct{}) diff --git a/cmd/metacache-bucket.go b/cmd/metacache-bucket.go index c8f334995b16b..4df23d8253af6 100644 --- a/cmd/metacache-bucket.go +++ b/cmd/metacache-bucket.go @@ -20,13 +20,14 @@ package cmd import ( "context" "errors" + "maps" "runtime/debug" "sort" "sync" "time" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/console" + "github.com/minio/pkg/v3/console" ) // a bucketMetacache keeps track of all caches generated @@ -70,7 +71,7 @@ func newBucketMetacache(bucket string, cleanup bool) *bucketMetacache { } } -func (b *bucketMetacache) debugf(format string, data ...interface{}) { +func (b *bucketMetacache) debugf(format string, data ...any) { if serverDebugLog { console.Debugf(format+"\n", data...) 
} @@ -195,9 +196,7 @@ func (b *bucketMetacache) cloneCaches() (map[string]metacache, map[string][]stri b.mu.RLock() defer b.mu.RUnlock() dst := make(map[string]metacache, len(b.caches)) - for k, v := range b.caches { - dst[k] = v - } + maps.Copy(dst, b.caches) // Copy indexes dst2 := make(map[string][]string, len(b.cachesRoot)) for k, v := range b.cachesRoot { @@ -221,7 +220,7 @@ func (b *bucketMetacache) deleteAll() { ez, ok := objAPI.(deleteAllStorager) if !ok { - logger.LogIf(ctx, errors.New("bucketMetacache: expected objAPI to be 'deleteAllStorager'")) + bugLogIf(ctx, errors.New("bucketMetacache: expected objAPI to be 'deleteAllStorager'")) return } diff --git a/cmd/metacache-bucket_test.go b/cmd/metacache-bucket_test.go index 6676201b9527e..768d78538ab78 100644 --- a/cmd/metacache-bucket_test.go +++ b/cmd/metacache-bucket_test.go @@ -33,7 +33,7 @@ func Benchmark_bucketMetacache_findCache(b *testing.B) { for i := range pathNames[:] { pathNames[i] = fmt.Sprintf("prefix/%d", i) } - for i := 0; i < elements; i++ { + for i := range elements { bm.findCache(listPathOptions{ ID: mustGetUUID(), Bucket: "", @@ -49,8 +49,8 @@ func Benchmark_bucketMetacache_findCache(b *testing.B) { }) } b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for i := 0; b.Loop(); i++ { bm.findCache(listPathOptions{ ID: mustGetUUID(), Bucket: "", diff --git a/cmd/metacache-entries.go b/cmd/metacache-entries.go index 22598d20b17e2..69c5e835c2e44 100644 --- a/cmd/metacache-entries.go +++ b/cmd/metacache-entries.go @@ -20,15 +20,13 @@ package cmd import ( "bytes" "context" - "errors" "os" "path" "sort" "strings" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/console" + "github.com/minio/pkg/v3/console" ) // metaCacheEntry is an object or a directory within an unknown bucket. @@ -121,6 +119,16 @@ func (e *metaCacheEntry) matches(other *metaCacheEntry, strict bool) (prefer *me for i, eVer := range eVers.versions { oVer := oVers.versions[i] if eVer.header != oVer.header { + if eVer.header.hasEC() != oVer.header.hasEC() { + // One version has EC and the other doesn't - may have been written later. + // Compare without considering EC. + a, b := eVer.header, oVer.header + a.EcN, a.EcM = 0, 0 + b.EcN, b.EcM = 0, 0 + if a == b { + continue + } + } if !strict && eVer.header.matchesNotStrict(oVer.header) { if prefer == nil { if eVer.header.sortsBefore(oVer.header) { @@ -249,9 +257,9 @@ func (e *metaCacheEntry) fileInfo(bucket string) (FileInfo, error) { ModTime: timeSentinel1970, }, nil } - return e.cached.ToFileInfo(bucket, e.name, "", false, false) + return e.cached.ToFileInfo(bucket, e.name, "", false, true) } - return getFileInfo(e.metadata, bucket, e.name, "", false, false) + return getFileInfo(e.metadata, bucket, e.name, "", fileInfoOpts{}) } // xlmeta returns the decoded metadata. @@ -292,7 +300,7 @@ func (e *metaCacheEntry) fileInfoVersions(bucket string) (FileInfoVersions, erro }, nil } // Too small gains to reuse cache here. - return getFileInfoVersions(e.metadata, bucket, e.name, false) + return getFileInfoVersions(e.metadata, bucket, e.name, true) } // metaCacheEntries is a slice of metacache entries. @@ -376,9 +384,6 @@ func (m metaCacheEntries) resolve(r *metadataResolutionParams) (selected *metaCa // shallow decode. 
xl, err := entry.xlmeta() if err != nil { - if !errors.Is(err, errFileNotFound) { - logger.LogIf(GlobalContext, err) - } continue } objsValid++ @@ -437,7 +442,7 @@ func (m metaCacheEntries) resolve(r *metadataResolutionParams) (selected *metaCa var err error selected.metadata, err = selected.cached.AppendTo(metaDataPoolGet()) if err != nil { - logger.LogIf(context.Background(), err) + bugLogIf(context.Background(), err) return nil, false } return selected, true @@ -473,6 +478,8 @@ type metaCacheEntriesSorted struct { listID string // Reuse buffers reuse bool + // Contain the last skipped object after an ILM expiry evaluation + lastSkippedEntry string } // shallowClone will create a shallow clone of the array objects, @@ -525,6 +532,9 @@ func (m *metaCacheEntriesSorted) fileInfoVersions(bucket, prefix, delimiter, aft } for _, version := range fiVersions { + if !version.VersionPurgeStatus().Empty() { + continue + } versioned := vcfg != nil && vcfg.Versioned(entry.name) versions = append(versions, version.ToObjectInfo(bucket, entry.name, versioned)) } @@ -586,7 +596,7 @@ func (m *metaCacheEntriesSorted) fileInfos(bucket, prefix, delimiter string) (ob } fi, err := entry.fileInfo(bucket) - if err == nil { + if err == nil && fi.VersionPurgeStatus().Empty() { versioned := vcfg != nil && vcfg.Versioned(entry.name) objects = append(objects, fi.ToObjectInfo(bucket, entry.name, versioned)) } @@ -725,16 +735,41 @@ func mergeEntryChannels(ctx context.Context, in []chan metaCacheEntry, out chan< bestIdx = otherIdx continue } - // We should make sure to avoid objects and directories - // of this fashion such as - // - foo-1 - // - foo-1/ - // we should avoid this situation by making sure that - // we compare the `foo-1/` after path.Clean() to - // de-dup the entries. if path.Clean(best.name) == path.Clean(other.name) { - toMerge = append(toMerge, otherIdx) - continue + // We may be in a situation where we have a directory and an object with the same name. + // In that case we will drop the directory entry. + // This should however not be confused with an object with a trailing slash. + dirMatches := best.isDir() == other.isDir() + suffixMatches := strings.HasSuffix(best.name, slashSeparator) == strings.HasSuffix(other.name, slashSeparator) + + // Simple case. Both are same type with same suffix. + if dirMatches && suffixMatches { + toMerge = append(toMerge, otherIdx) + continue + } + + if !dirMatches { + // We have an object `name` or 'name/' and a directory `name/`. + if other.isDir() { + if serverDebugLog { + console.Debugln("mergeEntryChannels: discarding directory", other.name, "for object", best.name) + } + // Discard the directory. + if err := selectFrom(otherIdx); err != nil { + return err + } + continue + } + // Replace directory with object. + if serverDebugLog { + console.Debugln("mergeEntryChannels: discarding directory", best.name, "for object", other.name) + } + toMerge = toMerge[:0] + best = other + bestIdx = otherIdx + continue + } + // Leave it to be resolved. Names are different. } if best.name > other.name { toMerge = toMerge[:0] diff --git a/cmd/metacache-entries_test.go b/cmd/metacache-entries_test.go index 3857724d84685..75af4d0f27909 100644 --- a/cmd/metacache-entries_test.go +++ b/cmd/metacache-entries_test.go @@ -633,7 +633,7 @@ func Test_metaCacheEntries_resolve(t *testing.T) { for testID, tt := range tests { rng := rand.New(rand.NewSource(0)) // Run for a number of times, shuffling the input to ensure that output is consistent. 
- for i := 0; i < 10; i++ { + for i := range 10 { t.Run(fmt.Sprintf("test-%d-%s-run-%d", testID, tt.name, i), func(t *testing.T) { if i > 0 { rng.Shuffle(len(tt.m), func(i, j int) { diff --git a/cmd/metacache-marker.go b/cmd/metacache-marker.go index 3510f91f8e3f1..4548af2b9300f 100644 --- a/cmd/metacache-marker.go +++ b/cmd/metacache-marker.go @@ -22,8 +22,6 @@ import ( "fmt" "strconv" "strings" - - "github.com/minio/minio/internal/logger" ) // markerTagVersion is the marker version. @@ -40,8 +38,8 @@ func (o *listPathOptions) parseMarker() { o.Marker = s[:start] end := strings.LastIndex(s, "]") tag := strings.Trim(s[start:end], "[]") - tags := strings.Split(tag, ",") - for _, tag := range tags { + tags := strings.SplitSeq(tag, ",") + for tag := range tags { kv := strings.Split(tag, ":") if len(kv) < 2 { continue @@ -86,7 +84,7 @@ func (o listPathOptions) encodeMarker(marker string) string { return fmt.Sprintf("%s[minio_cache:%s,return:]", marker, markerTagVersion) } if strings.ContainsAny(o.ID, "[:,") { - logger.LogIf(context.Background(), fmt.Errorf("encodeMarker: uuid %s contained invalid characters", o.ID)) + internalLogIf(context.Background(), fmt.Errorf("encodeMarker: uuid %s contained invalid characters", o.ID)) } return fmt.Sprintf("%s[minio_cache:%s,id:%s,p:%d,s:%d]", marker, markerTagVersion, o.ID, o.pool, o.set) } diff --git a/cmd/metacache-server-pool.go b/cmd/metacache-server-pool.go index e2b35aa2c9a3b..bcdfed8f10ff4 100644 --- a/cmd/metacache-server-pool.go +++ b/cmd/metacache-server-pool.go @@ -28,8 +28,8 @@ import ( "sync" "time" + "github.com/minio/minio/internal/grid" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" ) func renameAllBucketMetacache(epPath string) error { @@ -134,9 +134,9 @@ func (z *erasureServerPools) listPath(ctx context.Context, o *listPathOptions) ( // request canceled, no entries to return return entries, io.EOF } - if !errors.Is(err, context.DeadlineExceeded) { - // Report error once per bucket, but continue listing. - logger.LogOnceIf(ctx, err, "GetMetacacheListing:"+o.Bucket) + if !IsErr(err, context.DeadlineExceeded, grid.ErrDisconnected) { + // Report error once per bucket, but continue listing.x + storageLogOnceIf(ctx, err, "GetMetacacheListing:"+o.Bucket) } o.Transient = true o.Create = false @@ -153,19 +153,7 @@ func (z *erasureServerPools) listPath(ctx context.Context, o *listPathOptions) ( } else { // Continue listing o.ID = c.id - go func(meta metacache) { - // Continuously update while we wait. - t := time.NewTicker(metacacheMaxClientWait / 10) - defer t.Stop() - select { - case <-ctx.Done(): - // Request is done, stop updating. - return - case <-t.C: - meta.lastHandout = time.Now() - meta, _ = rpc.UpdateMetacacheListing(ctx, meta) - } - }(*c) + go c.keepAlive(ctx, rpc) } } } @@ -219,6 +207,9 @@ func (z *erasureServerPools) listPath(ctx context.Context, o *listPathOptions) ( o.ID = "" } + if contextCanceled(ctx) { + return entries, ctx.Err() + } // Do listing in-place. // Create output for our results. // Create filter for results. @@ -232,7 +223,11 @@ func (z *erasureServerPools) listPath(ctx context.Context, o *listPathOptions) ( go func(o listPathOptions) { defer wg.Done() - o.StopDiskAtLimit = true + if o.Lifecycle == nil { + // No filtering ahead, ask drives to stop + // listing exactly at a specific limit. 
+ o.StopDiskAtLimit = true + } listErr = z.listMerged(listCtx, o, filterCh) o.debugln("listMerged returned with", listErr) }(*o) @@ -291,14 +286,6 @@ func (z *erasureServerPools) listMerged(ctx context.Context, o listPathOptions, } mu.Unlock() - // Do lifecycle filtering. - if o.Lifecycle != nil || o.Replication.Config != nil { - filterIn := make(chan metaCacheEntry, 10) - go applyBucketActions(ctx, o, filterIn, results) - // Replace results. - results = filterIn - } - // Gather results to a single channel. // Quorum is one since we are merging across sets. err := mergeEntryChannels(ctx, inputs, results, 1) @@ -330,7 +317,7 @@ func (z *erasureServerPools) listMerged(ctx context.Context, o listPathOptions, allAtEOF = false continue } - logger.LogIf(ctx, err) + storageLogIf(ctx, err) return err } if allAtEOF { @@ -339,84 +326,50 @@ func (z *erasureServerPools) listMerged(ctx context.Context, o listPathOptions, return nil } -// applyBucketActions applies lifecycle and replication actions on the listing -// It will filter out objects if the most recent version should be deleted by lifecycle. -// Entries that failed replication will be queued if no lifecycle rules got applied. -// out will be closed when there are no more results. -// When 'in' is closed or the context is canceled the -// function closes 'out' and exits. -func applyBucketActions(ctx context.Context, o listPathOptions, in <-chan metaCacheEntry, out chan<- metaCacheEntry) { - defer xioutil.SafeClose(out) - - for { - var obj metaCacheEntry - var ok bool - select { - case <-ctx.Done(): - return - case obj, ok = <-in: - if !ok { - return - } +// triggerExpiryAndRepl applies lifecycle and replication actions on the listing +// It returns true if the listing is non-versioned and the given object is expired. +func triggerExpiryAndRepl(ctx context.Context, o listPathOptions, obj metaCacheEntry) (skip bool) { + versioned := o.Versioning != nil && o.Versioning.Versioned(obj.name) + + // skip latest object from listing only for regular + // listObjects calls, versioned based listing cannot + // filter out between versions 'obj' cannot be truncated + // in such a manner, so look for skipping an object only + // for regular ListObjects() call only. + if !o.Versioned && !o.V1 { + fi, err := obj.fileInfo(o.Bucket) + if err != nil { + return skip } - - var skip bool - - versioned := o.Versioning != nil && o.Versioning.Versioned(obj.name) - - // skip latest object from listing only for regular - // listObjects calls, versioned based listing cannot - // filter out between versions 'obj' cannot be truncated - // in such a manner, so look for skipping an object only - // for regular ListObjects() call only. - if !o.Versioned { - fi, err := obj.fileInfo(o.Bucket) - if err != nil { - continue - } - - objInfo := fi.ToObjectInfo(o.Bucket, obj.name, versioned) - if o.Lifecycle != nil { - act := evalActionFromLifecycle(ctx, *o.Lifecycle, o.Retention, o.Replication.Config, objInfo).Action - skip = act.Delete() - if act.DeleteRestored() { - // do not skip DeleteRestored* actions - skip = false - } - } - } - - // Skip entry only if needed via ILM, skipping is never true for versioned listing. 
- if !skip { - select { - case <-ctx.Done(): - return - case out <- obj: - } + objInfo := fi.ToObjectInfo(o.Bucket, obj.name, versioned) + if o.Lifecycle != nil { + act := evalActionFromLifecycle(ctx, *o.Lifecycle, o.Retention, o.Replication.Config, objInfo).Action + skip = act.Delete() && !act.DeleteRestored() } + } - fiv, err := obj.fileInfoVersions(o.Bucket) - if err != nil { - continue - } + fiv, err := obj.fileInfoVersions(o.Bucket) + if err != nil { + return skip + } - // Expire all versions if needed, if not attempt to queue for replication. - for _, version := range fiv.Versions { - objInfo := version.ToObjectInfo(o.Bucket, obj.name, versioned) - - if o.Lifecycle != nil { - evt := evalActionFromLifecycle(ctx, *o.Lifecycle, o.Retention, o.Replication.Config, objInfo) - if evt.Action.Delete() { - globalExpiryState.enqueueByDays(objInfo, evt, lcEventSrc_s3ListObjects) - if !evt.Action.DeleteRestored() { - continue - } // queue version for replication upon expired restored copies if needed. - } + // Expire all versions if needed, if not attempt to queue for replication. + for _, version := range fiv.Versions { + objInfo := version.ToObjectInfo(o.Bucket, obj.name, versioned) + + if o.Lifecycle != nil { + evt := evalActionFromLifecycle(ctx, *o.Lifecycle, o.Retention, o.Replication.Config, objInfo) + if evt.Action.Delete() { + globalExpiryState.enqueueByDays(objInfo, evt, lcEventSrc_s3ListObjects) + if !evt.Action.DeleteRestored() { + continue + } // queue version for replication upon expired restored copies if needed. } - - queueReplicationHeal(ctx, o.Bucket, objInfo, o.Replication, 0) } + + queueReplicationHeal(ctx, o.Bucket, objInfo, o.Replication, 0) } + return skip } func (z *erasureServerPools) listAndSave(ctx context.Context, o *listPathOptions) (entries metaCacheEntriesSorted, err error) { @@ -473,6 +426,9 @@ func (z *erasureServerPools) listAndSave(ctx context.Context, o *listPathOptions go func() { var returned bool for entry := range inCh { + if o.shouldSkip(ctx, entry) { + continue + } if !returned { funcReturnedMu.Lock() returned = funcReturned @@ -491,5 +447,10 @@ func (z *erasureServerPools) listAndSave(ctx context.Context, o *listPathOptions xioutil.SafeClose(saveCh) }() - return filteredResults() + entries, err = filteredResults() + if err == nil { + // Check if listing recorded an error. + err = meta.getErr() + } + return entries, err } diff --git a/cmd/metacache-set.go b/cmd/metacache-set.go index e2b336ec7cba7..c43d18d7153a8 100644 --- a/cmd/metacache-set.go +++ b/cmd/metacache-set.go @@ -25,10 +25,12 @@ import ( "errors" "fmt" "io" + "maps" "math/rand" "strconv" "strings" "sync" + "sync/atomic" "time" jsoniter "github.com/json-iterator/go" @@ -38,8 +40,7 @@ import ( "github.com/minio/minio/internal/color" "github.com/minio/minio/internal/hash" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/console" + "github.com/minio/pkg/v3/console" ) //go:generate msgp -file $GOFILE -unexported @@ -98,6 +99,8 @@ type listPathOptions struct { // Versioned is this a ListObjectVersions call. Versioned bool + // V1 listing type + V1 bool // Versioning config is used for if the path // has versioning enabled. @@ -160,19 +163,45 @@ func (o listPathOptions) newMetacache() metacache { } } -func (o *listPathOptions) debugf(format string, data ...interface{}) { +func (o *listPathOptions) debugf(format string, data ...any) { if serverDebugLog { console.Debugf(format+"\n", data...) 
} } -func (o *listPathOptions) debugln(data ...interface{}) { +func (o *listPathOptions) debugln(data ...any) { if serverDebugLog { console.Debugln(data...) } } -// gatherResults will collect all results on the input channel and filter results according to the options. +func (o *listPathOptions) shouldSkip(ctx context.Context, entry metaCacheEntry) (yes bool) { + if !o.IncludeDirectories && (entry.isDir() || (!o.Versioned && entry.isObjectDir() && entry.isLatestDeletemarker())) { + return true + } + if o.Marker != "" && entry.name < o.Marker { + return true + } + if !strings.HasPrefix(entry.name, o.Prefix) { + return true + } + if o.Separator != "" && entry.isDir() && !strings.Contains(strings.TrimPrefix(entry.name, o.Prefix), o.Separator) { + return true + } + if !o.Recursive && !entry.isInDir(o.Prefix, o.Separator) { + return true + } + if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() && !entry.isObjectDir() { + return true + } + if o.Lifecycle != nil || o.Replication.Config != nil { + return triggerExpiryAndRepl(ctx, *o, entry) + } + return false +} + +// gatherResults will collect all results on the input channel and filter results according +// to the options or to the current bucket ILM expiry rules. // Caller should close the channel when done. // The returned function will return the results once there is enough or input is closed, // or the context is canceled. @@ -180,8 +209,7 @@ func (o *listPathOptions) gatherResults(ctx context.Context, in <-chan metaCache resultsDone := make(chan metaCacheEntriesSorted) // Copy so we can mutate resCh := resultsDone - var done bool - var mu sync.Mutex + var done atomic.Bool resErr := io.EOF go func() { @@ -192,26 +220,16 @@ func (o *listPathOptions) gatherResults(ctx context.Context, in <-chan metaCache // past limit continue } - mu.Lock() - returned = done - mu.Unlock() + returned = done.Load() if returned { resCh = nil continue } - if !o.IncludeDirectories && (entry.isDir() || (!o.Versioned && entry.isObjectDir() && entry.isLatestDeletemarker())) { - continue - } - if o.Marker != "" && entry.name < o.Marker { - continue - } - if !strings.HasPrefix(entry.name, o.Prefix) { - continue - } - if !o.Recursive && !entry.isInDir(o.Prefix, o.Separator) { - continue - } - if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() && !entry.isObjectDir() { + if yes := o.shouldSkip(ctx, entry); yes { + // when we have not enough results, record the skipped entry + if o.Limit > 0 && results.len() < o.Limit { + results.lastSkippedEntry = entry.name + } continue } if o.Limit > 0 && results.len() >= o.Limit { @@ -242,9 +260,7 @@ func (o *listPathOptions) gatherResults(ctx context.Context, in <-chan metaCache return func() (metaCacheEntriesSorted, error) { select { case <-ctx.Done(): - mu.Lock() - done = true - mu.Unlock() + done.Store(true) return metaCacheEntriesSorted{}, ctx.Err() case r := <-resultsDone: return r, resErr @@ -276,7 +292,7 @@ func (o *listPathOptions) findFirstPart(fi FileInfo) (int, error) { } err := json.Unmarshal([]byte(v), &tmp) if !ok { - logger.LogIf(context.Background(), err) + bugLogIf(context.Background(), err) return -1, err } if tmp.First == "" && tmp.Last == "" && tmp.EOS { @@ -383,7 +399,7 @@ func (r *metacacheReader) filter(o listPathOptions) (entries metaCacheEntriesSor if !o.InclDeleted && entry.isObject() && entry.isLatestDeletemarker() && !entry.isObjectDir() { return true } - if entry.isAllFreeVersions() { + if !o.InclDeleted && entry.isAllFreeVersions() { return true } entries.o = 
append(entries.o, entry) @@ -529,7 +545,7 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt } loadedPart = partN bi, err := getMetacacheBlockInfo(fi, partN) - logger.LogIf(ctx, err) + internalLogIf(ctx, err) if err == nil { if bi.pastPrefix(o.Prefix) { return entries, io.EOF @@ -565,10 +581,11 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt continue case InsufficientReadQuorum: retries++ + loadedPart = -1 time.Sleep(retryDelay250) continue default: - logger.LogIf(ctx, err) + internalLogIf(ctx, err) return entries, err } } @@ -576,7 +593,7 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt // We finished at the end of the block. // And should not expect any more results. bi, err := getMetacacheBlockInfo(fi, partN) - logger.LogIf(ctx, err) + internalLogIf(ctx, err) if err != nil || bi.EOS { // We are done and there are no more parts. return entries, io.EOF @@ -617,18 +634,18 @@ func calcCommonWritesDeletes(infos []DiskInfo, readQuorum int) (commonWrite, com } filter := func(list []uint64) (commonCount uint64) { - max := 0 + maxCnt := 0 signatureMap := map[uint64]int{} for _, v := range list { signatureMap[v]++ } for ops, count := range signatureMap { - if max < count && commonCount < ops { - max = count + if maxCnt < count && commonCount < ops { + maxCnt = count commonCount = ops } } - if max < readQuorum { + if maxCnt < readQuorum { return 0 } return commonCount @@ -636,12 +653,12 @@ func calcCommonWritesDeletes(infos []DiskInfo, readQuorum int) (commonWrite, com commonWrite = filter(writes) commonDelete = filter(deletes) - return + return commonWrite, commonDelete } func calcCommonCounter(infos []DiskInfo, readQuorum int) (commonCount uint64) { filter := func() (commonCount uint64) { - max := 0 + maxCnt := 0 signatureMap := map[uint64]int{} for _, info := range infos { if info.Error != "" { @@ -651,12 +668,12 @@ func calcCommonCounter(infos []DiskInfo, readQuorum int) (commonCount uint64) { signatureMap[mutations]++ } for ops, count := range signatureMap { - if max < count && commonCount < ops { - max = count + if maxCnt < count && commonCount < ops { + maxCnt = count commonCount = ops } } - if max < readQuorum { + if maxCnt < readQuorum { return 0 } return commonCount @@ -796,6 +813,17 @@ func (m *metaCacheRPC) setErr(err string) { *m.meta = meta } +// getErr will return an error if the listing failed. +// The error is not type safe. 
+func (m *metaCacheRPC) getErr() error { + m.mu.Lock() + defer m.mu.Unlock() + if m.meta.status == scanStateError { + return errors.New(m.meta.error) + } + return nil +} + func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCacheRPC, entries <-chan metaCacheEntry) (err error) { o := mc.o o.debugf(color.Green("saveMetaCacheStream:")+" with options: %#v", o) @@ -859,7 +887,7 @@ func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCache } o.debugln(color.Green("saveMetaCacheStream:")+" saving block", b.n, "to", o.objectPath(b.n)) r, err := hash.NewReader(ctx, bytes.NewReader(b.data), int64(len(b.data)), "", "", int64(len(b.data))) - logger.LogIf(ctx, err) + bugLogIf(ctx, err) custom := b.headerKV() _, err = er.putMetacacheObject(ctx, o.objectPath(b.n), NewPutObjReader(r), ObjectOptions{ UserDefined: custom, @@ -879,9 +907,7 @@ func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCache fi := FileInfo{ Metadata: make(map[string]string, len(meta)), } - for k, v := range meta { - fi.Metadata[k] = v - } + maps.Copy(fi.Metadata, meta) err := er.updateObjectMetaWithOpts(ctx, minioMetaBucket, o.objectPath(0), fi, er.getDisks(), UpdateMetadataOpts{NoPersistence: true}) if err == nil { break @@ -893,7 +919,7 @@ func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCache return err case InsufficientReadQuorum: default: - logger.LogIf(ctx, err) + internalLogIf(ctx, err) } if retries >= maxTries { return err @@ -1001,8 +1027,7 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) { // not a storage error. return nil } - askDisks := len(disks) - readers := make([]*metacacheReader, askDisks) + readers := make([]*metacacheReader, len(disks)) defer func() { for _, r := range readers { r.Close() @@ -1084,21 +1109,18 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) { case nil: default: switch err.Error() { - case errFileNotFound.Error(), - errVolumeNotFound.Error(), - errUnformattedDisk.Error(), - errDiskNotFound.Error(): + case errFileNotFound.Error(): atEOF++ fnf++ - // This is a special case, to handle bucket does - // not exist situations. - if errors.Is(err, errVolumeNotFound) { - vnf++ - } + continue + case errVolumeNotFound.Error(): + atEOF++ + fnf++ + vnf++ continue } hasErr++ - errs[i] = err + errs[i] = fmt.Errorf("drive: %s returned err: %v", disks[i], err) continue } // If no current, add it. @@ -1133,27 +1155,21 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) { topEntries[i] = entry } - // Stop if we exceed number of bad disks - if hasErr > len(disks)-opts.minDisks && hasErr > 0 { + // Since minDisks is set to quorum, we return if we have enough. + if vnf > 0 && vnf >= len(readers)-opts.minDisks { + return errVolumeNotFound + } + // Since minDisks is set to quorum, we return if we have enough. + if fnf > 0 && fnf >= len(readers)-opts.minDisks { + return errFileNotFound + } + + // Stop if we exceed number of bad disks. + if hasErr > 0 && hasErr+fnf > len(disks)-opts.minDisks { if opts.finished != nil { opts.finished(errs) } - var combinedErr []string - for i, err := range errs { - if err != nil { - if disks[i] != nil { - combinedErr = append(combinedErr, - fmt.Sprintf("drive %s returned: %s", disks[i], err)) - } else { - combinedErr = append(combinedErr, err.Error()) - } - } - } - return errors.New(strings.Join(combinedErr, ", ")) - } - - if vnf == len(readers) { - return errVolumeNotFound + return errors.Join(errs...) 
} // Break if all at EOF or error. @@ -1164,10 +1180,6 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) { break } - if fnf == len(readers) { - return errFileNotFound - } - if agree == len(readers) { // Everybody agreed for _, r := range readers { diff --git a/cmd/metacache-set_gen.go b/cmd/metacache-set_gen.go index 3633edc65cdc2..8b29bfca4515c 100644 --- a/cmd/metacache-set_gen.go +++ b/cmd/metacache-set_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) @@ -114,6 +114,12 @@ func (z *listPathOptions) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "Versioned") return } + case "V1": + z.V1, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "V1") + return + } case "StopDiskAtLimit": z.StopDiskAtLimit, err = dc.ReadBool() if err != nil { @@ -145,9 +151,9 @@ func (z *listPathOptions) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z *listPathOptions) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 18 + // map header, size 19 // write "ID" - err = en.Append(0xde, 0x0, 0x12, 0xa2, 0x49, 0x44) + err = en.Append(0xde, 0x0, 0x13, 0xa2, 0x49, 0x44) if err != nil { return } @@ -296,6 +302,16 @@ func (z *listPathOptions) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "Versioned") return } + // write "V1" + err = en.Append(0xa2, 0x56, 0x31) + if err != nil { + return + } + err = en.WriteBool(z.V1) + if err != nil { + err = msgp.WrapError(err, "V1") + return + } // write "StopDiskAtLimit" err = en.Append(0xaf, 0x53, 0x74, 0x6f, 0x70, 0x44, 0x69, 0x73, 0x6b, 0x41, 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74) if err != nil { @@ -332,9 +348,9 @@ func (z *listPathOptions) EncodeMsg(en *msgp.Writer) (err error) { // MarshalMsg implements msgp.Marshaler func (z *listPathOptions) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 18 + // map header, size 19 // string "ID" - o = append(o, 0xde, 0x0, 0x12, 0xa2, 0x49, 0x44) + o = append(o, 0xde, 0x0, 0x13, 0xa2, 0x49, 0x44) o = msgp.AppendString(o, z.ID) // string "Bucket" o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74) @@ -378,6 +394,9 @@ func (z *listPathOptions) MarshalMsg(b []byte) (o []byte, err error) { // string "Versioned" o = append(o, 0xa9, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64) o = msgp.AppendBool(o, z.Versioned) + // string "V1" + o = append(o, 0xa2, 0x56, 0x31) + o = msgp.AppendBool(o, z.V1) // string "StopDiskAtLimit" o = append(o, 0xaf, 0x53, 0x74, 0x6f, 0x70, 0x44, 0x69, 0x73, 0x6b, 0x41, 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74) o = msgp.AppendBool(o, z.StopDiskAtLimit) @@ -498,6 +517,12 @@ func (z *listPathOptions) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "Versioned") return } + case "V1": + z.V1, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "V1") + return + } case "StopDiskAtLimit": z.StopDiskAtLimit, bts, err = msgp.ReadBoolBytes(bts) if err != nil { @@ -530,6 +555,6 @@ func (z *listPathOptions) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *listPathOptions) Msgsize() (s int) { - s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 8 + msgp.StringPrefixSize + len(z.BaseDir) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 13 + 
msgp.StringPrefixSize + len(z.FilterPrefix) + 7 + msgp.StringPrefixSize + len(z.Marker) + 6 + msgp.IntSize + 9 + msgp.StringPrefixSize + len(z.AskDisks) + 12 + msgp.BoolSize + 10 + msgp.BoolSize + 10 + msgp.StringPrefixSize + len(z.Separator) + 7 + msgp.BoolSize + 19 + msgp.BoolSize + 10 + msgp.BoolSize + 10 + msgp.BoolSize + 16 + msgp.BoolSize + 5 + msgp.IntSize + 4 + msgp.IntSize + s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 8 + msgp.StringPrefixSize + len(z.BaseDir) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 13 + msgp.StringPrefixSize + len(z.FilterPrefix) + 7 + msgp.StringPrefixSize + len(z.Marker) + 6 + msgp.IntSize + 9 + msgp.StringPrefixSize + len(z.AskDisks) + 12 + msgp.BoolSize + 10 + msgp.BoolSize + 10 + msgp.StringPrefixSize + len(z.Separator) + 7 + msgp.BoolSize + 19 + msgp.BoolSize + 10 + msgp.BoolSize + 10 + msgp.BoolSize + 3 + msgp.BoolSize + 16 + msgp.BoolSize + 5 + msgp.IntSize + 4 + msgp.IntSize return } diff --git a/cmd/metacache-set_gen_test.go b/cmd/metacache-set_gen_test.go index 0b57966b444db..bee9830bd4291 100644 --- a/cmd/metacache-set_gen_test.go +++ b/cmd/metacache-set_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "bytes" "testing" diff --git a/cmd/metacache-stream.go b/cmd/metacache-stream.go index f6d08245e1c42..cf61895fe4d7b 100644 --- a/cmd/metacache-stream.go +++ b/cmd/metacache-stream.go @@ -27,8 +27,8 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/klauspost/compress/s2" + "github.com/minio/minio/internal/bpool" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/tinylib/msgp/msgp" "github.com/valyala/bytebufferpool" ) @@ -237,7 +237,7 @@ func (w *metacacheWriter) Reset(out io.Writer) { } } -var s2DecPool = sync.Pool{New: func() interface{} { +var s2DecPool = bpool.Pool[*s2.Reader]{New: func() *s2.Reader { // Default alloc block for network transfer. return s2.NewReader(nil, s2.ReaderAllocBlock(16<<10)) }} @@ -254,7 +254,7 @@ type metacacheReader struct { // newMetacacheReader creates a new cache reader. // Nothing will be read from the stream yet. func newMetacacheReader(r io.Reader) *metacacheReader { - dec := s2DecPool.Get().(*s2.Reader) + dec := s2DecPool.Get() dec.Reset(r) mr := msgpNewReader(dec) return &metacacheReader{ @@ -758,7 +758,7 @@ func (r *metacacheReader) Close() error { return nil } -// metacacheBlockWriter collects blocks and provides a callaback to store them. +// metacacheBlockWriter collects blocks and provides a callback to store them. 
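Editor's note: the s2DecPool change above swaps an untyped sync.Pool for a generic bpool.Pool[*s2.Reader], which removes the type assertion at every Get. A self-contained sketch of what such a typed pool can look like (my own illustration, not the internal/bpool implementation):

package main

import (
	"bytes"
	"fmt"
	"sync"
)

// Pool is a tiny type-safe wrapper over sync.Pool, so callers get a T back
// without asserting the interface value that sync.Pool returns.
type Pool[T any] struct {
	New  func() T
	once sync.Once
	p    sync.Pool
}

func (p *Pool[T]) Get() T {
	p.once.Do(func() { p.p.New = func() any { return p.New() } })
	return p.p.Get().(T)
}

func (p *Pool[T]) Put(v T) { p.p.Put(v) }

var readerPool = Pool[*bytes.Reader]{New: func() *bytes.Reader {
	return bytes.NewReader(nil)
}}

func main() {
	r := readerPool.Get() // already a *bytes.Reader, no type assertion needed
	r.Reset([]byte("hello"))
	fmt.Println(r.Len())
	readerPool.Put(r)
}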
type metacacheBlockWriter struct { wg sync.WaitGroup streamErr error @@ -845,7 +845,7 @@ func (b metacacheBlock) headerKV() map[string]string { json := jsoniter.ConfigCompatibleWithStandardLibrary v, err := json.Marshal(b) if err != nil { - logger.LogIf(context.Background(), err) // Unlikely + bugLogIf(context.Background(), err) // Unlikely return nil } return map[string]string{fmt.Sprintf("%s-metacache-part-%d", ReservedMetadataPrefixLower, b.n): string(v)} diff --git a/cmd/metacache-stream_test.go b/cmd/metacache-stream_test.go index 70f3e1ba9637a..36286f6883b8d 100644 --- a/cmd/metacache-stream_test.go +++ b/cmd/metacache-stream_test.go @@ -19,7 +19,6 @@ package cmd import ( "bytes" - "context" "io" "os" "reflect" @@ -278,7 +277,7 @@ func Test_metacacheReader_readAll(t *testing.T) { var wg sync.WaitGroup wg.Add(1) go func() { - readErr = r.readAll(context.Background(), objs) + readErr = r.readAll(t.Context(), objs) wg.Done() }() want := loadMetacacheSampleNames diff --git a/cmd/metacache-walk.go b/cmd/metacache-walk.go index e01760aad83e3..fa949d9ab4f3e 100644 --- a/cmd/metacache-walk.go +++ b/cmd/metacache-walk.go @@ -19,13 +19,13 @@ package cmd import ( "context" + "errors" "io" "sort" "strings" "github.com/minio/minio/internal/grid" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/valyala/bytebufferpool" ) @@ -60,10 +60,23 @@ type WalkDirOptions struct { DiskID string } +// supported FS for Nlink optimization in readdir. +const ( + xfs = "XFS" + ext4 = "EXT4" +) + // WalkDir will traverse a directory and return all entries found. // On success a sorted meta cache stream will be returned. // Metadata has data stripped, if any. +// The function tries to quit as fast as the context is canceled to avoid further drive IO func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writer) (err error) { + legacyFS := s.fsType != xfs && s.fsType != ext4 + + s.RLock() + legacy := s.formatLegacy + s.RUnlock() + // Verify if volume is valid and it exists. volumeDir, err := s.getVolDir(opts.Bucket) if err != nil { @@ -77,10 +90,6 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ } } - s.RLock() - legacy := s.formatLegacy - s.RUnlock() - // Use a small block size to start sending quickly w := newMetacacheWriter(wr, 16<<10) w.reuseBlocks = true // We are not sharing results, so reuse buffers. @@ -139,6 +148,13 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ var scanDir func(path string) error scanDir = func(current string) error { + if contextCanceled(ctx) { + return ctx.Err() + } + if opts.Limit > 0 && objsReturned >= opts.Limit { + return nil + } + // Skip forward, if requested... 
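Editor's note: WalkDir now checks for context cancellation and the optional result limit at the very top of scanDir, before any readdir, so a canceled listing stops issuing drive IO as early as possible. A reduced sketch of that shape (the walker itself is a stand-in, not the xlStorage code):

package main

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// scanDir descends into dir, stopping early when ctx is canceled or when
// limit entries have been reported, mirroring the checks the patch adds
// before touching the drive.
func scanDir(ctx context.Context, dir string, limit int, found *int) error {
	if err := ctx.Err(); err != nil {
		return err // canceled: avoid further directory IO
	}
	if limit > 0 && *found >= limit {
		return nil
	}
	entries, err := os.ReadDir(dir)
	if err != nil {
		return err
	}
	for _, e := range entries {
		if err := ctx.Err(); err != nil {
			return err
		}
		if limit > 0 && *found >= limit {
			return nil
		}
		*found++
		if e.IsDir() {
			if err := scanDir(ctx, filepath.Join(dir, e.Name()), limit, found); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	var n int
	if err := scanDir(ctx, ".", 1000, &n); err != nil {
		fmt.Println("stopped:", err)
	}
	fmt.Println("entries seen:", n)
}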
sb := bytebufferpool.Get() defer func() { @@ -154,12 +170,6 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ forward = forward[:idx] } } - if contextCanceled(ctx) { - return ctx.Err() - } - if opts.Limit > 0 && objsReturned >= opts.Limit { - return nil - } if s.walkMu != nil { s.walkMu.Lock() @@ -171,7 +181,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ if err != nil { // Folder could have gone away in-between if err != errVolumeNotFound && err != errFileNotFound { - logger.LogOnceIf(ctx, err, "metacache-walk-scan-dir") + internalLogOnceIf(ctx, err, "metacache-walk-scan-dir") } if opts.ReportNotFound && err == errFileNotFound && current == opts.BaseDir { err = errFileNotFound @@ -190,6 +200,9 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ // Avoid a bunch of cleanup when joining. current = strings.Trim(current, SlashSeparator) for i, entry := range entries { + if contextCanceled(ctx) { + return ctx.Err() + } if opts.Limit > 0 && objsReturned >= opts.Limit { return nil } @@ -239,7 +252,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ // while being concurrently listed at the same time in // such scenarios the 'xl.meta' might get truncated if !IsErrIgnored(err, io.EOF, io.ErrUnexpectedEOF) { - logger.LogOnceIf(ctx, err, "metacache-walk-read-metadata") + internalLogOnceIf(ctx, err, "metacache-walk-read-metadata") } continue } @@ -257,7 +270,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ diskHealthCheckOK(ctx, err) if err != nil { if !IsErrIgnored(err, io.EOF, io.ErrUnexpectedEOF) { - logger.LogIf(ctx, err) + internalLogIf(ctx, err) } continue } @@ -285,15 +298,15 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ } for _, entry := range entries { + if contextCanceled(ctx) { + return ctx.Err() + } if opts.Limit > 0 && objsReturned >= opts.Limit { return nil } if entry == "" { continue } - if contextCanceled(ctx) { - return ctx.Err() - } meta := metaCacheEntry{name: pathJoinBuf(sb, current, entry)} // If directory entry on stack before this, pop it now. @@ -307,8 +320,11 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ if opts.Recursive { // Scan folder we found. Should be in correct sort order where we are. err := scanDir(pop) - if err != nil && !IsErrIgnored(err, context.Canceled) { - logger.LogIf(ctx, err) + if err != nil { + if errors.Is(err, context.Canceled) { + return err + } + internalLogIf(ctx, err) } } dirStack = dirStack[:len(dirStack)-1] @@ -354,7 +370,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ // NOT an object, append to stack (with slash) // If dirObject, but no metadata (which is unexpected) we skip it. if !isDirObj { - if !isDirEmpty(pathJoinBuf(sb, volumeDir, meta.name)) { + if !isDirEmpty(pathJoinBuf(sb, volumeDir, meta.name), legacyFS) { dirStack = append(dirStack, meta.name+slashSeparator) } } @@ -379,7 +395,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ } if opts.Recursive { // Scan folder we found. Should be in correct sort order where we are. 
- logger.LogIf(ctx, scanDir(pop)) + internalLogIf(ctx, scanDir(pop)) } dirStack = dirStack[:len(dirStack)-1] } @@ -398,7 +414,7 @@ func (p *xlStorageDiskIDCheck) WalkDir(ctx context.Context, opts WalkDirOptions, if err != nil { return err } - defer done(&err) + defer done(0, &err) return p.storage.WalkDir(ctx, opts, wr) } @@ -407,7 +423,7 @@ func (p *xlStorageDiskIDCheck) WalkDir(ctx context.Context, opts WalkDirOptions, // On success a meta cache stream will be returned, that should be closed when done. func (client *storageRESTClient) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writer) error { // Ensure remote has the same disk ID. - opts.DiskID = client.diskID + opts.DiskID = *client.diskID.Load() b, err := opts.MarshalMsg(grid.GetByteBuffer()[:0]) if err != nil { return toStorageErr(err) diff --git a/cmd/metacache-walk_gen.go b/cmd/metacache-walk_gen.go index e59cf64ebf538..6f0624f929dbb 100644 --- a/cmd/metacache-walk_gen.go +++ b/cmd/metacache-walk_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) diff --git a/cmd/metacache-walk_gen_test.go b/cmd/metacache-walk_gen_test.go index 02c4a1ecc27aa..16edd846cb05a 100644 --- a/cmd/metacache-walk_gen_test.go +++ b/cmd/metacache-walk_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "bytes" "testing" diff --git a/cmd/metacache.go b/cmd/metacache.go index 13a151b17a945..7f35d391f67b2 100644 --- a/cmd/metacache.go +++ b/cmd/metacache.go @@ -25,7 +25,7 @@ import ( "strings" "time" - "github.com/minio/minio/internal/logger" + "github.com/minio/pkg/v3/console" ) type scanStatus uint8 @@ -99,6 +99,37 @@ func (m *metacache) worthKeeping() bool { return true } +// keepAlive will continuously update lastHandout until ctx is canceled. +func (m metacache) keepAlive(ctx context.Context, rpc *peerRESTClient) { + // we intentionally operate on a copy of m, so we can update without locks. + t := time.NewTicker(metacacheMaxClientWait / 10) + defer t.Stop() + for { + select { + case <-ctx.Done(): + // Request is done, stop updating. + return + case <-t.C: + m.lastHandout = time.Now() + + if m2, err := rpc.UpdateMetacacheListing(ctx, m); err == nil { + if m2.status != scanStateStarted { + if serverDebugLog { + console.Debugln("returning", m.id, "due to scan state", m2.status, time.Now().Format(time.RFC3339)) + } + return + } + m = m2 + if serverDebugLog { + console.Debugln("refreshed", m.id, time.Now().Format(time.RFC3339)) + } + } else if serverDebugLog { + console.Debugln("error refreshing", m.id, time.Now().Format(time.RFC3339)) + } + } + } +} + // baseDirFromPrefix will return the base directory given an object path. // For example an object with name prefix/folder/object.ext will return `prefix/folder/`. func baseDirFromPrefix(prefix string) string { @@ -118,13 +149,17 @@ func baseDirFromPrefix(prefix string) string { // update cache with new status. // The updates are conditional so multiple callers can update with different states. 
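Editor's note: the new metacache.keepAlive above runs on a copy of the cache entry and periodically refreshes lastHandout through the peer RPC until the request context is done or the scan leaves the started state. The general shape, with a stand-in refresh function in place of rpc.UpdateMetacacheListing:

package main

import (
	"context"
	"fmt"
	"time"
)

type listing struct {
	id          string
	lastHandout time.Time
	done        bool
}

// refresh stands in for the peer RPC: it returns the server-side view of
// the listing, which may already be finished.
func refresh(ctx context.Context, l listing) (listing, error) {
	if ctx.Err() != nil {
		return l, ctx.Err()
	}
	return l, nil
}

// keepAlive bumps lastHandout on a ticker until ctx is canceled or the
// remote side reports that the listing is no longer running.
func keepAlive(ctx context.Context, l listing, interval time.Duration) {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			l.lastHandout = time.Now()
			l2, err := refresh(ctx, l)
			if err != nil || l2.done {
				return
			}
			l = l2
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
	defer cancel()
	keepAlive(ctx, listing{id: "demo"}, 100*time.Millisecond)
	fmt.Println("listing context finished")
}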
func (m *metacache) update(update metacache) { - m.lastUpdate = UTCNow() - - if m.lastHandout.After(m.lastHandout) { - m.lastHandout = UTCNow() + now := UTCNow() + m.lastUpdate = now + + if update.lastHandout.After(m.lastHandout) { + m.lastHandout = update.lastUpdate + if m.lastHandout.After(now) { + m.lastHandout = now + } } if m.status == scanStateStarted && update.status == scanStateSuccess { - m.ended = UTCNow() + m.ended = now } if m.status == scanStateStarted && update.status != scanStateStarted { @@ -140,7 +175,7 @@ func (m *metacache) update(update metacache) { if m.error == "" && update.error != "" { m.error = update.error m.status = scanStateError - m.ended = UTCNow() + m.ended = now } m.fileNotFound = m.fileNotFound || update.fileNotFound } @@ -148,16 +183,17 @@ func (m *metacache) update(update metacache) { // delete all cache data on disks. func (m *metacache) delete(ctx context.Context) { if m.bucket == "" || m.id == "" { - logger.LogIf(ctx, fmt.Errorf("metacache.delete: bucket (%s) or id (%s) empty", m.bucket, m.id)) + bugLogIf(ctx, fmt.Errorf("metacache.delete: bucket (%s) or id (%s) empty", m.bucket, m.id)) + return } objAPI := newObjectLayerFn() if objAPI == nil { - logger.LogIf(ctx, errors.New("metacache.delete: no object layer")) + internalLogIf(ctx, errors.New("metacache.delete: no object layer")) return } ez, ok := objAPI.(deleteAllStorager) if !ok { - logger.LogIf(ctx, errors.New("metacache.delete: expected objAPI to be 'deleteAllStorager'")) + bugLogIf(ctx, errors.New("metacache.delete: expected objAPI to be 'deleteAllStorager'")) return } ez.deleteAll(ctx, minioMetaBucket, metacachePrefixForID(m.bucket, m.id)) diff --git a/cmd/metacache_gen.go b/cmd/metacache_gen.go index a77fa43161ee7..9ca1bca826ba6 100644 --- a/cmd/metacache_gen.go +++ b/cmd/metacache_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) diff --git a/cmd/metacache_gen_test.go b/cmd/metacache_gen_test.go index 1b61d9a1d2a2d..017d4cc3ef2fd 100644 --- a/cmd/metacache_gen_test.go +++ b/cmd/metacache_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
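Editor's note: the update() fix above takes the newer handout time from the incoming copy but clamps it so it can never land in the future, working from a single `now` snapshot. Roughly the merge rule, simplified into a sketch:

package main

import (
	"fmt"
	"time"
)

// mergeHandout returns the later of the two handout times, clamped to now.
func mergeHandout(current, incoming, now time.Time) time.Time {
	if incoming.After(current) {
		current = incoming
	}
	if current.After(now) {
		current = now
	}
	return current
}

func main() {
	now := time.Now()
	old := now.Add(-2 * time.Minute)
	skewed := now.Add(30 * time.Second) // update from a node with clock skew

	fmt.Println(mergeHandout(old, now.Add(-time.Minute), now).Before(now)) // true: moved forward
	fmt.Println(mergeHandout(old, skewed, now).Equal(now))                 // true: clamped to now
}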
+package cmd + import ( "bytes" "testing" diff --git a/cmd/metrics-realtime.go b/cmd/metrics-realtime.go index d704969ca20ab..4a1d557329268 100644 --- a/cmd/metrics-realtime.go +++ b/cmd/metrics-realtime.go @@ -20,6 +20,8 @@ package cmd import ( "context" "fmt" + "net/http" + "strings" "time" "github.com/minio/madmin-go/v3" @@ -38,7 +40,23 @@ type collectMetricsOpts struct { func collectLocalMetrics(types madmin.MetricType, opts collectMetricsOpts) (m madmin.RealtimeMetrics) { if types == madmin.MetricsNone { - return + return m + } + + byHostName := globalMinioAddr + if len(opts.hosts) > 0 { + server := getLocalServerProperty(globalEndpoints, &http.Request{ + Host: globalLocalNodeName, + }, false) + if _, ok := opts.hosts[server.Endpoint]; ok { + byHostName = server.Endpoint + } else { + return m + } + } + + if strings.HasPrefix(byHostName, ":") && !strings.HasPrefix(globalLocalNodeName, ":") { + byHostName = globalLocalNodeName } if types.Contains(madmin.MetricsDisk) { @@ -74,7 +92,7 @@ func collectLocalMetrics(types madmin.MetricType, opts collectMetricsOpts) (m ma } netStats, err := net.GetInterfaceNetStats(globalInternodeInterface) if err != nil { - m.Errors = append(m.Errors, fmt.Sprintf("%s: %v (nicstats)", globalMinioAddr, err.Error())) + m.Errors = append(m.Errors, fmt.Sprintf("%s: %v (nicstats)", byHostName, err.Error())) } else { m.Aggregated.Net.NetStats = netStats } @@ -83,7 +101,7 @@ func collectLocalMetrics(types madmin.MetricType, opts collectMetricsOpts) (m ma m.Aggregated.Mem = &madmin.MemMetrics{ CollectedAt: UTCNow(), } - m.Aggregated.Mem.Info = madmin.GetMemInfo(GlobalContext, globalMinioAddr) + m.Aggregated.Mem.Info = madmin.GetMemInfo(GlobalContext, byHostName) } if types.Contains(madmin.MetricsCPU) { m.Aggregated.CPU = &madmin.CPUMetrics{ @@ -91,34 +109,43 @@ func collectLocalMetrics(types madmin.MetricType, opts collectMetricsOpts) (m ma } cm, err := c.Times(false) if err != nil { - m.Errors = append(m.Errors, fmt.Sprintf("%s: %v (cpuTimes)", globalMinioAddr, err.Error())) + m.Errors = append(m.Errors, fmt.Sprintf("%s: %v (cpuTimes)", byHostName, err.Error())) } else { // not collecting per-cpu stats, so there will be only one element if len(cm) == 1 { m.Aggregated.CPU.TimesStat = &cm[0] } else { - m.Errors = append(m.Errors, fmt.Sprintf("%s: Expected one CPU stat, got %d", globalMinioAddr, len(cm))) + m.Errors = append(m.Errors, fmt.Sprintf("%s: Expected one CPU stat, got %d", byHostName, len(cm))) } } cpuCount, err := c.Counts(true) if err != nil { - m.Errors = append(m.Errors, fmt.Sprintf("%s: %v (cpuCount)", globalMinioAddr, err.Error())) + m.Errors = append(m.Errors, fmt.Sprintf("%s: %v (cpuCount)", byHostName, err.Error())) } else { m.Aggregated.CPU.CPUCount = cpuCount } loadStat, err := load.Avg() if err != nil { - m.Errors = append(m.Errors, fmt.Sprintf("%s: %v (loadStat)", globalMinioAddr, err.Error())) + m.Errors = append(m.Errors, fmt.Sprintf("%s: %v (loadStat)", byHostName, err.Error())) } else { m.Aggregated.CPU.LoadStat = loadStat } } + if types.Contains(madmin.MetricsRPC) { + gr := globalGrid.Load() + if gr == nil { + m.Errors = append(m.Errors, fmt.Sprintf("%s: Grid not initialized", byHostName)) + } else { + stats := gr.ConnStats() + m.Aggregated.RPC = &stats + } + } // Add types... // ByHost is a shallow reference, so careful about sharing. 
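Editor's note: collectLocalMetrics below labels results with the endpoint matched from opts.hosts and falls back to the node name when the configured address is only a port, such as ":9000". A tiny sketch of that fallback (host names are hypothetical):

package main

import (
	"fmt"
	"strings"
)

// displayHost picks the name metrics are reported under: prefer the
// resolved endpoint, but if it is a port-only ":9000"-style address, use
// the node name instead.
func displayHost(addr, nodeName string) string {
	if strings.HasPrefix(addr, ":") && !strings.HasPrefix(nodeName, ":") {
		return nodeName
	}
	return addr
}

func main() {
	fmt.Println(displayHost(":9000", "node1.example.com:9000"))
	fmt.Println(displayHost("node2.example.com:9000", "node2.example.com:9000"))
}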
- m.ByHost = map[string]madmin.Metrics{globalMinioAddr: m.Aggregated} - m.Hosts = append(m.Hosts, globalMinioAddr) + m.ByHost = map[string]madmin.Metrics{byHostName: m.Aggregated} + m.Hosts = append(m.Hosts, byHostName) return m } @@ -194,7 +221,7 @@ func collectLocalDisksMetrics(disks map[string]struct{}) map[string]madmin.DiskM func collectRemoteMetrics(ctx context.Context, types madmin.MetricType, opts collectMetricsOpts) (m madmin.RealtimeMetrics) { if !globalIsDistErasure { - return + return m } all := globalNotificationSys.GetMetrics(ctx, types, opts) for _, remote := range all { diff --git a/cmd/metrics-resource.go b/cmd/metrics-resource.go index 6d052fefb48be..94c9187337153 100644 --- a/cmd/metrics-resource.go +++ b/cmd/metrics-resource.go @@ -50,15 +50,6 @@ const ( interfaceTxBytes MetricName = "tx_bytes" interfaceTxErrors MetricName = "tx_errors" - // memory stats - memUsed MetricName = "used" - memUsedPerc MetricName = "used_perc" - memFree MetricName = "free" - memShared MetricName = "shared" - memBuffers MetricName = "buffers" - memCache MetricName = "cache" - memAvailable MetricName = "available" - // cpu stats cpuUser MetricName = "user" cpuSystem MetricName = "system" @@ -81,7 +72,7 @@ var ( resourceMetricsMapMu sync.RWMutex // resourceMetricsHelpMap maps metric name to its help string resourceMetricsHelpMap map[MetricName]string - resourceMetricsGroups []*MetricsGroup + resourceMetricsGroups []*MetricsGroupV2 // initial values for drives (at the time of server startup) // used for calculating avg values for drive metrics latestDriveStats map[string]madmin.DiskIOStats @@ -160,25 +151,18 @@ func init() { cpuLoad1: "CPU load average 1min", cpuLoad5: "CPU load average 5min", cpuLoad15: "CPU load average 15min", - cpuLoad1Perc: "CPU load average 1min (perentage)", + cpuLoad1Perc: "CPU load average 1min (percentage)", cpuLoad5Perc: "CPU load average 5min (percentage)", cpuLoad15Perc: "CPU load average 15min (percentage)", } - resourceMetricsGroups = []*MetricsGroup{ + resourceMetricsGroups = []*MetricsGroupV2{ getResourceMetrics(), } resourceCollector = newMinioResourceCollector(resourceMetricsGroups) } -func updateResourceMetrics(subSys MetricSubsystem, name MetricName, val float64, labels map[string]string, isCumulative bool) { - resourceMetricsMapMu.Lock() - defer resourceMetricsMapMu.Unlock() - subsysMetrics, found := resourceMetricsMap[subSys] - if !found { - subsysMetrics = ResourceMetrics{} - } - +func getResourceKey(name MetricName, labels map[string]string) string { // labels are used to uniquely identify a metric // e.g. 
reads_per_sec_{drive} inside the map sfx := "" @@ -189,7 +173,18 @@ func updateResourceMetrics(subSys MetricSubsystem, name MetricName, val float64, sfx += v } - key := string(name) + "_" + sfx + return string(name) + "_" + sfx +} + +func updateResourceMetrics(subSys MetricSubsystem, name MetricName, val float64, labels map[string]string, isCumulative bool) { + resourceMetricsMapMu.Lock() + defer resourceMetricsMapMu.Unlock() + subsysMetrics, found := resourceMetricsMap[subSys] + if !found { + subsysMetrics = ResourceMetrics{} + } + + key := getResourceKey(name, labels) metric, found := subsysMetrics[key] if !found { metric = ResourceMetric{ @@ -227,15 +222,7 @@ func updateDriveIOStats(currentStats madmin.DiskIOStats, latestStats madmin.Disk // too soon to update the stats return } - diffStats := madmin.DiskIOStats{ - ReadIOs: currentStats.ReadIOs - latestStats.ReadIOs, - WriteIOs: currentStats.WriteIOs - latestStats.WriteIOs, - ReadTicks: currentStats.ReadTicks - latestStats.ReadTicks, - WriteTicks: currentStats.WriteTicks - latestStats.WriteTicks, - TotalTicks: currentStats.TotalTicks - latestStats.TotalTicks, - ReadSectors: currentStats.ReadSectors - latestStats.ReadSectors, - WriteSectors: currentStats.WriteSectors - latestStats.WriteSectors, - } + diffStats := getDiffStats(latestStats, currentStats) updateResourceMetrics(driveSubsystem, readsPerSec, float64(diffStats.ReadIOs)/diffInSeconds, labels, false) readKib := float64(diffStats.ReadSectors*sectorSize) / kib @@ -275,7 +262,7 @@ func collectDriveMetrics(m madmin.RealtimeMetrics) { latestDriveStatsMu.Unlock() globalLocalDrivesMu.RLock() - localDrives := cloneDrives(globalLocalDrives) + localDrives := cloneDrives(globalLocalDrivesMap) globalLocalDrivesMu.RUnlock() for _, d := range localDrives { @@ -291,84 +278,72 @@ func collectDriveMetrics(m madmin.RealtimeMetrics) { } func collectLocalResourceMetrics() { - var types madmin.MetricType = madmin.MetricsDisk | madmin.MetricNet | madmin.MetricsMem | madmin.MetricsCPU - - m := collectLocalMetrics(types, collectMetricsOpts{ - hosts: map[string]struct{}{ - globalLocalNodeName: {}, - }, - }) - - for host, hm := range m.ByHost { - if len(host) > 0 { - if hm.Net != nil && len(hm.Net.NetStats.Name) > 0 { - stats := hm.Net.NetStats - labels := map[string]string{"interface": stats.Name} - updateResourceMetrics(interfaceSubsystem, interfaceRxBytes, float64(stats.RxBytes), labels, true) - updateResourceMetrics(interfaceSubsystem, interfaceRxErrors, float64(stats.RxErrors), labels, true) - updateResourceMetrics(interfaceSubsystem, interfaceTxBytes, float64(stats.TxBytes), labels, true) - updateResourceMetrics(interfaceSubsystem, interfaceTxErrors, float64(stats.TxErrors), labels, true) - } - if hm.Mem != nil && len(hm.Mem.Info.Addr) > 0 { - labels := map[string]string{} - stats := hm.Mem.Info - updateResourceMetrics(memSubsystem, total, float64(stats.Total), labels, false) - updateResourceMetrics(memSubsystem, memUsed, float64(stats.Used), labels, false) - perc := math.Round(float64(stats.Used*100*100)/float64(stats.Total)) / 100 - updateResourceMetrics(memSubsystem, memUsedPerc, perc, labels, false) - updateResourceMetrics(memSubsystem, memFree, float64(stats.Free), labels, false) - updateResourceMetrics(memSubsystem, memShared, float64(stats.Shared), labels, false) - updateResourceMetrics(memSubsystem, memBuffers, float64(stats.Buffers), labels, false) - updateResourceMetrics(memSubsystem, memAvailable, float64(stats.Available), labels, false) - updateResourceMetrics(memSubsystem, memCache, 
float64(stats.Cache), labels, false) + types := madmin.MetricsDisk | madmin.MetricNet | madmin.MetricsMem | madmin.MetricsCPU + + m := collectLocalMetrics(types, collectMetricsOpts{}) + for _, hm := range m.ByHost { + if hm.Net != nil && len(hm.Net.NetStats.Name) > 0 { + stats := hm.Net.NetStats + labels := map[string]string{"interface": stats.Name} + updateResourceMetrics(interfaceSubsystem, interfaceRxBytes, float64(stats.RxBytes), labels, true) + updateResourceMetrics(interfaceSubsystem, interfaceRxErrors, float64(stats.RxErrors), labels, true) + updateResourceMetrics(interfaceSubsystem, interfaceTxBytes, float64(stats.TxBytes), labels, true) + updateResourceMetrics(interfaceSubsystem, interfaceTxErrors, float64(stats.TxErrors), labels, true) + } + if hm.Mem != nil && len(hm.Mem.Info.Addr) > 0 { + labels := map[string]string{} + stats := hm.Mem.Info + updateResourceMetrics(memSubsystem, total, float64(stats.Total), labels, false) + updateResourceMetrics(memSubsystem, memUsed, float64(stats.Used), labels, false) + perc := math.Round(float64(stats.Used*100*100)/float64(stats.Total)) / 100 + updateResourceMetrics(memSubsystem, memUsedPerc, perc, labels, false) + updateResourceMetrics(memSubsystem, memFree, float64(stats.Free), labels, false) + updateResourceMetrics(memSubsystem, memShared, float64(stats.Shared), labels, false) + updateResourceMetrics(memSubsystem, memBuffers, float64(stats.Buffers), labels, false) + updateResourceMetrics(memSubsystem, memAvailable, float64(stats.Available), labels, false) + updateResourceMetrics(memSubsystem, memCache, float64(stats.Cache), labels, false) + } + if hm.CPU != nil { + labels := map[string]string{} + ts := hm.CPU.TimesStat + if ts != nil { + tot := ts.User + ts.System + ts.Idle + ts.Iowait + ts.Nice + ts.Steal + cpuUserVal := math.Round(ts.User/tot*100*100) / 100 + updateResourceMetrics(cpuSubsystem, cpuUser, cpuUserVal, labels, false) + cpuSystemVal := math.Round(ts.System/tot*100*100) / 100 + updateResourceMetrics(cpuSubsystem, cpuSystem, cpuSystemVal, labels, false) + cpuIdleVal := math.Round(ts.Idle/tot*100*100) / 100 + updateResourceMetrics(cpuSubsystem, cpuIdle, cpuIdleVal, labels, false) + cpuIOWaitVal := math.Round(ts.Iowait/tot*100*100) / 100 + updateResourceMetrics(cpuSubsystem, cpuIOWait, cpuIOWaitVal, labels, false) + cpuNiceVal := math.Round(ts.Nice/tot*100*100) / 100 + updateResourceMetrics(cpuSubsystem, cpuNice, cpuNiceVal, labels, false) + cpuStealVal := math.Round(ts.Steal/tot*100*100) / 100 + updateResourceMetrics(cpuSubsystem, cpuSteal, cpuStealVal, labels, false) } - if hm.CPU != nil { - labels := map[string]string{} - ts := hm.CPU.TimesStat - if ts != nil { - tot := ts.User + ts.System + ts.Idle + ts.Iowait + ts.Nice + ts.Steal - cpuUserVal := math.Round(ts.User/tot*100*100) / 100 - updateResourceMetrics(cpuSubsystem, cpuUser, cpuUserVal, labels, false) - cpuSystemVal := math.Round(ts.System/tot*100*100) / 100 - updateResourceMetrics(cpuSubsystem, cpuSystem, cpuSystemVal, labels, false) - cpuIdleVal := math.Round(ts.Idle/tot*100*100) / 100 - updateResourceMetrics(cpuSubsystem, cpuIdle, cpuIdleVal, labels, false) - cpuIOWaitVal := math.Round(ts.Iowait/tot*100*100) / 100 - updateResourceMetrics(cpuSubsystem, cpuIOWait, cpuIOWaitVal, labels, false) - cpuNiceVal := math.Round(ts.Nice/tot*100*100) / 100 - updateResourceMetrics(cpuSubsystem, cpuNice, cpuNiceVal, labels, false) - cpuStealVal := math.Round(ts.Steal/tot*100*100) / 100 - updateResourceMetrics(cpuSubsystem, cpuSteal, cpuStealVal, labels, false) - } - ls := 
hm.CPU.LoadStat - if ls != nil { - updateResourceMetrics(cpuSubsystem, cpuLoad1, ls.Load1, labels, false) - updateResourceMetrics(cpuSubsystem, cpuLoad5, ls.Load5, labels, false) - updateResourceMetrics(cpuSubsystem, cpuLoad15, ls.Load15, labels, false) - if hm.CPU.CPUCount > 0 { - perc := math.Round(ls.Load1*100*100/float64(hm.CPU.CPUCount)) / 100 - updateResourceMetrics(cpuSubsystem, cpuLoad1Perc, perc, labels, false) - perc = math.Round(ls.Load5*100*100/float64(hm.CPU.CPUCount)) / 100 - updateResourceMetrics(cpuSubsystem, cpuLoad5Perc, perc, labels, false) - perc = math.Round(ls.Load15*100*100/float64(hm.CPU.CPUCount)) / 100 - updateResourceMetrics(cpuSubsystem, cpuLoad15Perc, perc, labels, false) - } + ls := hm.CPU.LoadStat + if ls != nil { + updateResourceMetrics(cpuSubsystem, cpuLoad1, ls.Load1, labels, false) + updateResourceMetrics(cpuSubsystem, cpuLoad5, ls.Load5, labels, false) + updateResourceMetrics(cpuSubsystem, cpuLoad15, ls.Load15, labels, false) + if hm.CPU.CPUCount > 0 { + perc := math.Round(ls.Load1*100*100/float64(hm.CPU.CPUCount)) / 100 + updateResourceMetrics(cpuSubsystem, cpuLoad1Perc, perc, labels, false) + perc = math.Round(ls.Load5*100*100/float64(hm.CPU.CPUCount)) / 100 + updateResourceMetrics(cpuSubsystem, cpuLoad5Perc, perc, labels, false) + perc = math.Round(ls.Load15*100*100/float64(hm.CPU.CPUCount)) / 100 + updateResourceMetrics(cpuSubsystem, cpuLoad15Perc, perc, labels, false) } } - break // only one host expected } + break // only one host expected } collectDriveMetrics(m) } func initLatestValues() { - m := collectLocalMetrics(madmin.MetricsDisk, collectMetricsOpts{ - hosts: map[string]struct{}{ - globalLocalNodeName: {}, - }, - }) - + m := collectLocalMetrics(madmin.MetricsDisk, collectMetricsOpts{}) latestDriveStatsMu.Lock() latestDriveStats = map[string]madmin.DiskIOStats{} for d, dm := range m.ByDisk { @@ -405,7 +380,7 @@ func startResourceMetricsCollection() { // minioResourceCollector is the Collector for resource metrics type minioResourceCollector struct { - metricsGroups []*MetricsGroup + metricsGroups []*MetricsGroupV2 desc *prometheus.Desc } @@ -417,7 +392,7 @@ func (c *minioResourceCollector) Describe(ch chan<- *prometheus.Desc) { // Collect is called by the Prometheus registry when collecting metrics. 
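Editor's note: several of the resource metrics above are percentages rounded to two decimal places, whether it is the CPU time split or the load average relative to the core count. The arithmetic is the same everywhere; a minimal sketch with made-up sample values:

package main

import (
	"fmt"
	"math"
)

// pct returns part/total as a percentage rounded to two decimal places,
// the same rounding used for cpu_user, cpu_load1_perc and friends.
func pct(part, total float64) float64 {
	return math.Round(part/total*100*100) / 100
}

func main() {
	// CPU time split: user seconds out of all accounted seconds.
	fmt.Println(pct(12.5, 160)) // 7.81

	// 1-minute load average relative to the number of cores.
	load1, cores := 3.2, 8.0
	fmt.Println(pct(load1, cores)) // 40
}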
func (c *minioResourceCollector) Collect(out chan<- prometheus.Metric) { var wg sync.WaitGroup - publish := func(in <-chan Metric) { + publish := func(in <-chan MetricV2) { defer wg.Done() for metric := range in { labels, values := getOrderedLabelValueArrays(metric.VariableLabels) @@ -436,18 +411,18 @@ func (c *minioResourceCollector) Collect(out chan<- prometheus.Metric) { // and returns reference of minio resource Collector // It creates the Prometheus Description which is used // to define Metric and help string -func newMinioResourceCollector(metricsGroups []*MetricsGroup) *minioResourceCollector { +func newMinioResourceCollector(metricsGroups []*MetricsGroupV2) *minioResourceCollector { return &minioResourceCollector{ metricsGroups: metricsGroups, desc: prometheus.NewDesc("minio_resource_stats", "Resource statistics exposed by MinIO server", nil, nil), } } -func prepareResourceMetrics(rm ResourceMetric, subSys MetricSubsystem, requireAvgMax bool) []Metric { +func prepareResourceMetrics(rm ResourceMetric, subSys MetricSubsystem, requireAvgMax bool) []MetricV2 { help := resourceMetricsHelpMap[rm.Name] name := rm.Name - metrics := make([]Metric, 0, 3) - metrics = append(metrics, Metric{ + metrics := make([]MetricV2, 0, 3) + metrics = append(metrics, MetricV2{ Description: getResourceMetricDescription(subSys, name, help), Value: rm.Current, VariableLabels: cloneMSS(rm.Labels), @@ -456,7 +431,7 @@ func prepareResourceMetrics(rm ResourceMetric, subSys MetricSubsystem, requireAv if requireAvgMax { avgName := MetricName(fmt.Sprintf("%s_avg", name)) avgHelp := fmt.Sprintf("%s (avg)", help) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getResourceMetricDescription(subSys, avgName, avgHelp), Value: math.Round(rm.Avg*100) / 100, VariableLabels: cloneMSS(rm.Labels), @@ -464,7 +439,7 @@ func prepareResourceMetrics(rm ResourceMetric, subSys MetricSubsystem, requireAv maxName := MetricName(fmt.Sprintf("%s_max", name)) maxHelp := fmt.Sprintf("%s (max)", help) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getResourceMetricDescription(subSys, maxName, maxHelp), Value: rm.Max, VariableLabels: cloneMSS(rm.Labels), @@ -484,12 +459,12 @@ func getResourceMetricDescription(subSys MetricSubsystem, name MetricName, help } } -func getResourceMetrics() *MetricsGroup { - mg := &MetricsGroup{ +func getResourceMetrics() *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: resourceMetricsCacheInterval, } - mg.RegisterRead(func(ctx context.Context) []Metric { - metrics := []Metric{} + mg.RegisterRead(func(ctx context.Context) []MetricV2 { + metrics := []MetricV2{} subSystems := []MetricSubsystem{interfaceSubsystem, memSubsystem, driveSubsystem, cpuSubsystem} resourceMetricsMapMu.RLock() diff --git a/cmd/metrics-router.go b/cmd/metrics-router.go index e2cf23ced4c43..f8b85c2541c35 100644 --- a/cmd/metrics-router.go +++ b/cmd/metrics-router.go @@ -18,10 +18,11 @@ package cmd import ( + "net/http" "strings" "github.com/minio/mux" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) const ( @@ -30,11 +31,15 @@ const ( prometheusMetricsV2BucketPath = "/v2/metrics/bucket" prometheusMetricsV2NodePath = "/v2/metrics/node" prometheusMetricsV2ResourcePath = "/v2/metrics/resource" + + // Metrics v3 endpoints + metricsV3Path = "/metrics/v3" ) // Standard env prometheus auth type const ( - EnvPrometheusAuthType = "MINIO_PROMETHEUS_AUTH_TYPE" + EnvPrometheusAuthType = "MINIO_PROMETHEUS_AUTH_TYPE" + EnvPrometheusOpenMetrics = 
"MINIO_PROMETHEUS_OPEN_METRICS" ) type prometheusAuthType string @@ -48,15 +53,23 @@ const ( func registerMetricsRouter(router *mux.Router) { // metrics router metricsRouter := router.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter() - authType := strings.ToLower(env.Get(EnvPrometheusAuthType, string(prometheusJWT))) + authType := prometheusAuthType(strings.ToLower(env.Get(EnvPrometheusAuthType, string(prometheusJWT)))) auth := AuthMiddleware - if prometheusAuthType(authType) == prometheusPublic { + if authType == prometheusPublic { auth = NoAuthMiddleware } + metricsRouter.Handle(prometheusMetricsPathLegacy, auth(metricsHandler())) metricsRouter.Handle(prometheusMetricsV2ClusterPath, auth(metricsServerHandler())) metricsRouter.Handle(prometheusMetricsV2BucketPath, auth(metricsBucketHandler())) metricsRouter.Handle(prometheusMetricsV2NodePath, auth(metricsNodeHandler())) metricsRouter.Handle(prometheusMetricsV2ResourcePath, auth(metricsResourceHandler())) + + // Metrics v3 + metricsV3Server := newMetricsV3Server(auth) + + // Register metrics v3 handler. It also accepts an optional query + // parameter `?list` - see handler for details. + metricsRouter.Methods(http.MethodGet).Path(metricsV3Path + "{pathComps:.*}").Handler(metricsV3Server) } diff --git a/cmd/metrics-v2.go b/cmd/metrics-v2.go index 3ae7cc8ad4069..e38750a580962 100644 --- a/cmd/metrics-v2.go +++ b/cmd/metrics-v2.go @@ -20,6 +20,7 @@ package cmd import ( "context" "fmt" + "maps" "math" "net/http" "runtime" @@ -49,12 +50,16 @@ var ( nodeCollector *minioNodeCollector clusterCollector *minioClusterCollector bucketCollector *minioBucketCollector - peerMetricsGroups []*MetricsGroup - bucketPeerMetricsGroups []*MetricsGroup + peerMetricsGroups []*MetricsGroupV2 + bucketPeerMetricsGroups []*MetricsGroupV2 ) +// v2MetricsMaxBuckets enforces a bucket count limit on metrics for v2 calls. +// If people hit this limit, they should move to v3, as certain calls explode with high bucket count. +const v2MetricsMaxBuckets = 100 + func init() { - clusterMetricsGroups := []*MetricsGroup{ + clusterMetricsGroups := []*MetricsGroupV2{ getNodeHealthMetrics(MetricsGroupOpts{dependGlobalNotificationSys: true}), getClusterStorageMetrics(MetricsGroupOpts{dependGlobalObjectAPI: true}), getClusterTierMetrics(MetricsGroupOpts{dependGlobalObjectAPI: true}), @@ -66,7 +71,7 @@ func init() { getBatchJobsMetrics(MetricsGroupOpts{dependGlobalObjectAPI: true}), } - peerMetricsGroups = []*MetricsGroup{ + peerMetricsGroups = []*MetricsGroupV2{ getGoMetrics(), getHTTPMetrics(MetricsGroupOpts{}), getNotificationMetrics(MetricsGroupOpts{dependGlobalLambdaTargetList: true}), @@ -83,13 +88,13 @@ func init() { getTierMetrics(), } - allMetricsGroups := func() (allMetrics []*MetricsGroup) { + allMetricsGroups := func() (allMetrics []*MetricsGroupV2) { allMetrics = append(allMetrics, clusterMetricsGroups...) allMetrics = append(allMetrics, peerMetricsGroups...) 
return allMetrics }() - nodeGroups := []*MetricsGroup{ + nodeGroups := []*MetricsGroupV2{ getNodeHealthMetrics(MetricsGroupOpts{dependGlobalNotificationSys: true}), getHTTPMetrics(MetricsGroupOpts{}), getNetworkMetrics(), @@ -103,13 +108,13 @@ func init() { getReplicationNodeMetrics(MetricsGroupOpts{dependGlobalObjectAPI: true, dependBucketTargetSys: true}), } - bucketMetricsGroups := []*MetricsGroup{ + bucketMetricsGroups := []*MetricsGroupV2{ getBucketUsageMetrics(MetricsGroupOpts{dependGlobalObjectAPI: true}), getHTTPMetrics(MetricsGroupOpts{bucketOnly: true}), getBucketTTFBMetric(), } - bucketPeerMetricsGroups = []*MetricsGroup{ + bucketPeerMetricsGroups = []*MetricsGroupV2{ getHTTPMetrics(MetricsGroupOpts{bucketOnly: true}), getBucketTTFBMetric(), } @@ -273,13 +278,10 @@ const ( vmemory = "virtual_memory_bytes" cpu = "cpu_total_seconds" - expiryPendingTasks MetricName = "expiry_pending_tasks" expiryMissedTasks MetricName = "expiry_missed_tasks" expiryMissedFreeVersions MetricName = "expiry_missed_freeversions" expiryMissedTierJournalTasks MetricName = "expiry_missed_tierjournal_tasks" expiryNumWorkers MetricName = "expiry_num_workers" - transitionPendingTasks MetricName = "transition_pending_tasks" - transitionActiveTasks MetricName = "transition_active_tasks" transitionMissedTasks MetricName = "transition_missed_immediate_tasks" transitionedBytes MetricName = "transitioned_bytes" @@ -295,18 +297,15 @@ const ( kmsRequestsFail = "request_failure" kmsUptime = "uptime" - webhookOnline = "online" - webhookQueueLength = "queue_length" - webhookTotalMessages = "total_messages" - webhookFailedMessages = "failed_messages" + webhookOnline = "online" ) const ( serverName = "server" ) -// MetricType for the types of metrics supported -type MetricType string +// MetricTypeV2 for the types of metrics supported +type MetricTypeV2 string const ( gaugeMetric = "gaugeMetric" @@ -320,11 +319,11 @@ type MetricDescription struct { Subsystem MetricSubsystem `json:"Subsystem"` Name MetricName `json:"MetricName"` Help string `json:"Help"` - Type MetricType `json:"Type"` + Type MetricTypeV2 `json:"Type"` } -// Metric captures the details for a metric -type Metric struct { +// MetricV2 captures the details for a metric +type MetricV2 struct { Description MetricDescription `json:"Description"` StaticLabels map[string]string `json:"StaticLabels"` Value float64 `json:"Value"` @@ -333,9 +332,9 @@ type Metric struct { Histogram map[string]uint64 `json:"Histogram"` } -// MetricsGroup are a group of metrics that are initialized together. -type MetricsGroup struct { - metricsCache *cachevalue.Cache[[]Metric] `msg:"-"` +// MetricsGroupV2 are a group of metrics that are initialized together. +type MetricsGroupV2 struct { + metricsCache *cachevalue.Cache[[]MetricV2] `msg:"-"` cacheInterval time.Duration metricsGroupOpts MetricsGroupOpts } @@ -358,65 +357,65 @@ type MetricsGroupOpts struct { // RegisterRead register the metrics populator function to be used // to populate new values upon cache invalidation. 
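Editor's note: a MetricsGroupV2 wraps its reader in a small TTL cache so Prometheus scrapes within cacheInterval reuse the previous values. A self-contained stand-in for that caching behavior (my own sketch, not the internal cachevalue package):

package main

import (
	"fmt"
	"sync"
	"time"
)

// cached wraps an expensive read function and serves the previous result
// until ttl has elapsed, similar in spirit to cachevalue.NewFromFunc with
// ReturnLastGood set.
type cached[T any] struct {
	mu      sync.Mutex
	ttl     time.Duration
	last    time.Time
	value   T
	refresh func() (T, error)
}

func (c *cached[T]) Get() T {
	c.mu.Lock()
	defer c.mu.Unlock()
	if time.Since(c.last) < c.ttl {
		return c.value
	}
	if v, err := c.refresh(); err == nil {
		c.value, c.last = v, time.Now()
	}
	// On error, keep returning the last good value.
	return c.value
}

func main() {
	reads := 0
	c := cached[[]float64]{ttl: time.Minute, refresh: func() ([]float64, error) {
		reads++ // pretend this walks /proc or queries the drives
		return []float64{1, 2, 3}, nil
	}}
	_ = c.Get()
	_ = c.Get()
	fmt.Println("expensive reads:", reads) // 1: second Get served from cache
}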
-func (g *MetricsGroup) RegisterRead(read func(context.Context) []Metric) { +func (g *MetricsGroupV2) RegisterRead(read func(context.Context) []MetricV2) { g.metricsCache = cachevalue.NewFromFunc(g.cacheInterval, cachevalue.Opts{ReturnLastGood: true}, - func() ([]Metric, error) { + func(ctx context.Context) ([]MetricV2, error) { if g.metricsGroupOpts.dependGlobalObjectAPI { objLayer := newObjectLayerFn() // Service not initialized yet if objLayer == nil { - return []Metric{}, nil + return []MetricV2{}, nil } } if g.metricsGroupOpts.dependGlobalAuthNPlugin { if globalAuthNPlugin == nil { - return []Metric{}, nil + return []MetricV2{}, nil } } if g.metricsGroupOpts.dependGlobalSiteReplicationSys { if !globalSiteReplicationSys.isEnabled() { - return []Metric{}, nil + return []MetricV2{}, nil } } if g.metricsGroupOpts.dependGlobalNotificationSys { if globalNotificationSys == nil { - return []Metric{}, nil + return []MetricV2{}, nil } } if g.metricsGroupOpts.dependGlobalKMS { if GlobalKMS == nil { - return []Metric{}, nil + return []MetricV2{}, nil } } if g.metricsGroupOpts.dependGlobalLambdaTargetList { if globalLambdaTargetList == nil { - return []Metric{}, nil + return []MetricV2{}, nil } } if g.metricsGroupOpts.dependGlobalIAMSys { if globalIAMSys == nil { - return []Metric{}, nil + return []MetricV2{}, nil } } if g.metricsGroupOpts.dependGlobalLockServer { if globalLockServer == nil { - return []Metric{}, nil + return []MetricV2{}, nil } } if g.metricsGroupOpts.dependGlobalIsDistErasure { if !globalIsDistErasure { - return []Metric{}, nil + return []MetricV2{}, nil } } if g.metricsGroupOpts.dependGlobalBackgroundHealState { if globalBackgroundHealState == nil { - return []Metric{}, nil + return []MetricV2{}, nil } } if g.metricsGroupOpts.dependBucketTargetSys { if globalBucketTargetSys == nil { - return []Metric{}, nil + return []MetricV2{}, nil } } return read(GlobalContext), nil @@ -424,8 +423,8 @@ func (g *MetricsGroup) RegisterRead(read func(context.Context) []Metric) { ) } -func (m *Metric) clone() Metric { - metric := Metric{ +func (m *MetricV2) clone() MetricV2 { + metric := MetricV2{ Description: m.Description, Value: m.Value, HistogramBucketLabel: m.HistogramBucketLabel, @@ -433,28 +432,22 @@ func (m *Metric) clone() Metric { VariableLabels: make(map[string]string, len(m.VariableLabels)), Histogram: make(map[string]uint64, len(m.Histogram)), } - for k, v := range m.StaticLabels { - metric.StaticLabels[k] = v - } - for k, v := range m.VariableLabels { - metric.VariableLabels[k] = v - } - for k, v := range m.Histogram { - metric.Histogram[k] = v - } + maps.Copy(metric.StaticLabels, m.StaticLabels) + maps.Copy(metric.VariableLabels, m.VariableLabels) + maps.Copy(metric.Histogram, m.Histogram) return metric } // Get - returns cached value always upton the configured TTL, // once the TTL expires "read()" registered function is called // to return the new values and updated. 
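Editor's note: clone() below now uses maps.Copy to duplicate the label and histogram maps before handing cached metrics to callers, so a caller mutating its copy cannot corrupt the cache. A minimal illustration of the same idea (metric shape is simplified):

package main

import (
	"fmt"
	"maps"
)

type metric struct {
	value  float64
	labels map[string]string
}

// clone returns a copy whose labels map is independent of the original.
func (m metric) clone() metric {
	c := metric{value: m.value, labels: make(map[string]string, len(m.labels))}
	maps.Copy(c.labels, m.labels)
	return c
}

func main() {
	orig := metric{value: 1, labels: map[string]string{"bucket": "testbucket"}}
	cp := orig.clone()
	cp.labels["bucket"] = "other" // does not touch the cached original
	fmt.Println(orig.labels["bucket"], cp.labels["bucket"])
}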
-func (g *MetricsGroup) Get() (metrics []Metric) { +func (g *MetricsGroupV2) Get() (metrics []MetricV2) { m, _ := g.metricsCache.Get() if len(m) == 0 { - return []Metric{} + return []MetricV2{} } - metrics = make([]Metric, 0, len(m)) + metrics = make([]MetricV2, 0, len(m)) for i := range m { metrics = append(metrics, m[i].clone()) } @@ -536,7 +529,17 @@ func getNodeDriveTimeoutErrorsMD() MetricDescription { Namespace: nodeMetricNamespace, Subsystem: driveSubsystem, Name: "errors_timeout", - Help: "Total number of drive timeout errors since server start", + Help: "Total number of drive timeout errors since server uptime", + Type: counterMetric, + } +} + +func getNodeDriveIOErrorsMD() MetricDescription { + return MetricDescription{ + Namespace: nodeMetricNamespace, + Subsystem: driveSubsystem, + Name: "errors_ioerror", + Help: "Total number of drive I/O errors since server uptime", Type: counterMetric, } } @@ -546,7 +549,7 @@ func getNodeDriveAvailabilityErrorsMD() MetricDescription { Namespace: nodeMetricNamespace, Subsystem: driveSubsystem, Name: "errors_availability", - Help: "Total number of drive I/O errors, permission denied and timeouts since server start", + Help: "Total number of drive I/O errors, timeouts since server uptime", Type: counterMetric, } } @@ -676,7 +679,17 @@ func getUsageLastScanActivityMD() MetricDescription { Namespace: minioMetricNamespace, Subsystem: usageSubsystem, Name: lastActivityTime, - Help: "Time elapsed (in nano seconds) since last scan activity.", + Help: "Time elapsed (in nano seconds) since last scan activity", + Type: gaugeMetric, + } +} + +func getBucketUsageLastScanActivityMD() MetricDescription { + return MetricDescription{ + Namespace: bucketMetricNamespace, + Subsystem: usageSubsystem, + Name: lastActivityTime, + Help: "Time elapsed (in nano seconds) since last scan activity", Type: gaugeMetric, } } @@ -813,7 +826,7 @@ func getClusterObjectVersionsMD() MetricDescription { func getClusterRepLinkLatencyCurrMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: replicationSubsystem, Name: currLinkLatency, Help: "Replication current link latency in milliseconds", @@ -823,7 +836,7 @@ func getClusterRepLinkLatencyCurrMD() MetricDescription { func getClusterRepLinkOnlineMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: replicationSubsystem, Name: linkOnline, Help: "Reports whether replication link is online (1) or offline(0)", @@ -833,7 +846,7 @@ func getClusterRepLinkOnlineMD() MetricDescription { func getClusterRepLinkCurrOfflineDurationMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: replicationSubsystem, Name: linkOfflineDuration, Help: "Duration of replication link being offline in seconds since last offline event", @@ -843,10 +856,10 @@ func getClusterRepLinkCurrOfflineDurationMD() MetricDescription { func getClusterRepLinkTotalOfflineDurationMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: replicationSubsystem, Name: linkDowntimeTotalDuration, - Help: "Total downtime of replication link in seconds since server start", + Help: "Total downtime of replication link in seconds since server uptime", Type: gaugeMetric, } } @@ -906,8 +919,8 @@ func getRepFailedBytesTotalMD(namespace MetricNamespace) MetricDescription { Namespace: 
namespace, Subsystem: replicationSubsystem, Name: totalFailedBytes, - Help: "Total number of bytes failed at least once to replicate since server start", - Type: gaugeMetric, + Help: "Total number of bytes failed at least once to replicate since server uptime", + Type: counterMetric, } } @@ -916,8 +929,8 @@ func getRepFailedOperationsTotalMD(namespace MetricNamespace) MetricDescription Namespace: namespace, Subsystem: replicationSubsystem, Name: totalFailedCount, - Help: "Total number of objects which failed replication since server start", - Type: gaugeMetric, + Help: "Total number of objects which failed replication since server uptime", + Type: counterMetric, } } @@ -927,7 +940,7 @@ func getRepSentBytesMD(namespace MetricNamespace) MetricDescription { Subsystem: replicationSubsystem, Name: sentBytes, Help: "Total number of bytes replicated to the target", - Type: gaugeMetric, + Type: counterMetric, } } @@ -951,7 +964,7 @@ func getRepReceivedBytesMD(namespace MetricNamespace) MetricDescription { Subsystem: replicationSubsystem, Name: receivedBytes, Help: helpText, - Type: gaugeMetric, + Type: counterMetric, } } @@ -971,7 +984,7 @@ func getRepReceivedOperationsMD(namespace MetricNamespace) MetricDescription { func getClusterReplMRFFailedOperationsMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: replicationSubsystem, Name: recentBacklogCount, Help: "Total number of objects seen in replication backlog in the last 5 minutes", @@ -984,14 +997,14 @@ func getClusterRepCredentialErrorsMD(namespace MetricNamespace) MetricDescriptio Namespace: namespace, Subsystem: replicationSubsystem, Name: credentialErrors, - Help: "Total number of replication credential errors since server start", + Help: "Total number of replication credential errors since server uptime", Type: counterMetric, } } func getClusterReplCurrQueuedOperationsMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: replicationSubsystem, Name: currInQueueCount, Help: "Total number of objects queued for replication in the last full minute", @@ -1001,7 +1014,7 @@ func getClusterReplCurrQueuedOperationsMD() MetricDescription { func getClusterReplCurrQueuedBytesMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: replicationSubsystem, Name: currInQueueBytes, Help: "Total number of bytes queued for replication in the last full minute", @@ -1011,7 +1024,7 @@ func getClusterReplCurrQueuedBytesMD() MetricDescription { func getClusterReplActiveWorkersCountMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: replicationSubsystem, Name: currActiveWorkers, Help: "Total number of active replication workers", @@ -1021,7 +1034,7 @@ func getClusterReplActiveWorkersCountMD() MetricDescription { func getClusterReplAvgActiveWorkersCountMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: replicationSubsystem, Name: avgActiveWorkers, Help: "Average number of active replication workers", @@ -1031,17 +1044,17 @@ func getClusterReplAvgActiveWorkersCountMD() MetricDescription { func getClusterReplMaxActiveWorkersCountMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: 
replicationSubsystem, Name: maxActiveWorkers, - Help: "Maximum number of active replication workers seen since server start", + Help: "Maximum number of active replication workers seen since server uptime", Type: gaugeMetric, } } func getClusterReplCurrentTransferRateMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: replicationSubsystem, Name: currTransferRate, Help: "Current replication transfer rate in bytes/sec", @@ -1051,17 +1064,17 @@ func getClusterReplCurrentTransferRateMD() MetricDescription { func getClusterRepLinkLatencyMaxMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: replicationSubsystem, Name: maxLinkLatency, - Help: "Maximum replication link latency in milliseconds seen since server start", + Help: "Maximum replication link latency in milliseconds seen since server uptime", Type: gaugeMetric, } } func getClusterRepLinkLatencyAvgMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: replicationSubsystem, Name: avgLinkLatency, Help: "Average replication link latency in milliseconds", @@ -1071,47 +1084,47 @@ func getClusterRepLinkLatencyAvgMD() MetricDescription { func getClusterReplAvgQueuedOperationsMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: replicationSubsystem, Name: avgInQueueCount, - Help: "Average number of objects queued for replication since server start", + Help: "Average number of objects queued for replication since server uptime", Type: gaugeMetric, } } func getClusterReplAvgQueuedBytesMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: replicationSubsystem, Name: avgInQueueBytes, - Help: "Average number of bytes queued for replication since server start", + Help: "Average number of bytes queued for replication since server uptime", Type: gaugeMetric, } } func getClusterReplMaxQueuedOperationsMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: replicationSubsystem, Name: maxInQueueCount, - Help: "Maximum number of objects queued for replication since server start", + Help: "Maximum number of objects queued for replication since server uptime", Type: gaugeMetric, } } func getClusterReplMaxQueuedBytesMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: replicationSubsystem, Name: maxInQueueBytes, - Help: "Maximum number of bytes queued for replication since server start", + Help: "Maximum number of bytes queued for replication since server uptime", Type: gaugeMetric, } } func getClusterReplAvgTransferRateMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: replicationSubsystem, Name: avgTransferRate, Help: "Average replication transfer rate in bytes/sec", @@ -1121,10 +1134,10 @@ func getClusterReplAvgTransferRateMD() MetricDescription { func getClusterReplMaxTransferRateMD() MetricDescription { return MetricDescription{ - Namespace: clusterMetricNamespace, + Namespace: nodeMetricNamespace, Subsystem: replicationSubsystem, Name: maxTransferRate, - Help: "Maximum replication transfer rate in bytes/sec seen since server 
start", + Help: "Maximum replication transfer rate in bytes/sec seen since server uptime", Type: gaugeMetric, } } @@ -1444,8 +1457,8 @@ func getHealObjectsTotalMD() MetricDescription { Namespace: healMetricNamespace, Subsystem: objectsSubsystem, Name: total, - Help: "Objects scanned in current self healing run", - Type: gaugeMetric, + Help: "Objects scanned since server uptime", + Type: counterMetric, } } @@ -1454,8 +1467,8 @@ func getHealObjectsHealTotalMD() MetricDescription { Namespace: healMetricNamespace, Subsystem: objectsSubsystem, Name: healTotal, - Help: "Objects healed in current self healing run", - Type: gaugeMetric, + Help: "Objects healed since server uptime", + Type: counterMetric, } } @@ -1464,8 +1477,8 @@ func getHealObjectsFailTotalMD() MetricDescription { Namespace: healMetricNamespace, Subsystem: objectsSubsystem, Name: errorsTotal, - Help: "Objects for which healing failed in current self healing run", - Type: gaugeMetric, + Help: "Objects with healing failed since server uptime", + Type: counterMetric, } } @@ -1474,7 +1487,7 @@ func getHealLastActivityTimeMD() MetricDescription { Namespace: healMetricNamespace, Subsystem: timeSubsystem, Name: lastActivityTime, - Help: "Time elapsed (in nano seconds) since last self healing activity.", + Help: "Time elapsed (in nano seconds) since last self healing activity", Type: gaugeMetric, } } @@ -1679,19 +1692,19 @@ func getMinIOProcessCPUTime() MetricDescription { } } -func getMinioProcMetrics() *MetricsGroup { - mg := &MetricsGroup{ +func getMinioProcMetrics() *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 10 * time.Second, } - mg.RegisterRead(func(ctx context.Context) (metrics []Metric) { - if runtime.GOOS == "windows" { + mg.RegisterRead(func(ctx context.Context) (metrics []MetricV2) { + if runtime.GOOS == globalWindowsOSName || runtime.GOOS == globalMacOSName { return nil } p, err := procfs.Self() if err != nil { - logger.LogOnceIf(ctx, err, string(nodeMetricNamespace)) - return + internalLogOnceIf(ctx, err, string(nodeMetricNamespace)) + return metrics } openFDs, _ := p.FileDescriptorsLen() @@ -1700,11 +1713,11 @@ func getMinioProcMetrics() *MetricsGroup { stat, _ := p.Stat() startTime, _ := stat.StartTime() - metrics = make([]Metric, 0, 20) + metrics = make([]MetricV2, 0, 20) if openFDs > 0 { metrics = append(metrics, - Metric{ + MetricV2{ Description: getMinioFDOpenMD(), Value: float64(openFDs), }, @@ -1713,7 +1726,7 @@ func getMinioProcMetrics() *MetricsGroup { if l.OpenFiles > 0 { metrics = append(metrics, - Metric{ + MetricV2{ Description: getMinioFDLimitMD(), Value: float64(l.OpenFiles), }) @@ -1721,7 +1734,7 @@ func getMinioProcMetrics() *MetricsGroup { if io.SyscR > 0 { metrics = append(metrics, - Metric{ + MetricV2{ Description: getMinIOProcessSysCallRMD(), Value: float64(io.SyscR), }) @@ -1729,7 +1742,7 @@ func getMinioProcMetrics() *MetricsGroup { if io.SyscW > 0 { metrics = append(metrics, - Metric{ + MetricV2{ Description: getMinIOProcessSysCallWMD(), Value: float64(io.SyscW), }) @@ -1737,7 +1750,7 @@ func getMinioProcMetrics() *MetricsGroup { if io.ReadBytes > 0 { metrics = append(metrics, - Metric{ + MetricV2{ Description: getMinioProcessIOReadBytesMD(), Value: float64(io.ReadBytes), }) @@ -1745,7 +1758,7 @@ func getMinioProcMetrics() *MetricsGroup { if io.WriteBytes > 0 { metrics = append(metrics, - Metric{ + MetricV2{ Description: getMinioProcessIOWriteBytesMD(), Value: float64(io.WriteBytes), }) @@ -1753,7 +1766,7 @@ func getMinioProcMetrics() *MetricsGroup { if io.RChar > 0 { metrics = 
append(metrics, - Metric{ + MetricV2{ Description: getMinioProcessIOReadCachedBytesMD(), Value: float64(io.RChar), }) @@ -1761,7 +1774,7 @@ func getMinioProcMetrics() *MetricsGroup { if io.WChar > 0 { metrics = append(metrics, - Metric{ + MetricV2{ Description: getMinioProcessIOWriteCachedBytesMD(), Value: float64(io.WChar), }) @@ -1769,7 +1782,7 @@ func getMinioProcMetrics() *MetricsGroup { if startTime > 0 { metrics = append(metrics, - Metric{ + MetricV2{ Description: getMinIOProcessStartTimeMD(), Value: startTime, }) @@ -1777,7 +1790,7 @@ func getMinioProcMetrics() *MetricsGroup { if !globalBootTime.IsZero() { metrics = append(metrics, - Metric{ + MetricV2{ Description: getMinIOProcessUptimeMD(), Value: time.Since(globalBootTime).Seconds(), }) @@ -1785,7 +1798,7 @@ func getMinioProcMetrics() *MetricsGroup { if stat.ResidentMemory() > 0 { metrics = append(metrics, - Metric{ + MetricV2{ Description: getMinIOProcessResidentMemory(), Value: float64(stat.ResidentMemory()), }) @@ -1793,7 +1806,7 @@ func getMinioProcMetrics() *MetricsGroup { if stat.VirtualMemory() > 0 { metrics = append(metrics, - Metric{ + MetricV2{ Description: getMinIOProcessVirtualMemory(), Value: float64(stat.VirtualMemory()), }) @@ -1801,26 +1814,26 @@ func getMinioProcMetrics() *MetricsGroup { if stat.CPUTime() > 0 { metrics = append(metrics, - Metric{ + MetricV2{ Description: getMinIOProcessCPUTime(), Value: stat.CPUTime(), }) } - return + return metrics }) return mg } -func getGoMetrics() *MetricsGroup { - mg := &MetricsGroup{ +func getGoMetrics() *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 10 * time.Second, } - mg.RegisterRead(func(ctx context.Context) (metrics []Metric) { - metrics = append(metrics, Metric{ + mg.RegisterRead(func(ctx context.Context) (metrics []MetricV2) { + metrics = append(metrics, MetricV2{ Description: getMinIOGORoutineCountMD(), Value: float64(runtime.NumGoroutine()), }) - return + return metrics }) return mg } @@ -1828,9 +1841,9 @@ func getGoMetrics() *MetricsGroup { // getHistogramMetrics fetches histogram metrics and returns it in a []Metric // Note: Typically used in MetricGroup.RegisterRead // -// The last parameter is added for compatibility - if true it lowercases the -// `api` label values. -func getHistogramMetrics(hist *prometheus.HistogramVec, desc MetricDescription, toLowerAPILabels bool) []Metric { +// The toLowerAPILabels parameter is added for compatibility, +// if set, it lowercases the `api` label values. 
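For illustration, a minimal standalone sketch of the per-bucket cap that the new limitBuckets path applies below. mapKeysSorted, v2MetricsMaxBuckets and the sample data are simplified stand-ins for the real helpers, not the actual implementation:

package main

import (
	"fmt"
	"sort"
)

// Assumed cap for the sketch; the real constant may differ.
const v2MetricsMaxBuckets = 2

// mapKeysSorted returns the map keys in sorted order so the buckets that
// survive the cap are deterministic between scrapes (illustrative helper).
func mapKeysSorted[V any](m map[string]V) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

func main() {
	// Histogram series grouped per bucket, as in the collection loop below.
	perBucket := map[string][]string{
		"alpha": {"alpha_le_0.050", "alpha_le_+Inf"},
		"beta":  {"beta_le_0.050", "beta_le_+Inf"},
		"gamma": {"gamma_le_0.050", "gamma_le_+Inf"},
	}

	names := mapKeysSorted(perBucket)
	names = names[:min(len(names), v2MetricsMaxBuckets)]

	var metrics []string
	for _, b := range names {
		metrics = append(metrics, perBucket[b]...)
	}
	fmt.Println(metrics) // only the alpha and beta series are emitted
}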
+func getHistogramMetrics(hist *prometheus.HistogramVec, desc MetricDescription, toLowerAPILabels, limitBuckets bool) []MetricV2 { ch := make(chan prometheus.Metric) go func() { defer xioutil.SafeClose(ch) @@ -1839,14 +1852,15 @@ func getHistogramMetrics(hist *prometheus.HistogramVec, desc MetricDescription, }() // Converts metrics received into internal []Metric type - var metrics []Metric + var metrics []MetricV2 + buckets := make(map[string][]MetricV2, v2MetricsMaxBuckets) for promMetric := range ch { dtoMetric := &dto.Metric{} err := promMetric.Write(dtoMetric) if err != nil { // Log error and continue to receive other metric // values - logger.LogIf(GlobalContext, err) + bugLogIf(GlobalContext, err) continue } @@ -1861,55 +1875,78 @@ func getHistogramMetrics(hist *prometheus.HistogramVec, desc MetricDescription, } } labels["le"] = fmt.Sprintf("%.3f", *b.UpperBound) - metric := Metric{ + metric := MetricV2{ Description: desc, VariableLabels: labels, Value: float64(b.GetCumulativeCount()), } - metrics = append(metrics, metric) + if limitBuckets && labels["bucket"] != "" { + buckets[labels["bucket"]] = append(buckets[labels["bucket"]], metric) + } else { + metrics = append(metrics, metric) + } } // add metrics with +Inf label labels1 := make(map[string]string) for _, lp := range dtoMetric.GetLabel() { - labels1[*lp.Name] = *lp.Value + if *lp.Name == "api" && toLowerAPILabels { + labels1[*lp.Name] = strings.ToLower(*lp.Value) + } else { + labels1[*lp.Name] = *lp.Value + } } labels1["le"] = fmt.Sprintf("%.3f", math.Inf(+1)) - metrics = append(metrics, Metric{ + + metric := MetricV2{ Description: desc, VariableLabels: labels1, - Value: dtoMetric.Counter.GetValue(), - }) + Value: float64(dtoMetric.Histogram.GetSampleCount()), + } + if limitBuckets && labels1["bucket"] != "" { + buckets[labels1["bucket"]] = append(buckets[labels1["bucket"]], metric) + } else { + metrics = append(metrics, metric) + } + } + + // Limit bucket metrics... + if limitBuckets { + bucketNames := mapKeysSorted(buckets) + bucketNames = bucketNames[:min(len(buckets), v2MetricsMaxBuckets)] + for _, b := range bucketNames { + metrics = append(metrics, buckets[b]...) 
+ } } return metrics } -func getBucketTTFBMetric() *MetricsGroup { - mg := &MetricsGroup{ +func getBucketTTFBMetric() *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 10 * time.Second, } - mg.RegisterRead(func(ctx context.Context) []Metric { + mg.RegisterRead(func(ctx context.Context) []MetricV2 { return getHistogramMetrics(bucketHTTPRequestsDuration, - getBucketTTFBDistributionMD(), true) + getBucketTTFBDistributionMD(), true, true) }) return mg } -func getS3TTFBMetric() *MetricsGroup { - mg := &MetricsGroup{ +func getS3TTFBMetric() *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 10 * time.Second, } - mg.RegisterRead(func(ctx context.Context) []Metric { + mg.RegisterRead(func(ctx context.Context) []MetricV2 { return getHistogramMetrics(httpRequestsDuration, - getS3TTFBDistributionMD(), true) + getS3TTFBDistributionMD(), true, true) }) return mg } -func getTierMetrics() *MetricsGroup { - mg := &MetricsGroup{ +func getTierMetrics() *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 10 * time.Second, } - mg.RegisterRead(func(ctx context.Context) []Metric { + mg.RegisterRead(func(ctx context.Context) []MetricV2 { return globalTierMetrics.Report() }) return mg @@ -2005,15 +2042,15 @@ func getBucketS3RequestsCanceledMD() MetricDescription { } } -func getILMNodeMetrics() *MetricsGroup { - mg := &MetricsGroup{ +func getILMNodeMetrics() *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 10 * time.Second, } - mg.RegisterRead(func(_ context.Context) []Metric { - expPendingTasks := Metric{ + mg.RegisterRead(func(_ context.Context) []MetricV2 { + expPendingTasks := MetricV2{ Description: getExpiryPendingTasksMD(), } - expMissedTasks := Metric{ + expMissedTasks := MetricV2{ Description: MetricDescription{ Namespace: nodeMetricNamespace, Subsystem: ilmSubsystem, @@ -2022,7 +2059,7 @@ func getILMNodeMetrics() *MetricsGroup { Type: counterMetric, }, } - expMissedFreeVersions := Metric{ + expMissedFreeVersions := MetricV2{ Description: MetricDescription{ Namespace: nodeMetricNamespace, Subsystem: ilmSubsystem, @@ -2031,7 +2068,7 @@ func getILMNodeMetrics() *MetricsGroup { Type: counterMetric, }, } - expMissedTierJournalTasks := Metric{ + expMissedTierJournalTasks := MetricV2{ Description: MetricDescription{ Namespace: nodeMetricNamespace, Subsystem: ilmSubsystem, @@ -2040,7 +2077,7 @@ func getILMNodeMetrics() *MetricsGroup { Type: counterMetric, }, } - expNumWorkers := Metric{ + expNumWorkers := MetricV2{ Description: MetricDescription{ Namespace: nodeMetricNamespace, Subsystem: ilmSubsystem, @@ -2049,13 +2086,13 @@ func getILMNodeMetrics() *MetricsGroup { Type: gaugeMetric, }, } - trPendingTasks := Metric{ + trPendingTasks := MetricV2{ Description: getTransitionPendingTasksMD(), } - trActiveTasks := Metric{ + trActiveTasks := MetricV2{ Description: getTransitionActiveTasksMD(), } - trMissedTasks := Metric{ + trMissedTasks := MetricV2{ Description: getTransitionMissedTasksMD(), } if globalExpiryState != nil { @@ -2070,7 +2107,7 @@ func getILMNodeMetrics() *MetricsGroup { trActiveTasks.Value = float64(globalTransitionState.ActiveTasks()) trMissedTasks.Value = float64(globalTransitionState.MissedImmediateTasks()) } - return []Metric{ + return []MetricV2{ expPendingTasks, expMissedTasks, expMissedFreeVersions, @@ -2084,18 +2121,18 @@ func getILMNodeMetrics() *MetricsGroup { return mg } -func getScannerNodeMetrics() *MetricsGroup { - mg := &MetricsGroup{ +func getScannerNodeMetrics() *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 10 * time.Second, } - 
mg.RegisterRead(func(_ context.Context) []Metric { - metrics := []Metric{ + mg.RegisterRead(func(_ context.Context) []MetricV2 { + metrics := []MetricV2{ { Description: MetricDescription{ Namespace: nodeMetricNamespace, Subsystem: scannerSubsystem, Name: "objects_scanned", - Help: "Total number of unique objects scanned since server start", + Help: "Total number of unique objects scanned since server uptime", Type: counterMetric, }, Value: float64(globalScannerMetrics.lifetime(scannerMetricScanObject)), @@ -2105,7 +2142,7 @@ func getScannerNodeMetrics() *MetricsGroup { Namespace: nodeMetricNamespace, Subsystem: scannerSubsystem, Name: "versions_scanned", - Help: "Total number of object versions scanned since server start", + Help: "Total number of object versions scanned since server uptime", Type: counterMetric, }, Value: float64(globalScannerMetrics.lifetime(scannerMetricApplyVersion)), @@ -2115,7 +2152,7 @@ func getScannerNodeMetrics() *MetricsGroup { Namespace: nodeMetricNamespace, Subsystem: scannerSubsystem, Name: "directories_scanned", - Help: "Total number of directories scanned since server start", + Help: "Total number of directories scanned since server uptime", Type: counterMetric, }, Value: float64(globalScannerMetrics.lifetime(scannerMetricScanFolder)), @@ -2125,7 +2162,7 @@ func getScannerNodeMetrics() *MetricsGroup { Namespace: nodeMetricNamespace, Subsystem: scannerSubsystem, Name: "bucket_scans_started", - Help: "Total number of bucket scans started since server start", + Help: "Total number of bucket scans started since server uptime", Type: counterMetric, }, Value: float64(globalScannerMetrics.lifetime(scannerMetricScanBucketDrive) + uint64(globalScannerMetrics.activeDrives())), @@ -2135,7 +2172,7 @@ func getScannerNodeMetrics() *MetricsGroup { Namespace: nodeMetricNamespace, Subsystem: scannerSubsystem, Name: "bucket_scans_finished", - Help: "Total number of bucket scans finished since server start", + Help: "Total number of bucket scans finished since server uptime", Type: counterMetric, }, Value: float64(globalScannerMetrics.lifetime(scannerMetricScanBucketDrive)), @@ -2145,7 +2182,7 @@ func getScannerNodeMetrics() *MetricsGroup { Namespace: nodeMetricNamespace, Subsystem: ilmSubsystem, Name: "versions_scanned", - Help: "Total number of object versions checked for ilm actions since server start", + Help: "Total number of object versions checked for ilm actions since server uptime", Type: counterMetric, }, Value: float64(globalScannerMetrics.lifetime(scannerMetricILM)), @@ -2157,12 +2194,12 @@ func getScannerNodeMetrics() *MetricsGroup { if v == 0 { continue } - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: nodeMetricNamespace, Subsystem: ilmSubsystem, Name: MetricName("action_count_" + toSnake(action.String())), - Help: "Total action outcome of lifecycle checks since server start", + Help: "Total action outcome of lifecycle checks since server uptime", Type: counterMetric, }, Value: float64(v), @@ -2173,12 +2210,12 @@ func getScannerNodeMetrics() *MetricsGroup { return mg } -func getIAMNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { - mg := &MetricsGroup{ +func getIAMNodeMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 10 * time.Second, metricsGroupOpts: opts, } - mg.RegisterRead(func(_ context.Context) (metrics []Metric) { + mg.RegisterRead(func(_ context.Context) (metrics []MetricV2) { lastSyncTime := 
atomic.LoadUint64(&globalIAMSys.LastRefreshTimeUnixNano) var sinceLastSyncMillis uint64 if lastSyncTime != 0 { @@ -2186,7 +2223,7 @@ func getIAMNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { } pluginAuthNMetrics := globalAuthNPlugin.Metrics() - metrics = []Metric{ + metrics = []MetricV2{ { Description: MetricDescription{ Namespace: nodeMetricNamespace, @@ -2202,7 +2239,7 @@ func getIAMNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { Namespace: nodeMetricNamespace, Subsystem: iamSubsystem, Name: "since_last_sync_millis", - Help: "Time (in milliseconds) since last successful IAM data sync.", + Help: "Time (in milliseconds) since last successful IAM data sync", Type: gaugeMetric, }, Value: float64(sinceLastSyncMillis), @@ -2212,7 +2249,7 @@ func getIAMNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { Namespace: nodeMetricNamespace, Subsystem: iamSubsystem, Name: "sync_successes", - Help: "Number of successful IAM data syncs since server start.", + Help: "Number of successful IAM data syncs since server uptime", Type: counterMetric, }, Value: float64(atomic.LoadUint64(&globalIAMSys.TotalRefreshSuccesses)), @@ -2222,7 +2259,7 @@ func getIAMNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { Namespace: nodeMetricNamespace, Subsystem: iamSubsystem, Name: "sync_failures", - Help: "Number of failed IAM data syncs since server start.", + Help: "Number of failed IAM data syncs since server uptime", Type: counterMetric, }, Value: float64(atomic.LoadUint64(&globalIAMSys.TotalRefreshFailures)), @@ -2295,8 +2332,8 @@ func getIAMNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { } // replication metrics for each node - published to the cluster endpoint with nodename as label -func getReplicationNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { - mg := &MetricsGroup{ +func getReplicationNodeMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 1 * time.Minute, metricsGroupOpts: opts, } @@ -2305,50 +2342,50 @@ func getReplicationNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { Offline = 0 ) - mg.RegisterRead(func(_ context.Context) []Metric { - var ml []Metric + mg.RegisterRead(func(_ context.Context) []MetricV2 { + var ml []MetricV2 // common operational metrics for bucket replication and site replication - published // at cluster level - if globalReplicationStats != nil { - qs := globalReplicationStats.getNodeQueueStatsSummary() - activeWorkersCount := Metric{ + if rStats := globalReplicationStats.Load(); rStats != nil { + qs := rStats.getNodeQueueStatsSummary() + activeWorkersCount := MetricV2{ Description: getClusterReplActiveWorkersCountMD(), } - avgActiveWorkersCount := Metric{ + avgActiveWorkersCount := MetricV2{ Description: getClusterReplAvgActiveWorkersCountMD(), } - maxActiveWorkersCount := Metric{ + maxActiveWorkersCount := MetricV2{ Description: getClusterReplMaxActiveWorkersCountMD(), } - currInQueueCount := Metric{ + currInQueueCount := MetricV2{ Description: getClusterReplCurrQueuedOperationsMD(), } - currInQueueBytes := Metric{ + currInQueueBytes := MetricV2{ Description: getClusterReplCurrQueuedBytesMD(), } - currTransferRate := Metric{ + currTransferRate := MetricV2{ Description: getClusterReplCurrentTransferRateMD(), } - avgQueueCount := Metric{ + avgQueueCount := MetricV2{ Description: getClusterReplAvgQueuedOperationsMD(), } - avgQueueBytes := Metric{ + avgQueueBytes := MetricV2{ Description: getClusterReplAvgQueuedBytesMD(), } - maxQueueCount := Metric{ + maxQueueCount := MetricV2{ Description: getClusterReplMaxQueuedOperationsMD(), } - 
maxQueueBytes := Metric{ + maxQueueBytes := MetricV2{ Description: getClusterReplMaxQueuedBytesMD(), } - avgTransferRate := Metric{ + avgTransferRate := MetricV2{ Description: getClusterReplAvgTransferRateMD(), } - maxTransferRate := Metric{ + maxTransferRate := MetricV2{ Description: getClusterReplMaxTransferRateMD(), } - mrfCount := Metric{ + mrfCount := MetricV2{ Description: getClusterReplMRFFailedOperationsMD(), Value: float64(qs.MRFStats.LastFailedCount), } @@ -2372,7 +2409,7 @@ func getReplicationNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { avgTransferRate.Value = tots.Avg maxTransferRate.Value = tots.Peak } - ml = []Metric{ + ml = []MetricV2{ activeWorkersCount, avgActiveWorkersCount, maxActiveWorkersCount, @@ -2390,7 +2427,7 @@ func getReplicationNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { } for ep, health := range globalBucketTargetSys.healthStats() { // link latency current - m := Metric{ + m := MetricV2{ Description: getClusterRepLinkLatencyCurrMD(), VariableLabels: map[string]string{ "endpoint": ep, @@ -2400,7 +2437,7 @@ func getReplicationNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { ml = append(ml, m) // link latency average - m = Metric{ + m = MetricV2{ Description: getClusterRepLinkLatencyAvgMD(), VariableLabels: map[string]string{ "endpoint": ep, @@ -2410,7 +2447,7 @@ func getReplicationNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { ml = append(ml, m) // link latency max - m = Metric{ + m = MetricV2{ Description: getClusterRepLinkLatencyMaxMD(), VariableLabels: map[string]string{ "endpoint": ep, @@ -2419,7 +2456,7 @@ func getReplicationNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { m.Value = float64(health.latency.peak / time.Millisecond) ml = append(ml, m) - linkOnline := Metric{ + linkOnline := MetricV2{ Description: getClusterRepLinkOnlineMD(), VariableLabels: map[string]string{ "endpoint": ep, @@ -2431,7 +2468,7 @@ func getReplicationNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { } linkOnline.Value = float64(online) ml = append(ml, linkOnline) - offlineDuration := Metric{ + offlineDuration := MetricV2{ Description: getClusterRepLinkCurrOfflineDurationMD(), VariableLabels: map[string]string{ "endpoint": ep, @@ -2444,19 +2481,15 @@ func getReplicationNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { offlineDuration.Value = float64(currDowntime / time.Second) ml = append(ml, offlineDuration) - downtimeDuration := Metric{ + downtimeDuration := MetricV2{ Description: getClusterRepLinkTotalOfflineDurationMD(), VariableLabels: map[string]string{ "endpoint": ep, }, } - dwntime := currDowntime - if health.offlineDuration > currDowntime { - dwntime = health.offlineDuration - } + dwntime := max(health.offlineDuration, currDowntime) downtimeDuration.Value = float64(dwntime / time.Second) ml = append(ml, downtimeDuration) - } return ml }) @@ -2464,118 +2497,118 @@ func getReplicationNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { } // replication metrics for site replication -func getReplicationSiteMetrics(opts MetricsGroupOpts) *MetricsGroup { - mg := &MetricsGroup{ +func getReplicationSiteMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 1 * time.Minute, metricsGroupOpts: opts, } - mg.RegisterRead(func(_ context.Context) []Metric { - ml := []Metric{} + mg.RegisterRead(func(_ context.Context) []MetricV2 { + ml := []MetricV2{} // metrics pertinent to site replication - overall roll up. 
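The guard rewritten above, from a plain nil check on globalReplicationStats to rStats := globalReplicationStats.Load(), is the usual atomic-pointer snapshot pattern. A reduced sketch with illustrative types (the real stats type is far richer):

package main

import (
	"fmt"
	"sync/atomic"
)

// Simplified stand-in for the real replication stats type.
type ReplicationStats struct {
	Queued int
}

var globalReplicationStats atomic.Pointer[ReplicationStats]

func report() {
	// Load once and keep using the snapshot; another goroutine may swap
	// the global pointer at any time.
	if rStats := globalReplicationStats.Load(); rStats != nil {
		fmt.Println("queued:", rStats.Queued)
	}
}

func main() {
	report() // prints nothing: stats not initialized yet
	globalReplicationStats.Store(&ReplicationStats{Queued: 3})
	report() // queued: 3
}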
if globalSiteReplicationSys.isEnabled() { m, err := globalSiteReplicationSys.getSiteMetrics(GlobalContext) if err != nil { - logger.LogIf(GlobalContext, err) + metricsLogIf(GlobalContext, err) return ml } - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getRepReceivedBytesMD(clusterMetricNamespace), Value: float64(m.ReplicaSize), }) - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getRepReceivedOperationsMD(clusterMetricNamespace), Value: float64(m.ReplicaCount), }) for _, stat := range m.Metrics { - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getRepFailedBytesLastMinuteMD(clusterMetricNamespace), Value: float64(stat.Failed.LastMinute.Bytes), VariableLabels: map[string]string{"endpoint": stat.Endpoint}, }) - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getRepFailedOperationsLastMinuteMD(clusterMetricNamespace), Value: stat.Failed.LastMinute.Count, VariableLabels: map[string]string{"endpoint": stat.Endpoint}, }) - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getRepFailedBytesLastHourMD(clusterMetricNamespace), Value: float64(stat.Failed.LastHour.Bytes), VariableLabels: map[string]string{"endpoint": stat.Endpoint}, }) - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getRepFailedOperationsLastHourMD(clusterMetricNamespace), Value: stat.Failed.LastHour.Count, VariableLabels: map[string]string{"endpoint": stat.Endpoint}, }) - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getRepFailedBytesTotalMD(clusterMetricNamespace), Value: float64(stat.Failed.Totals.Bytes), VariableLabels: map[string]string{"endpoint": stat.Endpoint}, }) - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getRepFailedOperationsTotalMD(clusterMetricNamespace), Value: stat.Failed.Totals.Count, VariableLabels: map[string]string{"endpoint": stat.Endpoint}, }) - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getRepSentBytesMD(clusterMetricNamespace), Value: float64(stat.ReplicatedSize), VariableLabels: map[string]string{"endpoint": stat.Endpoint}, }) - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getRepSentOperationsMD(clusterMetricNamespace), Value: float64(stat.ReplicatedCount), VariableLabels: map[string]string{"endpoint": stat.Endpoint}, }) if c, ok := stat.Failed.ErrCounts["AccessDenied"]; ok { - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getClusterRepCredentialErrorsMD(clusterMetricNamespace), Value: float64(c), VariableLabels: map[string]string{"endpoint": stat.Endpoint}, }) } } - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getClusterReplProxiedGetOperationsMD(clusterMetricNamespace), Value: float64(m.Proxied.GetTotal), }) - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getClusterReplProxiedHeadOperationsMD(clusterMetricNamespace), Value: float64(m.Proxied.HeadTotal), }) - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getClusterReplProxiedPutTaggingOperationsMD(clusterMetricNamespace), Value: float64(m.Proxied.PutTagTotal), }) - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getClusterReplProxiedGetTaggingOperationsMD(clusterMetricNamespace), Value: float64(m.Proxied.GetTagTotal), }) - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getClusterReplProxiedRmvTaggingOperationsMD(clusterMetricNamespace), Value: float64(m.Proxied.RmvTagTotal), }) - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: 
getClusterReplProxiedGetFailedOperationsMD(clusterMetricNamespace), Value: float64(m.Proxied.GetFailedTotal), }) - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getClusterReplProxiedHeadFailedOperationsMD(clusterMetricNamespace), Value: float64(m.Proxied.HeadFailedTotal), }) - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getClusterReplProxiedPutTaggingFailedOperationsMD(clusterMetricNamespace), Value: float64(m.Proxied.PutTagFailedTotal), }) - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getClusterReplProxiedGetTaggingFailedOperationsMD(clusterMetricNamespace), Value: float64(m.Proxied.GetTagFailedTotal), }) - ml = append(ml, Metric{ + ml = append(ml, MetricV2{ Description: getClusterReplProxiedRmvTaggingFailedOperationsMD(clusterMetricNamespace), Value: float64(m.Proxied.RmvTagFailedTotal), }) @@ -2586,130 +2619,126 @@ func getReplicationSiteMetrics(opts MetricsGroupOpts) *MetricsGroup { return mg } -func getMinioVersionMetrics() *MetricsGroup { - mg := &MetricsGroup{ +func getMinioVersionMetrics() *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 10 * time.Second, } - mg.RegisterRead(func(_ context.Context) (metrics []Metric) { - metrics = append(metrics, Metric{ + mg.RegisterRead(func(_ context.Context) (metrics []MetricV2) { + metrics = append(metrics, MetricV2{ Description: getMinIOCommitMD(), VariableLabels: map[string]string{"commit": CommitID}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getMinIOVersionMD(), VariableLabels: map[string]string{"version": Version}, }) - return + return metrics }) return mg } -func getNodeHealthMetrics(opts MetricsGroupOpts) *MetricsGroup { - mg := &MetricsGroup{ +func getNodeHealthMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 1 * time.Minute, metricsGroupOpts: opts, } - mg.RegisterRead(func(_ context.Context) (metrics []Metric) { - metrics = make([]Metric, 0, 16) + mg.RegisterRead(func(_ context.Context) (metrics []MetricV2) { + metrics = make([]MetricV2, 0, 16) nodesUp, nodesDown := globalNotificationSys.GetPeerOnlineCount() - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getNodeOnlineTotalMD(), Value: float64(nodesUp), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getNodeOfflineTotalMD(), Value: float64(nodesDown), }) - return + return metrics }) return mg } -func getMinioHealingMetrics(opts MetricsGroupOpts) *MetricsGroup { - mg := &MetricsGroup{ +func getMinioHealingMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 10 * time.Second, metricsGroupOpts: opts, } - mg.RegisterRead(func(_ context.Context) (metrics []Metric) { + mg.RegisterRead(func(_ context.Context) (metrics []MetricV2) { bgSeq, exists := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID) if !exists { - return + return metrics } if bgSeq.lastHealActivity.IsZero() { - return + return metrics } - metrics = make([]Metric, 0, 5) - metrics = append(metrics, Metric{ + metrics = make([]MetricV2, 0, 5) + metrics = append(metrics, MetricV2{ Description: getHealLastActivityTimeMD(), Value: float64(time.Since(bgSeq.lastHealActivity)), }) metrics = append(metrics, getObjectsScanned(bgSeq)...) metrics = append(metrics, getHealedItems(bgSeq)...) metrics = append(metrics, getFailedItems(bgSeq)...) 
- return + return metrics }) return mg } -func getFailedItems(seq *healSequence) (m []Metric) { - items := seq.gethealFailedItemsMap() - m = make([]Metric, 0, len(items)) +func getFailedItems(seq *healSequence) (m []MetricV2) { + items := seq.getHealFailedItemsMap() + m = make([]MetricV2, 0, len(items)) for k, v := range items { - s := strings.Split(k, ",") - m = append(m, Metric{ - Description: getHealObjectsFailTotalMD(), - VariableLabels: map[string]string{ - "mount_path": s[0], - "volume_status": s[1], - }, - Value: float64(v), + m = append(m, MetricV2{ + Description: getHealObjectsFailTotalMD(), + VariableLabels: map[string]string{"type": string(k)}, + Value: float64(v), }) } - return + return m } -func getHealedItems(seq *healSequence) (m []Metric) { +func getHealedItems(seq *healSequence) (m []MetricV2) { items := seq.getHealedItemsMap() - m = make([]Metric, 0, len(items)) + m = make([]MetricV2, 0, len(items)) for k, v := range items { - m = append(m, Metric{ + m = append(m, MetricV2{ Description: getHealObjectsHealTotalMD(), VariableLabels: map[string]string{"type": string(k)}, Value: float64(v), }) } - return + return m } -func getObjectsScanned(seq *healSequence) (m []Metric) { +func getObjectsScanned(seq *healSequence) (m []MetricV2) { items := seq.getScannedItemsMap() - m = make([]Metric, 0, len(items)) + m = make([]MetricV2, 0, len(items)) for k, v := range items { - m = append(m, Metric{ + m = append(m, MetricV2{ Description: getHealObjectsTotalMD(), VariableLabels: map[string]string{"type": string(k)}, Value: float64(v), }) } - return + return m } -func getDistLockMetrics(opts MetricsGroupOpts) *MetricsGroup { - mg := &MetricsGroup{ +func getDistLockMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 1 * time.Second, metricsGroupOpts: opts, } - mg.RegisterRead(func(ctx context.Context) []Metric { + mg.RegisterRead(func(ctx context.Context) []MetricV2 { if !globalIsDistErasure { - return []Metric{} + return []MetricV2{} } st := globalLockServer.stats() - metrics := make([]Metric, 0, 3) - metrics = append(metrics, Metric{ + metrics := make([]MetricV2, 0, 3) + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: minioNamespace, Subsystem: "locks", @@ -2719,7 +2748,7 @@ func getDistLockMetrics(opts MetricsGroupOpts) *MetricsGroup { }, Value: float64(st.Total), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: minioNamespace, Subsystem: "locks", @@ -2729,7 +2758,7 @@ func getDistLockMetrics(opts MetricsGroupOpts) *MetricsGroup { }, Value: float64(st.Writes), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: minioNamespace, Subsystem: "locks", @@ -2744,17 +2773,17 @@ func getDistLockMetrics(opts MetricsGroupOpts) *MetricsGroup { return mg } -func getNotificationMetrics(opts MetricsGroupOpts) *MetricsGroup { - mg := &MetricsGroup{ +func getNotificationMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 10 * time.Second, metricsGroupOpts: opts, } - mg.RegisterRead(func(ctx context.Context) []Metric { - metrics := make([]Metric, 0, 3) + mg.RegisterRead(func(ctx context.Context) []MetricV2 { + metrics := make([]MetricV2, 0, 3) if globalEventNotifier != nil { nstats := globalEventNotifier.targetList.Stats() - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: minioNamespace, Subsystem: 
notifySubsystem, @@ -2764,7 +2793,7 @@ func getNotificationMetrics(opts MetricsGroupOpts) *MetricsGroup { }, Value: float64(nstats.CurrentSendCalls), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: minioNamespace, Subsystem: notifySubsystem, @@ -2774,7 +2803,7 @@ func getNotificationMetrics(opts MetricsGroupOpts) *MetricsGroup { }, Value: float64(nstats.EventsSkipped), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: minioNamespace, Subsystem: notifySubsystem, @@ -2784,7 +2813,7 @@ func getNotificationMetrics(opts MetricsGroupOpts) *MetricsGroup { }, Value: float64(nstats.EventsErrorsTotal), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: minioNamespace, Subsystem: notifySubsystem, @@ -2795,7 +2824,7 @@ func getNotificationMetrics(opts MetricsGroupOpts) *MetricsGroup { Value: float64(nstats.TotalEvents), }) for id, st := range nstats.TargetStats { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: minioNamespace, Subsystem: notifySubsystem, @@ -2806,7 +2835,7 @@ func getNotificationMetrics(opts MetricsGroupOpts) *MetricsGroup { VariableLabels: map[string]string{"target_id": id.ID, "target_name": id.Name}, Value: float64(st.TotalEvents), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: minioNamespace, Subsystem: notifySubsystem, @@ -2817,7 +2846,7 @@ func getNotificationMetrics(opts MetricsGroupOpts) *MetricsGroup { VariableLabels: map[string]string{"target_id": id.ID, "target_name": id.Name}, Value: float64(st.FailedEvents), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: minioNamespace, Subsystem: notifySubsystem, @@ -2828,7 +2857,7 @@ func getNotificationMetrics(opts MetricsGroupOpts) *MetricsGroup { VariableLabels: map[string]string{"target_id": id.ID, "target_name": id.Name}, Value: float64(st.CurrentSendCalls), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: minioNamespace, Subsystem: notifySubsystem, @@ -2844,7 +2873,7 @@ func getNotificationMetrics(opts MetricsGroupOpts) *MetricsGroup { lstats := globalLambdaTargetList.Stats() for _, st := range lstats.TargetStats { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: minioNamespace, Subsystem: lambdaSubsystem, @@ -2854,7 +2883,7 @@ func getNotificationMetrics(opts MetricsGroupOpts) *MetricsGroup { VariableLabels: map[string]string{"target_id": st.ID.ID, "target_name": st.ID.Name}, Value: float64(st.ActiveRequests), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: minioNamespace, Subsystem: lambdaSubsystem, @@ -2865,7 +2894,7 @@ func getNotificationMetrics(opts MetricsGroupOpts) *MetricsGroup { VariableLabels: map[string]string{"target_id": st.ID.ID, "target_name": st.ID.Name}, Value: float64(st.TotalRequests), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: minioNamespace, Subsystem: lambdaSubsystem, @@ -2881,7 +2910,7 @@ func getNotificationMetrics(opts MetricsGroupOpts) *MetricsGroup { // Audit and system: audit := logger.CurrentStats() for 
id, st := range audit { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: minioNamespace, Subsystem: auditSubsystem, @@ -2892,7 +2921,7 @@ func getNotificationMetrics(opts MetricsGroupOpts) *MetricsGroup { VariableLabels: map[string]string{"target_id": id}, Value: float64(st.QueueLength), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: minioNamespace, Subsystem: auditSubsystem, @@ -2903,7 +2932,7 @@ func getNotificationMetrics(opts MetricsGroupOpts) *MetricsGroup { VariableLabels: map[string]string{"target_id": id}, Value: float64(st.TotalMessages), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: minioNamespace, Subsystem: auditSubsystem, @@ -2920,94 +2949,100 @@ func getNotificationMetrics(opts MetricsGroupOpts) *MetricsGroup { return mg } -func getHTTPMetrics(opts MetricsGroupOpts) *MetricsGroup { - mg := &MetricsGroup{ +func getHTTPMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 10 * time.Second, metricsGroupOpts: opts, } - mg.RegisterRead(func(ctx context.Context) (metrics []Metric) { + mg.RegisterRead(func(ctx context.Context) (metrics []MetricV2) { if !mg.metricsGroupOpts.bucketOnly { httpStats := globalHTTPStats.toServerHTTPStats(true) - metrics = make([]Metric, 0, 3+ + metrics = make([]MetricV2, 0, 3+ len(httpStats.CurrentS3Requests.APIStats)+ len(httpStats.TotalS3Requests.APIStats)+ len(httpStats.TotalS3Errors.APIStats)+ len(httpStats.TotalS35xxErrors.APIStats)+ len(httpStats.TotalS34xxErrors.APIStats)) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getS3RejectedAuthRequestsTotalMD(), Value: float64(httpStats.TotalS3RejectedAuth), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getS3RejectedTimestampRequestsTotalMD(), Value: float64(httpStats.TotalS3RejectedTime), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getS3RejectedHeaderRequestsTotalMD(), Value: float64(httpStats.TotalS3RejectedHeader), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getS3RejectedInvalidRequestsTotalMD(), Value: float64(httpStats.TotalS3RejectedInvalid), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getS3RequestsInQueueMD(), Value: float64(httpStats.S3RequestsInQueue), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getIncomingS3RequestsMD(), Value: float64(httpStats.S3RequestsIncoming), }) for api, value := range httpStats.CurrentS3Requests.APIStats { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getS3RequestsInFlightMD(), Value: float64(value), VariableLabels: map[string]string{"api": api}, }) } for api, value := range httpStats.TotalS3Requests.APIStats { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getS3RequestsTotalMD(), Value: float64(value), VariableLabels: map[string]string{"api": api}, }) } for api, value := range httpStats.TotalS3Errors.APIStats { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getS3RequestsErrorsMD(), Value: float64(value), VariableLabels: map[string]string{"api": api}, }) } for api, value := range httpStats.TotalS35xxErrors.APIStats { - metrics = append(metrics, 
Metric{ + metrics = append(metrics, MetricV2{ Description: getS3Requests5xxErrorsMD(), Value: float64(value), VariableLabels: map[string]string{"api": api}, }) } for api, value := range httpStats.TotalS34xxErrors.APIStats { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getS3Requests4xxErrorsMD(), Value: float64(value), VariableLabels: map[string]string{"api": api}, }) } for api, value := range httpStats.TotalS3Canceled.APIStats { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getS3RequestsCanceledMD(), Value: float64(value), VariableLabels: map[string]string{"api": api}, }) } - return + return metrics } - for bucket, inOut := range globalBucketConnStats.getS3InOutBytes() { + // If we have too many, limit them + bConnStats := globalBucketConnStats.getS3InOutBytes() + buckets := mapKeysSorted(bConnStats) + buckets = buckets[:min(v2MetricsMaxBuckets, len(buckets))] + + for _, bucket := range buckets { + inOut := bConnStats[bucket] recvBytes := inOut.In if recvBytes > 0 { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getBucketTrafficReceivedBytes(), Value: float64(recvBytes), VariableLabels: map[string]string{"bucket": bucket}, @@ -3015,7 +3050,7 @@ func getHTTPMetrics(opts MetricsGroupOpts) *MetricsGroup { } sentBytes := inOut.Out if sentBytes > 0 { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getBucketTrafficSentBytes(), Value: float64(sentBytes), VariableLabels: map[string]string{"bucket": bucket}, @@ -3024,7 +3059,7 @@ func getHTTPMetrics(opts MetricsGroupOpts) *MetricsGroup { httpStats := globalBucketHTTPStats.load(bucket) for k, v := range httpStats.currentS3Requests.Load(true) { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getBucketS3RequestsInFlightMD(), Value: float64(v), VariableLabels: map[string]string{"bucket": bucket, "api": k}, @@ -3032,7 +3067,7 @@ func getHTTPMetrics(opts MetricsGroupOpts) *MetricsGroup { } for k, v := range httpStats.totalS3Requests.Load(true) { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getBucketS3RequestsTotalMD(), Value: float64(v), VariableLabels: map[string]string{"bucket": bucket, "api": k}, @@ -3040,7 +3075,7 @@ func getHTTPMetrics(opts MetricsGroupOpts) *MetricsGroup { } for k, v := range httpStats.totalS3Canceled.Load(true) { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getBucketS3RequestsCanceledMD(), Value: float64(v), VariableLabels: map[string]string{"bucket": bucket, "api": k}, @@ -3048,7 +3083,7 @@ func getHTTPMetrics(opts MetricsGroupOpts) *MetricsGroup { } for k, v := range httpStats.totalS34xxErrors.Load(true) { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getBucketS3Requests4xxErrorsMD(), Value: float64(v), VariableLabels: map[string]string{"bucket": bucket, "api": k}, @@ -3056,7 +3091,7 @@ func getHTTPMetrics(opts MetricsGroupOpts) *MetricsGroup { } for k, v := range httpStats.totalS35xxErrors.Load(true) { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getBucketS3Requests5xxErrorsMD(), Value: float64(v), VariableLabels: map[string]string{"bucket": bucket, "api": k}, @@ -3064,78 +3099,78 @@ func getHTTPMetrics(opts MetricsGroupOpts) *MetricsGroup { } } - return + return metrics }) return mg } -func getNetworkMetrics() *MetricsGroup { - mg := &MetricsGroup{ +func 
getNetworkMetrics() *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 10 * time.Second, } - mg.RegisterRead(func(ctx context.Context) (metrics []Metric) { - metrics = make([]Metric, 0, 10) + mg.RegisterRead(func(ctx context.Context) (metrics []MetricV2) { + metrics = make([]MetricV2, 0, 10) connStats := globalConnStats.toServerConnStats() rpcStats := rest.GetRPCStats() if globalIsDistErasure { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getInternodeFailedRequests(), Value: float64(rpcStats.Errs), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getInternodeTCPDialTimeout(), Value: float64(rpcStats.DialErrs), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getInternodeTCPAvgDuration(), Value: float64(rpcStats.DialAvgDuration), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getInterNodeSentBytesMD(), Value: float64(connStats.internodeOutputBytes), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getInterNodeReceivedBytesMD(), Value: float64(connStats.internodeInputBytes), }) } - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getS3SentBytesMD(), Value: float64(connStats.s3OutputBytes), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getS3ReceivedBytesMD(), Value: float64(connStats.s3InputBytes), }) - return + return metrics }) return mg } -func getClusterUsageMetrics(opts MetricsGroupOpts) *MetricsGroup { - mg := &MetricsGroup{ +func getClusterUsageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 1 * time.Minute, metricsGroupOpts: opts, } - mg.RegisterRead(func(ctx context.Context) (metrics []Metric) { + mg.RegisterRead(func(ctx context.Context) (metrics []MetricV2) { objLayer := newObjectLayerFn() if objLayer == nil { - return + return metrics } - metrics = make([]Metric, 0, 50) + metrics = make([]MetricV2, 0, 50) dataUsageInfo, err := loadDataUsageFromBackend(ctx, objLayer) if err != nil { - logger.LogIf(ctx, err) - return + metricsLogIf(ctx, err) + return metrics } // data usage has not captured any data yet. 
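The recurring change from a bare return to return metrics in these readers does not alter behavior: with a named result parameter, both forms hand back the same slice, the explicit form simply makes that visible at the return site. A tiny standalone comparison:

package main

import "fmt"

// Both readers behave identically; "metrics" is a named result parameter.
func nakedReturn() (metrics []string) {
	metrics = append(metrics, "node_up")
	return // returns the named result implicitly
}

func explicitReturn() (metrics []string) {
	metrics = append(metrics, "node_up")
	return metrics // same value, intent spelled out
}

func main() {
	fmt.Println(nakedReturn())    // [node_up]
	fmt.Println(explicitReturn()) // [node_up]
}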
if dataUsageInfo.LastUpdate.IsZero() { - return + return metrics } - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getUsageLastScanActivityMD(), Value: float64(time.Since(dataUsageInfo.LastUpdate)), }) @@ -3176,106 +3211,111 @@ func getClusterUsageMetrics(opts MetricsGroupOpts) *MetricsGroup { } } - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterUsageTotalBytesMD(), Value: float64(clusterSize), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterUsageObjectsTotalMD(), Value: float64(clusterObjectsCount), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterUsageVersionsTotalMD(), Value: float64(clusterVersionsCount), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterUsageDeleteMarkersTotalMD(), Value: float64(clusterDeleteMarkersCount), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterObjectDistributionMD(), Histogram: clusterObjectSizesHistogram, HistogramBucketLabel: "range", }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterObjectVersionsMD(), Histogram: clusterVersionsHistogram, HistogramBucketLabel: "range", }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterBucketsTotalMD(), Value: float64(clusterBuckets), }) - return + return metrics }) return mg } -func getBucketUsageMetrics(opts MetricsGroupOpts) *MetricsGroup { - mg := &MetricsGroup{ +func getBucketUsageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 1 * time.Minute, metricsGroupOpts: opts, } - mg.RegisterRead(func(ctx context.Context) (metrics []Metric) { + mg.RegisterRead(func(ctx context.Context) (metrics []MetricV2) { objLayer := newObjectLayerFn() - metrics = make([]Metric, 0, 50) + metrics = make([]MetricV2, 0, 50) dataUsageInfo, err := loadDataUsageFromBackend(ctx, objLayer) if err != nil { - logger.LogIf(ctx, err) - return + metricsLogIf(ctx, err) + return metrics } // data usage has not captured any data yet. 
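A much-reduced sketch of the group shape used throughout this file, assuming, as the cacheInterval field suggests, that the real MetricsGroupV2 serves a cached result between reads; every type and method here is an illustrative stand-in, not the actual implementation:

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// MetricV2 stand-in: just enough fields for the example.
type MetricV2 struct {
	Name  string
	Value float64
}

// MetricsGroupV2 stand-in: a read function plus a cache window.
type MetricsGroupV2 struct {
	cacheInterval time.Duration

	mu         sync.Mutex
	lastRead   time.Time
	lastResult []MetricV2
	read       func(ctx context.Context) []MetricV2
}

func (g *MetricsGroupV2) RegisterRead(read func(ctx context.Context) []MetricV2) {
	g.read = read
}

// Get returns the cached result while it is younger than cacheInterval,
// otherwise it invokes the registered reader again.
func (g *MetricsGroupV2) Get(ctx context.Context) []MetricV2 {
	g.mu.Lock()
	defer g.mu.Unlock()
	if g.lastResult != nil && time.Since(g.lastRead) < g.cacheInterval {
		return g.lastResult
	}
	g.lastResult = g.read(ctx)
	g.lastRead = time.Now()
	return g.lastResult
}

func main() {
	mg := &MetricsGroupV2{cacheInterval: 10 * time.Second}
	mg.RegisterRead(func(ctx context.Context) []MetricV2 {
		return []MetricV2{{Name: "cluster_usage_total_bytes", Value: 1 << 30}}
	})
	fmt.Println(mg.Get(context.Background())) // served from cache for the next 10s
}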
if dataUsageInfo.LastUpdate.IsZero() { - return + return metrics } - metrics = append(metrics, Metric{ - Description: getUsageLastScanActivityMD(), + metrics = append(metrics, MetricV2{ + Description: getBucketUsageLastScanActivityMD(), Value: float64(time.Since(dataUsageInfo.LastUpdate)), }) var bucketReplStats map[string]BucketStats if !globalSiteReplicationSys.isEnabled() { - bucketReplStats = globalReplicationStats.getAllLatest(dataUsageInfo.BucketsUsage) + bucketReplStats = globalReplicationStats.Load().getAllLatest(dataUsageInfo.BucketsUsage) } - for bucket, usage := range dataUsageInfo.BucketsUsage { + buckets := mapKeysSorted(dataUsageInfo.BucketsUsage) + if len(buckets) > v2MetricsMaxBuckets { + buckets = buckets[:v2MetricsMaxBuckets] + } + for _, bucket := range buckets { + usage := dataUsageInfo.BucketsUsage[bucket] quota, _ := globalBucketQuotaSys.Get(ctx, bucket) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getBucketUsageTotalBytesMD(), Value: float64(usage.Size), VariableLabels: map[string]string{"bucket": bucket}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getBucketUsageObjectsTotalMD(), Value: float64(usage.ObjectsCount), VariableLabels: map[string]string{"bucket": bucket}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getBucketUsageVersionsTotalMD(), Value: float64(usage.VersionsCount), VariableLabels: map[string]string{"bucket": bucket}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getBucketUsageDeleteMarkersTotalMD(), Value: float64(usage.DeleteMarkersCount), VariableLabels: map[string]string{"bucket": bucket}, }) if quota != nil && quota.Quota > 0 { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getBucketUsageQuotaTotalBytesMD(), Value: float64(quota.Quota), VariableLabels: map[string]string{"bucket": bucket}, @@ -3286,112 +3326,112 @@ func getBucketUsageMetrics(opts MetricsGroupOpts) *MetricsGroup { s, ok := bucketReplStats[bucket] if ok { stats = s.ReplicationStats - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getRepReceivedBytesMD(bucketMetricNamespace), Value: float64(stats.ReplicaSize), VariableLabels: map[string]string{"bucket": bucket}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getRepReceivedOperationsMD(bucketMetricNamespace), Value: float64(stats.ReplicaCount), VariableLabels: map[string]string{"bucket": bucket}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterReplProxiedGetOperationsMD(bucketMetricNamespace), Value: float64(s.ProxyStats.GetTotal), VariableLabels: map[string]string{"bucket": bucket}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterReplProxiedHeadOperationsMD(bucketMetricNamespace), Value: float64(s.ProxyStats.HeadTotal), VariableLabels: map[string]string{"bucket": bucket}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterReplProxiedPutTaggingOperationsMD(bucketMetricNamespace), Value: float64(s.ProxyStats.PutTagTotal), VariableLabels: map[string]string{"bucket": bucket}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterReplProxiedGetTaggingOperationsMD(bucketMetricNamespace), Value: float64(s.ProxyStats.GetTagTotal), VariableLabels: 
map[string]string{"bucket": bucket}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterReplProxiedRmvTaggingOperationsMD(bucketMetricNamespace), Value: float64(s.ProxyStats.RmvTagTotal), VariableLabels: map[string]string{"bucket": bucket}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterReplProxiedGetFailedOperationsMD(bucketMetricNamespace), Value: float64(s.ProxyStats.GetFailedTotal), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterReplProxiedHeadFailedOperationsMD(bucketMetricNamespace), Value: float64(s.ProxyStats.HeadFailedTotal), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterReplProxiedPutTaggingFailedOperationsMD(bucketMetricNamespace), Value: float64(s.ProxyStats.PutTagFailedTotal), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterReplProxiedGetTaggingFailedOperationsMD(bucketMetricNamespace), Value: float64(s.ProxyStats.GetTagFailedTotal), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterReplProxiedRmvTaggingFailedOperationsMD(bucketMetricNamespace), Value: float64(s.ProxyStats.RmvTagFailedTotal), }) } if stats.hasReplicationUsage() { for arn, stat := range stats.Stats { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getRepFailedBytesLastMinuteMD(bucketMetricNamespace), Value: float64(stat.Failed.LastMinute.Bytes), VariableLabels: map[string]string{"bucket": bucket, "targetArn": arn}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getRepFailedOperationsLastMinuteMD(bucketMetricNamespace), Value: stat.Failed.LastMinute.Count, VariableLabels: map[string]string{"bucket": bucket, "targetArn": arn}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getRepFailedBytesLastHourMD(bucketMetricNamespace), Value: float64(stat.Failed.LastHour.Bytes), VariableLabels: map[string]string{"bucket": bucket, "targetArn": arn}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getRepFailedOperationsLastHourMD(bucketMetricNamespace), Value: stat.Failed.LastHour.Count, VariableLabels: map[string]string{"bucket": bucket, "targetArn": arn}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getRepFailedBytesTotalMD(bucketMetricNamespace), Value: float64(stat.Failed.Totals.Bytes), VariableLabels: map[string]string{"bucket": bucket, "targetArn": arn}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getRepFailedOperationsTotalMD(bucketMetricNamespace), Value: stat.Failed.Totals.Count, VariableLabels: map[string]string{"bucket": bucket, "targetArn": arn}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getRepSentBytesMD(bucketMetricNamespace), Value: float64(stat.ReplicatedSize), VariableLabels: map[string]string{"bucket": bucket, "targetArn": arn}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getRepSentOperationsMD(bucketMetricNamespace), Value: float64(stat.ReplicatedCount), VariableLabels: map[string]string{"bucket": bucket, "targetArn": arn}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getBucketRepLatencyMD(), HistogramBucketLabel: 
"range", Histogram: stat.Latency.getUploadLatency(), VariableLabels: map[string]string{"bucket": bucket, "operation": "upload", "targetArn": arn}, }) if c, ok := stat.Failed.ErrCounts["AccessDenied"]; ok { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterRepCredentialErrorsMD(bucketMetricNamespace), Value: float64(c), VariableLabels: map[string]string{"bucket": bucket, "targetArn": arn}, @@ -3400,21 +3440,21 @@ func getBucketUsageMetrics(opts MetricsGroupOpts) *MetricsGroup { } } } - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getBucketObjectDistributionMD(), Histogram: usage.ObjectSizesHistogram, HistogramBucketLabel: "range", VariableLabels: map[string]string{"bucket": bucket}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getBucketObjectVersionsMD(), Histogram: usage.ObjectVersionsHistogram, HistogramBucketLabel: "range", VariableLabels: map[string]string{"bucket": bucket}, }) } - return + return metrics }) return mg } @@ -3449,26 +3489,26 @@ func getClusterTransitionedVersionsMD() MetricDescription { } } -func getClusterTierMetrics(opts MetricsGroupOpts) *MetricsGroup { - mg := &MetricsGroup{ +func getClusterTierMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 1 * time.Minute, metricsGroupOpts: opts, } - mg.RegisterRead(func(ctx context.Context) (metrics []Metric) { + mg.RegisterRead(func(ctx context.Context) (metrics []MetricV2) { objLayer := newObjectLayerFn() if globalTierConfigMgr.Empty() { - return + return metrics } dui, err := loadDataUsageFromBackend(ctx, objLayer) if err != nil { - logger.LogIf(ctx, err) - return + metricsLogIf(ctx, err) + return metrics } // data usage has not captured any tier stats yet. 
if dui.TierStats == nil { - return + return metrics } return dui.tierMetrics() @@ -3476,65 +3516,71 @@ func getClusterTierMetrics(opts MetricsGroupOpts) *MetricsGroup { return mg } -func getLocalStorageMetrics(opts MetricsGroupOpts) *MetricsGroup { - mg := &MetricsGroup{ +func getLocalStorageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 1 * time.Minute, metricsGroupOpts: opts, } - mg.RegisterRead(func(ctx context.Context) (metrics []Metric) { + mg.RegisterRead(func(ctx context.Context) (metrics []MetricV2) { objLayer := newObjectLayerFn() - metrics = make([]Metric, 0, 50) + metrics = make([]MetricV2, 0, 50) storageInfo := objLayer.LocalStorageInfo(ctx, true) onlineDrives, offlineDrives := getOnlineOfflineDisksStats(storageInfo.Disks) totalDrives := onlineDrives.Merge(offlineDrives) for _, disk := range storageInfo.Disks { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getNodeDriveUsedBytesMD(), Value: float64(disk.UsedSpace), VariableLabels: map[string]string{"drive": disk.DrivePath}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getNodeDriveFreeBytesMD(), Value: float64(disk.AvailableSpace), VariableLabels: map[string]string{"drive": disk.DrivePath}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getNodeDriveTotalBytesMD(), Value: float64(disk.TotalSpace), VariableLabels: map[string]string{"drive": disk.DrivePath}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getNodeDrivesFreeInodesMD(), Value: float64(disk.FreeInodes), VariableLabels: map[string]string{"drive": disk.DrivePath}, }) if disk.Metrics != nil { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getNodeDriveTimeoutErrorsMD(), Value: float64(disk.Metrics.TotalErrorsTimeout), VariableLabels: map[string]string{"drive": disk.DrivePath}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ + Description: getNodeDriveIOErrorsMD(), + Value: float64(disk.Metrics.TotalErrorsAvailability - disk.Metrics.TotalErrorsTimeout), + VariableLabels: map[string]string{"drive": disk.DrivePath}, + }) + + metrics = append(metrics, MetricV2{ Description: getNodeDriveAvailabilityErrorsMD(), Value: float64(disk.Metrics.TotalErrorsAvailability), VariableLabels: map[string]string{"drive": disk.DrivePath}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getNodeDriveWaitingIOMD(), Value: float64(disk.Metrics.TotalWaiting), VariableLabels: map[string]string{"drive": disk.DrivePath}, }) for apiName, latency := range disk.Metrics.LastMinute { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getNodeDriveAPILatencyMD(), Value: float64(latency.Avg().Microseconds()), VariableLabels: map[string]string{"drive": disk.DrivePath, "api": "storage." 
+ apiName}, @@ -3543,32 +3589,32 @@ func getLocalStorageMetrics(opts MetricsGroupOpts) *MetricsGroup { } } - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getNodeDrivesOfflineTotalMD(), Value: float64(offlineDrives.Sum()), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getNodeDrivesOnlineTotalMD(), Value: float64(onlineDrives.Sum()), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getNodeDrivesTotalMD(), Value: float64(totalDrives.Sum()), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getNodeStandardParityMD(), Value: float64(storageInfo.Backend.StandardSCParity), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getNodeRRSParityMD(), Value: float64(storageInfo.Backend.RRSCParity), }) - return + return metrics }) return mg } @@ -3643,20 +3689,20 @@ func getClusterErasureSetHealingDrivesMD() MetricDescription { } } -func getClusterHealthMetrics(opts MetricsGroupOpts) *MetricsGroup { - mg := &MetricsGroup{ +func getClusterHealthMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 10 * time.Second, metricsGroupOpts: opts, } - mg.RegisterRead(func(ctx context.Context) (metrics []Metric) { + mg.RegisterRead(func(ctx context.Context) (metrics []MetricV2) { objLayer := newObjectLayerFn() opts := HealthOptions{} result := objLayer.Health(ctx, opts) - metrics = make([]Metric, 0, 2+4*len(result.ESHealth)) + metrics = make([]MetricV2, 0, 2+4*len(result.ESHealth)) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterWriteQuorumMD(), Value: float64(result.WriteQuorum), }) @@ -3666,7 +3712,7 @@ func getClusterHealthMetrics(opts MetricsGroupOpts) *MetricsGroup { health = 0 } - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterHealthStatusMD(), Value: float64(health), }) @@ -3676,22 +3722,22 @@ func getClusterHealthMetrics(opts MetricsGroupOpts) *MetricsGroup { "pool": strconv.Itoa(h.PoolID), "set": strconv.Itoa(h.SetID), } - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterErasureSetReadQuorumMD(), VariableLabels: labels, Value: float64(h.ReadQuorum), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterErasureSetWriteQuorumMD(), VariableLabels: labels, Value: float64(h.WriteQuorum), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterErasureSetOnlineDrivesMD(), VariableLabels: labels, Value: float64(h.HealthyDrives), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterErasureSetHealingDrivesMD(), VariableLabels: labels, Value: float64(h.HealingDrives), @@ -3702,26 +3748,26 @@ func getClusterHealthMetrics(opts MetricsGroupOpts) *MetricsGroup { health = 0 } - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterErasureSetHealthStatusMD(), VariableLabels: labels, Value: float64(health), }) } - return + return metrics }) return mg } -func getBatchJobsMetrics(opts MetricsGroupOpts) *MetricsGroup { - mg := &MetricsGroup{ +func getBatchJobsMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 10 * time.Second, metricsGroupOpts: opts, } - mg.RegisterRead(func(ctx context.Context) (metrics []Metric) { + 
mg.RegisterRead(func(ctx context.Context) (metrics []MetricV2) { var m madmin.RealtimeMetrics mLocal := collectLocalMetrics(madmin.MetricsBatchJobs, collectMetricsOpts{}) m.Merge(&mLocal) @@ -3730,7 +3776,7 @@ func getBatchJobsMetrics(opts MetricsGroupOpts) *MetricsGroup { m.Merge(&mRemote) if m.Aggregated.BatchJobs == nil { - return + return metrics } for _, mj := range m.Aggregated.BatchJobs.Jobs { @@ -3752,7 +3798,7 @@ func getBatchJobsMetrics(opts MetricsGroupOpts) *MetricsGroup { bucket = mj.Expired.Bucket } metrics = append(metrics, - Metric{ + MetricV2{ Description: MetricDescription{ Namespace: bucketMetricNamespace, Subsystem: "batch", @@ -3763,7 +3809,7 @@ func getBatchJobsMetrics(opts MetricsGroupOpts) *MetricsGroup { Value: objects, VariableLabels: map[string]string{"bucket": bucket, "jobId": mj.JobID}, }, - Metric{ + MetricV2{ Description: MetricDescription{ Namespace: bucketMetricNamespace, Subsystem: "batch", @@ -3776,71 +3822,71 @@ func getBatchJobsMetrics(opts MetricsGroupOpts) *MetricsGroup { }, ) } - return + return metrics }) return mg } -func getClusterStorageMetrics(opts MetricsGroupOpts) *MetricsGroup { - mg := &MetricsGroup{ +func getClusterStorageMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 1 * time.Minute, metricsGroupOpts: opts, } - mg.RegisterRead(func(ctx context.Context) (metrics []Metric) { + mg.RegisterRead(func(ctx context.Context) (metrics []MetricV2) { objLayer := newObjectLayerFn() // Fetch disk space info, ignore errors - metrics = make([]Metric, 0, 10) + metrics = make([]MetricV2, 0, 10) storageInfo := objLayer.StorageInfo(ctx, true) onlineDrives, offlineDrives := getOnlineOfflineDisksStats(storageInfo.Disks) totalDrives := onlineDrives.Merge(offlineDrives) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterCapacityTotalBytesMD(), Value: float64(GetTotalCapacity(storageInfo.Disks)), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterCapacityFreeBytesMD(), Value: float64(GetTotalCapacityFree(storageInfo.Disks)), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterCapacityUsageBytesMD(), Value: float64(GetTotalUsableCapacity(storageInfo.Disks, storageInfo)), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterCapacityUsageFreeBytesMD(), Value: float64(GetTotalUsableCapacityFree(storageInfo.Disks, storageInfo)), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterDrivesOfflineTotalMD(), Value: float64(offlineDrives.Sum()), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterDrivesOnlineTotalMD(), Value: float64(onlineDrives.Sum()), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: getClusterDrivesTotalMD(), Value: float64(totalDrives.Sum()), }) - return + return metrics }) return mg } -func getKMSNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { - mg := &MetricsGroup{ +func getKMSNodeMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 10 * time.Second, metricsGroupOpts: opts, } - mg.RegisterRead(func(ctx context.Context) (metrics []Metric) { + mg.RegisterRead(func(ctx context.Context) (metrics []MetricV2) { const ( Online = 1 Offline = 0 @@ -3854,12 +3900,12 @@ func getKMSNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { } _, err := 
GlobalKMS.Metrics(ctx) if _, ok := kes.IsConnError(err); ok { - return []Metric{{ + return []MetricV2{{ Description: desc, Value: float64(Offline), }} } - return []Metric{{ + return []MetricV2{{ Description: desc, Value: float64(Online), }} @@ -3867,13 +3913,13 @@ func getKMSNodeMetrics(opts MetricsGroupOpts) *MetricsGroup { return mg } -func getWebhookMetrics() *MetricsGroup { - mg := &MetricsGroup{ +func getWebhookMetrics() *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 10 * time.Second, } - mg.RegisterRead(func(ctx context.Context) []Metric { + mg.RegisterRead(func(ctx context.Context) []MetricV2 { tgts := append(logger.SystemTargets(), logger.AuditTargets()...) - metrics := make([]Metric, 0, len(tgts)*4) + metrics := make([]MetricV2, 0, len(tgts)*4) for _, t := range tgts { isOnline := 0 if t.IsOnline(ctx) { @@ -3883,7 +3929,7 @@ func getWebhookMetrics() *MetricsGroup { "name": t.String(), "endpoint": t.Endpoint(), } - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: clusterMetricNamespace, Subsystem: webhookSubsystem, @@ -3894,18 +3940,18 @@ func getWebhookMetrics() *MetricsGroup { VariableLabels: labels, Value: float64(isOnline), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: clusterMetricNamespace, Subsystem: webhookSubsystem, Name: webhookQueueLength, Help: "Webhook queue length", - Type: counterMetric, + Type: gaugeMetric, }, VariableLabels: labels, Value: float64(t.Stats().QueueLength), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: clusterMetricNamespace, Subsystem: webhookSubsystem, @@ -3916,7 +3962,7 @@ func getWebhookMetrics() *MetricsGroup { VariableLabels: labels, Value: float64(t.Stats().TotalMessages), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: clusterMetricNamespace, Subsystem: webhookSubsystem, @@ -3934,19 +3980,19 @@ func getWebhookMetrics() *MetricsGroup { return mg } -func getKMSMetrics(opts MetricsGroupOpts) *MetricsGroup { - mg := &MetricsGroup{ +func getKMSMetrics(opts MetricsGroupOpts) *MetricsGroupV2 { + mg := &MetricsGroupV2{ cacheInterval: 10 * time.Second, metricsGroupOpts: opts, } - mg.RegisterRead(func(ctx context.Context) []Metric { - metrics := make([]Metric, 0, 4) + mg.RegisterRead(func(ctx context.Context) []MetricV2 { + metrics := make([]MetricV2, 0, 4) metric, err := GlobalKMS.Metrics(ctx) if err != nil { return metrics } - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: clusterMetricNamespace, Subsystem: kmsSubsystem, @@ -3954,9 +4000,9 @@ func getKMSMetrics(opts MetricsGroupOpts) *MetricsGroup { Help: "Number of KMS requests that succeeded", Type: counterMetric, }, - Value: float64(metric.RequestOK), + Value: float64(metric.ReqOK), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: clusterMetricNamespace, Subsystem: kmsSubsystem, @@ -3964,9 +4010,9 @@ func getKMSMetrics(opts MetricsGroupOpts) *MetricsGroup { Help: "Number of KMS requests that failed due to some error. 
(HTTP 4xx status code)", Type: counterMetric, }, - Value: float64(metric.RequestErr), + Value: float64(metric.ReqErr), }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: MetricDescription{ Namespace: clusterMetricNamespace, Subsystem: kmsSubsystem, @@ -3974,25 +4020,14 @@ func getKMSMetrics(opts MetricsGroupOpts) *MetricsGroup { Help: "Number of KMS requests that failed due to some internal failure. (HTTP 5xx status code)", Type: counterMetric, }, - Value: float64(metric.RequestFail), + Value: float64(metric.ReqFail), }) - metrics = append(metrics, Metric{ - Description: MetricDescription{ - Namespace: clusterMetricNamespace, - Subsystem: kmsSubsystem, - Name: kmsUptime, - Help: "The time the KMS has been up and running in seconds.", - Type: counterMetric, - }, - Value: metric.UpTime.Seconds(), - }) - return metrics }) return mg } -func collectMetric(metric Metric, labels []string, values []string, metricName string, out chan<- prometheus.Metric) { +func collectMetric(metric MetricV2, labels []string, values []string, metricName string, out chan<- prometheus.Metric) { if metric.Description.Type == histogramMetric { if metric.Histogram == nil { return @@ -4013,7 +4048,7 @@ func collectMetric(metric Metric, labels []string, values []string, metricName s if err != nil { // Enable for debugging if serverDebugLog { - logger.LogOnceIf(GlobalContext, fmt.Errorf("unable to validate prometheus metric (%w) %v+%v", err, values, metric.Histogram), metricName+"-metrics-histogram") + bugLogIf(GlobalContext, fmt.Errorf("unable to validate prometheus metric (%w) %v+%v", err, values, metric.Histogram)) } } else { out <- pmetric @@ -4040,7 +4075,7 @@ func collectMetric(metric Metric, labels []string, values []string, metricName s if err != nil { // Enable for debugging if serverDebugLog { - logger.LogOnceIf(GlobalContext, fmt.Errorf("unable to validate prometheus metric (%w) %v", err, values), metricName+"-metrics") + bugLogIf(GlobalContext, fmt.Errorf("unable to validate prometheus metric (%w) %v", err, values)) } } else { out <- pmetric @@ -4049,11 +4084,11 @@ func collectMetric(metric Metric, labels []string, values []string, metricName s //msgp:ignore minioBucketCollector type minioBucketCollector struct { - metricsGroups []*MetricsGroup + metricsGroups []*MetricsGroupV2 desc *prometheus.Desc } -func newMinioBucketCollector(metricsGroups []*MetricsGroup) *minioBucketCollector { +func newMinioBucketCollector(metricsGroups []*MetricsGroupV2) *minioBucketCollector { return &minioBucketCollector{ metricsGroups: metricsGroups, desc: prometheus.NewDesc("minio_bucket_stats", "Statistics exposed by MinIO server cluster wide per bucket", nil, nil), @@ -4068,7 +4103,7 @@ func (c *minioBucketCollector) Describe(ch chan<- *prometheus.Desc) { // Collect is called by the Prometheus registry when collecting metrics. 
func (c *minioBucketCollector) Collect(out chan<- prometheus.Metric) { var wg sync.WaitGroup - publish := func(in <-chan Metric) { + publish := func(in <-chan MetricV2) { defer wg.Done() for metric := range in { labels, values := getOrderedLabelValueArrays(metric.VariableLabels) @@ -4085,11 +4120,11 @@ func (c *minioBucketCollector) Collect(out chan<- prometheus.Metric) { //msgp:ignore minioClusterCollector type minioClusterCollector struct { - metricsGroups []*MetricsGroup + metricsGroups []*MetricsGroupV2 desc *prometheus.Desc } -func newMinioClusterCollector(metricsGroups []*MetricsGroup) *minioClusterCollector { +func newMinioClusterCollector(metricsGroups []*MetricsGroupV2) *minioClusterCollector { return &minioClusterCollector{ metricsGroups: metricsGroups, desc: prometheus.NewDesc("minio_stats", "Statistics exposed by MinIO server per cluster", nil, nil), @@ -4104,7 +4139,7 @@ func (c *minioClusterCollector) Describe(ch chan<- *prometheus.Desc) { // Collect is called by the Prometheus registry when collecting metrics. func (c *minioClusterCollector) Collect(out chan<- prometheus.Metric) { var wg sync.WaitGroup - publish := func(in <-chan Metric) { + publish := func(in <-chan MetricV2) { defer wg.Done() for metric := range in { labels, values := getOrderedLabelValueArrays(metric.VariableLabels) @@ -4120,11 +4155,11 @@ func (c *minioClusterCollector) Collect(out chan<- prometheus.Metric) { } // ReportMetrics reports serialized metrics to the channel passed for the metrics generated. -func ReportMetrics(ctx context.Context, metricsGroups []*MetricsGroup) <-chan Metric { - ch := make(chan Metric) +func ReportMetrics(ctx context.Context, metricsGroups []*MetricsGroupV2) <-chan MetricV2 { + ch := make(chan MetricV2) go func() { defer xioutil.SafeClose(ch) - populateAndPublish(metricsGroups, func(m Metric) bool { + populateAndPublish(metricsGroups, func(m MetricV2) bool { if m.VariableLabels == nil { m.VariableLabels = make(map[string]string) } @@ -4146,7 +4181,7 @@ func ReportMetrics(ctx context.Context, metricsGroups []*MetricsGroup) <-chan Me // //msgp:ignore minioNodeCollector type minioNodeCollector struct { - metricsGroups []*MetricsGroup + metricsGroups []*MetricsGroupV2 desc *prometheus.Desc } @@ -4156,7 +4191,7 @@ func (c *minioNodeCollector) Describe(ch chan<- *prometheus.Desc) { } // populateAndPublish populates and then publishes the metrics generated by the generator function. 
-func populateAndPublish(metricsGroups []*MetricsGroup, publish func(m Metric) bool) { +func populateAndPublish(metricsGroups []*MetricsGroupV2, publish func(m MetricV2) bool) { for _, mg := range metricsGroups { if mg == nil { continue @@ -4174,7 +4209,7 @@ func (c *minioNodeCollector) Collect(ch chan<- prometheus.Metric) { // Expose MinIO's version information minioVersionInfo.WithLabelValues(Version, CommitID).Set(1.0) - populateAndPublish(c.metricsGroups, func(metric Metric) bool { + populateAndPublish(c.metricsGroups, func(metric MetricV2) bool { labels, values := getOrderedLabelValueArrays(metric.VariableLabels) values = append(values, globalLocalNodeName) labels = append(labels, serverName) @@ -4229,14 +4264,14 @@ func getOrderedLabelValueArrays(labelsWithValue map[string]string) (labels, valu labels = append(labels, l) values = append(values, v) } - return + return labels, values } // newMinioCollectorNode describes the collector // and returns reference of minioCollector for version 2 // It creates the Prometheus Description which is used // to define Metric and help string -func newMinioCollectorNode(metricsGroups []*MetricsGroup) *minioNodeCollector { +func newMinioCollectorNode(metricsGroups []*MetricsGroupV2) *minioNodeCollector { return &minioNodeCollector{ metricsGroups: metricsGroups, desc: prometheus.NewDesc("minio_stats", "Statistics exposed by MinIO server per node", nil, nil), @@ -4366,7 +4401,7 @@ func metricsNodeHandler() http.Handler { enc := expfmt.NewEncoder(w, contentType) for _, mf := range mfs { if err := enc.Encode(mf); err != nil { - logger.LogIf(r.Context(), err) + metricsLogIf(r.Context(), err) return } } diff --git a/cmd/metrics-v2_gen.go b/cmd/metrics-v2_gen.go index 2d9f4abe53318..9b13c4b8a7df0 100644 --- a/cmd/metrics-v2_gen.go +++ b/cmd/metrics-v2_gen.go @@ -1,216 +1,11 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package cmd + import ( "github.com/tinylib/msgp/msgp" ) -// MarshalMsg implements msgp.Marshaler -func (z *Metric) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 6 - // string "Description" - o = append(o, 0x86, 0xab, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e) - o, err = z.Description.MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Description") - return - } - // string "StaticLabels" - o = append(o, 0xac, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73) - o = msgp.AppendMapHeader(o, uint32(len(z.StaticLabels))) - for za0001, za0002 := range z.StaticLabels { - o = msgp.AppendString(o, za0001) - o = msgp.AppendString(o, za0002) - } - // string "Value" - o = append(o, 0xa5, 0x56, 0x61, 0x6c, 0x75, 0x65) - o = msgp.AppendFloat64(o, z.Value) - // string "VariableLabels" - o = append(o, 0xae, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73) - o = msgp.AppendMapHeader(o, uint32(len(z.VariableLabels))) - for za0003, za0004 := range z.VariableLabels { - o = msgp.AppendString(o, za0003) - o = msgp.AppendString(o, za0004) - } - // string "HistogramBucketLabel" - o = append(o, 0xb4, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x61, 0x62, 0x65, 0x6c) - o = msgp.AppendString(o, z.HistogramBucketLabel) - // string "Histogram" - o = append(o, 0xa9, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d) - o = msgp.AppendMapHeader(o, uint32(len(z.Histogram))) - for za0005, za0006 := range z.Histogram { - o = msgp.AppendString(o, za0005) - o = msgp.AppendUint64(o, za0006) - } - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *Metric) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Description": - bts, err = z.Description.UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "Description") - return - } - case "StaticLabels": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "StaticLabels") - return - } - if z.StaticLabels == nil { - z.StaticLabels = make(map[string]string, zb0002) - } else if len(z.StaticLabels) > 0 { - for key := range z.StaticLabels { - delete(z.StaticLabels, key) - } - } - for zb0002 > 0 { - var za0001 string - var za0002 string - zb0002-- - za0001, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "StaticLabels") - return - } - za0002, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "StaticLabels", za0001) - return - } - z.StaticLabels[za0001] = za0002 - } - case "Value": - z.Value, bts, err = msgp.ReadFloat64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Value") - return - } - case "VariableLabels": - var zb0003 uint32 - zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "VariableLabels") - return - } - if z.VariableLabels == nil { - z.VariableLabels = make(map[string]string, zb0003) - } else if len(z.VariableLabels) > 0 { - for key := range z.VariableLabels { - delete(z.VariableLabels, key) - } - } - for zb0003 > 0 { - var za0003 string - var 
za0004 string - zb0003-- - za0003, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "VariableLabels") - return - } - za0004, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "VariableLabels", za0003) - return - } - z.VariableLabels[za0003] = za0004 - } - case "HistogramBucketLabel": - z.HistogramBucketLabel, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "HistogramBucketLabel") - return - } - case "Histogram": - var zb0004 uint32 - zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Histogram") - return - } - if z.Histogram == nil { - z.Histogram = make(map[string]uint64, zb0004) - } else if len(z.Histogram) > 0 { - for key := range z.Histogram { - delete(z.Histogram, key) - } - } - for zb0004 > 0 { - var za0005 string - var za0006 uint64 - zb0004-- - za0005, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Histogram") - return - } - za0006, bts, err = msgp.ReadUint64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Histogram", za0005) - return - } - z.Histogram[za0005] = za0006 - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *Metric) Msgsize() (s int) { - s = 1 + 12 + z.Description.Msgsize() + 13 + msgp.MapHeaderSize - if z.StaticLabels != nil { - for za0001, za0002 := range z.StaticLabels { - _ = za0002 - s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) - } - } - s += 6 + msgp.Float64Size + 15 + msgp.MapHeaderSize - if z.VariableLabels != nil { - for za0003, za0004 := range z.VariableLabels { - _ = za0004 - s += msgp.StringPrefixSize + len(za0003) + msgp.StringPrefixSize + len(za0004) - } - } - s += 21 + msgp.StringPrefixSize + len(z.HistogramBucketLabel) + 10 + msgp.MapHeaderSize - if z.Histogram != nil { - for za0005, za0006 := range z.Histogram { - _ = za0006 - s += msgp.StringPrefixSize + len(za0005) + msgp.Uint64Size - } - } - return -} - // MarshalMsg implements msgp.Marshaler func (z *MetricDescription) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) @@ -295,7 +90,7 @@ func (z *MetricDescription) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "Type") return } - z.Type = MetricType(zb0005) + z.Type = MetricTypeV2(zb0005) } default: bts, err = msgp.Skip(bts) @@ -400,14 +195,14 @@ func (z MetricSubsystem) Msgsize() (s int) { } // MarshalMsg implements msgp.Marshaler -func (z MetricType) MarshalMsg(b []byte) (o []byte, err error) { +func (z MetricTypeV2) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) o = msgp.AppendString(o, string(z)) return } // UnmarshalMsg implements msgp.Unmarshaler -func (z *MetricType) UnmarshalMsg(bts []byte) (o []byte, err error) { +func (z *MetricTypeV2) UnmarshalMsg(bts []byte) (o []byte, err error) { { var zb0001 string zb0001, bts, err = msgp.ReadStringBytes(bts) @@ -415,37 +210,61 @@ func (z *MetricType) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err) return } - (*z) = MetricType(zb0001) + (*z) = MetricTypeV2(zb0001) } o = bts return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z MetricType) Msgsize() (s int) { +func (z MetricTypeV2) Msgsize() (s int) { s = 
msgp.StringPrefixSize + len(string(z)) return } // MarshalMsg implements msgp.Marshaler -func (z *MetricsGroup) MarshalMsg(b []byte) (o []byte, err error) { +func (z *MetricV2) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "cacheInterval" - o = append(o, 0x82, 0xad, 0x63, 0x61, 0x63, 0x68, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c) - o = msgp.AppendDuration(o, z.cacheInterval) - // string "metricsGroupOpts" - o = append(o, 0xb0, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4f, 0x70, 0x74, 0x73) - o, err = z.metricsGroupOpts.MarshalMsg(o) + // map header, size 6 + // string "Description" + o = append(o, 0x86, 0xab, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e) + o, err = z.Description.MarshalMsg(o) if err != nil { - err = msgp.WrapError(err, "metricsGroupOpts") + err = msgp.WrapError(err, "Description") return } + // string "StaticLabels" + o = append(o, 0xac, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.StaticLabels))) + for za0001, za0002 := range z.StaticLabels { + o = msgp.AppendString(o, za0001) + o = msgp.AppendString(o, za0002) + } + // string "Value" + o = append(o, 0xa5, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendFloat64(o, z.Value) + // string "VariableLabels" + o = append(o, 0xae, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.VariableLabels))) + for za0003, za0004 := range z.VariableLabels { + o = msgp.AppendString(o, za0003) + o = msgp.AppendString(o, za0004) + } + // string "HistogramBucketLabel" + o = append(o, 0xb4, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x61, 0x62, 0x65, 0x6c) + o = msgp.AppendString(o, z.HistogramBucketLabel) + // string "Histogram" + o = append(o, 0xa9, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d) + o = msgp.AppendMapHeader(o, uint32(len(z.Histogram))) + for za0005, za0006 := range z.Histogram { + o = msgp.AppendString(o, za0005) + o = msgp.AppendUint64(o, za0006) + } return } // UnmarshalMsg implements msgp.Unmarshaler -func (z *MetricsGroup) UnmarshalMsg(bts []byte) (o []byte, err error) { +func (z *MetricV2) UnmarshalMsg(bts []byte) (o []byte, err error) { var field []byte _ = field var zb0001 uint32 @@ -462,18 +281,108 @@ func (z *MetricsGroup) UnmarshalMsg(bts []byte) (o []byte, err error) { return } switch msgp.UnsafeString(field) { - case "cacheInterval": - z.cacheInterval, bts, err = msgp.ReadDurationBytes(bts) + case "Description": + bts, err = z.Description.UnmarshalMsg(bts) if err != nil { - err = msgp.WrapError(err, "cacheInterval") + err = msgp.WrapError(err, "Description") return } - case "metricsGroupOpts": - bts, err = z.metricsGroupOpts.UnmarshalMsg(bts) + case "StaticLabels": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) if err != nil { - err = msgp.WrapError(err, "metricsGroupOpts") + err = msgp.WrapError(err, "StaticLabels") + return + } + if z.StaticLabels == nil { + z.StaticLabels = make(map[string]string, zb0002) + } else if len(z.StaticLabels) > 0 { + clear(z.StaticLabels) + } + for zb0002 > 0 { + var za0002 string + zb0002-- + var za0001 string + za0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "StaticLabels") + return + } + za0002, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, 
"StaticLabels", za0001) + return + } + z.StaticLabels[za0001] = za0002 + } + case "Value": + z.Value, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Value") return } + case "VariableLabels": + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "VariableLabels") + return + } + if z.VariableLabels == nil { + z.VariableLabels = make(map[string]string, zb0003) + } else if len(z.VariableLabels) > 0 { + clear(z.VariableLabels) + } + for zb0003 > 0 { + var za0004 string + zb0003-- + var za0003 string + za0003, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "VariableLabels") + return + } + za0004, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "VariableLabels", za0003) + return + } + z.VariableLabels[za0003] = za0004 + } + case "HistogramBucketLabel": + z.HistogramBucketLabel, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "HistogramBucketLabel") + return + } + case "Histogram": + var zb0004 uint32 + zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Histogram") + return + } + if z.Histogram == nil { + z.Histogram = make(map[string]uint64, zb0004) + } else if len(z.Histogram) > 0 { + clear(z.Histogram) + } + for zb0004 > 0 { + var za0006 uint64 + zb0004-- + var za0005 string + za0005, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Histogram") + return + } + za0006, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Histogram", za0005) + return + } + z.Histogram[za0005] = za0006 + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -487,8 +396,28 @@ func (z *MetricsGroup) UnmarshalMsg(bts []byte) (o []byte, err error) { } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *MetricsGroup) Msgsize() (s int) { - s = 1 + 14 + msgp.DurationSize + 17 + z.metricsGroupOpts.Msgsize() +func (z *MetricV2) Msgsize() (s int) { + s = 1 + 12 + z.Description.Msgsize() + 13 + msgp.MapHeaderSize + if z.StaticLabels != nil { + for za0001, za0002 := range z.StaticLabels { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) + } + } + s += 6 + msgp.Float64Size + 15 + msgp.MapHeaderSize + if z.VariableLabels != nil { + for za0003, za0004 := range z.VariableLabels { + _ = za0004 + s += msgp.StringPrefixSize + len(za0003) + msgp.StringPrefixSize + len(za0004) + } + } + s += 21 + msgp.StringPrefixSize + len(z.HistogramBucketLabel) + 10 + msgp.MapHeaderSize + if z.Histogram != nil { + for za0005, za0006 := range z.Histogram { + _ = za0006 + s += msgp.StringPrefixSize + len(za0005) + msgp.Uint64Size + } + } return } @@ -642,3 +571,68 @@ func (z *MetricsGroupOpts) Msgsize() (s int) { s = 1 + 22 + msgp.BoolSize + 24 + msgp.BoolSize + 31 + msgp.BoolSize + 28 + msgp.BoolSize + 16 + msgp.BoolSize + 11 + msgp.BoolSize + 29 + msgp.BoolSize + 19 + msgp.BoolSize + 23 + msgp.BoolSize + 26 + msgp.BoolSize + 32 + msgp.BoolSize + 22 + msgp.BoolSize return } + +// MarshalMsg implements msgp.Marshaler +func (z *MetricsGroupV2) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 2 + // string "cacheInterval" + o = append(o, 0x82, 0xad, 0x63, 0x61, 0x63, 0x68, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c) + o = msgp.AppendDuration(o, z.cacheInterval) + // string 
"metricsGroupOpts" + o = append(o, 0xb0, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4f, 0x70, 0x74, 0x73) + o, err = z.metricsGroupOpts.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "metricsGroupOpts") + return + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *MetricsGroupV2) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "cacheInterval": + z.cacheInterval, bts, err = msgp.ReadDurationBytes(bts) + if err != nil { + err = msgp.WrapError(err, "cacheInterval") + return + } + case "metricsGroupOpts": + bts, err = z.metricsGroupOpts.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "metricsGroupOpts") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *MetricsGroupV2) Msgsize() (s int) { + s = 1 + 14 + msgp.DurationSize + 17 + z.metricsGroupOpts.Msgsize() + return +} diff --git a/cmd/metrics-v2_gen_test.go b/cmd/metrics-v2_gen_test.go index c55137cc2f587..026c16d768b9d 100644 --- a/cmd/metrics-v2_gen_test.go +++ b/cmd/metrics-v2_gen_test.go @@ -1,15 +1,15 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "testing" "github.com/tinylib/msgp/msgp" ) -func TestMarshalUnmarshalMetric(t *testing.T) { - v := Metric{} +func TestMarshalUnmarshalMetricDescription(t *testing.T) { + v := MetricDescription{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -31,8 +31,8 @@ func TestMarshalUnmarshalMetric(t *testing.T) { } } -func BenchmarkMarshalMsgMetric(b *testing.B) { - v := Metric{} +func BenchmarkMarshalMsgMetricDescription(b *testing.B) { + v := MetricDescription{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -40,8 +40,8 @@ func BenchmarkMarshalMsgMetric(b *testing.B) { } } -func BenchmarkAppendMsgMetric(b *testing.B) { - v := Metric{} +func BenchmarkAppendMsgMetricDescription(b *testing.B) { + v := MetricDescription{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -52,8 +52,8 @@ func BenchmarkAppendMsgMetric(b *testing.B) { } } -func BenchmarkUnmarshalMetric(b *testing.B) { - v := Metric{} +func BenchmarkUnmarshalMetricDescription(b *testing.B) { + v := MetricDescription{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -66,8 +66,8 @@ func BenchmarkUnmarshalMetric(b *testing.B) { } } -func TestMarshalUnmarshalMetricDescription(t *testing.T) { - v := MetricDescription{} +func TestMarshalUnmarshalMetricV2(t *testing.T) { + v := MetricV2{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -89,8 +89,8 @@ func TestMarshalUnmarshalMetricDescription(t *testing.T) { } } -func BenchmarkMarshalMsgMetricDescription(b *testing.B) { - v := MetricDescription{} +func BenchmarkMarshalMsgMetricV2(b *testing.B) { + v := MetricV2{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -98,8 +98,8 @@ func BenchmarkMarshalMsgMetricDescription(b *testing.B) { } } -func BenchmarkAppendMsgMetricDescription(b *testing.B) { - v := 
MetricDescription{} +func BenchmarkAppendMsgMetricV2(b *testing.B) { + v := MetricV2{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -110,8 +110,8 @@ func BenchmarkAppendMsgMetricDescription(b *testing.B) { } } -func BenchmarkUnmarshalMetricDescription(b *testing.B) { - v := MetricDescription{} +func BenchmarkUnmarshalMetricV2(b *testing.B) { + v := MetricV2{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -124,8 +124,8 @@ func BenchmarkUnmarshalMetricDescription(b *testing.B) { } } -func TestMarshalUnmarshalMetricsGroup(t *testing.T) { - v := MetricsGroup{} +func TestMarshalUnmarshalMetricsGroupOpts(t *testing.T) { + v := MetricsGroupOpts{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -147,8 +147,8 @@ func TestMarshalUnmarshalMetricsGroup(t *testing.T) { } } -func BenchmarkMarshalMsgMetricsGroup(b *testing.B) { - v := MetricsGroup{} +func BenchmarkMarshalMsgMetricsGroupOpts(b *testing.B) { + v := MetricsGroupOpts{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -156,8 +156,8 @@ func BenchmarkMarshalMsgMetricsGroup(b *testing.B) { } } -func BenchmarkAppendMsgMetricsGroup(b *testing.B) { - v := MetricsGroup{} +func BenchmarkAppendMsgMetricsGroupOpts(b *testing.B) { + v := MetricsGroupOpts{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -168,8 +168,8 @@ func BenchmarkAppendMsgMetricsGroup(b *testing.B) { } } -func BenchmarkUnmarshalMetricsGroup(b *testing.B) { - v := MetricsGroup{} +func BenchmarkUnmarshalMetricsGroupOpts(b *testing.B) { + v := MetricsGroupOpts{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -182,8 +182,8 @@ func BenchmarkUnmarshalMetricsGroup(b *testing.B) { } } -func TestMarshalUnmarshalMetricsGroupOpts(t *testing.T) { - v := MetricsGroupOpts{} +func TestMarshalUnmarshalMetricsGroupV2(t *testing.T) { + v := MetricsGroupV2{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -205,8 +205,8 @@ func TestMarshalUnmarshalMetricsGroupOpts(t *testing.T) { } } -func BenchmarkMarshalMsgMetricsGroupOpts(b *testing.B) { - v := MetricsGroupOpts{} +func BenchmarkMarshalMsgMetricsGroupV2(b *testing.B) { + v := MetricsGroupV2{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -214,8 +214,8 @@ func BenchmarkMarshalMsgMetricsGroupOpts(b *testing.B) { } } -func BenchmarkAppendMsgMetricsGroupOpts(b *testing.B) { - v := MetricsGroupOpts{} +func BenchmarkAppendMsgMetricsGroupV2(b *testing.B) { + v := MetricsGroupV2{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -226,8 +226,8 @@ func BenchmarkAppendMsgMetricsGroupOpts(b *testing.B) { } } -func BenchmarkUnmarshalMetricsGroupOpts(b *testing.B) { - v := MetricsGroupOpts{} +func BenchmarkUnmarshalMetricsGroupV2(b *testing.B) { + v := MetricsGroupV2{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) diff --git a/cmd/metrics-v2_test.go b/cmd/metrics-v2_test.go index 03f01e85f6b31..a3994fd082213 100644 --- a/cmd/metrics-v2_test.go +++ b/cmd/metrics-v2_test.go @@ -18,13 +18,15 @@ package cmd import ( + "slices" + "strings" "testing" "time" "github.com/prometheus/client_golang/prometheus" ) -func TestGetHistogramMetrics(t *testing.T) { +func TestGetHistogramMetrics_BucketCount(t *testing.T) { histBuckets := []float64{0.05, 0.1, 0.25, 0.5, 0.75} labels := []string{"GetObject", "PutObject", "CopyObject", "CompleteMultipartUpload"} ttfbHist := 
prometheus.NewHistogramVec( @@ -74,15 +76,140 @@ func TestGetHistogramMetrics(t *testing.T) { // Send observations once every 1ms, to simulate delay between // observations. This is to test the channel based // synchronization used internally. - select { - case <-ticker.C: - ttfbHist.With(prometheus.Labels{"api": obs.label}).Observe(obs.val) - } + <-ticker.C + ttfbHist.With(prometheus.Labels{"api": obs.label}).Observe(obs.val) + } + + metrics := getHistogramMetrics(ttfbHist, getBucketTTFBDistributionMD(), false, false) + // additional labels for +Inf for all histogram metrics + if expPoints := len(labels) * (len(histBuckets) + 1); expPoints != len(metrics) { + t.Fatalf("Expected %v data points when toLowerAPILabels=false but got %v", expPoints, len(metrics)) } - metrics := getHistogramMetrics(ttfbHist, getBucketTTFBDistributionMD(), false) + metrics = getHistogramMetrics(ttfbHist, getBucketTTFBDistributionMD(), true, false) // additional labels for +Inf for all histogram metrics if expPoints := len(labels) * (len(histBuckets) + 1); expPoints != len(metrics) { - t.Fatalf("Expected %v data points but got %v", expPoints, len(metrics)) + t.Fatalf("Expected %v data points when toLowerAPILabels=true but got %v", expPoints, len(metrics)) + } +} + +func TestGetHistogramMetrics_Values(t *testing.T) { + histBuckets := []float64{0.50, 5.00} + labels := []string{"PutObject", "CopyObject"} + ttfbHist := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "s3_ttfb_seconds", + Help: "Time taken by requests served by current MinIO server instance", + Buckets: histBuckets, + }, + []string{"api"}, + ) + observations := []struct { + val float64 + label string + }{ + { + val: 0.02, + label: labels[0], + }, + { + val: 0.19, + label: labels[1], + }, + { + val: 0.31, + label: labels[1], + }, + { + val: 0.61, + label: labels[0], + }, + { + val: 6.79, + label: labels[1], + }, + } + ticker := time.NewTicker(1 * time.Millisecond) + defer ticker.Stop() + for _, obs := range observations { + // Send observations once every 1ms, to simulate delay between + // observations. This is to test the channel based + // synchronization used internally. 
+ <-ticker.C + ttfbHist.With(prometheus.Labels{"api": obs.label}).Observe(obs.val) + } + + // Accumulate regular-cased API label metrics for 'PutObject' for deeper verification + metrics := getHistogramMetrics(ttfbHist, getBucketTTFBDistributionMD(), false, false) + capitalPutObjects := make([]MetricV2, 0, len(histBuckets)+1) + for _, metric := range metrics { + if value := metric.VariableLabels["api"]; value == "PutObject" { + capitalPutObjects = append(capitalPutObjects, metric) + } + } + if expMetricsPerAPI := len(histBuckets) + 1; expMetricsPerAPI != len(capitalPutObjects) { + t.Fatalf("Expected %d api=PutObject metrics but got %d", expMetricsPerAPI, len(capitalPutObjects)) + } + + // Deterministic ordering + slices.SortFunc(capitalPutObjects, func(a MetricV2, b MetricV2) int { + le1 := a.VariableLabels["le"] + le2 := b.VariableLabels["le"] + return strings.Compare(le1, le2) + }) + if le := capitalPutObjects[0].VariableLabels["le"]; le != "0.500" { + t.Errorf("Expected le='0.500' api=PutObject metrics but got '%v'", le) + } + if value := capitalPutObjects[0].Value; value != 1 { + t.Errorf("Expected le='0.500' api=PutObject value to be 1 but got '%v'", value) + } + if le := capitalPutObjects[1].VariableLabels["le"]; le != "5.000" { + t.Errorf("Expected le='5.000' api=PutObject metrics but got '%v'", le) + } + if value := capitalPutObjects[1].Value; value != 2 { + t.Errorf("Expected le='5.000' api=PutObject value to be 2 but got '%v'", value) + } + if le := capitalPutObjects[2].VariableLabels["le"]; le != "+Inf" { + t.Errorf("Expected le='+Inf' api=PutObject metrics but got '%v'", le) + } + if value := capitalPutObjects[2].Value; value != 2 { + t.Errorf("Expected le='+Inf' api=PutObject value to be 2 but got '%v'", value) + } + + // Accumulate lower-cased API label metrics for 'copyobject' for deeper verification + metrics = getHistogramMetrics(ttfbHist, getBucketTTFBDistributionMD(), true, false) + lowerCopyObjects := make([]MetricV2, 0, len(histBuckets)+1) + for _, metric := range metrics { + if value := metric.VariableLabels["api"]; value == "copyobject" { + lowerCopyObjects = append(lowerCopyObjects, metric) + } + } + if expMetricsPerAPI := len(histBuckets) + 1; expMetricsPerAPI != len(lowerCopyObjects) { + t.Fatalf("Expected %d api=copyobject metrics but got %d", expMetricsPerAPI, len(lowerCopyObjects)) + } + + // Deterministic ordering + slices.SortFunc(lowerCopyObjects, func(a MetricV2, b MetricV2) int { + le1 := a.VariableLabels["le"] + le2 := b.VariableLabels["le"] + return strings.Compare(le1, le2) + }) + if le := lowerCopyObjects[0].VariableLabels["le"]; le != "0.500" { + t.Errorf("Expected le='0.500' api=copyobject metrics but got '%v'", le) + } + if value := lowerCopyObjects[0].Value; value != 2 { + t.Errorf("Expected le='0.500' api=copyobject value to be 2 but got '%v'", value) + } + if le := lowerCopyObjects[1].VariableLabels["le"]; le != "5.000" { + t.Errorf("Expected le='5.000' api=copyobject metrics but got '%v'", le) + } + if value := lowerCopyObjects[1].Value; value != 2 { + t.Errorf("Expected le='5.000' api=copyobject value to be 2 but got '%v'", value) + } + if le := lowerCopyObjects[2].VariableLabels["le"]; le != "+Inf" { + t.Errorf("Expected le='+Inf' api=copyobject metrics but got '%v'", le) + } + if value := lowerCopyObjects[2].Value; value != 3 { + t.Errorf("Expected le='+Inf' api=copyobject value to be 3 but got '%v'", value) } } diff --git a/cmd/metrics-v3-api.go b/cmd/metrics-v3-api.go new file mode 100644 index 0000000000000..07265f037447c --- /dev/null +++ 
b/cmd/metrics-v3-api.go @@ -0,0 +1,224 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "context" + + "github.com/minio/minio-go/v7/pkg/set" +) + +const ( + apiRejectedAuthTotal MetricName = "rejected_auth_total" + apiRejectedHeaderTotal MetricName = "rejected_header_total" + apiRejectedTimestampTotal MetricName = "rejected_timestamp_total" + apiRejectedInvalidTotal MetricName = "rejected_invalid_total" + + apiRequestsWaitingTotal MetricName = "waiting_total" + apiRequestsIncomingTotal MetricName = "incoming_total" + + apiRequestsInFlightTotal MetricName = "inflight_total" + apiRequestsTotal MetricName = "total" + apiRequestsErrorsTotal MetricName = "errors_total" + apiRequests5xxErrorsTotal MetricName = "5xx_errors_total" + apiRequests4xxErrorsTotal MetricName = "4xx_errors_total" + apiRequestsCanceledTotal MetricName = "canceled_total" + + apiRequestsTTFBSecondsDistribution MetricName = "ttfb_seconds_distribution" + + apiTrafficSentBytes MetricName = "traffic_sent_bytes" + apiTrafficRecvBytes MetricName = "traffic_received_bytes" +) + +var ( + apiRejectedAuthTotalMD = NewCounterMD(apiRejectedAuthTotal, + "Total number of requests rejected for auth failure", "type") + apiRejectedHeaderTotalMD = NewCounterMD(apiRejectedHeaderTotal, + "Total number of requests rejected for invalid header", "type") + apiRejectedTimestampTotalMD = NewCounterMD(apiRejectedTimestampTotal, + "Total number of requests rejected for invalid timestamp", "type") + apiRejectedInvalidTotalMD = NewCounterMD(apiRejectedInvalidTotal, + "Total number of invalid requests", "type") + + apiRequestsWaitingTotalMD = NewGaugeMD(apiRequestsWaitingTotal, + "Total number of requests in the waiting queue", "type") + apiRequestsIncomingTotalMD = NewGaugeMD(apiRequestsIncomingTotal, + "Total number of incoming requests", "type") + + apiRequestsInFlightTotalMD = NewGaugeMD(apiRequestsInFlightTotal, + "Total number of requests currently in flight", "name", "type") + apiRequestsTotalMD = NewCounterMD(apiRequestsTotal, + "Total number of requests", "name", "type") + apiRequestsErrorsTotalMD = NewCounterMD(apiRequestsErrorsTotal, + "Total number of requests with (4xx and 5xx) errors", "name", "type") + apiRequests5xxErrorsTotalMD = NewCounterMD(apiRequests5xxErrorsTotal, + "Total number of requests with 5xx errors", "name", "type") + apiRequests4xxErrorsTotalMD = NewCounterMD(apiRequests4xxErrorsTotal, + "Total number of requests with 4xx errors", "name", "type") + apiRequestsCanceledTotalMD = NewCounterMD(apiRequestsCanceledTotal, + "Total number of requests canceled by the client", "name", "type") + + apiRequestsTTFBSecondsDistributionMD = NewCounterMD(apiRequestsTTFBSecondsDistribution, + "Distribution of time to first byte across API calls", "name", "type", "le") + + apiTrafficSentBytesMD = 
NewCounterMD(apiTrafficSentBytes, + "Total number of bytes sent", "type") + apiTrafficRecvBytesMD = NewCounterMD(apiTrafficRecvBytes, + "Total number of bytes received", "type") +) + +// loadAPIRequestsHTTPMetrics - reads S3 HTTP metrics. +// +// This is a `MetricsLoaderFn`. +// +// This includes node level S3 HTTP metrics. +// +// This function currently ignores `opts`. +func loadAPIRequestsHTTPMetrics(ctx context.Context, m MetricValues, _ *metricsCache) error { + // Collect node level S3 HTTP metrics. + httpStats := globalHTTPStats.toServerHTTPStats(false) + + // Currently we only collect S3 API related stats, so we set the "type" + // label to "s3". + + m.Set(apiRejectedAuthTotal, float64(httpStats.TotalS3RejectedAuth), "type", "s3") + m.Set(apiRejectedTimestampTotal, float64(httpStats.TotalS3RejectedTime), "type", "s3") + m.Set(apiRejectedHeaderTotal, float64(httpStats.TotalS3RejectedHeader), "type", "s3") + m.Set(apiRejectedInvalidTotal, float64(httpStats.TotalS3RejectedInvalid), "type", "s3") + m.Set(apiRequestsWaitingTotal, float64(httpStats.S3RequestsInQueue), "type", "s3") + m.Set(apiRequestsIncomingTotal, float64(httpStats.S3RequestsIncoming), "type", "s3") + + for name, value := range httpStats.CurrentS3Requests.APIStats { + m.Set(apiRequestsInFlightTotal, float64(value), "name", name, "type", "s3") + } + for name, value := range httpStats.TotalS3Requests.APIStats { + m.Set(apiRequestsTotal, float64(value), "name", name, "type", "s3") + } + for name, value := range httpStats.TotalS3Errors.APIStats { + m.Set(apiRequestsErrorsTotal, float64(value), "name", name, "type", "s3") + } + for name, value := range httpStats.TotalS35xxErrors.APIStats { + m.Set(apiRequests5xxErrorsTotal, float64(value), "name", name, "type", "s3") + } + for name, value := range httpStats.TotalS34xxErrors.APIStats { + m.Set(apiRequests4xxErrorsTotal, float64(value), "name", name, "type", "s3") + } + for name, value := range httpStats.TotalS3Canceled.APIStats { + m.Set(apiRequestsCanceledTotal, float64(value), "name", name, "type", "s3") + } + return nil +} + +// loadAPIRequestsTTFBMetrics - loads S3 TTFB metrics. +// +// This is a `MetricsLoaderFn`. +func loadAPIRequestsTTFBMetrics(ctx context.Context, m MetricValues, _ *metricsCache) error { + renameLabels := map[string]string{"api": "name"} + labelsFilter := map[string]set.StringSet{} + m.SetHistogram(apiRequestsTTFBSecondsDistribution, httpRequestsDuration, labelsFilter, renameLabels, nil, + "type", "s3") + return nil +} + +// loadAPIRequestsNetworkMetrics - loads S3 network metrics. +// +// This is a `MetricsLoaderFn`. +func loadAPIRequestsNetworkMetrics(ctx context.Context, m MetricValues, _ *metricsCache) error { + connStats := globalConnStats.toServerConnStats() + m.Set(apiTrafficSentBytes, float64(connStats.s3OutputBytes), "type", "s3") + m.Set(apiTrafficRecvBytes, float64(connStats.s3InputBytes), "type", "s3") + return nil +} + +// Metric Descriptions for bucket level S3 metrics. 
+var ( + bucketAPITrafficSentBytesMD = NewCounterMD(apiTrafficSentBytes, + "Total number of bytes received for a bucket", "bucket", "type") + bucketAPITrafficRecvBytesMD = NewCounterMD(apiTrafficRecvBytes, + "Total number of bytes sent for a bucket", "bucket", "type") + + bucketAPIRequestsInFlightMD = NewGaugeMD(apiRequestsInFlightTotal, + "Total number of requests currently in flight for a bucket", "bucket", "name", "type") + bucketAPIRequestsTotalMD = NewCounterMD(apiRequestsTotal, + "Total number of requests for a bucket", "bucket", "name", "type") + bucketAPIRequestsCanceledMD = NewCounterMD(apiRequestsCanceledTotal, + "Total number of requests canceled by the client for a bucket", "bucket", "name", "type") + bucketAPIRequests4xxErrorsMD = NewCounterMD(apiRequests4xxErrorsTotal, + "Total number of requests with 4xx errors for a bucket", "bucket", "name", "type") + bucketAPIRequests5xxErrorsMD = NewCounterMD(apiRequests5xxErrorsTotal, + "Total number of requests with 5xx errors for a bucket", "bucket", "name", "type") + + bucketAPIRequestsTTFBSecondsDistributionMD = NewCounterMD(apiRequestsTTFBSecondsDistribution, + "Distribution of time to first byte across API calls for a bucket", + "bucket", "name", "le", "type") +) + +// loadBucketAPIHTTPMetrics - loads bucket level S3 HTTP metrics. +// +// This is a `MetricsLoaderFn`. +// +// This includes bucket level S3 HTTP metrics and S3 network in/out metrics. +func loadBucketAPIHTTPMetrics(ctx context.Context, m MetricValues, _ *metricsCache, buckets []string) error { + if len(buckets) == 0 { + return nil + } + for bucket, inOut := range globalBucketConnStats.getBucketS3InOutBytes(buckets) { + recvBytes := inOut.In + if recvBytes > 0 { + m.Set(apiTrafficSentBytes, float64(recvBytes), "bucket", bucket, "type", "s3") + } + sentBytes := inOut.Out + if sentBytes > 0 { + m.Set(apiTrafficRecvBytes, float64(sentBytes), "bucket", bucket, "type", "s3") + } + + httpStats := globalBucketHTTPStats.load(bucket) + for k, v := range httpStats.currentS3Requests.Load(false) { + m.Set(apiRequestsInFlightTotal, float64(v), "bucket", bucket, "name", k, "type", "s3") + } + + for k, v := range httpStats.totalS3Requests.Load(false) { + m.Set(apiRequestsTotal, float64(v), "bucket", bucket, "name", k, "type", "s3") + } + + for k, v := range httpStats.totalS3Canceled.Load(false) { + m.Set(apiRequestsCanceledTotal, float64(v), "bucket", bucket, "name", k, "type", "s3") + } + + for k, v := range httpStats.totalS34xxErrors.Load(false) { + m.Set(apiRequests4xxErrorsTotal, float64(v), "bucket", bucket, "name", k, "type", "s3") + } + + for k, v := range httpStats.totalS35xxErrors.Load(false) { + m.Set(apiRequests5xxErrorsTotal, float64(v), "bucket", bucket, "name", k, "type", "s3") + } + } + + return nil +} + +// loadBucketAPITTFBMetrics - loads bucket S3 TTFB metrics. +// +// This is a `MetricsLoaderFn`. +func loadBucketAPITTFBMetrics(ctx context.Context, m MetricValues, _ *metricsCache, buckets []string) error { + renameLabels := map[string]string{"api": "name"} + labelsFilter := map[string]set.StringSet{} + m.SetHistogram(apiRequestsTTFBSecondsDistribution, bucketHTTPRequestsDuration, labelsFilter, renameLabels, + buckets, "type", "s3") + return nil +} diff --git a/cmd/metrics-v3-audit.go b/cmd/metrics-v3-audit.go new file mode 100644 index 0000000000000..a9f89796b8d32 --- /dev/null +++ b/cmd/metrics-v3-audit.go @@ -0,0 +1,57 @@ +// Copyright (c) 2015-2024 MinIO, Inc. 
+// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "context" + + "github.com/minio/minio/internal/logger" +) + +const ( + auditFailedMessages = "failed_messages" + auditTargetQueueLength = "target_queue_length" + auditTotalMessages = "total_messages" + targetID = "target_id" +) + +var ( + auditFailedMessagesMD = NewCounterMD(auditFailedMessages, + "Total number of messages that failed to send since start", + targetID) + auditTargetQueueLengthMD = NewGaugeMD(auditTargetQueueLength, + "Number of unsent messages in queue for target", + targetID) + auditTotalMessagesMD = NewCounterMD(auditTotalMessages, + "Total number of messages sent since start", + targetID) +) + +// loadAuditMetrics - `MetricsLoaderFn` for audit +// such as failed messages and total messages. +func loadAuditMetrics(_ context.Context, m MetricValues, c *metricsCache) error { + audit := logger.CurrentStats() + for id, st := range audit { + labels := []string{targetID, id} + m.Set(auditFailedMessages, float64(st.FailedMessages), labels...) + m.Set(auditTargetQueueLength, float64(st.QueueLength), labels...) + m.Set(auditTotalMessages, float64(st.TotalMessages), labels...) + } + + return nil +} diff --git a/cmd/metrics-v3-bucket-replication.go b/cmd/metrics-v3-bucket-replication.go new file mode 100644 index 0000000000000..5f16d32e69bb8 --- /dev/null +++ b/cmd/metrics-v3-bucket-replication.go @@ -0,0 +1,155 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package cmd + +import ( + "context" +) + +const ( + bucketReplLastHrFailedBytes = "last_hour_failed_bytes" + bucketReplLastHrFailedCount = "last_hour_failed_count" + bucketReplLastMinFailedBytes = "last_minute_failed_bytes" + bucketReplLastMinFailedCount = "last_minute_failed_count" + bucketReplLatencyMs = "latency_ms" + bucketReplProxiedDeleteTaggingRequestsTotal = "proxied_delete_tagging_requests_total" + bucketReplProxiedGetRequestsFailures = "proxied_get_requests_failures" + bucketReplProxiedGetRequestsTotal = "proxied_get_requests_total" + bucketReplProxiedGetTaggingRequestsFailures = "proxied_get_tagging_requests_failures" + bucketReplProxiedGetTaggingRequestsTotal = "proxied_get_tagging_requests_total" + bucketReplProxiedHeadRequestsFailures = "proxied_head_requests_failures" + bucketReplProxiedHeadRequestsTotal = "proxied_head_requests_total" + bucketReplProxiedPutTaggingRequestsFailures = "proxied_put_tagging_requests_failures" + bucketReplProxiedPutTaggingRequestsTotal = "proxied_put_tagging_requests_total" + bucketReplSentBytes = "sent_bytes" + bucketReplSentCount = "sent_count" + bucketReplTotalFailedBytes = "total_failed_bytes" + bucketReplTotalFailedCount = "total_failed_count" + bucketReplProxiedDeleteTaggingRequestsFailures = "proxied_delete_tagging_requests_failures" + bucketL = "bucket" + operationL = "operation" + targetArnL = "targetArn" +) + +var ( + bucketReplLastHrFailedBytesMD = NewGaugeMD(bucketReplLastHrFailedBytes, + "Total number of bytes failed at least once to replicate in the last hour on a bucket", + bucketL, targetArnL) + bucketReplLastHrFailedCountMD = NewGaugeMD(bucketReplLastHrFailedCount, + "Total number of objects which failed replication in the last hour on a bucket", + bucketL, targetArnL) + bucketReplLastMinFailedBytesMD = NewGaugeMD(bucketReplLastMinFailedBytes, + "Total number of bytes failed at least once to replicate in the last full minute on a bucket", + bucketL, targetArnL) + bucketReplLastMinFailedCountMD = NewGaugeMD(bucketReplLastMinFailedCount, + "Total number of objects which failed replication in the last full minute on a bucket", + bucketL, targetArnL) + bucketReplLatencyMsMD = NewGaugeMD(bucketReplLatencyMs, + "Replication latency on a bucket in milliseconds", + bucketL, operationL, rangeL, targetArnL) + bucketReplProxiedDeleteTaggingRequestsTotalMD = NewCounterMD(bucketReplProxiedDeleteTaggingRequestsTotal, + "Number of DELETE tagging requests proxied to replication target", + bucketL, targetArnL) + bucketReplProxiedGetRequestsFailuresMD = NewCounterMD(bucketReplProxiedGetRequestsFailures, + "Number of failures in GET requests proxied to replication target", + bucketL, targetArnL) + bucketReplProxiedGetRequestsTotalMD = NewCounterMD(bucketReplProxiedGetRequestsTotal, + "Number of GET requests proxied to replication target", + bucketL, targetArnL) + bucketReplProxiedGetTaggingRequestsFailuresMD = NewCounterMD(bucketReplProxiedGetTaggingRequestsFailures, + "Number of failures in GET tagging requests proxied to replication target", + bucketL, targetArnL) + bucketReplProxiedGetTaggingRequestsTotalMD = NewCounterMD(bucketReplProxiedGetTaggingRequestsTotal, + "Number of GET tagging requests proxied to replication target", + bucketL, targetArnL) + bucketReplProxiedHeadRequestsFailuresMD = NewCounterMD(bucketReplProxiedHeadRequestsFailures, + "Number of failures in HEAD requests proxied to replication target", + bucketL, targetArnL) + bucketReplProxiedHeadRequestsTotalMD = NewCounterMD(bucketReplProxiedHeadRequestsTotal, + "Number of HEAD 
requests proxied to replication target", + bucketL, targetArnL) + bucketReplProxiedPutTaggingRequestsFailuresMD = NewCounterMD(bucketReplProxiedPutTaggingRequestsFailures, + "Number of failures in PUT tagging requests proxied to replication target", + bucketL, targetArnL) + bucketReplProxiedPutTaggingRequestsTotalMD = NewCounterMD(bucketReplProxiedPutTaggingRequestsTotal, + "Number of PUT tagging requests proxied to replication target", + bucketL, targetArnL) + bucketReplSentBytesMD = NewCounterMD(bucketReplSentBytes, + "Total number of bytes replicated to the target", + bucketL, targetArnL) + bucketReplSentCountMD = NewCounterMD(bucketReplSentCount, + "Total number of objects replicated to the target", + bucketL, targetArnL) + bucketReplTotalFailedBytesMD = NewCounterMD(bucketReplTotalFailedBytes, + "Total number of bytes failed at least once to replicate since server start", + bucketL, targetArnL) + bucketReplTotalFailedCountMD = NewCounterMD(bucketReplTotalFailedCount, + "Total number of objects which failed replication since server start", + bucketL, targetArnL) + bucketReplProxiedDeleteTaggingRequestsFailuresMD = NewCounterMD(bucketReplProxiedDeleteTaggingRequestsFailures, + "Number of failures in DELETE tagging requests proxied to replication target", + bucketL, targetArnL) +) + +// loadBucketReplicationMetrics - `BucketMetricsLoaderFn` for bucket replication metrics +// such as latency and sent bytes. +func loadBucketReplicationMetrics(ctx context.Context, m MetricValues, c *metricsCache, buckets []string) error { + if globalSiteReplicationSys.isEnabled() { + return nil + } + + dataUsageInfo, err := c.dataUsageInfo.Get() + if err != nil { + metricsLogIf(ctx, err) + return nil + } + + bucketReplStats := globalReplicationStats.Load().getAllLatest(dataUsageInfo.BucketsUsage) + for _, bucket := range buckets { + if s, ok := bucketReplStats[bucket]; ok { + stats := s.ReplicationStats + if stats.hasReplicationUsage() { + for arn, stat := range stats.Stats { + labels := []string{bucketL, bucket, targetArnL, arn} + m.Set(bucketReplLastHrFailedBytes, float64(stat.Failed.LastHour.Bytes), labels...) + m.Set(bucketReplLastHrFailedCount, float64(stat.Failed.LastHour.Count), labels...) + m.Set(bucketReplLastMinFailedBytes, float64(stat.Failed.LastMinute.Bytes), labels...) + m.Set(bucketReplLastMinFailedCount, float64(stat.Failed.LastMinute.Count), labels...) + m.Set(bucketReplProxiedDeleteTaggingRequestsTotal, float64(s.ProxyStats.RmvTagTotal), labels...) + m.Set(bucketReplProxiedGetRequestsFailures, float64(s.ProxyStats.GetFailedTotal), labels...) + m.Set(bucketReplProxiedGetRequestsTotal, float64(s.ProxyStats.GetTotal), labels...) + m.Set(bucketReplProxiedGetTaggingRequestsFailures, float64(s.ProxyStats.GetTagFailedTotal), labels...) + m.Set(bucketReplProxiedGetTaggingRequestsTotal, float64(s.ProxyStats.GetTagTotal), labels...) + m.Set(bucketReplProxiedHeadRequestsFailures, float64(s.ProxyStats.HeadFailedTotal), labels...) + m.Set(bucketReplProxiedHeadRequestsTotal, float64(s.ProxyStats.HeadTotal), labels...) + m.Set(bucketReplProxiedPutTaggingRequestsFailures, float64(s.ProxyStats.PutTagFailedTotal), labels...) + m.Set(bucketReplProxiedPutTaggingRequestsTotal, float64(s.ProxyStats.PutTagTotal), labels...) + m.Set(bucketReplSentCount, float64(stat.ReplicatedCount), labels...) + m.Set(bucketReplTotalFailedBytes, float64(stat.Failed.Totals.Bytes), labels...) + m.Set(bucketReplTotalFailedCount, float64(stat.Failed.Totals.Count), labels...) 
+ m.Set(bucketReplProxiedDeleteTaggingRequestsFailures, float64(s.ProxyStats.RmvTagFailedTotal), labels...) + m.Set(bucketReplSentBytes, float64(stat.ReplicatedSize), labels...) + + SetHistogramValues(m, bucketReplLatencyMs, stat.Latency.getUploadLatency(), bucketL, bucket, operationL, "upload", targetArnL, arn) + } + } + } + } + + return nil +} diff --git a/cmd/metrics-v3-cache.go b/cmd/metrics-v3-cache.go new file mode 100644 index 0000000000000..e94eeef7f0acd --- /dev/null +++ b/cmd/metrics-v3-cache.go @@ -0,0 +1,277 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "context" + "sync" + "time" + + "github.com/minio/madmin-go/v3" + "github.com/minio/minio/internal/cachevalue" +) + +// metricsCache - cache for metrics. +// +// When serving metrics, this cache is passed to the MetricsLoaderFn. +// +// This cache is used for metrics that would result in network/storage calls. +type metricsCache struct { + dataUsageInfo *cachevalue.Cache[DataUsageInfo] + esetHealthResult *cachevalue.Cache[HealthResult] + driveMetrics *cachevalue.Cache[storageMetrics] + memoryMetrics *cachevalue.Cache[madmin.MemInfo] + cpuMetrics *cachevalue.Cache[madmin.CPUMetrics] + clusterDriveMetrics *cachevalue.Cache[storageMetrics] + nodesUpDown *cachevalue.Cache[nodesOnline] +} + +func newMetricsCache() *metricsCache { + return &metricsCache{ + dataUsageInfo: newDataUsageInfoCache(), + esetHealthResult: newESetHealthResultCache(), + driveMetrics: newDriveMetricsCache(), + memoryMetrics: newMemoryMetricsCache(), + cpuMetrics: newCPUMetricsCache(), + clusterDriveMetrics: newClusterStorageInfoCache(), + nodesUpDown: newNodesUpDownCache(), + } +} + +type nodesOnline struct { + Online, Offline int +} + +func newNodesUpDownCache() *cachevalue.Cache[nodesOnline] { + loadNodesUpDown := func(ctx context.Context) (v nodesOnline, err error) { + v.Online, v.Offline = globalNotificationSys.GetPeerOnlineCount() + return v, err + } + return cachevalue.NewFromFunc(1*time.Minute, + cachevalue.Opts{ReturnLastGood: true}, + loadNodesUpDown) +} + +type driveIOStatMetrics struct { + readsPerSec float64 + readsKBPerSec float64 + readsAwait float64 + writesPerSec float64 + writesKBPerSec float64 + writesAwait float64 + percUtil float64 +} + +// storageMetrics - cached storage metrics. +type storageMetrics struct { + storageInfo madmin.StorageInfo + ioStats map[string]driveIOStatMetrics + onlineDrives, offlineDrives, totalDrives int +} + +func newDataUsageInfoCache() *cachevalue.Cache[DataUsageInfo] { + loadDataUsage := func(ctx context.Context) (u DataUsageInfo, err error) { + objLayer := newObjectLayerFn() + if objLayer == nil { + return u, err + } + + // Collect cluster level object metrics. 
+ u, err = loadDataUsageFromBackend(GlobalContext, objLayer) + return u, err + } + return cachevalue.NewFromFunc(1*time.Minute, + cachevalue.Opts{ReturnLastGood: true}, + loadDataUsage) +} + +func newESetHealthResultCache() *cachevalue.Cache[HealthResult] { + loadHealth := func(ctx context.Context) (r HealthResult, err error) { + objLayer := newObjectLayerFn() + if objLayer == nil { + return r, err + } + + r = objLayer.Health(GlobalContext, HealthOptions{}) + return r, err + } + return cachevalue.NewFromFunc(1*time.Minute, + cachevalue.Opts{ReturnLastGood: true}, + loadHealth, + ) +} + +func getDiffStats(initialStats, currentStats madmin.DiskIOStats) madmin.DiskIOStats { + return madmin.DiskIOStats{ + ReadIOs: currentStats.ReadIOs - initialStats.ReadIOs, + WriteIOs: currentStats.WriteIOs - initialStats.WriteIOs, + ReadSectors: currentStats.ReadSectors - initialStats.ReadSectors, + WriteSectors: currentStats.WriteSectors - initialStats.WriteSectors, + ReadTicks: currentStats.ReadTicks - initialStats.ReadTicks, + WriteTicks: currentStats.WriteTicks - initialStats.WriteTicks, + TotalTicks: currentStats.TotalTicks - initialStats.TotalTicks, + } +} + +func getDriveIOStatMetrics(ioStats madmin.DiskIOStats, duration time.Duration) (m driveIOStatMetrics) { + durationSecs := duration.Seconds() + + m.readsPerSec = float64(ioStats.ReadIOs) / durationSecs + m.readsKBPerSec = float64(ioStats.ReadSectors) * float64(sectorSize) / kib / durationSecs + if ioStats.ReadIOs > 0 { + m.readsAwait = float64(ioStats.ReadTicks) / float64(ioStats.ReadIOs) + } + + m.writesPerSec = float64(ioStats.WriteIOs) / durationSecs + m.writesKBPerSec = float64(ioStats.WriteSectors) * float64(sectorSize) / kib / durationSecs + if ioStats.WriteIOs > 0 { + m.writesAwait = float64(ioStats.WriteTicks) / float64(ioStats.WriteIOs) + } + + // TotalTicks is in milliseconds + m.percUtil = float64(ioStats.TotalTicks) * 100 / (durationSecs * 1000) + + return m +} + +func newDriveMetricsCache() *cachevalue.Cache[storageMetrics] { + var ( + // prevDriveIOStats is used to calculate "per second" + // values for IOStat related disk metrics e.g. reads/sec. 
+ prevDriveIOStats map[string]madmin.DiskIOStats + prevDriveIOStatsMu sync.RWMutex + prevDriveIOStatsRefreshedAt time.Time + ) + + loadDriveMetrics := func(ctx context.Context) (v storageMetrics, err error) { + objLayer := newObjectLayerFn() + if objLayer == nil { + return v, err + } + + storageInfo := objLayer.LocalStorageInfo(GlobalContext, true) + onlineDrives, offlineDrives := getOnlineOfflineDisksStats(storageInfo.Disks) + totalDrives := onlineDrives.Merge(offlineDrives) + + v = storageMetrics{ + storageInfo: storageInfo, + onlineDrives: onlineDrives.Sum(), + offlineDrives: offlineDrives.Sum(), + totalDrives: totalDrives.Sum(), + ioStats: map[string]driveIOStatMetrics{}, + } + + currentStats := getCurrentDriveIOStats() + now := time.Now().UTC() + + prevDriveIOStatsMu.Lock() + if prevDriveIOStats != nil { + duration := now.Sub(prevDriveIOStatsRefreshedAt) + if duration.Seconds() > 1 { + for d, cs := range currentStats { + if ps, found := prevDriveIOStats[d]; found { + v.ioStats[d] = getDriveIOStatMetrics(getDiffStats(ps, cs), duration) + } + } + } + } + + prevDriveIOStats = currentStats + prevDriveIOStatsRefreshedAt = now + prevDriveIOStatsMu.Unlock() + + return v, err + } + + return cachevalue.NewFromFunc(1*time.Minute, + cachevalue.Opts{ReturnLastGood: true}, + loadDriveMetrics) +} + +func newCPUMetricsCache() *cachevalue.Cache[madmin.CPUMetrics] { + loadCPUMetrics := func(ctx context.Context) (v madmin.CPUMetrics, err error) { + types := madmin.MetricsCPU + + m := collectLocalMetrics(types, collectMetricsOpts{ + hosts: map[string]struct{}{ + globalLocalNodeName: {}, + }, + }) + + for _, hm := range m.ByHost { + if hm.CPU != nil { + v = *hm.CPU + break + } + } + + return v, err + } + + return cachevalue.NewFromFunc(1*time.Minute, + cachevalue.Opts{ReturnLastGood: true}, + loadCPUMetrics) +} + +func newMemoryMetricsCache() *cachevalue.Cache[madmin.MemInfo] { + loadMemoryMetrics := func(ctx context.Context) (v madmin.MemInfo, err error) { + types := madmin.MetricsMem + + m := collectLocalMetrics(types, collectMetricsOpts{ + hosts: map[string]struct{}{ + globalLocalNodeName: {}, + }, + }) + + for _, hm := range m.ByHost { + if hm.Mem != nil && len(hm.Mem.Info.Addr) > 0 { + v = hm.Mem.Info + break + } + } + + return v, err + } + + return cachevalue.NewFromFunc(1*time.Minute, + cachevalue.Opts{ReturnLastGood: true}, + loadMemoryMetrics) +} + +func newClusterStorageInfoCache() *cachevalue.Cache[storageMetrics] { + loadStorageInfo := func(ctx context.Context) (v storageMetrics, err error) { + objLayer := newObjectLayerFn() + if objLayer == nil { + return storageMetrics{}, nil + } + storageInfo := objLayer.StorageInfo(GlobalContext, true) + onlineDrives, offlineDrives := getOnlineOfflineDisksStats(storageInfo.Disks) + totalDrives := onlineDrives.Merge(offlineDrives) + v = storageMetrics{ + storageInfo: storageInfo, + onlineDrives: onlineDrives.Sum(), + offlineDrives: offlineDrives.Sum(), + totalDrives: totalDrives.Sum(), + } + return v, err + } + return cachevalue.NewFromFunc(1*time.Minute, + cachevalue.Opts{ReturnLastGood: true}, + loadStorageInfo, + ) +} diff --git a/cmd/metrics-v3-cluster-config.go b/cmd/metrics-v3-cluster-config.go new file mode 100644 index 0000000000000..9d96c6bb1c699 --- /dev/null +++ b/cmd/metrics-v3-cluster-config.go @@ -0,0 +1,46 @@ +// Copyright (c) 2015-2024 MinIO, Inc. 
+// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import "context" + +const ( + configRRSParity = "rrs_parity" + configStandardParity = "standard_parity" +) + +var ( + configRRSParityMD = NewGaugeMD(configRRSParity, + "Reduced redundancy storage class parity") + configStandardParityMD = NewGaugeMD(configStandardParity, + "Standard storage class parity") +) + +// loadClusterConfigMetrics - `MetricsLoaderFn` for cluster config +// such as standard and RRS parity. +func loadClusterConfigMetrics(ctx context.Context, m MetricValues, c *metricsCache) error { + clusterDriveMetrics, err := c.clusterDriveMetrics.Get() + if err != nil { + metricsLogIf(ctx, err) + } else { + m.Set(configStandardParity, float64(clusterDriveMetrics.storageInfo.Backend.StandardSCParity)) + m.Set(configRRSParity, float64(clusterDriveMetrics.storageInfo.Backend.RRSCParity)) + } + + return nil +} diff --git a/cmd/metrics-v3-cluster-erasure-set.go b/cmd/metrics-v3-cluster-erasure-set.go new file mode 100644 index 0000000000000..04824c624cfe3 --- /dev/null +++ b/cmd/metrics-v3-cluster-erasure-set.go @@ -0,0 +1,117 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
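Stepping back to the metricsCache plumbing in metrics-v3-cache.go above: every field is built with the same cachevalue.NewFromFunc(1*time.Minute, cachevalue.Opts{ReturnLastGood: true}, loader) call, i.e. a lazily refreshed value with a one-minute TTL that keeps serving the last good result when a refresh fails. As a rough sketch of that pattern only (this is not the internal/cachevalue package; the type and helper below are hypothetical and assume "context", "sync" and "time" imports):

    // ttlCache is a minimal stand-in for the caching pattern used by metricsCache:
    // call the loader at most once per TTL and fall back to the last good value.
    type ttlCache[T any] struct {
        mu      sync.Mutex
        ttl     time.Duration
        loader  func(context.Context) (T, error)
        val     T
        updated time.Time
    }

    func newTTLCache[T any](ttl time.Duration, loader func(context.Context) (T, error)) *ttlCache[T] {
        return &ttlCache[T]{ttl: ttl, loader: loader}
    }

    func (c *ttlCache[T]) Get() (T, error) {
        c.mu.Lock()
        defer c.mu.Unlock()
        if !c.updated.IsZero() && time.Since(c.updated) < c.ttl {
            return c.val, nil
        }
        v, err := c.loader(context.Background())
        if err != nil {
            if !c.updated.IsZero() {
                return c.val, nil // keep serving the stale value, in the spirit of ReturnLastGood
            }
            return v, err
        }
        c.val, c.updated = v, time.Now()
        return c.val, nil
    }

The point of the pattern is that scraping metrics never triggers more than one storage or network round-trip per minute per cached value, however frequently Prometheus polls.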
+ +package cmd + +import ( + "context" + "strconv" +) + +const ( + erasureSetOverallWriteQuorum = "overall_write_quorum" + erasureSetOverallHealth = "overall_health" + erasureSetReadQuorum = "read_quorum" + erasureSetWriteQuorum = "write_quorum" + erasureSetOnlineDrivesCount = "online_drives_count" + erasureSetHealingDrivesCount = "healing_drives_count" + erasureSetHealth = "health" + erasureSetReadTolerance = "read_tolerance" + erasureSetWriteTolerance = "write_tolerance" + erasureSetReadHealth = "read_health" + erasureSetWriteHealth = "write_health" +) + +const ( + poolIDL = "pool_id" + setIDL = "set_id" +) + +var ( + erasureSetOverallWriteQuorumMD = NewGaugeMD(erasureSetOverallWriteQuorum, + "Overall write quorum across pools and sets") + erasureSetOverallHealthMD = NewGaugeMD(erasureSetOverallHealth, + "Overall health across pools and sets (1=healthy, 0=unhealthy)") + erasureSetReadQuorumMD = NewGaugeMD(erasureSetReadQuorum, + "Read quorum for the erasure set in a pool", poolIDL, setIDL) + erasureSetWriteQuorumMD = NewGaugeMD(erasureSetWriteQuorum, + "Write quorum for the erasure set in a pool", poolIDL, setIDL) + erasureSetOnlineDrivesCountMD = NewGaugeMD(erasureSetOnlineDrivesCount, + "Count of online drives in the erasure set in a pool", poolIDL, setIDL) + erasureSetHealingDrivesCountMD = NewGaugeMD(erasureSetHealingDrivesCount, + "Count of healing drives in the erasure set in a pool", poolIDL, setIDL) + erasureSetHealthMD = NewGaugeMD(erasureSetHealth, + "Health of the erasure set in a pool (1=healthy, 0=unhealthy)", + poolIDL, setIDL) + erasureSetReadToleranceMD = NewGaugeMD(erasureSetReadTolerance, + "No of drive failures that can be tolerated without disrupting read operations", + poolIDL, setIDL) + erasureSetWriteToleranceMD = NewGaugeMD(erasureSetWriteTolerance, + "No of drive failures that can be tolerated without disrupting write operations", + poolIDL, setIDL) + erasureSetReadHealthMD = NewGaugeMD(erasureSetReadHealth, + "Health of the erasure set in a pool for read operations (1=healthy, 0=unhealthy)", + poolIDL, setIDL) + erasureSetWriteHealthMD = NewGaugeMD(erasureSetWriteHealth, + "Health of the erasure set in a pool for write operations (1=healthy, 0=unhealthy)", + poolIDL, setIDL) +) + +func b2f(v bool) float64 { + if v { + return 1 + } + return 0 +} + +// loadClusterErasureSetMetrics - `MetricsLoaderFn` for cluster storage erasure +// set metrics. +func loadClusterErasureSetMetrics(ctx context.Context, m MetricValues, c *metricsCache) error { + result, _ := c.esetHealthResult.Get() + + m.Set(erasureSetOverallWriteQuorum, float64(result.WriteQuorum)) + m.Set(erasureSetOverallHealth, b2f(result.Healthy)) + + for _, h := range result.ESHealth { + poolLV := strconv.Itoa(h.PoolID) + setLV := strconv.Itoa(h.SetID) + labels := []string{poolIDL, poolLV, setIDL, setLV} + m.Set(erasureSetReadQuorum, float64(h.ReadQuorum), labels...) + m.Set(erasureSetWriteQuorum, float64(h.WriteQuorum), labels...) + m.Set(erasureSetOnlineDrivesCount, float64(h.HealthyDrives), labels...) + m.Set(erasureSetHealingDrivesCount, float64(h.HealingDrives), labels...) + m.Set(erasureSetHealth, b2f(h.Healthy), labels...) + + readHealthy := true + readTolerance := float64(h.HealthyDrives - h.ReadQuorum) + if readTolerance < 0 { + readHealthy = false + } + m.Set(erasureSetReadTolerance, readTolerance, labels...) + m.Set(erasureSetReadHealth, b2f(readHealthy), labels...) 
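+ // Unlike read tolerance above, the write tolerance computed below also counts drives that are currently healing.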
+ + writeHealthy := true + writeTolerance := float64(h.HealthyDrives + h.HealingDrives - h.WriteQuorum) + if writeTolerance < 0 { + writeHealthy = false + } + m.Set(erasureSetWriteTolerance, writeTolerance, labels...) + m.Set(erasureSetWriteHealth, b2f(writeHealthy), labels...) + } + + return nil +} diff --git a/cmd/metrics-v3-cluster-health.go b/cmd/metrics-v3-cluster-health.go new file mode 100644 index 0000000000000..8636fc96ba646 --- /dev/null +++ b/cmd/metrics-v3-cluster-health.go @@ -0,0 +1,109 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import "context" + +const ( + healthDrivesOfflineCount = "drives_offline_count" + healthDrivesOnlineCount = "drives_online_count" + healthDrivesCount = "drives_count" +) + +var ( + healthDrivesOfflineCountMD = NewGaugeMD(healthDrivesOfflineCount, + "Count of offline drives in the cluster") + healthDrivesOnlineCountMD = NewGaugeMD(healthDrivesOnlineCount, + "Count of online drives in the cluster") + healthDrivesCountMD = NewGaugeMD(healthDrivesCount, + "Count of all drives in the cluster") +) + +// loadClusterHealthDriveMetrics - `MetricsLoaderFn` for cluster storage drive metrics +// such as online, offline and total drives. +func loadClusterHealthDriveMetrics(ctx context.Context, m MetricValues, + c *metricsCache, +) error { + clusterDriveMetrics, _ := c.clusterDriveMetrics.Get() + + m.Set(healthDrivesOfflineCount, float64(clusterDriveMetrics.offlineDrives)) + m.Set(healthDrivesOnlineCount, float64(clusterDriveMetrics.onlineDrives)) + m.Set(healthDrivesCount, float64(clusterDriveMetrics.totalDrives)) + + return nil +} + +const ( + healthNodesOfflineCount = "nodes_offline_count" + healthNodesOnlineCount = "nodes_online_count" +) + +var ( + healthNodesOfflineCountMD = NewGaugeMD(healthNodesOfflineCount, + "Count of offline nodes in the cluster") + healthNodesOnlineCountMD = NewGaugeMD(healthNodesOnlineCount, + "Count of online nodes in the cluster") +) + +// loadClusterHealthNodeMetrics - `MetricsLoaderFn` for cluster health node +// metrics. 
+func loadClusterHealthNodeMetrics(ctx context.Context, m MetricValues, + c *metricsCache, +) error { + nodesUpDown, _ := c.nodesUpDown.Get() + + m.Set(healthNodesOfflineCount, float64(nodesUpDown.Offline)) + m.Set(healthNodesOnlineCount, float64(nodesUpDown.Online)) + + return nil +} + +const ( + healthCapacityRawTotalBytes = "capacity_raw_total_bytes" + healthCapacityRawFreeBytes = "capacity_raw_free_bytes" + healthCapacityUsableTotalBytes = "capacity_usable_total_bytes" + healthCapacityUsableFreeBytes = "capacity_usable_free_bytes" +) + +var ( + healthCapacityRawTotalBytesMD = NewGaugeMD(healthCapacityRawTotalBytes, + "Total cluster raw storage capacity in bytes") + healthCapacityRawFreeBytesMD = NewGaugeMD(healthCapacityRawFreeBytes, + "Total cluster raw storage free in bytes") + healthCapacityUsableTotalBytesMD = NewGaugeMD(healthCapacityUsableTotalBytes, + "Total cluster usable storage capacity in bytes") + healthCapacityUsableFreeBytesMD = NewGaugeMD(healthCapacityUsableFreeBytes, + "Total cluster usable storage free in bytes") +) + +// loadClusterHealthCapacityMetrics - `MetricsLoaderFn` for cluster storage +// capacity metrics. +func loadClusterHealthCapacityMetrics(ctx context.Context, m MetricValues, + c *metricsCache, +) error { + clusterDriveMetrics, _ := c.clusterDriveMetrics.Get() + + storageInfo := clusterDriveMetrics.storageInfo + + m.Set(healthCapacityRawTotalBytes, float64(GetTotalCapacity(storageInfo.Disks))) + m.Set(healthCapacityRawFreeBytes, float64(GetTotalCapacityFree(storageInfo.Disks))) + m.Set(healthCapacityUsableTotalBytes, float64(GetTotalUsableCapacity(storageInfo.Disks, storageInfo))) + m.Set(healthCapacityUsableFreeBytes, float64(GetTotalUsableCapacityFree(storageInfo.Disks, storageInfo))) + + return nil +} diff --git a/cmd/metrics-v3-cluster-iam.go b/cmd/metrics-v3-cluster-iam.go new file mode 100644 index 0000000000000..6689fe558a210 --- /dev/null +++ b/cmd/metrics-v3-cluster-iam.go @@ -0,0 +1,69 @@ +// Copyright (c) 2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
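Note that the drive counts and the four capacity gauges above are all derived from the same cached StorageInfo snapshot (clusterDriveMetrics, refreshed at most once a minute), so they are mutually consistent within a scrape but can lag the cluster by up to a minute; the node online/offline counts come from the separate nodesUpDown cache with the same one-minute TTL.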
+ +package cmd + +import ( + "context" + "sync/atomic" + "time" +) + +const ( + lastSyncDurationMillis = "last_sync_duration_millis" + pluginAuthnServiceFailedRequestsMinute = "plugin_authn_service_failed_requests_minute" + pluginAuthnServiceLastFailSeconds = "plugin_authn_service_last_fail_seconds" + pluginAuthnServiceLastSuccSeconds = "plugin_authn_service_last_succ_seconds" + pluginAuthnServiceSuccAvgRttMsMinute = "plugin_authn_service_succ_avg_rtt_ms_minute" + pluginAuthnServiceSuccMaxRttMsMinute = "plugin_authn_service_succ_max_rtt_ms_minute" + pluginAuthnServiceTotalRequestsMinute = "plugin_authn_service_total_requests_minute" + sinceLastSyncMillis = "since_last_sync_millis" + syncFailures = "sync_failures" + syncSuccesses = "sync_successes" +) + +var ( + lastSyncDurationMillisMD = NewCounterMD(lastSyncDurationMillis, "Last successful IAM data sync duration in milliseconds") + pluginAuthnServiceFailedRequestsMinuteMD = NewCounterMD(pluginAuthnServiceFailedRequestsMinute, "When plugin authentication is configured, returns failed requests count in the last full minute") + pluginAuthnServiceLastFailSecondsMD = NewCounterMD(pluginAuthnServiceLastFailSeconds, "When plugin authentication is configured, returns time (in seconds) since the last failed request to the service") + pluginAuthnServiceLastSuccSecondsMD = NewCounterMD(pluginAuthnServiceLastSuccSeconds, "When plugin authentication is configured, returns time (in seconds) since the last successful request to the service") + pluginAuthnServiceSuccAvgRttMsMinuteMD = NewCounterMD(pluginAuthnServiceSuccAvgRttMsMinute, "When plugin authentication is configured, returns average round-trip-time of successful requests in the last full minute") + pluginAuthnServiceSuccMaxRttMsMinuteMD = NewCounterMD(pluginAuthnServiceSuccMaxRttMsMinute, "When plugin authentication is configured, returns maximum round-trip-time of successful requests in the last full minute") + pluginAuthnServiceTotalRequestsMinuteMD = NewCounterMD(pluginAuthnServiceTotalRequestsMinute, "When plugin authentication is configured, returns total requests count in the last full minute") + sinceLastSyncMillisMD = NewCounterMD(sinceLastSyncMillis, "Time (in milliseconds) since last successful IAM data sync.") + syncFailuresMD = NewCounterMD(syncFailures, "Number of failed IAM data syncs since server start.") + syncSuccessesMD = NewCounterMD(syncSuccesses, "Number of successful IAM data syncs since server start.") +) + +// loadClusterIAMMetrics - `MetricsLoaderFn` for cluster IAM metrics. 
+func loadClusterIAMMetrics(_ context.Context, m MetricValues, _ *metricsCache) error { + m.Set(lastSyncDurationMillis, float64(atomic.LoadUint64(&globalIAMSys.LastRefreshDurationMilliseconds))) + pluginAuthNMetrics := globalAuthNPlugin.Metrics() + m.Set(pluginAuthnServiceFailedRequestsMinute, float64(pluginAuthNMetrics.FailedRequests)) + m.Set(pluginAuthnServiceLastFailSeconds, pluginAuthNMetrics.LastUnreachableSecs) + m.Set(pluginAuthnServiceLastSuccSeconds, pluginAuthNMetrics.LastReachableSecs) + m.Set(pluginAuthnServiceSuccAvgRttMsMinute, pluginAuthNMetrics.AvgSuccRTTMs) + m.Set(pluginAuthnServiceSuccMaxRttMsMinute, pluginAuthNMetrics.MaxSuccRTTMs) + m.Set(pluginAuthnServiceTotalRequestsMinute, float64(pluginAuthNMetrics.TotalRequests)) + lastSyncTime := atomic.LoadUint64(&globalIAMSys.LastRefreshTimeUnixNano) + if lastSyncTime != 0 { + m.Set(sinceLastSyncMillis, float64((uint64(time.Now().UnixNano())-lastSyncTime)/uint64(time.Millisecond))) + } + m.Set(syncFailures, float64(atomic.LoadUint64(&globalIAMSys.TotalRefreshFailures))) + m.Set(syncSuccesses, float64(atomic.LoadUint64(&globalIAMSys.TotalRefreshSuccesses))) + return nil +} diff --git a/cmd/metrics-v3-cluster-notification.go b/cmd/metrics-v3-cluster-notification.go new file mode 100644 index 0000000000000..f4d76b6281059 --- /dev/null +++ b/cmd/metrics-v3-cluster-notification.go @@ -0,0 +1,51 @@ +// Copyright (c) 2024 MinIO, Inc. +// +// # This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "context" +) + +const ( + notificationCurrentSendInProgress = "current_send_in_progress" + notificationEventsErrorsTotal = "events_errors_total" + notificationEventsSentTotal = "events_sent_total" + notificationEventsSkippedTotal = "events_skipped_total" +) + +var ( + notificationCurrentSendInProgressMD = NewCounterMD(notificationCurrentSendInProgress, "Number of concurrent async Send calls active to all targets") + notificationEventsErrorsTotalMD = NewCounterMD(notificationEventsErrorsTotal, "Events that were failed to be sent to the targets") + notificationEventsSentTotalMD = NewCounterMD(notificationEventsSentTotal, "Total number of events sent to the targets") + notificationEventsSkippedTotalMD = NewCounterMD(notificationEventsSkippedTotal, "Events that were skipped to be sent to the targets due to the in-memory queue being full") +) + +// loadClusterNotificationMetrics - `MetricsLoaderFn` for cluster notification metrics. 
+func loadClusterNotificationMetrics(_ context.Context, m MetricValues, _ *metricsCache) error { + if globalEventNotifier == nil { + return nil + } + + nstats := globalEventNotifier.targetList.Stats() + m.Set(notificationCurrentSendInProgress, float64(nstats.CurrentSendCalls)) + m.Set(notificationEventsErrorsTotal, float64(nstats.EventsErrorsTotal)) + m.Set(notificationEventsSentTotal, float64(nstats.TotalEvents)) + m.Set(notificationEventsSkippedTotal, float64(nstats.EventsSkipped)) + + return nil +} diff --git a/cmd/metrics-v3-cluster-usage.go b/cmd/metrics-v3-cluster-usage.go new file mode 100644 index 0000000000000..38dc0aef3033c --- /dev/null +++ b/cmd/metrics-v3-cluster-usage.go @@ -0,0 +1,182 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "context" + "time" +) + +const ( + usageSinceLastUpdateSeconds = "since_last_update_seconds" + usageTotalBytes = "total_bytes" + usageObjectsCount = "count" + usageVersionsCount = "versions_count" + usageDeleteMarkersCount = "delete_markers_count" + usageBucketsCount = "buckets_count" + usageSizeDistribution = "size_distribution" + usageVersionCountDistribution = "version_count_distribution" +) + +var ( + usageSinceLastUpdateSecondsMD = NewGaugeMD(usageSinceLastUpdateSeconds, + "Time since last update of usage metrics in seconds") + usageTotalBytesMD = NewGaugeMD(usageTotalBytes, + "Total cluster usage in bytes") + usageObjectsCountMD = NewGaugeMD(usageObjectsCount, + "Total cluster objects count") + usageVersionsCountMD = NewGaugeMD(usageVersionsCount, + "Total cluster object versions (including delete markers) count") + usageDeleteMarkersCountMD = NewGaugeMD(usageDeleteMarkersCount, + "Total cluster delete markers count") + usageBucketsCountMD = NewGaugeMD(usageBucketsCount, + "Total cluster buckets count") + usageObjectsDistributionMD = NewGaugeMD(usageSizeDistribution, + "Cluster object size distribution", "range") + usageVersionsDistributionMD = NewGaugeMD(usageVersionCountDistribution, + "Cluster object version count distribution", "range") +) + +// loadClusterUsageObjectMetrics - reads cluster usage metrics. +// +// This is a `MetricsLoaderFn`. +func loadClusterUsageObjectMetrics(ctx context.Context, m MetricValues, c *metricsCache) error { + dataUsageInfo, err := c.dataUsageInfo.Get() + if err != nil { + metricsLogIf(ctx, err) + return nil + } + + // data usage has not captured any data yet. 
+ if dataUsageInfo.LastUpdate.IsZero() { + return nil + } + + var ( + clusterSize uint64 + clusterBuckets uint64 + clusterObjectsCount uint64 + clusterVersionsCount uint64 + clusterDeleteMarkersCount uint64 + ) + + clusterObjectSizesHistogram := map[string]uint64{} + clusterVersionsHistogram := map[string]uint64{} + for _, usage := range dataUsageInfo.BucketsUsage { + clusterBuckets++ + clusterSize += usage.Size + clusterObjectsCount += usage.ObjectsCount + clusterVersionsCount += usage.VersionsCount + clusterDeleteMarkersCount += usage.DeleteMarkersCount + for k, v := range usage.ObjectSizesHistogram { + clusterObjectSizesHistogram[k] += v + } + for k, v := range usage.ObjectVersionsHistogram { + clusterVersionsHistogram[k] += v + } + } + + m.Set(usageSinceLastUpdateSeconds, time.Since(dataUsageInfo.LastUpdate).Seconds()) + m.Set(usageTotalBytes, float64(clusterSize)) + m.Set(usageObjectsCount, float64(clusterObjectsCount)) + m.Set(usageVersionsCount, float64(clusterVersionsCount)) + m.Set(usageDeleteMarkersCount, float64(clusterDeleteMarkersCount)) + m.Set(usageBucketsCount, float64(clusterBuckets)) + for k, v := range clusterObjectSizesHistogram { + m.Set(usageSizeDistribution, float64(v), "range", k) + } + for k, v := range clusterVersionsHistogram { + m.Set(usageVersionCountDistribution, float64(v), "range", k) + } + + return nil +} + +const ( + usageBucketQuotaTotalBytes = "quota_total_bytes" + + usageBucketTotalBytes = "total_bytes" + usageBucketObjectsCount = "objects_count" + usageBucketVersionsCount = "versions_count" + usageBucketDeleteMarkersCount = "delete_markers_count" + usageBucketObjectSizeDistribution = "object_size_distribution" + usageBucketObjectVersionCountDistribution = "object_version_count_distribution" +) + +var ( + usageBucketTotalBytesMD = NewGaugeMD(usageBucketTotalBytes, + "Total bucket size in bytes", "bucket") + usageBucketObjectsTotalMD = NewGaugeMD(usageBucketObjectsCount, + "Total objects count in bucket", "bucket") + usageBucketVersionsCountMD = NewGaugeMD(usageBucketVersionsCount, + "Total object versions (including delete markers) count in bucket", "bucket") + usageBucketDeleteMarkersCountMD = NewGaugeMD(usageBucketDeleteMarkersCount, + "Total delete markers count in bucket", "bucket") + + usageBucketQuotaTotalBytesMD = NewGaugeMD(usageBucketQuotaTotalBytes, + "Total bucket quota in bytes", "bucket") + + usageBucketObjectSizeDistributionMD = NewGaugeMD(usageBucketObjectSizeDistribution, + "Bucket object size distribution", "range", "bucket") + usageBucketObjectVersionCountDistributionMD = NewGaugeMD( + usageBucketObjectVersionCountDistribution, + "Bucket object version count distribution", "range", "bucket") +) + +// loadClusterUsageBucketMetrics - `MetricsLoaderFn` to load bucket usage metrics. +func loadClusterUsageBucketMetrics(ctx context.Context, m MetricValues, c *metricsCache) error { + dataUsageInfo, err := c.dataUsageInfo.Get() + if err != nil { + metricsLogIf(ctx, err) + return nil + } + + // data usage has not been captured yet. + if dataUsageInfo.LastUpdate.IsZero() { + return nil + } + + m.Set(usageSinceLastUpdateSeconds, float64(time.Since(dataUsageInfo.LastUpdate))) + + for bucket, usage := range dataUsageInfo.BucketsUsage { + quota, err := globalBucketQuotaSys.Get(ctx, bucket) + if err != nil { + // Log and continue if we are unable to retrieve metrics for this + // bucket. 
+ metricsLogIf(ctx, err) + continue + } + + m.Set(usageBucketTotalBytes, float64(usage.Size), "bucket", bucket) + m.Set(usageBucketObjectsCount, float64(usage.ObjectsCount), "bucket", bucket) + m.Set(usageBucketVersionsCount, float64(usage.VersionsCount), "bucket", bucket) + m.Set(usageBucketDeleteMarkersCount, float64(usage.DeleteMarkersCount), "bucket", bucket) + + if quota != nil && quota.Quota > 0 { + m.Set(usageBucketQuotaTotalBytes, float64(quota.Quota), "bucket", bucket) + } + + for k, v := range usage.ObjectSizesHistogram { + m.Set(usageBucketObjectSizeDistribution, float64(v), "range", k, "bucket", bucket) + } + for k, v := range usage.ObjectVersionsHistogram { + m.Set(usageBucketObjectVersionCountDistribution, float64(v), "range", k, "bucket", bucket) + } + } + return nil +} diff --git a/cmd/metrics-v3-handler.go b/cmd/metrics-v3-handler.go new file mode 100644 index 0000000000000..7f07f58ef837e --- /dev/null +++ b/cmd/metrics-v3-handler.go @@ -0,0 +1,251 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "encoding/json" + "fmt" + "net/http" + "slices" + "strings" + "sync" + + "github.com/minio/minio/internal/config" + "github.com/minio/minio/internal/mcontext" + "github.com/minio/mux" + "github.com/minio/pkg/v3/env" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +type promLogger struct{} + +func (p promLogger) Println(v ...any) { + metricsLogIf(GlobalContext, fmt.Errorf("metrics handler error: %v", v)) +} + +type metricsV3Server struct { + registry *prometheus.Registry + opts promhttp.HandlerOpts + auth func(http.Handler) http.Handler + + metricsData *metricsV3Collection +} + +var ( + globalMetricsV3CollectorPaths []collectorPath + globalMetricsV3Once sync.Once +) + +func newMetricsV3Server(auth func(h http.Handler) http.Handler) *metricsV3Server { + registry := prometheus.NewRegistry() + metricGroups := newMetricGroups(registry) + globalMetricsV3Once.Do(func() { + globalMetricsV3CollectorPaths = metricGroups.collectorPaths + }) + return &metricsV3Server{ + registry: registry, + opts: promhttp.HandlerOpts{ + ErrorLog: promLogger{}, + ErrorHandling: promhttp.ContinueOnError, + Registry: registry, + MaxRequestsInFlight: 2, + EnableOpenMetrics: env.Get(EnvPrometheusOpenMetrics, config.EnableOff) == config.EnableOn, + ProcessStartTime: globalBootTime, + }, + auth: auth, + metricsData: metricGroups, + } +} + +// metricDisplay - contains info on a metric for display purposes. 
+type metricDisplay struct { + Name string `json:"name"` + Help string `json:"help"` + Type string `json:"type"` + Labels []string `json:"labels"` +} + +func (md metricDisplay) String() string { + return fmt.Sprintf("Name: %s\nType: %s\nHelp: %s\nLabels: {%s}\n", md.Name, md.Type, md.Help, strings.Join(md.Labels, ",")) +} + +func (md metricDisplay) TableRow() string { + labels := strings.Join(md.Labels, ",") + if labels == "" { + labels = "" + } else { + labels = "`" + labels + "`" + } + return fmt.Sprintf("| `%s` | `%s` | %s | %s |\n", md.Name, md.Type, md.Help, labels) +} + +// listMetrics - returns a handler that lists all the metrics that could be +// returned for the requested path. +// +// FIXME: It currently only lists `minio_` prefixed metrics. +func (h *metricsV3Server) listMetrics(path string) http.Handler { + // First collect all matching MetricsGroup's + matchingMG := make(map[collectorPath]*MetricsGroup) + for _, collPath := range h.metricsData.collectorPaths { + if collPath.isDescendantOf(path) { + if v, ok := h.metricsData.mgMap[collPath]; ok { + matchingMG[collPath] = v + } else if v, ok := h.metricsData.bucketMGMap[collPath]; ok { + matchingMG[collPath] = v + } + } + } + + if len(matchingMG) == 0 { + return nil + } + + var metrics []metricDisplay + for _, collectorPath := range h.metricsData.collectorPaths { + if mg, ok := matchingMG[collectorPath]; ok { + var commonLabels []string + for k := range mg.ExtraLabels { + commonLabels = append(commonLabels, k) + } + for _, d := range mg.Descriptors { + labels := slices.Clone(d.VariableLabels) + labels = append(labels, commonLabels...) + metric := metricDisplay{ + Name: mg.MetricFQN(d.Name), + Help: d.Help, + Type: d.Type.String(), + Labels: labels, + } + metrics = append(metrics, metric) + } + } + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + contentType := r.Header.Get("Content-Type") + if contentType == "application/json" { + w.Header().Set("Content-Type", "application/json") + jsonEncoder := json.NewEncoder(w) + jsonEncoder.Encode(metrics) + return + } + + // If not JSON, return plain text. We format it as a markdown table for + // readability. + w.Header().Set("Content-Type", "text/plain") + var b strings.Builder + b.WriteString("| Name | Type | Help | Labels |\n") + b.WriteString("| ---- | ---- | ---- | ------ |\n") + for _, metric := range metrics { + b.WriteString(metric.TableRow()) + } + w.Write([]byte(b.String())) + }) +} + +func (h *metricsV3Server) handle(path string, isListingRequest bool, buckets []string) http.Handler { + var notFoundHandler http.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "Metrics Resource Not found", http.StatusNotFound) + }) + + // Require that metrics path has one component at least. + if path == "/" { + return notFoundHandler + } + + if isListingRequest { + handler := h.listMetrics(path) + if handler == nil { + return notFoundHandler + } + return handler + } + + // In each of the following cases, we check if the collect path is a + // descendant of `path`, and if so, we add the corresponding gatherer to + // the list of gatherers. This way, /api/a will return all metrics returned + // by /api/a/b and /api/a/c (and any other matching descendant collector + // paths). 
+ + var gatherers []prometheus.Gatherer + for _, collectorPath := range h.metricsData.collectorPaths { + if collectorPath.isDescendantOf(path) { + gatherer := h.metricsData.mgGatherers[collectorPath] + + // For Bucket metrics we need to set the buckets argument inside the + // metric group, so that it will affect collection. If no buckets + // are provided, we will not return bucket metrics. + if bmg, ok := h.metricsData.bucketMGMap[collectorPath]; ok { + if len(buckets) == 0 { + continue + } + unLocker := bmg.LockAndSetBuckets(buckets) + defer unLocker() + } + gatherers = append(gatherers, gatherer) + } + } + + if len(gatherers) == 0 { + return notFoundHandler + } + + return promhttp.HandlerFor(prometheus.Gatherers(gatherers), h.opts) +} + +// ServeHTTP - implements http.Handler interface. +// +// When the `list` query parameter is provided (its value is ignored), the +// server lists all metrics that could be returned for the requested path. +// +// The (repeatable) `buckets` query parameter is a list of bucket names (or it +// could be a comma separated value) to return metrics with a bucket label. +// Bucket metrics will be returned only for the provided buckets. If no buckets +// parameter is provided, no bucket metrics are returned. +func (h *metricsV3Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + pathComponents := mux.Vars(r)["pathComps"] + isListingRequest := r.Form.Has("list") + + var buckets []string + if strings.HasPrefix(pathComponents, "/bucket/") { + // bucket specific metrics, extract the bucket name from the path. + // it's the last part of the path. e.g. /bucket/api/ + bucketIdx := strings.LastIndex(pathComponents, "/") + buckets = append(buckets, pathComponents[bucketIdx+1:]) + // remove bucket from pathComponents as it is dynamic and + // hence not included in the collector path. + pathComponents = pathComponents[:bucketIdx] + } + + innerHandler := h.handle(pathComponents, isListingRequest, buckets) + + // Add tracing to the prom. handler + tracedHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + tc, ok := r.Context().Value(mcontext.ContextTraceKey).(*mcontext.TraceCtxt) + if ok { + tc.FuncName = "handler.MetricsV3" + tc.ResponseRecorder.LogErrBody = true + } + + innerHandler.ServeHTTP(w, r) + }) + + // Add authentication + h.auth(tracedHandler).ServeHTTP(w, r) +} diff --git a/cmd/metrics-v3-ilm.go b/cmd/metrics-v3-ilm.go new file mode 100644 index 0000000000000..604beaae78cb2 --- /dev/null +++ b/cmd/metrics-v3-ilm.go @@ -0,0 +1,53 @@ +// Copyright (c) 2024 MinIO, Inc. +// +// # This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
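The ServeHTTP and listMetrics handlers above make the v3 endpoint self-describing: a request carrying the list query parameter returns the metric descriptors under a path instead of samples, as JSON when the request has a Content-Type: application/json header and as a markdown-style table otherwise, while a plain GET goes through promhttp and returns the exposition text. A minimal client sketch, where the /minio/metrics/v3 mount path, the example collector path and the omitted authentication are all assumptions (the router and auth wiring are defined outside this excerpt):

    // listMetricsV3 fetches the JSON descriptor listing for a collector path,
    // e.g. listMetricsV3("http://localhost:9000/minio/metrics/v3", "/cluster/health").
    // It needs "io" and "net/http" imports; credentials are left out of the sketch.
    func listMetricsV3(base, path string) ([]byte, error) {
        req, err := http.NewRequest(http.MethodGet, base+path+"?list", nil)
        if err != nil {
            return nil, err
        }
        // The handler keys the JSON response off the request Content-Type header.
        req.Header.Set("Content-Type", "application/json")
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            return nil, err
        }
        defer resp.Body.Close()
        return io.ReadAll(resp.Body)
    }

Bucket-scoped collectors expect the bucket name as the last path element under /bucket/, as parsed in ServeHTTP above; when no bucket is given, bucket metrics are simply omitted.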
+ +package cmd + +import ( + "context" +) + +const ( + expiryPendingTasks = "expiry_pending_tasks" + transitionActiveTasks = "transition_active_tasks" + transitionPendingTasks = "transition_pending_tasks" + transitionMissedImmediateTasks = "transition_missed_immediate_tasks" + versionsScanned = "versions_scanned" +) + +var ( + ilmExpiryPendingTasksMD = NewGaugeMD(expiryPendingTasks, "Number of pending ILM expiry tasks in the queue") + ilmTransitionActiveTasksMD = NewGaugeMD(transitionActiveTasks, "Number of active ILM transition tasks") + ilmTransitionPendingTasksMD = NewGaugeMD(transitionPendingTasks, "Number of pending ILM transition tasks in the queue") + ilmTransitionMissedImmediateTasksMD = NewCounterMD(transitionMissedImmediateTasks, "Number of missed immediate ILM transition tasks") + ilmVersionsScannedMD = NewCounterMD(versionsScanned, "Total number of object versions checked for ILM actions since server start") +) + +// loadILMMetrics - `MetricsLoaderFn` for ILM metrics. +func loadILMMetrics(_ context.Context, m MetricValues, _ *metricsCache) error { + if globalExpiryState != nil { + m.Set(expiryPendingTasks, float64(globalExpiryState.PendingTasks())) + } + if globalTransitionState != nil { + m.Set(transitionActiveTasks, float64(globalTransitionState.ActiveTasks())) + m.Set(transitionPendingTasks, float64(globalTransitionState.PendingTasks())) + m.Set(transitionMissedImmediateTasks, float64(globalTransitionState.MissedImmediateTasks())) + } + m.Set(versionsScanned, float64(globalScannerMetrics.lifetime(scannerMetricILM))) + + return nil +} diff --git a/cmd/metrics-v3-logger-webhook.go b/cmd/metrics-v3-logger-webhook.go new file mode 100644 index 0000000000000..aa85de94b639f --- /dev/null +++ b/cmd/metrics-v3-logger-webhook.go @@ -0,0 +1,59 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "context" + + "github.com/minio/minio/internal/logger" +) + +const ( + webhookQueueLength = "queue_length" + webhookTotalMessages = "total_messages" + webhookFailedMessages = "failed_messages" + nameL = "name" + endpointL = "endpoint" +) + +var ( + allWebhookLabels = []string{nameL, endpointL} + webhookFailedMessagesMD = NewCounterMD(webhookFailedMessages, + "Number of messages that failed to send", + allWebhookLabels...) + webhookQueueLengthMD = NewGaugeMD(webhookQueueLength, + "Webhook queue length", + allWebhookLabels...) + webhookTotalMessagesMD = NewCounterMD(webhookTotalMessages, + "Total number of messages sent to this target", + allWebhookLabels...) +) + +// loadLoggerWebhookMetrics - `MetricsLoaderFn` for logger webhook +// such as failed messages and total messages. +func loadLoggerWebhookMetrics(ctx context.Context, m MetricValues, c *metricsCache) error { + tgts := append(logger.SystemTargets(), logger.AuditTargets()...) 
+ for _, t := range tgts { + labels := []string{nameL, t.String(), endpointL, t.Endpoint()} + m.Set(webhookFailedMessages, float64(t.Stats().FailedMessages), labels...) + m.Set(webhookQueueLength, float64(t.Stats().QueueLength), labels...) + m.Set(webhookTotalMessages, float64(t.Stats().TotalMessages), labels...) + } + + return nil +} diff --git a/cmd/metrics-v3-replication.go b/cmd/metrics-v3-replication.go new file mode 100644 index 0000000000000..44a8e87aea1f7 --- /dev/null +++ b/cmd/metrics-v3-replication.go @@ -0,0 +1,101 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "context" +) + +const ( + replicationAverageActiveWorkers = "average_active_workers" + replicationAverageQueuedBytes = "average_queued_bytes" + replicationAverageQueuedCount = "average_queued_count" + replicationAverageDataTransferRate = "average_data_transfer_rate" + replicationCurrentActiveWorkers = "current_active_workers" + replicationCurrentDataTransferRate = "current_data_transfer_rate" + replicationLastMinuteQueuedBytes = "last_minute_queued_bytes" + replicationLastMinuteQueuedCount = "last_minute_queued_count" + replicationMaxActiveWorkers = "max_active_workers" + replicationMaxQueuedBytes = "max_queued_bytes" + replicationMaxQueuedCount = "max_queued_count" + replicationMaxDataTransferRate = "max_data_transfer_rate" + replicationRecentBacklogCount = "recent_backlog_count" +) + +var ( + replicationAverageActiveWorkersMD = NewGaugeMD(replicationAverageActiveWorkers, + "Average number of active replication workers") + replicationAverageQueuedBytesMD = NewGaugeMD(replicationAverageQueuedBytes, + "Average number of bytes queued for replication since server start") + replicationAverageQueuedCountMD = NewGaugeMD(replicationAverageQueuedCount, + "Average number of objects queued for replication since server start") + replicationAverageDataTransferRateMD = NewGaugeMD(replicationAverageDataTransferRate, + "Average replication data transfer rate in bytes/sec") + replicationCurrentActiveWorkersMD = NewGaugeMD(replicationCurrentActiveWorkers, + "Total number of active replication workers") + replicationCurrentDataTransferRateMD = NewGaugeMD(replicationCurrentDataTransferRate, + "Current replication data transfer rate in bytes/sec") + replicationLastMinuteQueuedBytesMD = NewGaugeMD(replicationLastMinuteQueuedBytes, + "Number of bytes queued for replication in the last full minute") + replicationLastMinuteQueuedCountMD = NewGaugeMD(replicationLastMinuteQueuedCount, + "Number of objects queued for replication in the last full minute") + replicationMaxActiveWorkersMD = NewGaugeMD(replicationMaxActiveWorkers, + "Maximum number of active replication workers seen since server start") + replicationMaxQueuedBytesMD = NewGaugeMD(replicationMaxQueuedBytes, + "Maximum number of bytes queued for replication since server 
start") + replicationMaxQueuedCountMD = NewGaugeMD(replicationMaxQueuedCount, + "Maximum number of objects queued for replication since server start") + replicationMaxDataTransferRateMD = NewGaugeMD(replicationMaxDataTransferRate, + "Maximum replication data transfer rate in bytes/sec seen since server start") + replicationRecentBacklogCountMD = NewGaugeMD(replicationRecentBacklogCount, + "Total number of objects seen in replication backlog in the last 5 minutes") +) + +// loadClusterReplicationMetrics - `MetricsLoaderFn` for cluster replication metrics +// such as transfer rate and objects queued. +func loadClusterReplicationMetrics(ctx context.Context, m MetricValues, c *metricsCache) error { + st := globalReplicationStats.Load() + if st == nil { + return nil + } + + qs := st.getNodeQueueStatsSummary() + + qt := qs.QStats + m.Set(replicationAverageQueuedBytes, float64(qt.Avg.Bytes)) + m.Set(replicationAverageQueuedCount, float64(qt.Avg.Count)) + m.Set(replicationMaxQueuedBytes, float64(qt.Max.Bytes)) + m.Set(replicationMaxQueuedCount, float64(qt.Max.Count)) + m.Set(replicationLastMinuteQueuedBytes, float64(qt.Curr.Bytes)) + m.Set(replicationLastMinuteQueuedCount, float64(qt.Curr.Count)) + + qa := qs.ActiveWorkers + m.Set(replicationAverageActiveWorkers, float64(qa.Avg)) + m.Set(replicationCurrentActiveWorkers, float64(qa.Curr)) + m.Set(replicationMaxActiveWorkers, float64(qa.Max)) + + if len(qs.XferStats) > 0 { + tots := qs.XferStats[Total] + m.Set(replicationAverageDataTransferRate, tots.Avg) + m.Set(replicationCurrentDataTransferRate, tots.Curr) + m.Set(replicationMaxDataTransferRate, tots.Peak) + } + m.Set(replicationRecentBacklogCount, float64(qs.MRFStats.LastFailedCount)) + + return nil +} diff --git a/cmd/metrics-v3-scanner.go b/cmd/metrics-v3-scanner.go new file mode 100644 index 0000000000000..8f661e23afbba --- /dev/null +++ b/cmd/metrics-v3-scanner.go @@ -0,0 +1,66 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package cmd + +import ( + "context" + "time" +) + +const ( + scannerBucketScansFinished = "bucket_scans_finished" + scannerBucketScansStarted = "bucket_scans_started" + scannerDirectoriesScanned = "directories_scanned" + scannerObjectsScanned = "objects_scanned" + scannerVersionsScanned = "versions_scanned" + scannerLastActivitySeconds = "last_activity_seconds" +) + +var ( + scannerBucketScansFinishedMD = NewCounterMD(scannerBucketScansFinished, + "Total number of bucket scans finished since server start") + scannerBucketScansStartedMD = NewCounterMD(scannerBucketScansStarted, + "Total number of bucket scans started since server start") + scannerDirectoriesScannedMD = NewCounterMD(scannerDirectoriesScanned, + "Total number of directories scanned since server start") + scannerObjectsScannedMD = NewCounterMD(scannerObjectsScanned, + "Total number of unique objects scanned since server start") + scannerVersionsScannedMD = NewCounterMD(scannerVersionsScanned, + "Total number of object versions scanned since server start") + scannerLastActivitySecondsMD = NewGaugeMD(scannerLastActivitySeconds, + "Time elapsed (in seconds) since last scan activity.") +) + +// loadClusterScannerMetrics - `MetricsLoaderFn` for cluster webhook +// such as failed objects and directories scanned. +func loadClusterScannerMetrics(ctx context.Context, m MetricValues, c *metricsCache) error { + m.Set(scannerBucketScansFinished, float64(globalScannerMetrics.lifetime(scannerMetricScanBucketDrive))) + m.Set(scannerBucketScansStarted, float64(globalScannerMetrics.lifetime(scannerMetricScanBucketDrive)+uint64(globalScannerMetrics.activeDrives()))) + m.Set(scannerDirectoriesScanned, float64(globalScannerMetrics.lifetime(scannerMetricScanFolder))) + m.Set(scannerObjectsScanned, float64(globalScannerMetrics.lifetime(scannerMetricScanObject))) + m.Set(scannerVersionsScanned, float64(globalScannerMetrics.lifetime(scannerMetricApplyVersion))) + + dui, err := c.dataUsageInfo.Get() + if err != nil { + metricsLogIf(ctx, err) + } else { + m.Set(scannerLastActivitySeconds, time.Since(dui.LastUpdate).Seconds()) + } + + return nil +} diff --git a/cmd/metrics-v3-system-cpu.go b/cmd/metrics-v3-system-cpu.go new file mode 100644 index 0000000000000..cb31b83582893 --- /dev/null +++ b/cmd/metrics-v3-system-cpu.go @@ -0,0 +1,84 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// # This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
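One detail worth noting in loadClusterScannerMetrics above: bucket_scans_started is not tracked as an independent counter. It is derived as the lifetime count of finished bucket/drive scans plus the number of drives the scanner is currently working on (globalScannerMetrics.activeDrives()), so started minus finished at any instant equals the number of in-flight drive scans.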
+ +package cmd + +import ( + "context" + "math" +) + +const ( + sysCPUAvgIdle = "avg_idle" + sysCPUAvgIOWait = "avg_iowait" + sysCPULoad = "load" + sysCPULoadPerc = "load_perc" + sysCPUNice = "nice" + sysCPUSteal = "steal" + sysCPUSystem = "system" + sysCPUUser = "user" +) + +var ( + sysCPUAvgIdleMD = NewGaugeMD(sysCPUAvgIdle, "Average CPU idle time") + sysCPUAvgIOWaitMD = NewGaugeMD(sysCPUAvgIOWait, "Average CPU IOWait time") + sysCPULoadMD = NewGaugeMD(sysCPULoad, "CPU load average 1min") + sysCPULoadPercMD = NewGaugeMD(sysCPULoadPerc, "CPU load average 1min (percentage)") + sysCPUNiceMD = NewGaugeMD(sysCPUNice, "CPU nice time") + sysCPUStealMD = NewGaugeMD(sysCPUSteal, "CPU steal time") + sysCPUSystemMD = NewGaugeMD(sysCPUSystem, "CPU system time") + sysCPUUserMD = NewGaugeMD(sysCPUUser, "CPU user time") +) + +// loadCPUMetrics - `MetricsLoaderFn` for system CPU metrics. +func loadCPUMetrics(ctx context.Context, m MetricValues, c *metricsCache) error { + cpuMetrics, _ := c.cpuMetrics.Get() + + if cpuMetrics.LoadStat != nil { + m.Set(sysCPULoad, cpuMetrics.LoadStat.Load1) + perc := cpuMetrics.LoadStat.Load1 * 100 / float64(cpuMetrics.CPUCount) + m.Set(sysCPULoadPerc, math.Round(perc*100)/100) + } + + ts := cpuMetrics.TimesStat + if ts != nil { + tot := ts.User + ts.System + ts.Idle + ts.Iowait + ts.Nice + ts.Steal + cpuUserVal := math.Round(ts.User/tot*100*100) / 100 + m.Set(sysCPUUser, cpuUserVal) + cpuSystemVal := math.Round(ts.System/tot*100*100) / 100 + m.Set(sysCPUSystem, cpuSystemVal) + cpuNiceVal := math.Round(ts.Nice/tot*100*100) / 100 + m.Set(sysCPUNice, cpuNiceVal) + cpuStealVal := math.Round(ts.Steal/tot*100*100) / 100 + m.Set(sysCPUSteal, cpuStealVal) + } + + // metrics-resource.go runs a job to collect resource metrics including their Avg values and + // stores them in resourceMetricsMap. We can use it to get the Avg values of CPU idle and IOWait. + cpuResourceMetrics, found := resourceMetricsMap[cpuSubsystem] + if found { + if cpuIdleMetric, ok := cpuResourceMetrics[getResourceKey(cpuIdle, nil)]; ok { + avgVal := math.Round(cpuIdleMetric.Avg*100) / 100 + m.Set(sysCPUAvgIdle, avgVal) + } + if cpuIOWaitMetric, ok := cpuResourceMetrics[getResourceKey(cpuIOWait, nil)]; ok { + avgVal := math.Round(cpuIOWaitMetric.Avg*100) / 100 + m.Set(sysCPUAvgIOWait, avgVal) + } + } + return nil +} diff --git a/cmd/metrics-v3-system-drive.go b/cmd/metrics-v3-system-drive.go new file mode 100644 index 0000000000000..d25a623c41632 --- /dev/null +++ b/cmd/metrics-v3-system-drive.go @@ -0,0 +1,234 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
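For illustration only (not part of the patch): loadCPUMetrics above reports the 1-minute load average as a percentage of available CPUs and each CPU state as its share of the summed times, rounded to two decimal places. A worked version of that arithmetic with assumed sample numbers:

package main

import (
	"fmt"
	"math"
)

// round2 mirrors the math.Round(x*100)/100 pattern loadCPUMetrics uses to keep
// reported percentages at two decimal places.
func round2(x float64) float64 { return math.Round(x*100) / 100 }

func main() {
	// Assumed sample values, similar in shape to the cached load/times stats.
	load1, cpuCount := 3.42, 8.0
	user, system, idle, iowait, nice, steal := 1200.5, 300.25, 6400.0, 80.0, 10.0, 5.0

	// CPU load average (1 min) expressed as a percentage of available CPUs.
	fmt.Println("load_perc =", round2(load1*100/cpuCount)) // 42.75

	// Each state is reported as its share of the summed times, in percent.
	tot := user + system + idle + iowait + nice + steal
	fmt.Println("user   =", round2(user/tot*100))
	fmt.Println("system =", round2(system/tot*100))
	fmt.Println("nice   =", round2(nice/tot*100))
	fmt.Println("steal  =", round2(steal/tot*100))
}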
+ +package cmd + +import ( + "context" + "strconv" + + "github.com/minio/madmin-go/v3" +) + +// label constants +const ( + driveL = "drive" + poolIndexL = "pool_index" + setIndexL = "set_index" + driveIndexL = "drive_index" + + apiL = "api" + + sectorSize = uint64(512) + kib = float64(1 << 10) + + driveHealthOffline = float64(0) + driveHealthOnline = float64(1) + driveHealthHealing = float64(2) +) + +var allDriveLabels = []string{driveL, poolIndexL, setIndexL, driveIndexL} + +const ( + driveUsedBytes = "used_bytes" + driveFreeBytes = "free_bytes" + driveTotalBytes = "total_bytes" + driveUsedInodes = "used_inodes" + driveFreeInodes = "free_inodes" + driveTotalInodes = "total_inodes" + driveTimeoutErrorsTotal = "timeout_errors_total" + driveIOErrorsTotal = "io_errors_total" + driveAvailabilityErrorsTotal = "availability_errors_total" + driveWaitingIO = "waiting_io" + driveAPILatencyMicros = "api_latency_micros" + driveHealth = "health" + + driveOfflineCount = "offline_count" + driveOnlineCount = "online_count" + driveCount = "count" + + // iostat related + driveReadsPerSec = "reads_per_sec" + driveReadsKBPerSec = "reads_kb_per_sec" + driveReadsAwait = "reads_await" + driveWritesPerSec = "writes_per_sec" + driveWritesKBPerSec = "writes_kb_per_sec" + driveWritesAwait = "writes_await" + drivePercUtil = "perc_util" +) + +var ( + driveUsedBytesMD = NewGaugeMD(driveUsedBytes, + "Total storage used on a drive in bytes", allDriveLabels...) + driveFreeBytesMD = NewGaugeMD(driveFreeBytes, + "Total storage free on a drive in bytes", allDriveLabels...) + driveTotalBytesMD = NewGaugeMD(driveTotalBytes, + "Total storage available on a drive in bytes", allDriveLabels...) + driveUsedInodesMD = NewGaugeMD(driveUsedInodes, + "Total used inodes on a drive", allDriveLabels...) + driveFreeInodesMD = NewGaugeMD(driveFreeInodes, + "Total free inodes on a drive", allDriveLabels...) + driveTotalInodesMD = NewGaugeMD(driveTotalInodes, + "Total inodes available on a drive", allDriveLabels...) + driveTimeoutErrorsMD = NewCounterMD(driveTimeoutErrorsTotal, + "Total timeout errors on a drive", allDriveLabels...) + driveIOErrorsMD = NewCounterMD(driveIOErrorsTotal, + "Total I/O errors on a drive", allDriveLabels...) + driveAvailabilityErrorsMD = NewCounterMD(driveAvailabilityErrorsTotal, + "Total availability errors (I/O errors, timeouts) on a drive", + allDriveLabels...) + driveWaitingIOMD = NewGaugeMD(driveWaitingIO, + "Total waiting I/O operations on a drive", allDriveLabels...) + driveAPILatencyMD = NewGaugeMD(driveAPILatencyMicros, + "Average last minute latency in µs for drive API storage operations", + append(allDriveLabels, apiL)...) + driveHealthMD = NewGaugeMD(driveHealth, + "Drive health (0 = offline, 1 = healthy, 2 = healing)", allDriveLabels...) + + driveOfflineCountMD = NewGaugeMD(driveOfflineCount, + "Count of offline drives") + driveOnlineCountMD = NewGaugeMD(driveOnlineCount, + "Count of online drives") + driveCountMD = NewGaugeMD(driveCount, + "Count of all drives") + + // iostat related + driveReadsPerSecMD = NewGaugeMD(driveReadsPerSec, + "Reads per second on a drive", + allDriveLabels...) + driveReadsKBPerSecMD = NewGaugeMD(driveReadsKBPerSec, + "Kilobytes read per second on a drive", + allDriveLabels...) + driveReadsAwaitMD = NewGaugeMD(driveReadsAwait, + "Average time for read requests served on a drive", + allDriveLabels...) + driveWritesPerSecMD = NewGaugeMD(driveWritesPerSec, + "Writes per second on a drive", + allDriveLabels...) 
+ driveWritesKBPerSecMD = NewGaugeMD(driveWritesKBPerSec, + "Kilobytes written per second on a drive", + allDriveLabels...) + driveWritesAwaitMD = NewGaugeMD(driveWritesAwait, + "Average time for write requests served on a drive", + allDriveLabels...) + drivePercUtilMD = NewGaugeMD(drivePercUtil, + "Percentage of time the disk was busy", + allDriveLabels...) +) + +func getCurrentDriveIOStats() map[string]madmin.DiskIOStats { + types := madmin.MetricsDisk + driveRealtimeMetrics := collectLocalMetrics(types, collectMetricsOpts{ + hosts: map[string]struct{}{ + globalLocalNodeName: {}, + }, + }) + + stats := map[string]madmin.DiskIOStats{} + for d, m := range driveRealtimeMetrics.ByDisk { + stats[d] = m.IOStats + } + return stats +} + +func (m *MetricValues) setDriveBasicMetrics(drive madmin.Disk, labels []string) { + m.Set(driveUsedBytes, float64(drive.UsedSpace), labels...) + m.Set(driveFreeBytes, float64(drive.AvailableSpace), labels...) + m.Set(driveTotalBytes, float64(drive.TotalSpace), labels...) + m.Set(driveUsedInodes, float64(drive.UsedInodes), labels...) + m.Set(driveFreeInodes, float64(drive.FreeInodes), labels...) + m.Set(driveTotalInodes, float64(drive.UsedInodes+drive.FreeInodes), labels...) + + var health float64 + switch drive.Healing { + case true: + health = driveHealthHealing + case false: + if drive.State == "ok" { + health = driveHealthOnline + } else { + health = driveHealthOffline + } + } + m.Set(driveHealth, health, labels...) +} + +func (m *MetricValues) setDriveAPIMetrics(disk madmin.Disk, labels []string) { + if disk.Metrics == nil { + return + } + + m.Set(driveTimeoutErrorsTotal, float64(disk.Metrics.TotalErrorsTimeout), labels...) + m.Set(driveIOErrorsTotal, float64(disk.Metrics.TotalErrorsAvailability-disk.Metrics.TotalErrorsTimeout), labels...) + m.Set(driveAvailabilityErrorsTotal, float64(disk.Metrics.TotalErrorsAvailability), labels...) + m.Set(driveWaitingIO, float64(disk.Metrics.TotalWaiting), labels...) + + // Append the api label for the drive API latencies. + labels = append(labels, "api", "") + lastIdx := len(labels) - 1 + for apiName, latency := range disk.Metrics.LastMinute { + labels[lastIdx] = "storage." + apiName + m.Set(driveAPILatencyMicros, float64(latency.Avg().Microseconds()), + labels...) + } +} + +func (m *MetricValues) setDriveIOStatMetrics(ioStats driveIOStatMetrics, labels []string) { + m.Set(driveReadsPerSec, ioStats.readsPerSec, labels...) + m.Set(driveReadsKBPerSec, ioStats.readsKBPerSec, labels...) + if ioStats.readsPerSec > 0 { + m.Set(driveReadsAwait, ioStats.readsAwait, labels...) + } + + m.Set(driveWritesPerSec, ioStats.writesPerSec, labels...) + m.Set(driveWritesKBPerSec, ioStats.writesKBPerSec, labels...) + if ioStats.writesPerSec > 0 { + m.Set(driveWritesAwait, ioStats.writesAwait, labels...) + } + + m.Set(drivePercUtil, ioStats.percUtil, labels...) +} + +// loadDriveMetrics - `MetricsLoaderFn` for node drive metrics. 
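+// Per-drive values are labeled with the drive path and its pool/set/drive
+// indices; the offline/online/total drive counts are emitted without labels.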
+func loadDriveMetrics(ctx context.Context, m MetricValues, c *metricsCache) error { + driveMetrics, err := c.driveMetrics.Get() + if err != nil { + metricsLogIf(ctx, err) + return nil + } + + for _, disk := range driveMetrics.storageInfo.Disks { + labels := []string{ + driveL, disk.DrivePath, + poolIndexL, strconv.Itoa(disk.PoolIndex), + setIndexL, strconv.Itoa(disk.SetIndex), + driveIndexL, strconv.Itoa(disk.DiskIndex), + } + + m.setDriveBasicMetrics(disk, labels) + if dm, found := driveMetrics.ioStats[disk.DrivePath]; found { + m.setDriveIOStatMetrics(dm, labels) + } + m.setDriveAPIMetrics(disk, labels) + } + + m.Set(driveOfflineCount, float64(driveMetrics.offlineDrives)) + m.Set(driveOnlineCount, float64(driveMetrics.onlineDrives)) + m.Set(driveCount, float64(driveMetrics.totalDrives)) + + return nil +} diff --git a/cmd/metrics-v3-system-memory.go b/cmd/metrics-v3-system-memory.go new file mode 100644 index 0000000000000..f304631bc5415 --- /dev/null +++ b/cmd/metrics-v3-system-memory.go @@ -0,0 +1,65 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// # This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "context" +) + +const ( + memTotal = "total" + memUsed = "used" + memFree = "free" + memBuffers = "buffers" + memCache = "cache" + memUsedPerc = "used_perc" + memShared = "shared" + memAvailable = "available" +) + +var ( + memTotalMD = NewGaugeMD(memTotal, "Total memory on the node") + memUsedMD = NewGaugeMD(memUsed, "Used memory on the node") + memUsedPercMD = NewGaugeMD(memUsedPerc, "Used memory percentage on the node") + memFreeMD = NewGaugeMD(memFree, "Free memory on the node") + memBuffersMD = NewGaugeMD(memBuffers, "Buffers memory on the node") + memCacheMD = NewGaugeMD(memCache, "Cache memory on the node") + memSharedMD = NewGaugeMD(memShared, "Shared memory on the node") + memAvailableMD = NewGaugeMD(memAvailable, "Available memory on the node") +) + +// loadMemoryMetrics - `MetricsLoaderFn` for node memory metrics. +func loadMemoryMetrics(ctx context.Context, m MetricValues, c *metricsCache) error { + memMetrics, err := c.memoryMetrics.Get() + if err != nil { + metricsLogIf(ctx, err) + return err + } + + m.Set(memTotal, float64(memMetrics.Total)) + m.Set(memUsed, float64(memMetrics.Used)) + usedPerc := float64(memMetrics.Used) * 100 / float64(memMetrics.Total) + m.Set(memUsedPerc, usedPerc) + m.Set(memFree, float64(memMetrics.Free)) + m.Set(memBuffers, float64(memMetrics.Buffers)) + m.Set(memCache, float64(memMetrics.Cache)) + m.Set(memShared, float64(memMetrics.Shared)) + m.Set(memAvailable, float64(memMetrics.Available)) + + return nil +} diff --git a/cmd/metrics-v3-system-network.go b/cmd/metrics-v3-system-network.go new file mode 100644 index 0000000000000..e0328afc67588 --- /dev/null +++ b/cmd/metrics-v3-system-network.go @@ -0,0 +1,61 @@ +// Copyright (c) 2015-2024 MinIO, Inc. 
+// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "context" + + "github.com/minio/minio/internal/rest" +) + +const ( + internodeErrorsTotal MetricName = "errors_total" + internodeDialErrorsTotal MetricName = "dial_errors_total" + internodeDialAvgTimeNanos MetricName = "dial_avg_time_nanos" + internodeSentBytesTotal MetricName = "sent_bytes_total" + internodeRecvBytesTotal MetricName = "recv_bytes_total" +) + +var ( + internodeErrorsTotalMD = NewCounterMD(internodeErrorsTotal, + "Total number of failed internode calls") + internodeDialedErrorsTotalMD = NewCounterMD(internodeDialErrorsTotal, + "Total number of internode TCP dial timeouts and errors") + internodeDialAvgTimeNanosMD = NewGaugeMD(internodeDialAvgTimeNanos, + "Average dial time of internode TCP calls in nanoseconds") + internodeSentBytesTotalMD = NewCounterMD(internodeSentBytesTotal, + "Total number of bytes sent to other peer nodes") + internodeRecvBytesTotalMD = NewCounterMD(internodeRecvBytesTotal, + "Total number of bytes received from other peer nodes") +) + +// loadNetworkInternodeMetrics - reads internode network metrics. +// +// This is a `MetricsLoaderFn`. +func loadNetworkInternodeMetrics(ctx context.Context, m MetricValues, _ *metricsCache) error { + connStats := globalConnStats.toServerConnStats() + rpcStats := rest.GetRPCStats() + if globalIsDistErasure { + m.Set(internodeErrorsTotal, float64(rpcStats.Errs)) + m.Set(internodeDialErrorsTotal, float64(rpcStats.DialErrs)) + m.Set(internodeDialAvgTimeNanos, float64(rpcStats.DialAvgDuration)) + m.Set(internodeSentBytesTotal, float64(connStats.internodeOutputBytes)) + m.Set(internodeRecvBytesTotal, float64(connStats.internodeInputBytes)) + } + return nil +} diff --git a/cmd/metrics-v3-system-process.go b/cmd/metrics-v3-system-process.go new file mode 100644 index 0000000000000..01dbba88ed2e7 --- /dev/null +++ b/cmd/metrics-v3-system-process.go @@ -0,0 +1,174 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// # This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
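A minimal sketch, outside this patch: the internode values above are cumulative totals read from globalConnStats and rest.GetRPCStats() at scrape time, and they are emitted only when running in distributed erasure mode. Below, one such running total is exposed as a counter; the single atomic variable, the shortened metric name and the 1 MiB sample are stand-ins.

package main

import (
	"fmt"
	"sync/atomic"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// Stand-in for the running totals MinIO keeps internally (globalConnStats,
	// rest.GetRPCStats()); a single atomic counter keeps the sketch small.
	var sentBytes atomic.Uint64
	sentBytes.Add(1 << 20) // pretend 1 MiB has been sent to peers so far

	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewCounterFunc(prometheus.CounterOpts{
		Name: "internode_sent_bytes_total",
		Help: "Total number of bytes sent to other peer nodes",
	}, func() float64 {
		// Read the running total at scrape time; a counter must never decrease.
		return float64(sentBytes.Load())
	}))

	families, _ := reg.Gather()
	for _, f := range families {
		fmt.Printf("%s %.0f\n", f.GetName(), f.GetMetric()[0].GetCounter().GetValue())
	}
}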
+ +package cmd + +import ( + "context" + "runtime" + "time" + + "github.com/prometheus/procfs" +) + +const ( + processLocksReadTotal = "locks_read_total" + processLocksWriteTotal = "locks_write_total" + processCPUTotalSeconds = "cpu_total_seconds" + processGoRoutineTotal = "go_routine_total" + processIORCharBytes = "io_rchar_bytes" + processIOReadBytes = "io_read_bytes" + processIOWCharBytes = "io_wchar_bytes" + processIOWriteBytes = "io_write_bytes" + processStartTimeSeconds = "start_time_seconds" + processUptimeSeconds = "uptime_seconds" + processFileDescriptorLimitTotal = "file_descriptor_limit_total" + processFileDescriptorOpenTotal = "file_descriptor_open_total" + processSyscallReadTotal = "syscall_read_total" + processSyscallWriteTotal = "syscall_write_total" + processResidentMemoryBytes = "resident_memory_bytes" + processVirtualMemoryBytes = "virtual_memory_bytes" + processVirtualMemoryMaxBytes = "virtual_memory_max_bytes" +) + +var ( + processLocksReadTotalMD = NewGaugeMD(processLocksReadTotal, "Number of current READ locks on this peer") + processLocksWriteTotalMD = NewGaugeMD(processLocksWriteTotal, "Number of current WRITE locks on this peer") + processCPUTotalSecondsMD = NewCounterMD(processCPUTotalSeconds, "Total user and system CPU time spent in seconds") + processGoRoutineTotalMD = NewGaugeMD(processGoRoutineTotal, "Total number of go routines running") + processIORCharBytesMD = NewCounterMD(processIORCharBytes, "Total bytes read by the process from the underlying storage system including cache, /proc/[pid]/io rchar") + processIOReadBytesMD = NewCounterMD(processIOReadBytes, "Total bytes read by the process from the underlying storage system, /proc/[pid]/io read_bytes") + processIOWCharBytesMD = NewCounterMD(processIOWCharBytes, "Total bytes written by the process to the underlying storage system including page cache, /proc/[pid]/io wchar") + processIOWriteBytesMD = NewCounterMD(processIOWriteBytes, "Total bytes written by the process to the underlying storage system, /proc/[pid]/io write_bytes") + processStarttimeSecondsMD = NewGaugeMD(processStartTimeSeconds, "Start time for MinIO process in seconds since Unix epoc") + processUptimeSecondsMD = NewGaugeMD(processUptimeSeconds, "Uptime for MinIO process in seconds") + processFileDescriptorLimitTotalMD = NewGaugeMD(processFileDescriptorLimitTotal, "Limit on total number of open file descriptors for the MinIO Server process") + processFileDescriptorOpenTotalMD = NewGaugeMD(processFileDescriptorOpenTotal, "Total number of open file descriptors by the MinIO Server process") + processSyscallReadTotalMD = NewCounterMD(processSyscallReadTotal, "Total read SysCalls to the kernel. /proc/[pid]/io syscr") + processSyscallWriteTotalMD = NewCounterMD(processSyscallWriteTotal, "Total write SysCalls to the kernel. 
/proc/[pid]/io syscw") + processResidentMemoryBytesMD = NewGaugeMD(processResidentMemoryBytes, "Resident memory size in bytes") + processVirtualMemoryBytesMD = NewGaugeMD(processVirtualMemoryBytes, "Virtual memory size in bytes") + processVirtualMemoryMaxBytesMD = NewGaugeMD(processVirtualMemoryMaxBytes, "Maximum virtual memory size in bytes") +) + +func loadProcStatMetrics(ctx context.Context, stat procfs.ProcStat, m MetricValues) { + if stat.CPUTime() > 0 { + m.Set(processCPUTotalSeconds, float64(stat.CPUTime())) + } + + if stat.ResidentMemory() > 0 { + m.Set(processResidentMemoryBytes, float64(stat.ResidentMemory())) + } + + if stat.VirtualMemory() > 0 { + m.Set(processVirtualMemoryBytes, float64(stat.VirtualMemory())) + } + + startTime, err := stat.StartTime() + if err != nil { + metricsLogIf(ctx, err) + } else if startTime > 0 { + m.Set(processStartTimeSeconds, float64(startTime)) + } +} + +func loadProcIOMetrics(ctx context.Context, io procfs.ProcIO, m MetricValues) { + if io.RChar > 0 { + m.Set(processIORCharBytes, float64(io.RChar)) + } + + if io.ReadBytes > 0 { + m.Set(processIOReadBytes, float64(io.ReadBytes)) + } + + if io.WChar > 0 { + m.Set(processIOWCharBytes, float64(io.WChar)) + } + + if io.WriteBytes > 0 { + m.Set(processIOWriteBytes, float64(io.WriteBytes)) + } + + if io.SyscR > 0 { + m.Set(processSyscallReadTotal, float64(io.SyscR)) + } + + if io.SyscW > 0 { + m.Set(processSyscallWriteTotal, float64(io.SyscW)) + } +} + +func loadProcFSMetrics(ctx context.Context, p procfs.Proc, m MetricValues) { + stat, err := p.Stat() + if err != nil { + metricsLogIf(ctx, err) + } else { + loadProcStatMetrics(ctx, stat, m) + } + + io, err := p.IO() + if err != nil { + metricsLogIf(ctx, err) + } else { + loadProcIOMetrics(ctx, io, m) + } + + l, err := p.Limits() + if err != nil { + metricsLogIf(ctx, err) + } else { + if l.OpenFiles > 0 { + m.Set(processFileDescriptorLimitTotal, float64(l.OpenFiles)) + } + + if l.AddressSpace > 0 { + m.Set(processVirtualMemoryMaxBytes, float64(l.AddressSpace)) + } + } + + openFDs, err := p.FileDescriptorsLen() + if err != nil { + metricsLogIf(ctx, err) + } else if openFDs > 0 { + m.Set(processFileDescriptorOpenTotal, float64(openFDs)) + } +} + +// loadProcessMetrics - `MetricsLoaderFn` for process metrics +func loadProcessMetrics(ctx context.Context, m MetricValues, c *metricsCache) error { + m.Set(processGoRoutineTotal, float64(runtime.NumGoroutine())) + + if !globalBootTime.IsZero() { + m.Set(processUptimeSeconds, time.Since(globalBootTime).Seconds()) + } + + if runtime.GOOS != globalWindowsOSName && runtime.GOOS != globalMacOSName { + p, err := procfs.Self() + if err != nil { + metricsLogIf(ctx, err) + } else { + loadProcFSMetrics(ctx, p, m) + } + } + + if globalIsDistErasure && globalLockServer != nil { + st := globalLockServer.stats() + m.Set(processLocksReadTotal, float64(st.Reads)) + m.Set(processLocksWriteTotal, float64(st.Writes)) + } + return nil +} diff --git a/cmd/metrics-v3-types.go b/cmd/metrics-v3-types.go new file mode 100644 index 0000000000000..92004c961c87f --- /dev/null +++ b/cmd/metrics-v3-types.go @@ -0,0 +1,515 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "context" + "fmt" + "slices" + "strings" + "sync" + + "github.com/minio/minio-go/v7/pkg/set" + "github.com/minio/minio/internal/logger" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" +) + +type collectorPath string + +// metricPrefix converts a collector path to a metric name prefix. The path is +// converted to snake-case (by replaced '/' and '-' with '_') and prefixed with +// `minio_`. +func (cp collectorPath) metricPrefix() string { + s := strings.TrimPrefix(string(cp), SlashSeparator) + s = strings.ReplaceAll(s, SlashSeparator, "_") + s = strings.ReplaceAll(s, "-", "_") + return "minio_" + s +} + +// isDescendantOf returns true if it is a descendant of (or the same as) +// `ancestor`. +// +// For example: +// +// /a, /a/b, /a/b/c are all descendants of /a. +// /abc or /abd/a are not descendants of /ab. +func (cp collectorPath) isDescendantOf(arg string) bool { + descendant := string(cp) + if descendant == arg { + return true + } + if len(arg) >= len(descendant) { + return false + } + if !strings.HasSuffix(arg, SlashSeparator) { + arg += SlashSeparator + } + return strings.HasPrefix(descendant, arg) +} + +// MetricType - represents the type of a metric. +type MetricType int + +const ( + // CounterMT - represents a counter metric. + CounterMT MetricType = iota + // GaugeMT - represents a gauge metric. + GaugeMT + // HistogramMT - represents a histogram metric. + HistogramMT +) + +// rangeL - represents a range label. +const rangeL = "range" + +func (mt MetricType) String() string { + switch mt { + case CounterMT: + return "counter" + case GaugeMT: + return "gauge" + case HistogramMT: + return "histogram" + default: + return "*unknown*" + } +} + +func (mt MetricType) toProm() prometheus.ValueType { + switch mt { + case CounterMT: + return prometheus.CounterValue + case GaugeMT: + return prometheus.GaugeValue + case HistogramMT: + return prometheus.CounterValue + default: + panic(fmt.Sprintf("unknown metric type: %d", mt)) + } +} + +// MetricDescriptor - represents a metric descriptor. +type MetricDescriptor struct { + Name MetricName + Type MetricType + Help string + VariableLabels []string + + // managed values follow: + labelSet map[string]struct{} +} + +func (md *MetricDescriptor) getLabelSet() map[string]struct{} { + if md.labelSet != nil { + return md.labelSet + } + md.labelSet = make(map[string]struct{}, len(md.VariableLabels)) + for _, label := range md.VariableLabels { + md.labelSet[label] = struct{}{} + } + return md.labelSet +} + +func (md *MetricDescriptor) toPromName(namePrefix string) string { + return prometheus.BuildFQName(namePrefix, "", string(md.Name)) +} + +func (md *MetricDescriptor) toPromDesc(namePrefix string, extraLabels map[string]string) *prometheus.Desc { + return prometheus.NewDesc( + md.toPromName(namePrefix), + md.Help, + md.VariableLabels, extraLabels, + ) +} + +// NewCounterMD - creates a new counter metric descriptor. 
+func NewCounterMD(name MetricName, help string, labels ...string) MetricDescriptor { + return MetricDescriptor{ + Name: name, + Type: CounterMT, + Help: help, + VariableLabels: labels, + } +} + +// NewGaugeMD - creates a new gauge metric descriptor. +func NewGaugeMD(name MetricName, help string, labels ...string) MetricDescriptor { + return MetricDescriptor{ + Name: name, + Type: GaugeMT, + Help: help, + VariableLabels: labels, + } +} + +type metricValue struct { + Labels map[string]string + Value float64 +} + +// MetricValues - type to set metric values retrieved while loading metrics. A +// value of this type is passed to the `MetricsLoaderFn`. +type MetricValues struct { + values map[MetricName][]metricValue + descriptors map[MetricName]MetricDescriptor +} + +func newMetricValues(d map[MetricName]MetricDescriptor) MetricValues { + return MetricValues{ + values: make(map[MetricName][]metricValue, len(d)), + descriptors: d, + } +} + +// ToPromMetrics - converts the internal metric values to Prometheus +// adding the given name prefix. The extraLabels are added to each metric as +// constant labels. +func (m *MetricValues) ToPromMetrics(namePrefix string, extraLabels map[string]string, +) []prometheus.Metric { + metrics := make([]prometheus.Metric, 0, len(m.values)) + for metricName, mv := range m.values { + desc := m.descriptors[metricName] + promDesc := desc.toPromDesc(namePrefix, extraLabels) + for _, v := range mv { + // labelValues is in the same order as the variable labels in the + // descriptor. + labelValues := make([]string, 0, len(v.Labels)) + for _, k := range desc.VariableLabels { + labelValues = append(labelValues, v.Labels[k]) + } + metrics = append(metrics, + prometheus.MustNewConstMetric(promDesc, desc.Type.toProm(), v.Value, + labelValues...)) + } + } + return metrics +} + +// Set - sets a metric value along with any provided labels. It is used only +// with Gauge and Counter metrics. +// +// If the MetricName given here is not present in the `MetricsGroup`'s +// descriptors, this function panics. +// +// Panics if `labels` is not a list of ordered label name and label value pairs +// or if all labels for the metric are not provided. +func (m *MetricValues) Set(name MetricName, value float64, labels ...string) { + desc, ok := m.descriptors[name] + if !ok { + panic(fmt.Sprintf("metric has no description: %s", name)) + } + + if len(labels)%2 != 0 { + panic("labels must be a list of ordered key-value pairs") + } + + validLabels := desc.getLabelSet() + labelMap := make(map[string]string, len(labels)/2) + for i := 0; i < len(labels); i += 2 { + if _, ok := validLabels[labels[i]]; !ok { + panic(fmt.Sprintf("invalid label: %s (metric: %s)", labels[i], name)) + } + labelMap[labels[i]] = labels[i+1] + } + + if len(labels)/2 != len(validLabels) { + panic("not all labels were given values") + } + + v, ok := m.values[name] + if !ok { + v = make([]metricValue, 0, 1) + } + // If valid non zero value set the metrics + if value > 0 { + m.values[name] = append(v, metricValue{ + Labels: labelMap, + Value: value, + }) + } +} + +// SetHistogram - sets values for the given MetricName using the provided +// histogram. +// +// `filterByLabels` is a map of label names to list of allowed label values to +// filter by. Note that this filtering happens before any renaming of labels. +// +// `renameLabels` is a map of label names to rename. The keys are the original +// label names and the values are the new label names. +// +// `bucketFilter` is a list of bucket values to filter. 
If this is non-empty, +// only metrics for the given buckets are added. +// +// `extraLabels` are additional labels to add to each metric. They are ordered +// label name and value pairs. +func (m *MetricValues) SetHistogram(name MetricName, hist *prometheus.HistogramVec, + filterByLabels map[string]set.StringSet, renameLabels map[string]string, bucketFilter []string, + extraLabels ...string, +) { + if _, ok := m.descriptors[name]; !ok { + panic(fmt.Sprintf("metric has no description: %s", name)) + } + dummyDesc := MetricDescription{} + metricsV2 := getHistogramMetrics(hist, dummyDesc, false, false) +mainLoop: + for _, metric := range metricsV2 { + for label, allowedValues := range filterByLabels { + if !allowedValues.Contains(metric.VariableLabels[label]) { + continue mainLoop + } + } + + // If a bucket filter is provided, only add metrics for the given + // buckets. + if len(bucketFilter) > 0 && !slices.Contains(bucketFilter, metric.VariableLabels["bucket"]) { + continue + } + + labels := make([]string, 0, len(metric.VariableLabels)*2) + for k, v := range metric.VariableLabels { + if newLabel, ok := renameLabels[k]; ok { + labels = append(labels, newLabel, v) + } else { + labels = append(labels, k, v) + } + } + labels = append(labels, extraLabels...) + // If valid non zero value set the metrics + if metric.Value > 0 { + m.Set(name, metric.Value, labels...) + } + } +} + +// SetHistogramValues - sets values for the given MetricName using the provided map of +// range to value. +func SetHistogramValues[V uint64 | int64 | float64](m MetricValues, name MetricName, values map[string]V, labels ...string) { + for rng, val := range values { + m.Set(name, float64(val), append(labels, rangeL, rng)...) + } +} + +// MetricsLoaderFn - represents a function to load metrics from the +// metricsCache. +// +// Note that returning an error here will cause the Metrics handler to return a +// 500 Internal Server Error. +type MetricsLoaderFn func(context.Context, MetricValues, *metricsCache) error + +// JoinLoaders - joins multiple loaders into a single loader. The returned +// loader will call each of the given loaders in order. If any of the loaders +// return an error, the returned loader will return that error. +func JoinLoaders(loaders ...MetricsLoaderFn) MetricsLoaderFn { + return func(ctx context.Context, m MetricValues, c *metricsCache) error { + for _, loader := range loaders { + if err := loader(ctx, m, c); err != nil { + return err + } + } + return nil + } +} + +// BucketMetricsLoaderFn - represents a function to load metrics from the +// metricsCache and the system for a given list of buckets. +// +// Note that returning an error here will cause the Metrics handler to return a +// 500 Internal Server Error. +type BucketMetricsLoaderFn func(context.Context, MetricValues, *metricsCache, []string) error + +// JoinBucketLoaders - joins multiple bucket loaders into a single loader, +// similar to `JoinLoaders`. +func JoinBucketLoaders(loaders ...BucketMetricsLoaderFn) BucketMetricsLoaderFn { + return func(ctx context.Context, m MetricValues, c *metricsCache, b []string) error { + for _, loader := range loaders { + if err := loader(ctx, m, c, b); err != nil { + return err + } + } + return nil + } +} + +// MetricsGroup - represents a group of metrics. It includes a `MetricsLoaderFn` +// function that provides a way to load the metrics from the system. The metrics +// are cached and refreshed after a given timeout. 
+// +// For metrics with a `bucket` dimension, a list of buckets argument is required +// to collect the metrics. +// +// It implements the prometheus.Collector interface for metric groups without a +// bucket dimension. For metric groups with a bucket dimension, use the +// `GetBucketCollector` method to get a `BucketCollector` that implements the +// prometheus.Collector interface. +type MetricsGroup struct { + // Path (relative to the Metrics v3 base endpoint) at which this group of + // metrics is served. This value is converted into a metric name prefix + // using `.metricPrefix()` and is added to each metric returned. + CollectorPath collectorPath + // List of all metric descriptors that could be returned by the loader. + Descriptors []MetricDescriptor + // (Optional) Extra (constant) label KV pairs to be added to each metric in + // the group. + ExtraLabels map[string]string + + // Loader functions to load metrics. Only one of these will be set. Metrics + // returned by these functions must be present in the `Descriptors` list. + loader MetricsLoaderFn + bucketLoader BucketMetricsLoaderFn + + // Cache for all metrics groups. Set via `.SetCache` method. + cache *metricsCache + + // managed values follow: + + // map of metric descriptors by metric name. + descriptorMap map[MetricName]MetricDescriptor + + // For bucket metrics, the list of buckets is stored here. It is used in the + // Collect() call. This is protected by the `bucketsLock`. + bucketsLock sync.Mutex + buckets []string +} + +// NewMetricsGroup creates a new MetricsGroup. To create a metrics group for +// metrics with a `bucket` dimension (label), use `NewBucketMetricsGroup`. +// +// The `loader` function loads metrics from the cache and the system. +func NewMetricsGroup(path collectorPath, descriptors []MetricDescriptor, + loader MetricsLoaderFn, +) *MetricsGroup { + mg := &MetricsGroup{ + CollectorPath: path, + Descriptors: descriptors, + loader: loader, + } + mg.validate() + return mg +} + +// NewBucketMetricsGroup creates a new MetricsGroup for metrics with a `bucket` +// dimension (label). +// +// The `loader` function loads metrics from the cache and the system for a given +// list of buckets. +func NewBucketMetricsGroup(path collectorPath, descriptors []MetricDescriptor, + loader BucketMetricsLoaderFn, +) *MetricsGroup { + mg := &MetricsGroup{ + CollectorPath: path, + Descriptors: descriptors, + bucketLoader: loader, + } + mg.validate() + return mg +} + +// AddExtraLabels - adds extra (constant) label KV pairs to the metrics group. +// This is a helper to initialize the `ExtraLabels` field. The argument is a +// list of ordered label name and value pairs. +func (mg *MetricsGroup) AddExtraLabels(labels ...string) { + if len(labels)%2 != 0 { + panic("Labels must be an ordered list of name value pairs") + } + if mg.ExtraLabels == nil { + mg.ExtraLabels = make(map[string]string, len(labels)) + } + for i := 0; i < len(labels); i += 2 { + mg.ExtraLabels[labels[i]] = labels[i+1] + } +} + +// IsBucketMetricsGroup - returns true if the given MetricsGroup is a bucket +// metrics group. +func (mg *MetricsGroup) IsBucketMetricsGroup() bool { + return mg.bucketLoader != nil +} + +// Describe - implements prometheus.Collector interface. +func (mg *MetricsGroup) Describe(ch chan<- *prometheus.Desc) { + for _, desc := range mg.Descriptors { + ch <- desc.toPromDesc(mg.CollectorPath.metricPrefix(), mg.ExtraLabels) + } +} + +// Collect - implements prometheus.Collector interface. 
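+// It runs the group's loader (bucket-aware or not) on every scrape and turns
+// the collected values into constant Prometheus metrics via ToPromMetrics;
+// a loader error is treated as fatal through logger.CriticalIf.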
+func (mg *MetricsGroup) Collect(ch chan<- prometheus.Metric) { + metricValues := newMetricValues(mg.descriptorMap) + + var err error + if mg.IsBucketMetricsGroup() { + err = mg.bucketLoader(GlobalContext, metricValues, mg.cache, mg.buckets) + } else { + err = mg.loader(GlobalContext, metricValues, mg.cache) + } + + // There is no way to handle errors here, so we panic the current goroutine + // and the Metrics API handler returns a 500 HTTP status code. This should + // normally not happen, and usually indicates a bug. + logger.CriticalIf(GlobalContext, errors.Wrap(err, "failed to get metrics")) + + promMetrics := metricValues.ToPromMetrics(mg.CollectorPath.metricPrefix(), + mg.ExtraLabels) + for _, metric := range promMetrics { + ch <- metric + } +} + +// LockAndSetBuckets - locks the buckets and sets the given buckets. It returns +// a function to unlock the buckets. +func (mg *MetricsGroup) LockAndSetBuckets(buckets []string) func() { + mg.bucketsLock.Lock() + mg.buckets = buckets + return func() { + mg.bucketsLock.Unlock() + } +} + +// MetricFQN - returns the fully qualified name for the given metric name. +func (mg *MetricsGroup) MetricFQN(name MetricName) string { + v, ok := mg.descriptorMap[name] + if !ok { + // This should never happen. + return "" + } + return v.toPromName(mg.CollectorPath.metricPrefix()) +} + +func (mg *MetricsGroup) validate() { + if len(mg.Descriptors) == 0 { + panic("Descriptors must be set") + } + + // For bools A and B, A XOR B <=> A != B. + isExactlyOneSet := (mg.loader == nil) != (mg.bucketLoader == nil) + if !isExactlyOneSet { + panic("Exactly one Loader function must be set") + } + + mg.descriptorMap = make(map[MetricName]MetricDescriptor, len(mg.Descriptors)) + for _, desc := range mg.Descriptors { + mg.descriptorMap[desc.Name] = desc + } +} + +// SetCache is a helper to initialize MetricsGroup. It sets the cache object. +func (mg *MetricsGroup) SetCache(c *metricsCache) { + mg.cache = c +} diff --git a/cmd/metrics-v3.go b/cmd/metrics-v3.go new file mode 100644 index 0000000000000..93749258cb11a --- /dev/null +++ b/cmd/metrics-v3.go @@ -0,0 +1,487 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "slices" + "strings" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" +) + +// Collector paths. +// +// These are paths under the top-level /minio/metrics/v3 metrics endpoint. Each +// of these paths returns a set of V3 metrics. +// +// Per-bucket metrics endpoints always start with /bucket and the bucket name is +// appended to the path. e.g. 
if the collector path is /bucket/api, the endpoint +// for the bucket "mybucket" would be /minio/metrics/v3/bucket/api/mybucket +const ( + apiRequestsCollectorPath collectorPath = "/api/requests" + + bucketAPICollectorPath collectorPath = "/bucket/api" + bucketReplicationCollectorPath collectorPath = "/bucket/replication" + + systemNetworkInternodeCollectorPath collectorPath = "/system/network/internode" + systemDriveCollectorPath collectorPath = "/system/drive" + systemMemoryCollectorPath collectorPath = "/system/memory" + systemCPUCollectorPath collectorPath = "/system/cpu" + systemProcessCollectorPath collectorPath = "/system/process" + + debugGoCollectorPath collectorPath = "/debug/go" + + clusterHealthCollectorPath collectorPath = "/cluster/health" + clusterUsageObjectsCollectorPath collectorPath = "/cluster/usage/objects" + clusterUsageBucketsCollectorPath collectorPath = "/cluster/usage/buckets" + clusterErasureSetCollectorPath collectorPath = "/cluster/erasure-set" + clusterIAMCollectorPath collectorPath = "/cluster/iam" + clusterConfigCollectorPath collectorPath = "/cluster/config" + + ilmCollectorPath collectorPath = "/ilm" + auditCollectorPath collectorPath = "/audit" + loggerWebhookCollectorPath collectorPath = "/logger/webhook" + replicationCollectorPath collectorPath = "/replication" + notificationCollectorPath collectorPath = "/notification" + scannerCollectorPath collectorPath = "/scanner" +) + +const ( + clusterBasePath = "/cluster" +) + +type metricsV3Collection struct { + mgMap map[collectorPath]*MetricsGroup + bucketMGMap map[collectorPath]*MetricsGroup + + // Gatherers for non-bucket MetricsGroup's + mgGatherers map[collectorPath]prometheus.Gatherer + + collectorPaths []collectorPath +} + +func newMetricGroups(r *prometheus.Registry) *metricsV3Collection { + // Create all metric groups. 
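+ // Each group pairs a collector path with the descriptors it can emit and a
+ // loader; MetricValues.Set panics on any metric name that is missing from
+ // the descriptor list, so the two must stay in sync.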
+ apiRequestsMG := NewMetricsGroup(apiRequestsCollectorPath, + []MetricDescriptor{ + apiRejectedAuthTotalMD, + apiRejectedHeaderTotalMD, + apiRejectedTimestampTotalMD, + apiRejectedInvalidTotalMD, + + apiRequestsWaitingTotalMD, + apiRequestsIncomingTotalMD, + apiRequestsInFlightTotalMD, + apiRequestsTotalMD, + apiRequestsErrorsTotalMD, + apiRequests5xxErrorsTotalMD, + apiRequests4xxErrorsTotalMD, + apiRequestsCanceledTotalMD, + + apiRequestsTTFBSecondsDistributionMD, + + apiTrafficSentBytesMD, + apiTrafficRecvBytesMD, + }, + JoinLoaders(loadAPIRequestsHTTPMetrics, loadAPIRequestsTTFBMetrics, + loadAPIRequestsNetworkMetrics), + ) + + bucketAPIMG := NewBucketMetricsGroup(bucketAPICollectorPath, + []MetricDescriptor{ + bucketAPITrafficRecvBytesMD, + bucketAPITrafficSentBytesMD, + + bucketAPIRequestsInFlightMD, + bucketAPIRequestsTotalMD, + bucketAPIRequestsCanceledMD, + bucketAPIRequests4xxErrorsMD, + bucketAPIRequests5xxErrorsMD, + + bucketAPIRequestsTTFBSecondsDistributionMD, + }, + JoinBucketLoaders(loadBucketAPIHTTPMetrics, loadBucketAPITTFBMetrics), + ) + + bucketReplicationMG := NewBucketMetricsGroup(bucketReplicationCollectorPath, + []MetricDescriptor{ + bucketReplLastHrFailedBytesMD, + bucketReplLastHrFailedCountMD, + bucketReplLastMinFailedBytesMD, + bucketReplLastMinFailedCountMD, + bucketReplLatencyMsMD, + bucketReplProxiedDeleteTaggingRequestsTotalMD, + bucketReplProxiedGetRequestsFailuresMD, + bucketReplProxiedGetRequestsTotalMD, + bucketReplProxiedGetTaggingRequestsFailuresMD, + bucketReplProxiedGetTaggingRequestsTotalMD, + bucketReplProxiedHeadRequestsFailuresMD, + bucketReplProxiedHeadRequestsTotalMD, + bucketReplProxiedPutTaggingRequestsFailuresMD, + bucketReplProxiedPutTaggingRequestsTotalMD, + bucketReplSentBytesMD, + bucketReplSentCountMD, + bucketReplTotalFailedBytesMD, + bucketReplTotalFailedCountMD, + bucketReplProxiedDeleteTaggingRequestsFailuresMD, + }, + loadBucketReplicationMetrics, + ) + + systemNetworkInternodeMG := NewMetricsGroup(systemNetworkInternodeCollectorPath, + []MetricDescriptor{ + internodeErrorsTotalMD, + internodeDialedErrorsTotalMD, + internodeDialAvgTimeNanosMD, + internodeSentBytesTotalMD, + internodeRecvBytesTotalMD, + }, + loadNetworkInternodeMetrics, + ) + + systemMemoryMG := NewMetricsGroup(systemMemoryCollectorPath, + []MetricDescriptor{ + memTotalMD, + memUsedMD, + memFreeMD, + memAvailableMD, + memBuffersMD, + memCacheMD, + memSharedMD, + memUsedPercMD, + }, + loadMemoryMetrics, + ) + + systemCPUMG := NewMetricsGroup(systemCPUCollectorPath, + []MetricDescriptor{ + sysCPUAvgIdleMD, + sysCPUAvgIOWaitMD, + sysCPULoadMD, + sysCPULoadPercMD, + sysCPUNiceMD, + sysCPUStealMD, + sysCPUSystemMD, + sysCPUUserMD, + }, + loadCPUMetrics, + ) + + systemProcessMG := NewMetricsGroup(systemProcessCollectorPath, + []MetricDescriptor{ + processLocksReadTotalMD, + processLocksWriteTotalMD, + processCPUTotalSecondsMD, + processGoRoutineTotalMD, + processIORCharBytesMD, + processIOReadBytesMD, + processIOWCharBytesMD, + processIOWriteBytesMD, + processStarttimeSecondsMD, + processUptimeSecondsMD, + processFileDescriptorLimitTotalMD, + processFileDescriptorOpenTotalMD, + processSyscallReadTotalMD, + processSyscallWriteTotalMD, + processResidentMemoryBytesMD, + processVirtualMemoryBytesMD, + processVirtualMemoryMaxBytesMD, + }, + loadProcessMetrics, + ) + + systemDriveMG := NewMetricsGroup(systemDriveCollectorPath, + []MetricDescriptor{ + driveUsedBytesMD, + driveFreeBytesMD, + driveTotalBytesMD, + driveUsedInodesMD, + driveFreeInodesMD, + driveTotalInodesMD, + 
driveTimeoutErrorsMD, + driveIOErrorsMD, + driveAvailabilityErrorsMD, + driveWaitingIOMD, + driveAPILatencyMD, + driveHealthMD, + + driveOfflineCountMD, + driveOnlineCountMD, + driveCountMD, + + // iostat related + driveReadsPerSecMD, + driveReadsKBPerSecMD, + driveReadsAwaitMD, + driveWritesPerSecMD, + driveWritesKBPerSecMD, + driveWritesAwaitMD, + drivePercUtilMD, + }, + loadDriveMetrics, + ) + + clusterHealthMG := NewMetricsGroup(clusterHealthCollectorPath, + []MetricDescriptor{ + healthDrivesOfflineCountMD, + healthDrivesOnlineCountMD, + healthDrivesCountMD, + + healthNodesOfflineCountMD, + healthNodesOnlineCountMD, + + healthCapacityRawTotalBytesMD, + healthCapacityRawFreeBytesMD, + healthCapacityUsableTotalBytesMD, + healthCapacityUsableFreeBytesMD, + }, + JoinLoaders(loadClusterHealthDriveMetrics, + loadClusterHealthNodeMetrics, + loadClusterHealthCapacityMetrics), + ) + + clusterUsageObjectsMG := NewMetricsGroup(clusterUsageObjectsCollectorPath, + []MetricDescriptor{ + usageSinceLastUpdateSecondsMD, + usageTotalBytesMD, + usageObjectsCountMD, + usageVersionsCountMD, + usageDeleteMarkersCountMD, + usageBucketsCountMD, + usageObjectsDistributionMD, + usageVersionsDistributionMD, + }, + loadClusterUsageObjectMetrics, + ) + + clusterUsageBucketsMG := NewMetricsGroup(clusterUsageBucketsCollectorPath, + []MetricDescriptor{ + usageSinceLastUpdateSecondsMD, + usageBucketTotalBytesMD, + usageBucketObjectsTotalMD, + usageBucketVersionsCountMD, + usageBucketDeleteMarkersCountMD, + usageBucketQuotaTotalBytesMD, + usageBucketObjectSizeDistributionMD, + usageBucketObjectVersionCountDistributionMD, + }, + loadClusterUsageBucketMetrics, + ) + + clusterErasureSetMG := NewMetricsGroup(clusterErasureSetCollectorPath, + []MetricDescriptor{ + erasureSetOverallWriteQuorumMD, + erasureSetOverallHealthMD, + erasureSetReadQuorumMD, + erasureSetWriteQuorumMD, + erasureSetOnlineDrivesCountMD, + erasureSetHealingDrivesCountMD, + erasureSetHealthMD, + erasureSetReadToleranceMD, + erasureSetWriteToleranceMD, + erasureSetReadHealthMD, + erasureSetWriteHealthMD, + }, + loadClusterErasureSetMetrics, + ) + + clusterNotificationMG := NewMetricsGroup(notificationCollectorPath, + []MetricDescriptor{ + notificationCurrentSendInProgressMD, + notificationEventsErrorsTotalMD, + notificationEventsSentTotalMD, + notificationEventsSkippedTotalMD, + }, + loadClusterNotificationMetrics, + ) + + clusterIAMMG := NewMetricsGroup(clusterIAMCollectorPath, + []MetricDescriptor{ + lastSyncDurationMillisMD, + pluginAuthnServiceFailedRequestsMinuteMD, + pluginAuthnServiceLastFailSecondsMD, + pluginAuthnServiceLastSuccSecondsMD, + pluginAuthnServiceSuccAvgRttMsMinuteMD, + pluginAuthnServiceSuccMaxRttMsMinuteMD, + pluginAuthnServiceTotalRequestsMinuteMD, + sinceLastSyncMillisMD, + syncFailuresMD, + syncSuccessesMD, + }, + loadClusterIAMMetrics, + ) + + clusterReplicationMG := NewMetricsGroup(replicationCollectorPath, + []MetricDescriptor{ + replicationAverageActiveWorkersMD, + replicationAverageQueuedBytesMD, + replicationAverageQueuedCountMD, + replicationAverageDataTransferRateMD, + replicationCurrentActiveWorkersMD, + replicationCurrentDataTransferRateMD, + replicationLastMinuteQueuedBytesMD, + replicationLastMinuteQueuedCountMD, + replicationMaxActiveWorkersMD, + replicationMaxQueuedBytesMD, + replicationMaxQueuedCountMD, + replicationMaxDataTransferRateMD, + replicationRecentBacklogCountMD, + }, + loadClusterReplicationMetrics, + ) + + clusterConfigMG := NewMetricsGroup(clusterConfigCollectorPath, + []MetricDescriptor{ + 
configRRSParityMD, + configStandardParityMD, + }, + loadClusterConfigMetrics, + ) + + scannerMG := NewMetricsGroup(scannerCollectorPath, + []MetricDescriptor{ + scannerBucketScansFinishedMD, + scannerBucketScansStartedMD, + scannerDirectoriesScannedMD, + scannerObjectsScannedMD, + scannerVersionsScannedMD, + scannerLastActivitySecondsMD, + }, + loadClusterScannerMetrics, + ) + + loggerWebhookMG := NewMetricsGroup(loggerWebhookCollectorPath, + []MetricDescriptor{ + webhookFailedMessagesMD, + webhookQueueLengthMD, + webhookTotalMessagesMD, + }, + loadLoggerWebhookMetrics, + ) + + auditMG := NewMetricsGroup(auditCollectorPath, + []MetricDescriptor{ + auditFailedMessagesMD, + auditTargetQueueLengthMD, + auditTotalMessagesMD, + }, + loadAuditMetrics, + ) + + ilmMG := NewMetricsGroup(ilmCollectorPath, + []MetricDescriptor{ + ilmExpiryPendingTasksMD, + ilmTransitionActiveTasksMD, + ilmTransitionPendingTasksMD, + ilmTransitionMissedImmediateTasksMD, + ilmVersionsScannedMD, + }, + loadILMMetrics, + ) + + allMetricGroups := []*MetricsGroup{ + apiRequestsMG, + bucketAPIMG, + bucketReplicationMG, + + systemNetworkInternodeMG, + systemDriveMG, + systemMemoryMG, + systemCPUMG, + systemProcessMG, + + clusterHealthMG, + clusterUsageObjectsMG, + clusterUsageBucketsMG, + clusterErasureSetMG, + clusterNotificationMG, + clusterIAMMG, + clusterReplicationMG, + clusterConfigMG, + + ilmMG, + scannerMG, + auditMG, + loggerWebhookMG, + } + + // Bucket metrics are special, they always include the bucket label. These + // metrics required a list of buckets to be passed to the loader, and the list + // of buckets is not known until the request is made. So we keep a separate + // map for bucket metrics and handle them specially. + + // Add the serverName and poolIndex labels to all non-cluster metrics. + // + // Also create metric group maps and set the cache. + metricsCache := newMetricsCache() + mgMap := make(map[collectorPath]*MetricsGroup) + bucketMGMap := make(map[collectorPath]*MetricsGroup) + for _, mg := range allMetricGroups { + if !strings.HasPrefix(string(mg.CollectorPath), clusterBasePath) { + mg.AddExtraLabels( + serverName, globalLocalNodeName, + // poolIndex, strconv.Itoa(globalLocalPoolIdx), + ) + } + mg.SetCache(metricsCache) + if mg.IsBucketMetricsGroup() { + bucketMGMap[mg.CollectorPath] = mg + } else { + mgMap[mg.CollectorPath] = mg + } + } + + // Prepare to register the collectors. Other than `MetricGroup` collectors, + // we also have standard collectors like `GoCollector`. + + // Create all Non-`MetricGroup` collectors here. + collectors := map[collectorPath]prometheus.Collector{ + debugGoCollectorPath: collectors.NewGoCollector(), + } + + // Add all `MetricGroup` collectors to the map. + for _, mg := range allMetricGroups { + collectors[mg.CollectorPath] = mg + } + + // Helper function to register a collector and return a gatherer for it. + mustRegister := func(c ...prometheus.Collector) prometheus.Gatherer { + subRegistry := prometheus.NewRegistry() + for _, col := range c { + subRegistry.MustRegister(col) + } + r.MustRegister(subRegistry) + return subRegistry + } + + // Register all collectors and create gatherers for them. 
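+ // Each collector is registered on its own sub-registry, which is in turn
+ // registered on the main registry r; the sub-registry doubles as the
+ // per-path Gatherer stored in mgGatherers.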
+ gatherers := make(map[collectorPath]prometheus.Gatherer, len(collectors)) + collectorPaths := make([]collectorPath, 0, len(collectors)) + for path, collector := range collectors { + gatherers[path] = mustRegister(collector) + collectorPaths = append(collectorPaths, path) + } + slices.Sort(collectorPaths) + return &metricsV3Collection{ + mgMap: mgMap, + bucketMGMap: bucketMGMap, + mgGatherers: gatherers, + collectorPaths: collectorPaths, + } +} diff --git a/cmd/metrics.go b/cmd/metrics.go index a198593b4d278..c5dc3cdfdcc93 100644 --- a/cmd/metrics.go +++ b/cmd/metrics.go @@ -19,13 +19,12 @@ package cmd import ( "net/http" - "strings" "time" "github.com/minio/minio/internal/auth" "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/mcontext" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/expfmt" ) @@ -156,9 +155,9 @@ func healingMetricsPrometheus(ch chan<- prometheus.Metric) { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(healMetricsNamespace, "objects", "scanned"), - "Objects scanned in current self healing run", + "Objects scanned since uptime", []string{"type"}, nil), - prometheus.GaugeValue, + prometheus.CounterValue, float64(v), string(k), ) } @@ -166,23 +165,20 @@ func healingMetricsPrometheus(ch chan<- prometheus.Metric) { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(healMetricsNamespace, "objects", "healed"), - "Objects healed in current self healing run", + "Objects healed since uptime", []string{"type"}, nil), - prometheus.GaugeValue, + prometheus.CounterValue, float64(v), string(k), ) } - for k, v := range bgSeq.gethealFailedItemsMap() { - // healFailedItemsMap stores the endpoint and volume state separated by comma, - // split the fields and pass to channel at correct index - s := strings.Split(k, ",") + for k, v := range bgSeq.getHealFailedItemsMap() { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(healMetricsNamespace, "objects", "heal_failed"), - "Objects for which healing failed in current self healing run", - []string{"mount_path", "volume_status"}, nil), - prometheus.GaugeValue, - float64(v), s[0], s[1], + "Objects for which healing failed since uptime", + []string{"type"}, nil), + prometheus.CounterValue, + float64(v), string(k), ) } } @@ -304,7 +300,7 @@ func bucketUsageMetricsPrometheus(ch chan<- prometheus.Metric) { } for bucket, usageInfo := range dataUsageInfo.BucketsUsage { - stat := globalReplicationStats.getLatestReplicationStats(bucket) + stat := globalReplicationStats.Load().getLatestReplicationStats(bucket) // Total space used by bucket ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( @@ -390,7 +386,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(minioNamespace, "capacity_raw", "total"), - "Total capacity online in the cluster", + "Total capacity online in current MinIO server instance", nil, nil), prometheus.GaugeValue, float64(GetTotalCapacity(server.Disks)), @@ -400,7 +396,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(minioNamespace, "capacity_raw_free", "total"), - "Total free capacity online in the cluster", + "Total free capacity online in current MinIO server instance", nil, nil), prometheus.GaugeValue, 
float64(GetTotalCapacityFree(server.Disks)), @@ -412,7 +408,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(minioNamespace, "capacity_usable", "total"), - "Total usable capacity online in the cluster", + "Total usable capacity online in current MinIO server instance", nil, nil), prometheus.GaugeValue, float64(GetTotalUsableCapacity(server.Disks, sinfo)), @@ -422,7 +418,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) { ch <- prometheus.MustNewConstMetric( prometheus.NewDesc( prometheus.BuildFQName(minioNamespace, "capacity_usable_free", "total"), - "Total free usable capacity online in the cluster", + "Total free usable capacity online in current MinIO server instance", nil, nil), prometheus.GaugeValue, float64(GetTotalUsableCapacityFree(server.Disks, sinfo)), diff --git a/cmd/mrf.go b/cmd/mrf.go index bd82863e5e083..4d002c27aa088 100644 --- a/cmd/mrf.go +++ b/cmd/mrf.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2021 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. // // This file is part of MinIO Object Storage stack // @@ -15,49 +15,201 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . +//go:generate msgp -file=$GOFILE + package cmd import ( "context" + "encoding/binary" + "errors" + "fmt" + "io" + "sync" + "sync/atomic" "time" + "github.com/google/uuid" "github.com/minio/madmin-go/v3" + "github.com/minio/pkg/v3/wildcard" + "github.com/tinylib/msgp/msgp" ) const ( mrfOpsQueueSize = 100000 ) -// partialOperation is a successful upload/delete of an object +const ( + healDir = ".heal" + healMRFDir = bucketMetaPrefix + SlashSeparator + healDir + SlashSeparator + "mrf" + healMRFMetaFormat = 1 + healMRFMetaVersionV1 = 1 +) + +// PartialOperation is a successful upload/delete of an object // but not written in all disks (having quorum) -type partialOperation struct { - bucket string - object string - versionID string - allVersions bool - setIndex, poolIndex int - queued time.Time - scanMode madmin.HealScanMode +type PartialOperation struct { + Bucket string + Object string + VersionID string + Versions []byte + SetIndex, PoolIndex int + Queued time.Time + BitrotScan bool } // mrfState sncapsulates all the information // related to the global background MRF. type mrfState struct { - opCh chan partialOperation + opCh chan PartialOperation + + closed int32 + closing int32 + wg sync.WaitGroup +} + +func newMRFState() mrfState { + return mrfState{ + opCh: make(chan PartialOperation, mrfOpsQueueSize), + } } // Add a partial S3 operation (put/delete) when one or more disks are offline. -func (m *mrfState) addPartialOp(op partialOperation) { +func (m *mrfState) addPartialOp(op PartialOperation) { if m == nil { return } + if atomic.LoadInt32(&m.closed) == 1 { + return + } + + m.wg.Add(1) + defer m.wg.Done() + + if atomic.LoadInt32(&m.closing) == 1 { + return + } + select { case m.opCh <- op: default: } } +// Do not accept new MRF operations anymore and start to save +// the current heal status in one available disk +func (m *mrfState) shutdown() { + atomic.StoreInt32(&m.closing, 1) + m.wg.Wait() + close(m.opCh) + atomic.StoreInt32(&m.closed, 1) + + if len(m.opCh) > 0 { + healingLogEvent(context.Background(), "Saving MRF healing data (%d entries)", len(m.opCh)) + } + + newReader := func() io.ReadCloser { + r, w := io.Pipe() + go func() { + // Initialize MRF meta header. 
+ var data [4]byte + binary.LittleEndian.PutUint16(data[0:2], healMRFMetaFormat) + binary.LittleEndian.PutUint16(data[2:4], healMRFMetaVersionV1) + mw := msgp.NewWriter(w) + n, err := mw.Write(data[:]) + if err != nil { + w.CloseWithError(err) + return + } + if n != len(data) { + w.CloseWithError(io.ErrShortWrite) + return + } + for item := range m.opCh { + err = item.EncodeMsg(mw) + if err != nil { + break + } + } + mw.Flush() + w.CloseWithError(err) + }() + return r + } + + globalLocalDrivesMu.RLock() + localDrives := cloneDrives(globalLocalDrivesMap) + globalLocalDrivesMu.RUnlock() + + for _, localDrive := range localDrives { + r := newReader() + err := localDrive.CreateFile(context.Background(), "", minioMetaBucket, pathJoin(healMRFDir, "list.bin"), -1, r) + r.Close() + if err == nil { + break + } + } +} + +func (m *mrfState) startMRFPersistence() { + loadMRF := func(rc io.ReadCloser, opCh chan PartialOperation) error { + defer rc.Close() + var data [4]byte + n, err := rc.Read(data[:]) + if err != nil { + return err + } + if n != len(data) { + return errors.New("heal mrf: no data") + } + // Read resync meta header + switch binary.LittleEndian.Uint16(data[0:2]) { + case healMRFMetaFormat: + default: + return fmt.Errorf("heal mrf: unknown format: %d", binary.LittleEndian.Uint16(data[0:2])) + } + switch binary.LittleEndian.Uint16(data[2:4]) { + case healMRFMetaVersionV1: + default: + return fmt.Errorf("heal mrf: unknown version: %d", binary.LittleEndian.Uint16(data[2:4])) + } + + mr := msgp.NewReader(rc) + for { + op := PartialOperation{} + err = op.DecodeMsg(mr) + if err != nil { + break + } + opCh <- op + } + + return nil + } + + globalLocalDrivesMu.RLock() + localDrives := cloneDrives(globalLocalDrivesMap) + globalLocalDrivesMu.RUnlock() + + for _, localDrive := range localDrives { + if localDrive == nil { + continue + } + rc, err := localDrive.ReadFileStream(context.Background(), minioMetaBucket, pathJoin(healMRFDir, "list.bin"), 0, -1) + if err != nil { + continue + } + err = loadMRF(rc, m.opCh) + if err != nil { + continue + } + // finally delete the file after processing mrf entries + localDrive.Delete(GlobalContext, minioMetaBucket, pathJoin(healMRFDir, "list.bin"), DeleteOptions{}) + break + } +} + var healSleeper = newDynamicSleeper(5, time.Second, false) // healRoutine listens to new disks reconnection events and @@ -73,8 +225,27 @@ func (m *mrfState) healRoutine(z *erasureServerPools) { return } + // We might land at .metacache, .trash, .multipart + // no need to heal them skip, only when bucket + // is '.minio.sys' + if u.Bucket == minioMetaBucket { + // No MRF needed for temporary objects + if wildcard.Match("buckets/*/.metacache/*", u.Object) { + continue + } + if wildcard.Match("tmp/*", u.Object) { + continue + } + if wildcard.Match("multipart/*", u.Object) { + continue + } + if wildcard.Match("tmp-old/*", u.Object) { + continue + } + } + now := time.Now() - if now.Sub(u.queued) < time.Second { + if now.Sub(u.Queued) < time.Second { // let recently failed networks to reconnect // making MRF wait for 1s before retrying, // i.e 4 reconnect attempts. 
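The hunk that follows heals each version packed into PartialOperation.Versions, treating the field as raw 16-byte UUIDs laid end to end (len(Versions)/16 entries). A minimal sketch of that packing and unpacking, assuming github.com/google/uuid and Go 1.20+ slice-to-array conversion; packVersionIDs and unpackVersionIDs are illustrative names, not part of the patch:

package main

import (
	"fmt"

	"github.com/google/uuid"
)

// packVersionIDs concatenates version IDs as raw 16-byte UUIDs, the layout
// the heal routine below expects to find in PartialOperation.Versions.
func packVersionIDs(ids []string) ([]byte, error) {
	out := make([]byte, 0, len(ids)*16)
	for _, id := range ids {
		u, err := uuid.Parse(id)
		if err != nil {
			return nil, err
		}
		out = append(out, u[:]...)
	}
	return out, nil
}

// unpackVersionIDs mirrors the healRoutine loop: len(b)/16 entries, each
// converted back to its canonical string form for healObject.
func unpackVersionIDs(b []byte) []string {
	n := len(b) / 16
	ids := make([]string, 0, n)
	for i := 0; i < n; i++ {
		ids = append(ids, uuid.UUID(b[16*i:16*i+16]).String())
	}
	return ids
}

func main() {
	packed, _ := packVersionIDs([]string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"6ba7b811-9dad-11d1-80b4-00c04fd430c8",
	})
	fmt.Println(unpackVersionIDs(packed))
}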
@@ -85,16 +256,22 @@ func (m *mrfState) healRoutine(z *erasureServerPools) { wait := healSleeper.Timer(context.Background()) scan := madmin.HealNormalScan - if u.scanMode != 0 { - scan = u.scanMode + if u.BitrotScan { + scan = madmin.HealDeepScan } - if u.object == "" { - healBucket(u.bucket, scan) + + if u.Object == "" { + healBucket(u.Bucket, scan) } else { - if u.allVersions { - z.serverPools[u.poolIndex].sets[u.setIndex].listAndHeal(u.bucket, u.object, u.scanMode, healObjectVersionsDisparity) + if len(u.Versions) > 0 { + vers := len(u.Versions) / 16 + if vers > 0 { + for i := range vers { + healObject(u.Bucket, u.Object, uuid.UUID(u.Versions[16*i:]).String(), scan) + } + } } else { - healObject(u.bucket, u.object, u.versionID, scan) + healObject(u.Bucket, u.Object, u.VersionID, scan) } } diff --git a/cmd/mrf_gen.go b/cmd/mrf_gen.go new file mode 100644 index 0000000000000..8c0467b521dd4 --- /dev/null +++ b/cmd/mrf_gen.go @@ -0,0 +1,285 @@ +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +package cmd + +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *PartialOperation) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Bucket": + z.Bucket, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Bucket") + return + } + case "Object": + z.Object, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Object") + return + } + case "VersionID": + z.VersionID, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + case "Versions": + z.Versions, err = dc.ReadBytes(z.Versions) + if err != nil { + err = msgp.WrapError(err, "Versions") + return + } + case "SetIndex": + z.SetIndex, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "SetIndex") + return + } + case "PoolIndex": + z.PoolIndex, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "PoolIndex") + return + } + case "Queued": + z.Queued, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "Queued") + return + } + case "BitrotScan": + z.BitrotScan, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "BitrotScan") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *PartialOperation) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 8 + // write "Bucket" + err = en.Append(0x88, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74) + if err != nil { + return + } + err = en.WriteString(z.Bucket) + if err != nil { + err = msgp.WrapError(err, "Bucket") + return + } + // write "Object" + err = en.Append(0xa6, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74) + if err != nil { + return + } + err = en.WriteString(z.Object) + if err != nil { + err = msgp.WrapError(err, "Object") + return + } + // write "VersionID" + err = en.Append(0xa9, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x44) + if err != nil { + return + } + err = en.WriteString(z.VersionID) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + // write "Versions" + err = en.Append(0xa8, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73) + if err != nil { + return + } + err = 
en.WriteBytes(z.Versions) + if err != nil { + err = msgp.WrapError(err, "Versions") + return + } + // write "SetIndex" + err = en.Append(0xa8, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78) + if err != nil { + return + } + err = en.WriteInt(z.SetIndex) + if err != nil { + err = msgp.WrapError(err, "SetIndex") + return + } + // write "PoolIndex" + err = en.Append(0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78) + if err != nil { + return + } + err = en.WriteInt(z.PoolIndex) + if err != nil { + err = msgp.WrapError(err, "PoolIndex") + return + } + // write "Queued" + err = en.Append(0xa6, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64) + if err != nil { + return + } + err = en.WriteTime(z.Queued) + if err != nil { + err = msgp.WrapError(err, "Queued") + return + } + // write "BitrotScan" + err = en.Append(0xaa, 0x42, 0x69, 0x74, 0x72, 0x6f, 0x74, 0x53, 0x63, 0x61, 0x6e) + if err != nil { + return + } + err = en.WriteBool(z.BitrotScan) + if err != nil { + err = msgp.WrapError(err, "BitrotScan") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *PartialOperation) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 8 + // string "Bucket" + o = append(o, 0x88, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74) + o = msgp.AppendString(o, z.Bucket) + // string "Object" + o = append(o, 0xa6, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74) + o = msgp.AppendString(o, z.Object) + // string "VersionID" + o = append(o, 0xa9, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x44) + o = msgp.AppendString(o, z.VersionID) + // string "Versions" + o = append(o, 0xa8, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73) + o = msgp.AppendBytes(o, z.Versions) + // string "SetIndex" + o = append(o, 0xa8, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x64, 0x65, 0x78) + o = msgp.AppendInt(o, z.SetIndex) + // string "PoolIndex" + o = append(o, 0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78) + o = msgp.AppendInt(o, z.PoolIndex) + // string "Queued" + o = append(o, 0xa6, 0x51, 0x75, 0x65, 0x75, 0x65, 0x64) + o = msgp.AppendTime(o, z.Queued) + // string "BitrotScan" + o = append(o, 0xaa, 0x42, 0x69, 0x74, 0x72, 0x6f, 0x74, 0x53, 0x63, 0x61, 0x6e) + o = msgp.AppendBool(o, z.BitrotScan) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *PartialOperation) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Bucket": + z.Bucket, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Bucket") + return + } + case "Object": + z.Object, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Object") + return + } + case "VersionID": + z.VersionID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return + } + case "Versions": + z.Versions, bts, err = msgp.ReadBytesBytes(bts, z.Versions) + if err != nil { + err = msgp.WrapError(err, "Versions") + return + } + case "SetIndex": + z.SetIndex, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SetIndex") + return + } + case "PoolIndex": + z.PoolIndex, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PoolIndex") + return + } + 
case "Queued": + z.Queued, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Queued") + return + } + case "BitrotScan": + z.BitrotScan, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "BitrotScan") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *PartialOperation) Msgsize() (s int) { + s = 1 + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Object) + 10 + msgp.StringPrefixSize + len(z.VersionID) + 9 + msgp.BytesPrefixSize + len(z.Versions) + 9 + msgp.IntSize + 10 + msgp.IntSize + 7 + msgp.TimeSize + 11 + msgp.BoolSize + return +} diff --git a/cmd/mrf_gen_test.go b/cmd/mrf_gen_test.go new file mode 100644 index 0000000000000..b8e039f8ab4f6 --- /dev/null +++ b/cmd/mrf_gen_test.go @@ -0,0 +1,123 @@ +// Code generated by github.com/tinylib/msgp DO NOT EDIT. + +package cmd + +import ( + "bytes" + "testing" + + "github.com/tinylib/msgp/msgp" +) + +func TestMarshalUnmarshalPartialOperation(t *testing.T) { + v := PartialOperation{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgPartialOperation(b *testing.B) { + v := PartialOperation{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgPartialOperation(b *testing.B) { + v := PartialOperation{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalPartialOperation(b *testing.B) { + v := PartialOperation{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodePartialOperation(t *testing.T) { + v := PartialOperation{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodePartialOperation Msgsize() is inaccurate") + } + + vn := PartialOperation{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodePartialOperation(b *testing.B) { + v := PartialOperation{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodePartialOperation(b *testing.B) { + v := PartialOperation{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} diff --git 
a/cmd/namespace-lock.go b/cmd/namespace-lock.go index 35235a4571cc7..0cfe44dea8399 100644 --- a/cmd/namespace-lock.go +++ b/cmd/namespace-lock.go @@ -126,7 +126,7 @@ func (n *nsLockMap) lock(ctx context.Context, volume string, path string, lockSo n.lockMapMutex.Unlock() } - return + return locked } // Unlock the namespace resource. @@ -185,7 +185,7 @@ func (di *distLockInstance) Unlock(lc LockContext) { if lc.cancel != nil { lc.cancel() } - di.rwMutex.Unlock(lc.ctx) + di.rwMutex.Unlock(context.Background()) } // RLock - block until read lock is taken or timeout has occurred. @@ -229,6 +229,7 @@ type localLockInstance struct { // path. The returned lockInstance object encapsulates the nsLockMap, // volume, path and operation ID. func (n *nsLockMap) NewNSLock(lockers func() ([]dsync.NetLocker, string), volume string, paths ...string) RWLocker { + sort.Strings(paths) opsID := mustGetUUID() if n.isDistErasure { drwmutex := dsync.NewDRWMutex(&dsync.Dsync{ @@ -237,7 +238,6 @@ func (n *nsLockMap) NewNSLock(lockers func() ([]dsync.NetLocker, string), volume }, pathsJoinPrefix(volume, paths...)...) return &distLockInstance{drwmutex, opsID} } - sort.Strings(paths) return &localLockInstance{n, volume, paths, opsID} } diff --git a/cmd/namespace-lock_test.go b/cmd/namespace-lock_test.go index 90499951bf34f..3f44e5f9a677d 100644 --- a/cmd/namespace-lock_test.go +++ b/cmd/namespace-lock_test.go @@ -18,7 +18,6 @@ package cmd import ( - "context" "runtime" "testing" "time" @@ -33,8 +32,8 @@ import ( func TestGetSource(t *testing.T) { currentSource := func() string { return getSource(2) } gotSource := currentSource() - // Hard coded line number, 35, in the "expectedSource" value - expectedSource := "[namespace-lock_test.go:35:TestGetSource()]" + // Hard coded line number, 34, in the "expectedSource" value + expectedSource := "[namespace-lock_test.go:34:TestGetSource()]" if gotSource != expectedSource { t.Errorf("expected : %s, got : %s", expectedSource, gotSource) } @@ -44,9 +43,9 @@ func TestGetSource(t *testing.T) { func TestNSLockRace(t *testing.T) { t.Skip("long test skip it") - ctx := context.Background() + ctx := t.Context() - for i := 0; i < 10000; i++ { + for i := range 10000 { nsLk := newNSLock(false) // lk1; ref=1 diff --git a/cmd/naughty-disk_test.go b/cmd/naughty-disk_test.go index e36ae82cea937..3316225341a93 100644 --- a/cmd/naughty-disk_test.go +++ b/cmd/naughty-disk_test.go @@ -102,16 +102,10 @@ func (d *naughtyDisk) GetDiskLoc() (poolIdx, setIdx, diskIdx int) { return -1, -1, -1 } -func (d *naughtyDisk) SetDiskLoc(poolIdx, setIdx, diskIdx int) {} - func (d *naughtyDisk) GetDiskID() (string, error) { return d.disk.GetDiskID() } -func (d *naughtyDisk) SetFormatData(b []byte) { - d.disk.SetFormatData(b) -} - func (d *naughtyDisk) SetDiskID(id string) { d.disk.SetDiskID(id) } @@ -207,13 +201,27 @@ func (d *naughtyDisk) AppendFile(ctx context.Context, volume string, path string return d.disk.AppendFile(ctx, volume, path, buf) } -func (d *naughtyDisk) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string, opts RenameOptions) (uint64, error) { +func (d *naughtyDisk) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string, opts RenameOptions) (RenameDataResp, error) { if err := d.calcError(); err != nil { - return 0, err + return RenameDataResp{}, err } return d.disk.RenameData(ctx, srcVolume, srcPath, fi, dstVolume, dstPath, opts) } +func (d *naughtyDisk) RenamePart(ctx context.Context, srcVolume, srcPath, 
dstVolume, dstPath string, meta []byte, skipParent string) error { + if err := d.calcError(); err != nil { + return err + } + return d.disk.RenamePart(ctx, srcVolume, srcPath, dstVolume, dstPath, meta, skipParent) +} + +func (d *naughtyDisk) ReadParts(ctx context.Context, bucket string, partMetaPaths ...string) ([]*ObjectPartInfo, error) { + if err := d.calcError(); err != nil { + return nil, err + } + return d.disk.ReadParts(ctx, bucket, partMetaPaths...) +} + func (d *naughtyDisk) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) error { if err := d.calcError(); err != nil { return err @@ -221,13 +229,20 @@ func (d *naughtyDisk) RenameFile(ctx context.Context, srcVolume, srcPath, dstVol return d.disk.RenameFile(ctx, srcVolume, srcPath, dstVolume, dstPath) } -func (d *naughtyDisk) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) (err error) { +func (d *naughtyDisk) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) (*CheckPartsResp, error) { if err := d.calcError(); err != nil { - return err + return nil, err } return d.disk.CheckParts(ctx, volume, path, fi) } +func (d *naughtyDisk) DeleteBulk(ctx context.Context, volume string, paths ...string) (err error) { + if err := d.calcError(); err != nil { + return err + } + return d.disk.DeleteBulk(ctx, volume, paths...) +} + func (d *naughtyDisk) Delete(ctx context.Context, volume string, path string, deleteOpts DeleteOptions) (err error) { if err := d.calcError(); err != nil { return err @@ -295,9 +310,9 @@ func (d *naughtyDisk) ReadXL(ctx context.Context, volume string, path string, re return d.disk.ReadXL(ctx, volume, path, readData) } -func (d *naughtyDisk) VerifyFile(ctx context.Context, volume, path string, fi FileInfo) error { +func (d *naughtyDisk) VerifyFile(ctx context.Context, volume, path string, fi FileInfo) (*CheckPartsResp, error) { if err := d.calcError(); err != nil { - return err + return nil, err } return d.disk.VerifyFile(ctx, volume, path, fi) } diff --git a/cmd/net.go b/cmd/net.go index e468532d27133..f0462851bd764 100644 --- a/cmd/net.go +++ b/cmd/net.go @@ -19,7 +19,6 @@ package cmd import ( "errors" - "fmt" "net" "net/url" "runtime" @@ -29,11 +28,19 @@ import ( "github.com/minio/minio-go/v7/pkg/set" "github.com/minio/minio/internal/config" "github.com/minio/minio/internal/logger" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" ) -// IPv4 addresses of local host. -var localIP4 = mustGetLocalIP4() +var ( + // IPv4 addresses of localhost. + localIP4 = mustGetLocalIP4() + + // IPv6 addresses of localhost. + localIP6 = mustGetLocalIP6() + + // List of all local loopback addresses. + localLoopbacks = mustGetLocalLoopbacks() +) // mustSplitHostPort is a wrapper to net.SplitHostPort() where error is assumed to be a fatal. func mustSplitHostPort(hostPort string) (host, port string) { @@ -74,6 +81,16 @@ func mustGetLocalIPs() (ipList []net.IP) { return ipList } +func mustGetLocalLoopbacks() (ipList set.StringSet) { + ipList = set.NewStringSet() + for _, ip := range mustGetLocalIPs() { + if ip != nil && ip.IsLoopback() { + ipList.Add(ip.String()) + } + } + return ipList +} + // mustGetLocalIP4 returns IPv4 addresses of localhost. It panics on error. func mustGetLocalIP4() (ipList set.StringSet) { ipList = set.NewStringSet() @@ -82,7 +99,7 @@ func mustGetLocalIP4() (ipList set.StringSet) { ipList.Add(ip.String()) } } - return + return ipList } // mustGetLocalIP6 returns IPv6 addresses of localhost. It panics on error. 
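mustGetLocalLoopbacks above caches every local loopback address in a set.StringSet. A tiny sketch of how such a set is used for membership checks, restricted to operations that already appear in this file (NewStringSet, Add, Contains); the sample addresses are illustrative:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/set"
)

func main() {
	// Build a set of loopback addresses, mirroring mustGetLocalLoopbacks.
	loopbacks := set.NewStringSet()
	for _, ip := range []string{"127.0.0.1", "::1"} {
		loopbacks.Add(ip)
	}

	// Membership checks are constant-time string lookups.
	fmt.Println(loopbacks.Contains("127.0.0.1"))    // true
	fmt.Println(loopbacks.Contains("192.168.1.10")) // false
}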
@@ -93,17 +110,12 @@ func mustGetLocalIP6() (ipList set.StringSet) { ipList.Add(ip.String()) } } - return + return ipList } // getHostIP returns IP address of given host. func getHostIP(host string) (ipList set.StringSet, err error) { - lookupHost := globalDNSCache.LookupHost - if IsKubernetes() || IsDocker() { - lookupHost = net.DefaultResolver.LookupHost - } - - addrs, err := lookupHost(GlobalContext, host) + addrs, err := globalDNSCache.LookupHost(GlobalContext, host) if err != nil { return ipList, err } @@ -165,15 +177,15 @@ func getConsoleEndpoints() (consoleEndpoints []string) { } var ipList []string if globalMinioConsoleHost == "" { - ipList = sortIPs(mustGetLocalIP4().ToSlice()) - ipList = append(ipList, mustGetLocalIP6().ToSlice()...) + ipList = sortIPs(localIP4.ToSlice()) + ipList = append(ipList, localIP6.ToSlice()...) } else { ipList = []string{globalMinioConsoleHost} } + consoleEndpoints = make([]string, 0, len(ipList)) for _, ip := range ipList { - endpoint := fmt.Sprintf("%s://%s", getURLScheme(globalIsTLS), net.JoinHostPort(ip, globalMinioConsolePort)) - consoleEndpoints = append(consoleEndpoints, endpoint) + consoleEndpoints = append(consoleEndpoints, getURLScheme(globalIsTLS)+"://"+net.JoinHostPort(ip, globalMinioConsolePort)) } return consoleEndpoints @@ -185,15 +197,15 @@ func getAPIEndpoints() (apiEndpoints []string) { } var ipList []string if globalMinioHost == "" { - ipList = sortIPs(mustGetLocalIP4().ToSlice()) - ipList = append(ipList, mustGetLocalIP6().ToSlice()...) + ipList = sortIPs(localIP4.ToSlice()) + ipList = append(ipList, localIP6.ToSlice()...) } else { ipList = []string{globalMinioHost} } + apiEndpoints = make([]string, 0, len(ipList)) for _, ip := range ipList { - endpoint := fmt.Sprintf("%s://%s", getURLScheme(globalIsTLS), net.JoinHostPort(ip, globalMinioPort)) - apiEndpoints = append(apiEndpoints, endpoint) + apiEndpoints = append(apiEndpoints, getURLScheme(globalIsTLS)+"://"+net.JoinHostPort(ip, globalMinioPort)) } return apiEndpoints diff --git a/cmd/net_test.go b/cmd/net_test.go index 594cdb6244bc3..d482cad29db7c 100644 --- a/cmd/net_test.go +++ b/cmd/net_test.go @@ -82,7 +82,7 @@ func TestSortIPs(t *testing.T) { ipList: []string{"127.0.0.1"}, sortedIPList: []string{"127.0.0.1"}, }, - // Non parsable ip is assumed to be hostame and gets preserved + // Non parsable ip is assumed to be hostname and gets preserved // as the left most elements, regardless of IP based sorting. 
{ ipList: []string{"hostname", "127.0.0.1", "192.168.1.106"}, @@ -131,7 +131,6 @@ func TestGetHostIP(t *testing.T) { expectedErr error }{ {"localhost", set.CreateStringSet("127.0.0.1"), nil}, - {"example.org", set.CreateStringSet("93.184.216.34"), nil}, } for _, testCase := range testCases { @@ -147,8 +146,16 @@ func TestGetHostIP(t *testing.T) { t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err) } - if testCase.expectedIPList != nil && testCase.expectedIPList.Intersection(ipList).IsEmpty() { - t.Fatalf("host: expected = %v, got = %v", testCase.expectedIPList, ipList) + if testCase.expectedIPList != nil { + var found bool + for _, ip := range ipList.ToSlice() { + if testCase.expectedIPList.Contains(ip) { + found = true + } + } + if !found { + t.Fatalf("host: expected = %v, got = %v", testCase.expectedIPList, ipList) + } } } } @@ -194,7 +201,6 @@ func TestCheckLocalServerAddr(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { err := CheckLocalServerAddr(testCase.serverAddr) switch { @@ -266,7 +272,6 @@ func TestSameLocalAddrs(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { sameAddr, err := sameLocalAddrs(testCase.addr1, testCase.addr2) if testCase.expectedErr != nil && err == nil { diff --git a/cmd/notification-summary.go b/cmd/notification-summary.go index 86fb684e3410d..28d07c7eac8f8 100644 --- a/cmd/notification-summary.go +++ b/cmd/notification-summary.go @@ -26,7 +26,7 @@ func GetTotalCapacity(diskInfo []madmin.Disk) (capacity uint64) { for _, disk := range diskInfo { capacity += disk.TotalSpace } - return + return capacity } // GetTotalUsableCapacity gets the total usable capacity in the cluster. @@ -42,7 +42,7 @@ func GetTotalUsableCapacity(diskInfo []madmin.Disk, s StorageInfo) (capacity uin capacity += disk.TotalSpace } } - return + return capacity } // GetTotalCapacityFree gets the total capacity free in the cluster. @@ -50,7 +50,7 @@ func GetTotalCapacityFree(diskInfo []madmin.Disk) (capacity uint64) { for _, d := range diskInfo { capacity += d.AvailableSpace } - return + return capacity } // GetTotalUsableCapacityFree gets the total usable capacity free in the cluster. 
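The notification-summary.go hunks above, like several earlier ones in this patch (nsLockMap.lock, mustGetLocalIP4/6), replace naked returns of named results with explicit returns; behaviour is unchanged, only readability improves. A minimal before/after illustration with hypothetical names:

package example

// totalBefore returns its named result implicitly, which is easy to misread.
func totalBefore(sizes []uint64) (capacity uint64) {
	for _, s := range sizes {
		capacity += s
	}
	return
}

// totalAfter computes the same value but returns it explicitly.
func totalAfter(sizes []uint64) (capacity uint64) {
	for _, s := range sizes {
		capacity += s
	}
	return capacity
}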
@@ -66,5 +66,5 @@ func GetTotalUsableCapacityFree(diskInfo []madmin.Disk, s StorageInfo) (capacity capacity += disk.AvailableSpace } } - return + return capacity } diff --git a/cmd/notification.go b/cmd/notification.go index 14da581930d3f..152856ac23085 100644 --- a/cmd/notification.go +++ b/cmd/notification.go @@ -34,9 +34,9 @@ import ( "github.com/klauspost/compress/zip" "github.com/minio/madmin-go/v3" xioutil "github.com/minio/minio/internal/ioutil" - xnet "github.com/minio/pkg/v2/net" - "github.com/minio/pkg/v2/sync/errgroup" - "github.com/minio/pkg/v2/workers" + xnet "github.com/minio/pkg/v3/net" + "github.com/minio/pkg/v3/sync/errgroup" + "github.com/minio/pkg/v3/workers" "github.com/minio/minio/internal/bucket/bandwidth" "github.com/minio/minio/internal/logger" @@ -84,6 +84,9 @@ func WithNPeersThrottled(nerrs, wks int) *NotificationGroup { if nerrs <= 0 { nerrs = 1 } + if wks > nerrs { + wks = nerrs + } wk, _ := workers.New(wks) return &NotificationGroup{errs: make([]NotificationPeerErr, nerrs), workers: wk, retryCount: 3} } @@ -118,21 +121,30 @@ func (g *NotificationGroup) Go(ctx context.Context, f func() error, index int, a g.errs[index] = NotificationPeerErr{ Host: addr, } - for i := 0; i < g.retryCount; i++ { + + retryCount := g.retryCount + for i := range retryCount { g.errs[index].Err = nil if err := f(); err != nil { g.errs[index].Err = err + + if contextCanceled(ctx) { + // context already canceled no retries. + retryCount = 0 + } + // Last iteration log the error. - if i == g.retryCount-1 { + if i == retryCount-1 { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", addr.String()) ctx := logger.SetReqInfo(ctx, reqInfo) - logger.LogOnceIf(ctx, err, addr.String()) + peersLogOnceIf(ctx, err, addr.String()) } + // Wait for a minimum of 100ms and dynamically increase this based on number of attempts. - if i < g.retryCount-1 { + if i < retryCount-1 { time.Sleep(100*time.Millisecond + time.Duration(r.Float64()*float64(time.Second))) + continue } - continue } break } @@ -140,137 +152,128 @@ func (g *NotificationGroup) Go(ctx context.Context, f func() error, index int, a } // DeletePolicy - deletes policy across all peers. 
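The retry loop added to NotificationGroup.Go above attempts a peer call up to retryCount times, stops retrying once the context is canceled, and sleeps 100ms plus up to a second of random jitter between attempts. A standalone sketch of that pattern under those assumptions; retryPeerCall is an illustrative name, not part of the patch:

package main

import (
	"context"
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// retryPeerCall mirrors the shape of NotificationGroup.Go: retry a peer call
// a fixed number of times, stop early once ctx is canceled, and wait 100ms
// plus random jitter between attempts.
func retryPeerCall(ctx context.Context, attempts int, call func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = call(); err == nil {
			return nil
		}
		if ctx.Err() != nil {
			// Context canceled: no further retries.
			return err
		}
		if i < attempts-1 {
			time.Sleep(100*time.Millisecond + time.Duration(rand.Float64()*float64(time.Second)))
		}
	}
	return err
}

func main() {
	calls := 0
	err := retryPeerCall(context.Background(), 3, func() error {
		calls++
		if calls < 3 {
			return errors.New("peer not reachable")
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}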
-func (sys *NotificationSys) DeletePolicy(policyName string) []NotificationPeerErr { +func (sys *NotificationSys) DeletePolicy(ctx context.Context, policyName string) []NotificationPeerErr { ng := WithNPeers(len(sys.peerClients)).WithRetries(1) for idx, client := range sys.peerClients { - client := client - ng.Go(GlobalContext, func() error { + ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable } - return client.DeletePolicy(policyName) + return client.DeletePolicy(ctx, policyName) }, idx, *client.host) } return ng.Wait() } // LoadPolicy - reloads a specific modified policy across all peers -func (sys *NotificationSys) LoadPolicy(policyName string) []NotificationPeerErr { +func (sys *NotificationSys) LoadPolicy(ctx context.Context, policyName string) []NotificationPeerErr { ng := WithNPeers(len(sys.peerClients)).WithRetries(1) for idx, client := range sys.peerClients { - client := client - ng.Go(GlobalContext, func() error { + ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable } - return client.LoadPolicy(policyName) + return client.LoadPolicy(ctx, policyName) }, idx, *client.host) } return ng.Wait() } // LoadPolicyMapping - reloads a policy mapping across all peers -func (sys *NotificationSys) LoadPolicyMapping(userOrGroup string, userType IAMUserType, isGroup bool) []NotificationPeerErr { +func (sys *NotificationSys) LoadPolicyMapping(ctx context.Context, userOrGroup string, userType IAMUserType, isGroup bool) []NotificationPeerErr { ng := WithNPeers(len(sys.peerClients)).WithRetries(1) for idx, client := range sys.peerClients { - client := client - ng.Go(GlobalContext, func() error { + ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable } - return client.LoadPolicyMapping(userOrGroup, userType, isGroup) + return client.LoadPolicyMapping(ctx, userOrGroup, userType, isGroup) }, idx, *client.host) } return ng.Wait() } // DeleteUser - deletes a specific user across all peers -func (sys *NotificationSys) DeleteUser(accessKey string) []NotificationPeerErr { +func (sys *NotificationSys) DeleteUser(ctx context.Context, accessKey string) []NotificationPeerErr { ng := WithNPeers(len(sys.peerClients)).WithRetries(1) for idx, client := range sys.peerClients { - client := client - ng.Go(GlobalContext, func() error { + ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable } - return client.DeleteUser(accessKey) + return client.DeleteUser(ctx, accessKey) }, idx, *client.host) } return ng.Wait() } // LoadUser - reloads a specific user across all peers -func (sys *NotificationSys) LoadUser(accessKey string, temp bool) []NotificationPeerErr { +func (sys *NotificationSys) LoadUser(ctx context.Context, accessKey string, temp bool) []NotificationPeerErr { ng := WithNPeers(len(sys.peerClients)).WithRetries(1) for idx, client := range sys.peerClients { - client := client - ng.Go(GlobalContext, func() error { + ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable } - return client.LoadUser(accessKey, temp) + return client.LoadUser(ctx, accessKey, temp) }, idx, *client.host) } return ng.Wait() } // LoadGroup - loads a specific group on all peers. 
-func (sys *NotificationSys) LoadGroup(group string) []NotificationPeerErr { +func (sys *NotificationSys) LoadGroup(ctx context.Context, group string) []NotificationPeerErr { ng := WithNPeers(len(sys.peerClients)).WithRetries(1) for idx, client := range sys.peerClients { - client := client - ng.Go(GlobalContext, func() error { + ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable } - return client.LoadGroup(group) + return client.LoadGroup(ctx, group) }, idx, *client.host) } return ng.Wait() } // DeleteServiceAccount - deletes a specific service account across all peers -func (sys *NotificationSys) DeleteServiceAccount(accessKey string) []NotificationPeerErr { +func (sys *NotificationSys) DeleteServiceAccount(ctx context.Context, accessKey string) []NotificationPeerErr { ng := WithNPeers(len(sys.peerClients)).WithRetries(1) for idx, client := range sys.peerClients { - client := client - ng.Go(GlobalContext, func() error { + ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable } - return client.DeleteServiceAccount(accessKey) + return client.DeleteServiceAccount(ctx, accessKey) }, idx, *client.host) } return ng.Wait() } // LoadServiceAccount - reloads a specific service account across all peers -func (sys *NotificationSys) LoadServiceAccount(accessKey string) []NotificationPeerErr { +func (sys *NotificationSys) LoadServiceAccount(ctx context.Context, accessKey string) []NotificationPeerErr { ng := WithNPeers(len(sys.peerClients)).WithRetries(1) for idx, client := range sys.peerClients { - client := client - ng.Go(GlobalContext, func() error { + ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable } - return client.LoadServiceAccount(accessKey) + return client.LoadServiceAccount(ctx, accessKey) }, idx, *client.host) } return ng.Wait() } // BackgroundHealStatus - returns background heal status of all peers -func (sys *NotificationSys) BackgroundHealStatus() ([]madmin.BgHealState, []NotificationPeerErr) { +func (sys *NotificationSys) BackgroundHealStatus(ctx context.Context) ([]madmin.BgHealState, []NotificationPeerErr) { ng := WithNPeers(len(sys.peerClients)) states := make([]madmin.BgHealState, len(sys.peerClients)) for idx, client := range sys.peerClients { - idx := idx client := client - ng.Go(GlobalContext, func() error { + ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable } - st, err := client.BackgroundHealStatus() + st, err := client.BackgroundHealStatus(ctx) if err != nil { return err } @@ -283,15 +286,15 @@ func (sys *NotificationSys) BackgroundHealStatus() ([]madmin.BgHealState, []Noti } // StartProfiling - start profiling on remote peers, by initiating a remote RPC. -func (sys *NotificationSys) StartProfiling(profiler string) []NotificationPeerErr { +func (sys *NotificationSys) StartProfiling(ctx context.Context, profiler string) []NotificationPeerErr { ng := WithNPeers(len(sys.peerClients)) for idx, client := range sys.peerClients { if client == nil { continue } client := client - ng.Go(GlobalContext, func() error { - return client.StartProfiling(profiler) + ng.Go(ctx, func() error { + return client.StartProfiling(ctx, profiler) }, idx, *client.host) } return ng.Wait() @@ -304,34 +307,55 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io zipWriter := zip.NewWriter(writer) defer zipWriter.Close() - for _, client := range sys.peerClients { + // Start by embedding cluster info. 
+ if b := getClusterMetaInfo(ctx); len(b) > 0 { + internalLogIf(ctx, embedFileInZip(zipWriter, "cluster.info", b, 0o600)) + } + + // Profiles can be quite big, so we limit to max 16 concurrent downloads. + ng := WithNPeersThrottled(len(sys.peerClients), 16) + var writeMu sync.Mutex + for i, client := range sys.peerClients { if client == nil { continue } - data, err := client.DownloadProfileData() - if err != nil { - reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", client.host.String()) - ctx := logger.SetReqInfo(ctx, reqInfo) - logger.LogIf(ctx, err) - continue - } - - profilingDataFound = true - - for typ, data := range data { - err := embedFileInZip(zipWriter, fmt.Sprintf("profile-%s-%s", client.host.String(), typ), data, 0o600) + ng.Go(ctx, func() error { + // Give 15 seconds to each remote call. + // Errors are logged but not returned. + ctx, cancel := context.WithTimeout(ctx, 15*time.Second) + defer cancel() + data, err := client.DownloadProfileData(ctx) if err != nil { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", client.host.String()) ctx := logger.SetReqInfo(ctx, reqInfo) - logger.LogIf(ctx, err) + peersLogOnceIf(ctx, err, client.host.String()) + return nil } - } + + for typ, data := range data { + // zip writer only handles one concurrent write + writeMu.Lock() + profilingDataFound = true + err := embedFileInZip(zipWriter, fmt.Sprintf("profile-%s-%s", client.host.String(), typ), data, 0o600) + writeMu.Unlock() + if err != nil { + reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", client.host.String()) + ctx := logger.SetReqInfo(ctx, reqInfo) + peersLogOnceIf(ctx, err, client.host.String()) + } + } + return nil + }, i, *client.host) + } + ng.Wait() + if ctx.Err() != nil { + return false } // Local host thisAddr, err := xnet.ParseHost(globalLocalNodeName) if err != nil { - logger.LogIf(ctx, err) + bugLogIf(ctx, err) return profilingDataFound } @@ -339,7 +363,7 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io if err != nil { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", thisAddr.String()) ctx := logger.SetReqInfo(ctx, reqInfo) - logger.LogIf(ctx, err) + bugLogIf(ctx, err) return profilingDataFound } @@ -348,13 +372,10 @@ func (sys *NotificationSys) DownloadProfilingData(ctx context.Context, writer io // Send profiling data to zip as file for typ, data := range data { err := embedFileInZip(zipWriter, fmt.Sprintf("profile-%s-%s", thisAddr, typ), data, 0o600) - logger.LogIf(ctx, err) - } - if b := getClusterMetaInfo(ctx); len(b) > 0 { - logger.LogIf(ctx, embedFileInZip(zipWriter, "cluster.info", b, 0o600)) + internalLogIf(ctx, err) } - return + return profilingDataFound } // VerifyBinary - asks remote peers to verify the checksum @@ -374,10 +395,6 @@ func (sys *NotificationSys) VerifyBinary(ctx context.Context, u *url.URL, sha256 // further discussion advised. Remove this comment and remove the worker model // for this function in future. 
maxWorkers := runtime.GOMAXPROCS(0) / 2 - if maxWorkers > len(sys.peerClients) { - maxWorkers = len(sys.peerClients) - } - ng := WithNPeersThrottled(len(sys.peerClients), maxWorkers) for idx, client := range sys.peerClients { if client == nil { @@ -415,7 +432,7 @@ func (sys *NotificationSys) SignalConfigReload(subSys string) []NotificationPeer } client := client ng.Go(GlobalContext, func() error { - return client.SignalService(serviceReloadDynamic, subSys, false) + return client.SignalService(serviceReloadDynamic, subSys, false, nil) }, idx, *client.host) } return ng.Wait() @@ -431,14 +448,14 @@ func (sys *NotificationSys) SignalService(sig serviceSignal) []NotificationPeerE client := client ng.Go(GlobalContext, func() error { // force == true preserves the current behavior - return client.SignalService(sig, "", false) + return client.SignalService(sig, "", false, nil) }, idx, *client.host) } return ng.Wait() } // SignalServiceV2 - calls signal service RPC call on all peers with v2 API -func (sys *NotificationSys) SignalServiceV2(sig serviceSignal, dryRun bool) []NotificationPeerErr { +func (sys *NotificationSys) SignalServiceV2(sig serviceSignal, dryRun bool, execAt *time.Time) []NotificationPeerErr { ng := WithNPeers(len(sys.peerClients)) for idx, client := range sys.peerClients { if client == nil { @@ -446,7 +463,7 @@ func (sys *NotificationSys) SignalServiceV2(sig serviceSignal, dryRun bool) []No } client := client ng.Go(GlobalContext, func() error { - return client.SignalService(sig, "", dryRun) + return client.SignalService(sig, "", dryRun, execAt) }, idx, *client.host) } return ng.Wait() @@ -459,13 +476,12 @@ func (sys *NotificationSys) GetLocks(ctx context.Context, r *http.Request) []*Pe locksResp := make([]*PeerLocks, len(sys.peerClients)) g := errgroup.WithNErrs(len(sys.peerClients)) for index, client := range sys.peerClients { - index := index client := client g.Go(func() error { if client == nil { return errPeerNotReachable } - serverLocksResp, err := sys.peerClients[index].GetLocks() + serverLocksResp, err := sys.peerClients[index].GetLocks(ctx) if err != nil { return err } @@ -480,7 +496,7 @@ func (sys *NotificationSys) GetLocks(ctx context.Context, r *http.Request) []*Pe reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", sys.peerClients[index].host.String()) ctx := logger.SetReqInfo(ctx, reqInfo) - logger.LogOnceIf(ctx, err, sys.peerClients[index].host.String()) + peersLogOnceIf(ctx, err, sys.peerClients[index].host.String()) } locksResp = append(locksResp, &PeerLocks{ Addr: getHostName(r), @@ -498,20 +514,20 @@ func (sys *NotificationSys) LoadBucketMetadata(ctx context.Context, bucketName s } client := client ng.Go(ctx, func() error { - return client.LoadBucketMetadata(bucketName) + return client.LoadBucketMetadata(ctx, bucketName) }, idx, *client.host) } for _, nErr := range ng.Wait() { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) if nErr.Err != nil { - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) } } } // DeleteBucketMetadata - calls DeleteBucketMetadata call on all peers func (sys *NotificationSys) DeleteBucketMetadata(ctx context.Context, bucketName string) { - globalReplicationStats.Delete(bucketName) + globalReplicationStats.Load().Delete(bucketName) globalBucketMetadataSys.Remove(bucketName) globalBucketTargetSys.Delete(bucketName) globalEventNotifier.RemoveNotification(bucketName) @@ -528,13 +544,13 @@ func (sys 
*NotificationSys) DeleteBucketMetadata(ctx context.Context, bucketName } client := client ng.Go(ctx, func() error { - return client.DeleteBucketMetadata(bucketName) + return client.DeleteBucketMetadata(ctx, bucketName) }, idx, *client.host) } for _, nErr := range ng.Wait() { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) if nErr.Err != nil { - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) } } } @@ -544,13 +560,12 @@ func (sys *NotificationSys) GetClusterAllBucketStats(ctx context.Context) []Buck ng := WithNPeers(len(sys.peerClients)).WithRetries(1) replicationStats := make([]BucketStatsMap, len(sys.peerClients)) for index, client := range sys.peerClients { - index := index client := client ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable } - bsMap, err := client.GetAllBucketStats() + bsMap, err := client.GetAllBucketStats(ctx) if err != nil { return err } @@ -561,11 +576,11 @@ func (sys *NotificationSys) GetClusterAllBucketStats(ctx context.Context) []Buck for _, nErr := range ng.Wait() { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) if nErr.Err != nil { - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) } } - replicationStatsList := globalReplicationStats.GetAll() + replicationStatsList := globalReplicationStats.Load().GetAll() bucketStatsMap := BucketStatsMap{ Stats: make(map[string]BucketStats, len(replicationStatsList)), Timestamp: UTCNow(), @@ -573,7 +588,7 @@ func (sys *NotificationSys) GetClusterAllBucketStats(ctx context.Context) []Buck for k, replicationStats := range replicationStatsList { bucketStatsMap.Stats[k] = BucketStats{ ReplicationStats: replicationStats, - ProxyStats: globalReplicationStats.getProxyStats(k), + ProxyStats: globalReplicationStats.Load().getProxyStats(k), } } @@ -586,13 +601,12 @@ func (sys *NotificationSys) GetClusterBucketStats(ctx context.Context, bucketNam ng := WithNPeers(len(sys.peerClients)).WithRetries(1) bucketStats := make([]BucketStats, len(sys.peerClients)) for index, client := range sys.peerClients { - index := index client := client ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable } - bs, err := client.GetBucketStats(bucketName) + bs, err := client.GetBucketStats(ctx, bucketName) if err != nil { return err } @@ -603,14 +617,16 @@ func (sys *NotificationSys) GetClusterBucketStats(ctx context.Context, bucketNam for _, nErr := range ng.Wait() { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) if nErr.Err != nil { - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) } } - bucketStats = append(bucketStats, BucketStats{ - ReplicationStats: globalReplicationStats.Get(bucketName), - QueueStats: ReplicationQueueStats{Nodes: []ReplQNodeStats{globalReplicationStats.getNodeQueueStats(bucketName)}}, - ProxyStats: globalReplicationStats.getProxyStats(bucketName), - }) + if st := globalReplicationStats.Load(); st != nil { + bucketStats = append(bucketStats, BucketStats{ + ReplicationStats: st.Get(bucketName), + QueueStats: ReplicationQueueStats{Nodes: []ReplQNodeStats{st.getNodeQueueStats(bucketName)}}, + ProxyStats: st.getProxyStats(bucketName), + }) + } return bucketStats } @@ -619,13 +635,12 @@ 
func (sys *NotificationSys) GetClusterSiteMetrics(ctx context.Context) []SRMetri ng := WithNPeers(len(sys.peerClients)).WithRetries(1) siteStats := make([]SRMetricsSummary, len(sys.peerClients)) for index, client := range sys.peerClients { - index := index client := client ng.Go(ctx, func() error { if client == nil { return errPeerNotReachable } - sm, err := client.GetSRMetrics() + sm, err := client.GetSRMetrics(ctx) if err != nil { return err } @@ -636,10 +651,10 @@ func (sys *NotificationSys) GetClusterSiteMetrics(ctx context.Context) []SRMetri for _, nErr := range ng.Wait() { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) if nErr.Err != nil { - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) } } - siteStats = append(siteStats, globalReplicationStats.getSRMetricsForNode()) + siteStats = append(siteStats, globalReplicationStats.Load().getSRMetricsForNode()) return siteStats } @@ -658,14 +673,14 @@ func (sys *NotificationSys) ReloadPoolMeta(ctx context.Context) { for _, nErr := range ng.Wait() { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) if nErr.Err != nil { - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) } } } -// StopRebalance notifies all MinIO nodes to signal any ongoing rebalance -// goroutine to stop. -func (sys *NotificationSys) StopRebalance(ctx context.Context) { +// DeleteUploadID notifies all the MinIO nodes to remove the +// given uploadID from cache +func (sys *NotificationSys) DeleteUploadID(ctx context.Context, uploadID string) { ng := WithNPeers(len(sys.peerClients)) for idx, client := range sys.peerClients { if client == nil { @@ -673,22 +688,43 @@ func (sys *NotificationSys) StopRebalance(ctx context.Context) { } client := client ng.Go(ctx, func() error { - return client.StopRebalance(ctx) + return client.DeleteUploadID(ctx, uploadID) }, idx, *client.host) } for _, nErr := range ng.Wait() { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) if nErr.Err != nil { - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) } } +} +// StopRebalance notifies all MinIO nodes to signal any ongoing rebalance +// goroutine to stop. 
+func (sys *NotificationSys) StopRebalance(ctx context.Context) { objAPI := newObjectLayerFn() if objAPI == nil { - logger.LogIf(ctx, errServerNotInitialized) + internalLogIf(ctx, errServerNotInitialized) return } + ng := WithNPeers(len(sys.peerClients)) + for idx, client := range sys.peerClients { + if client == nil { + continue + } + client := client + ng.Go(ctx, func() error { + return client.StopRebalance(ctx) + }, idx, *client.host) + } + for _, nErr := range ng.Wait() { + reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) + if nErr.Err != nil { + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + } + } + if pools, ok := objAPI.(*erasureServerPools); ok { pools.StopRebalance() } @@ -711,7 +747,7 @@ func (sys *NotificationSys) LoadRebalanceMeta(ctx context.Context, startRebalanc for _, nErr := range ng.Wait() { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) if nErr.Err != nil { - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) } } } @@ -732,7 +768,7 @@ func (sys *NotificationSys) LoadTransitionTierConfig(ctx context.Context) { for _, nErr := range ng.Wait() { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", nErr.Host.String()) if nErr.Err != nil { - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), nErr.Err, nErr.Host.String()) } } } @@ -870,14 +906,13 @@ func (sys *NotificationSys) GetMetrics(ctx context.Context, t madmin.MetricType, } // GetResourceMetrics - gets the resource metrics from all nodes excluding self. -func (sys *NotificationSys) GetResourceMetrics(ctx context.Context) <-chan Metric { +func (sys *NotificationSys) GetResourceMetrics(ctx context.Context) <-chan MetricV2 { if sys == nil { return nil } g := errgroup.WithNErrs(len(sys.peerClients)) - peerChannels := make([]<-chan Metric, len(sys.peerClients)) + peerChannels := make([]<-chan MetricV2, len(sys.peerClients)) for index := range sys.peerClients { - index := index g.Go(func() error { if sys.peerClients[index] == nil { return errPeerNotReachable @@ -946,7 +981,7 @@ func (sys *NotificationSys) addNodeErr(nodeInfo madmin.NodeInfo, peerClient *pee addr := peerClient.host.String() reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", addr) ctx := logger.SetReqInfo(GlobalContext, reqInfo) - logger.LogOnceIf(ctx, err, "add-node-err-"+addr) + peersLogOnceIf(ctx, err, "add-node-err-"+addr) nodeInfo.SetAddr(addr) nodeInfo.SetError(err.Error()) } @@ -1047,7 +1082,7 @@ func getOfflineDisks(offlineHost string, endpoints EndpointServerPools) []madmin } // StorageInfo returns disk information across all peers -func (sys *NotificationSys) StorageInfo(objLayer ObjectLayer, metrics bool) StorageInfo { +func (sys *NotificationSys) StorageInfo(ctx context.Context, objLayer ObjectLayer, metrics bool) StorageInfo { var storageInfo StorageInfo replies := make([]StorageInfo, len(sys.peerClients)) @@ -1059,7 +1094,7 @@ func (sys *NotificationSys) StorageInfo(objLayer ObjectLayer, metrics bool) Stor wg.Add(1) go func(client *peerRESTClient, idx int) { defer wg.Done() - info, err := client.LocalStorageInfo(metrics) + info, err := client.LocalStorageInfo(ctx, metrics) if err != nil { info.Disks = getOfflineDisks(client.host.String(), globalEndpoints) } @@ -1069,7 +1104,7 @@ func (sys *NotificationSys) StorageInfo(objLayer ObjectLayer, metrics bool) Stor 
wg.Wait() // Add local to this server. - replies = append(replies, objLayer.LocalStorageInfo(GlobalContext, metrics)) + replies = append(replies, objLayer.LocalStorageInfo(ctx, metrics)) storageInfo.Backend = objLayer.BackendInfo() for _, sinfo := range replies { @@ -1080,7 +1115,7 @@ func (sys *NotificationSys) StorageInfo(objLayer ObjectLayer, metrics bool) Stor } // ServerInfo - calls ServerInfo RPC call on all peers. -func (sys *NotificationSys) ServerInfo(metrics bool) []madmin.ServerProperties { +func (sys *NotificationSys) ServerInfo(ctx context.Context, metrics bool) []madmin.ServerProperties { reply := make([]madmin.ServerProperties, len(sys.peerClients)) var wg sync.WaitGroup for i, client := range sys.peerClients { @@ -1090,7 +1125,9 @@ func (sys *NotificationSys) ServerInfo(metrics bool) []madmin.ServerProperties { wg.Add(1) go func(client *peerRESTClient, idx int) { defer wg.Done() - info, err := client.ServerInfo(metrics) + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + info, err := client.ServerInfo(ctx, metrics) if err != nil { info.Endpoint = client.host.String() info.State = string(madmin.ItemOffline) @@ -1104,24 +1141,13 @@ func (sys *NotificationSys) ServerInfo(metrics bool) []madmin.ServerProperties { return reply } -// returns all the peers that are currently online. -func (sys *NotificationSys) getOnlinePeers() []*peerRESTClient { - var peerClients []*peerRESTClient - for _, peerClient := range sys.allPeerClients { - if peerClient != nil && peerClient.IsOnline() { - peerClients = append(peerClients, peerClient) - } - } - return peerClients -} - // restClientFromHash will return a deterministic peerRESTClient based on s. // Will return nil if client is local. func (sys *NotificationSys) restClientFromHash(s string) (client *peerRESTClient) { if len(sys.peerClients) == 0 { return nil } - peerClients := sys.getOnlinePeers() + peerClients := sys.allPeerClients if len(peerClients) == 0 { return nil } @@ -1144,7 +1170,6 @@ func (sys *NotificationSys) GetPeerOnlineCount() (nodesOnline, nodesOffline int) defer wg.Done() nodesOnlineIndex[idx] = client.restClient.HealthCheckFn() }(idx, client) - } wg.Wait() @@ -1155,7 +1180,7 @@ func (sys *NotificationSys) GetPeerOnlineCount() (nodesOnline, nodesOffline int) nodesOffline++ } } - return + return nodesOnline, nodesOffline } // NewNotificationSys - creates new notification system object. @@ -1187,7 +1212,7 @@ func (sys *NotificationSys) GetBandwidthReports(ctx context.Context, buckets ... reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", sys.peerClients[index].host.String()) ctx := logger.SetReqInfo(ctx, reqInfo) - logger.LogOnceIf(ctx, err, sys.peerClients[index].host.String()) + peersLogOnceIf(ctx, err, sys.peerClients[index].host.String()) } reports = append(reports, globalBucketMonitor.GetReport(bandwidth.SelectBuckets(buckets...))) consolidatedReport := bandwidth.BucketBandwidthReport{ @@ -1214,22 +1239,22 @@ func (sys *NotificationSys) GetBandwidthReports(ctx context.Context, buckets ... 
return consolidatedReport } -func (sys *NotificationSys) collectPeerMetrics(ctx context.Context, peerChannels []<-chan Metric, g *errgroup.Group) <-chan Metric { - ch := make(chan Metric) +func (sys *NotificationSys) collectPeerMetrics(ctx context.Context, peerChannels []<-chan MetricV2, g *errgroup.Group) <-chan MetricV2 { + ch := make(chan MetricV2) var wg sync.WaitGroup for index, err := range g.Wait() { if err != nil { if sys.peerClients[index] != nil { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", sys.peerClients[index].host.String()) - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), err, sys.peerClients[index].host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), err, sys.peerClients[index].host.String()) } else { - logger.LogOnceIf(ctx, err, "peer-offline") + peersLogOnceIf(ctx, err, "peer-offline") } continue } wg.Add(1) - go func(ctx context.Context, peerChannel <-chan Metric, wg *sync.WaitGroup) { + go func(ctx context.Context, peerChannel <-chan MetricV2, wg *sync.WaitGroup) { defer wg.Done() for { select { @@ -1248,7 +1273,7 @@ func (sys *NotificationSys) collectPeerMetrics(ctx context.Context, peerChannels } }(ctx, peerChannels[index], &wg) } - go func(wg *sync.WaitGroup, ch chan Metric) { + go func(wg *sync.WaitGroup, ch chan MetricV2) { wg.Wait() xioutil.SafeClose(ch) }(&wg, ch) @@ -1256,14 +1281,13 @@ func (sys *NotificationSys) collectPeerMetrics(ctx context.Context, peerChannels } // GetBucketMetrics - gets the cluster level bucket metrics from all nodes excluding self. -func (sys *NotificationSys) GetBucketMetrics(ctx context.Context) <-chan Metric { +func (sys *NotificationSys) GetBucketMetrics(ctx context.Context) <-chan MetricV2 { if sys == nil { return nil } g := errgroup.WithNErrs(len(sys.peerClients)) - peerChannels := make([]<-chan Metric, len(sys.peerClients)) + peerChannels := make([]<-chan MetricV2, len(sys.peerClients)) for index := range sys.peerClients { - index := index g.Go(func() error { if sys.peerClients[index] == nil { return errPeerNotReachable @@ -1277,14 +1301,13 @@ func (sys *NotificationSys) GetBucketMetrics(ctx context.Context) <-chan Metric } // GetClusterMetrics - gets the cluster metrics from all nodes excluding self. 
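collectPeerMetrics above fans several per-peer channels into one output channel: one goroutine per source copies values until that source closes or the context is done, and a final goroutine closes the merged channel once the WaitGroup drains. A minimal sketch of the same fan-in shape, with MetricV2 replaced by a plain int for brevity:

package main

import (
	"context"
	"fmt"
	"sync"
)

// merge fans-in several input channels into a single output channel,
// mirroring the structure of collectPeerMetrics.
func merge(ctx context.Context, inputs ...<-chan int) <-chan int {
	out := make(chan int)
	var wg sync.WaitGroup
	for _, in := range inputs {
		wg.Add(1)
		go func(in <-chan int) {
			defer wg.Done()
			for {
				select {
				case v, ok := <-in:
					if !ok {
						return
					}
					select {
					case out <- v:
					case <-ctx.Done():
						return
					}
				case <-ctx.Done():
					return
				}
			}
		}(in)
	}
	// Close the merged channel only after every copier goroutine is done.
	go func() {
		wg.Wait()
		close(out)
	}()
	return out
}

func main() {
	a, b := make(chan int), make(chan int)
	go func() { a <- 1; close(a) }()
	go func() { b <- 2; close(b) }()
	sum := 0
	for v := range merge(context.Background(), a, b) {
		sum += v
	}
	fmt.Println(sum) // 3
}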
-func (sys *NotificationSys) GetClusterMetrics(ctx context.Context) <-chan Metric { +func (sys *NotificationSys) GetClusterMetrics(ctx context.Context) <-chan MetricV2 { if sys == nil { return nil } g := errgroup.WithNErrs(len(sys.peerClients)) - peerChannels := make([]<-chan Metric, len(sys.peerClients)) + peerChannels := make([]<-chan MetricV2, len(sys.peerClients)) for index := range sys.peerClients { - index := index g.Go(func() error { if sys.peerClients[index] == nil { return errPeerNotReachable @@ -1316,7 +1339,7 @@ func (sys *NotificationSys) ServiceFreeze(ctx context.Context, freeze bool) []No } client := client ng.Go(GlobalContext, func() error { - return client.SignalService(serviceSig, "", false) + return client.SignalService(serviceSig, "", false, nil) }, idx, *client.host) } nerrs := ng.Wait() @@ -1460,7 +1483,7 @@ func (sys *NotificationSys) DriveSpeedTest(ctx context.Context, opts madmin.Driv reqInfo := (&logger.ReqInfo{}).AppendTags("remotePeer", client.host.String()) ctx := logger.SetReqInfo(GlobalContext, reqInfo) - logger.LogOnceIf(ctx, err, client.host.String()) + peersLogOnceIf(ctx, err, client.host.String()) }(client) } @@ -1521,7 +1544,7 @@ func (sys *NotificationSys) GetLastDayTierStats(ctx context.Context) DailyAllTie merged := globalTransitionState.getDailyAllTierStats() for i, stat := range lastDayStats { if errs[i] != nil { - logger.LogOnceIf(ctx, fmt.Errorf("failed to fetch last day tier stats: %w", errs[i]), sys.peerClients[i].host.String()) + peersLogOnceIf(ctx, fmt.Errorf("failed to fetch last day tier stats: %w", errs[i]), sys.peerClients[i].host.String()) continue } merged.merge(stat) @@ -1556,9 +1579,9 @@ func (sys *NotificationSys) GetReplicationMRF(ctx context.Context, bucket, node if sys.peerClients[index] != nil { reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", sys.peerClients[index].host.String()) - logger.LogOnceIf(logger.SetReqInfo(ctx, reqInfo), err, sys.peerClients[index].host.String()) + peersLogOnceIf(logger.SetReqInfo(ctx, reqInfo), err, sys.peerClients[index].host.String()) } else { - logger.LogOnceIf(ctx, err, "peer-offline") + peersLogOnceIf(ctx, err, "peer-offline") } continue } @@ -1588,7 +1611,7 @@ func (sys *NotificationSys) GetReplicationMRF(ctx context.Context, bucket, node if node != "all" && node != globalLocalNodeName { return nil } - mCh, err := globalReplicationPool.getMRF(ctx, bucket) + mCh, err := globalReplicationPool.Get().getMRF(ctx, bucket) if err != nil { return err } diff --git a/cmd/object-api-datatypes.go b/cmd/object-api-datatypes.go index 4e642e390c5de..d00e2be19f95c 100644 --- a/cmd/object-api-datatypes.go +++ b/cmd/object-api-datatypes.go @@ -19,6 +19,7 @@ package cmd import ( "io" + "maps" "math" "net/http" "time" @@ -27,7 +28,6 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/hash" - "github.com/minio/minio/internal/logger" ) //go:generate msgp -file $GOFILE -io=false -tests=false -unexported=false @@ -234,7 +234,7 @@ func (o ObjectInfo) ExpiresStr() string { // ArchiveInfo returns any saved zip archive meta information. // It will be decrypted if needed. 
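A later hunk in this file replaces ObjectInfo.Clone's hand-written map copy loop with maps.Copy from the Go 1.21+ standard library maps package. A tiny illustration of the equivalence, with sample data:

package main

import (
	"fmt"
	"maps"
)

func main() {
	src := map[string]string{"content-type": "application/zip"}

	// Hand-written loop, as the code read before this change.
	dst1 := make(map[string]string, len(src))
	for k, v := range src {
		dst1[k] = v
	}

	// maps.Copy performs the same element-wise copy into dst2.
	dst2 := make(map[string]string, len(src))
	maps.Copy(dst2, src)

	fmt.Println(dst1["content-type"] == dst2["content-type"]) // true
}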
-func (o *ObjectInfo) ArchiveInfo() []byte { +func (o *ObjectInfo) ArchiveInfo(h http.Header) []byte { if len(o.UserDefined) == 0 { return nil } @@ -244,9 +244,9 @@ func (o *ObjectInfo) ArchiveInfo() []byte { } data := []byte(z) if v, ok := o.UserDefined[archiveTypeMetadataKey]; ok && v == archiveTypeEnc { - decrypted, err := o.metadataDecrypter()(archiveTypeEnc, data) + decrypted, err := o.metadataDecrypter(h)(archiveTypeEnc, data) if err != nil { - logger.LogIf(GlobalContext, err) + encLogIf(GlobalContext, err) return nil } data = decrypted @@ -291,9 +291,7 @@ func (o *ObjectInfo) Clone() (cinfo ObjectInfo) { VersionPurgeStatusInternal: o.VersionPurgeStatusInternal, } cinfo.UserDefined = make(map[string]string, len(o.UserDefined)) - for k, v := range o.UserDefined { - cinfo.UserDefined[k] = v - } + maps.Copy(cinfo.UserDefined, o.UserDefined) return cinfo } @@ -327,6 +325,7 @@ func (ri ReplicateObjectInfo) ToObjectInfo() ObjectInfo { VersionPurgeStatusInternal: ri.VersionPurgeStatusInternal, DeleteMarker: true, UserDefined: map[string]string{}, + Checksum: ri.Checksum, } } @@ -358,6 +357,7 @@ type ReplicateObjectInfo struct { TargetStatuses map[string]replication.StatusType TargetPurgeStatuses map[string]VersionPurgeStatusType ReplicationTimestamp time.Time + Checksum []byte } // MultipartInfo captures metadata information about the uploadId @@ -418,6 +418,9 @@ type ListPartsInfo struct { // ChecksumAlgorithm if set ChecksumAlgorithm string + + // ChecksumType if set + ChecksumType string } // Lookup - returns if uploadID is valid @@ -596,10 +599,11 @@ type PartInfo struct { ActualSize int64 // Checksum values - ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string + ChecksumCRC64NVME string } // CompletePart - represents the part that was completed, this is sent by the client @@ -612,11 +616,14 @@ type CompletePart struct { // Entity tag returned when the part was uploaded. ETag string + Size int64 + // Checksum values. Optional. 
- ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string + ChecksumCRC32 string + ChecksumCRC32C string + ChecksumSHA1 string + ChecksumSHA256 string + ChecksumCRC64NVME string } // CompleteMultipartUpload - represents list of parts which are completed, this is sent by the @@ -629,6 +636,7 @@ type CompleteMultipartUpload struct { type NewMultipartUploadResult struct { UploadID string ChecksumAlgo string + ChecksumType string } type getObjectAttributesResponse struct { @@ -640,10 +648,12 @@ type getObjectAttributesResponse struct { } type objectAttributesChecksum struct { - ChecksumCRC32 string `xml:",omitempty"` - ChecksumCRC32C string `xml:",omitempty"` - ChecksumSHA1 string `xml:",omitempty"` - ChecksumSHA256 string `xml:",omitempty"` + ChecksumCRC32 string `xml:",omitempty"` + ChecksumCRC32C string `xml:",omitempty"` + ChecksumSHA1 string `xml:",omitempty"` + ChecksumSHA256 string `xml:",omitempty"` + ChecksumCRC64NVME string `xml:",omitempty"` + ChecksumType string `xml:",omitempty"` } type objectAttributesParts struct { @@ -656,12 +666,13 @@ type objectAttributesParts struct { } type objectAttributesPart struct { - PartNumber int - Size int64 - ChecksumCRC32 string `xml:",omitempty"` - ChecksumCRC32C string `xml:",omitempty"` - ChecksumSHA1 string `xml:",omitempty"` - ChecksumSHA256 string `xml:",omitempty"` + PartNumber int + Size int64 + ChecksumCRC32 string `xml:",omitempty"` + ChecksumCRC32C string `xml:",omitempty"` + ChecksumSHA1 string `xml:",omitempty"` + ChecksumSHA256 string `xml:",omitempty"` + ChecksumCRC64NVME string `xml:",omitempty"` } type objectAttributesErrorResponse struct { diff --git a/cmd/object-api-datatypes_gen.go b/cmd/object-api-datatypes_gen.go index 68664c74b0689..b15bb3ab2c6ee 100644 --- a/cmd/object-api-datatypes_gen.go +++ b/cmd/object-api-datatypes_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package cmd + import ( "github.com/minio/minio/internal/bucket/replication" "github.com/tinylib/msgp/msgp" @@ -201,13 +201,16 @@ func (z *CompleteMultipartUpload) Msgsize() (s int) { // MarshalMsg implements msgp.Marshaler func (z *CompletePart) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 6 + // map header, size 8 // string "PartNumber" - o = append(o, 0x86, 0xaa, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72) + o = append(o, 0x88, 0xaa, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72) o = msgp.AppendInt(o, z.PartNumber) // string "ETag" o = append(o, 0xa4, 0x45, 0x54, 0x61, 0x67) o = msgp.AppendString(o, z.ETag) + // string "Size" + o = append(o, 0xa4, 0x53, 0x69, 0x7a, 0x65) + o = msgp.AppendInt64(o, z.Size) // string "ChecksumCRC32" o = append(o, 0xad, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x43, 0x52, 0x43, 0x33, 0x32) o = msgp.AppendString(o, z.ChecksumCRC32) @@ -220,6 +223,9 @@ func (z *CompletePart) MarshalMsg(b []byte) (o []byte, err error) { // string "ChecksumSHA256" o = append(o, 0xae, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36) o = msgp.AppendString(o, z.ChecksumSHA256) + // string "ChecksumCRC64NVME" + o = append(o, 0xb1, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x43, 0x52, 0x43, 0x36, 0x34, 0x4e, 0x56, 0x4d, 0x45) + o = msgp.AppendString(o, z.ChecksumCRC64NVME) return } @@ -253,6 +259,12 @@ func (z *CompletePart) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "ETag") return } + case "Size": + z.Size, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Size") + return + } case "ChecksumCRC32": z.ChecksumCRC32, bts, err = msgp.ReadStringBytes(bts) if err != nil { @@ -277,6 +289,12 @@ func (z *CompletePart) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "ChecksumSHA256") return } + case "ChecksumCRC64NVME": + z.ChecksumCRC64NVME, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ChecksumCRC64NVME") + return + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -291,7 +309,7 @@ func (z *CompletePart) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *CompletePart) Msgsize() (s int) { - s = 1 + 11 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.ETag) + 14 + msgp.StringPrefixSize + len(z.ChecksumCRC32) + 15 + msgp.StringPrefixSize + len(z.ChecksumCRC32C) + 13 + msgp.StringPrefixSize + len(z.ChecksumSHA1) + 15 + msgp.StringPrefixSize + len(z.ChecksumSHA256) + s = 1 + 11 + msgp.IntSize + 5 + msgp.StringPrefixSize + len(z.ETag) + 5 + msgp.Int64Size + 14 + msgp.StringPrefixSize + len(z.ChecksumCRC32) + 15 + msgp.StringPrefixSize + len(z.ChecksumCRC32C) + 13 + msgp.StringPrefixSize + len(z.ChecksumSHA1) + 15 + msgp.StringPrefixSize + len(z.ChecksumSHA256) + 18 + msgp.StringPrefixSize + len(z.ChecksumCRC64NVME) return } @@ -956,9 +974,9 @@ func (z *ListObjectsV2Info) Msgsize() (s int) { // MarshalMsg implements msgp.Marshaler func (z *ListPartsInfo) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 11 + // map header, size 12 // string "Bucket" - o = append(o, 0x8b, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74) + o = append(o, 0x8c, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74) o = msgp.AppendString(o, z.Bucket) // string "Object" o = append(o, 0xa6, 0x4f, 0x62, 0x6a, 0x65, 0x63, 
0x74) @@ -1001,6 +1019,9 @@ func (z *ListPartsInfo) MarshalMsg(b []byte) (o []byte, err error) { // string "ChecksumAlgorithm" o = append(o, 0xb1, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d) o = msgp.AppendString(o, z.ChecksumAlgorithm) + // string "ChecksumType" + o = append(o, 0xac, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65) + o = msgp.AppendString(o, z.ChecksumType) return } @@ -1099,14 +1120,12 @@ func (z *ListPartsInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.UserDefined == nil { z.UserDefined = make(map[string]string, zb0003) } else if len(z.UserDefined) > 0 { - for key := range z.UserDefined { - delete(z.UserDefined, key) - } + clear(z.UserDefined) } for zb0003 > 0 { - var za0002 string var za0003 string zb0003-- + var za0002 string za0002, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "UserDefined") @@ -1125,6 +1144,12 @@ func (z *ListPartsInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "ChecksumAlgorithm") return } + case "ChecksumType": + z.ChecksumType, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ChecksumType") + return + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -1150,7 +1175,7 @@ func (z *ListPartsInfo) Msgsize() (s int) { s += msgp.StringPrefixSize + len(za0002) + msgp.StringPrefixSize + len(za0003) } } - s += 18 + msgp.StringPrefixSize + len(z.ChecksumAlgorithm) + s += 18 + msgp.StringPrefixSize + len(z.ChecksumAlgorithm) + 13 + msgp.StringPrefixSize + len(z.ChecksumType) return } @@ -1232,14 +1257,12 @@ func (z *MultipartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.UserDefined == nil { z.UserDefined = make(map[string]string, zb0002) } else if len(z.UserDefined) > 0 { - for key := range z.UserDefined { - delete(z.UserDefined, key) - } + clear(z.UserDefined) } for zb0002 > 0 { - var za0001 string var za0002 string zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "UserDefined") @@ -1279,13 +1302,16 @@ func (z *MultipartInfo) Msgsize() (s int) { // MarshalMsg implements msgp.Marshaler func (z NewMultipartUploadResult) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 2 + // map header, size 3 // string "UploadID" - o = append(o, 0x82, 0xa8, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x44) + o = append(o, 0x83, 0xa8, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x44) o = msgp.AppendString(o, z.UploadID) // string "ChecksumAlgo" o = append(o, 0xac, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x41, 0x6c, 0x67, 0x6f) o = msgp.AppendString(o, z.ChecksumAlgo) + // string "ChecksumType" + o = append(o, 0xac, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65) + o = msgp.AppendString(o, z.ChecksumType) return } @@ -1319,6 +1345,12 @@ func (z *NewMultipartUploadResult) UnmarshalMsg(bts []byte) (o []byte, err error err = msgp.WrapError(err, "ChecksumAlgo") return } + case "ChecksumType": + z.ChecksumType, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ChecksumType") + return + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -1333,7 +1365,7 @@ func (z *NewMultipartUploadResult) UnmarshalMsg(bts []byte) (o []byte, err error // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z NewMultipartUploadResult) Msgsize() (s int) { - s 
= 1 + 9 + msgp.StringPrefixSize + len(z.UploadID) + 13 + msgp.StringPrefixSize + len(z.ChecksumAlgo) + s = 1 + 9 + msgp.StringPrefixSize + len(z.UploadID) + 13 + msgp.StringPrefixSize + len(z.ChecksumAlgo) + 13 + msgp.StringPrefixSize + len(z.ChecksumType) return } @@ -1629,14 +1661,12 @@ func (z *ObjectInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.UserDefined == nil { z.UserDefined = make(map[string]string, zb0002) } else if len(z.UserDefined) > 0 { - for key := range z.UserDefined { - delete(z.UserDefined, key) - } + clear(z.UserDefined) } for zb0002 > 0 { - var za0001 string var za0002 string zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "UserDefined") @@ -1772,9 +1802,9 @@ func (z *ObjectInfo) Msgsize() (s int) { // MarshalMsg implements msgp.Marshaler func (z *PartInfo) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 9 + // map header, size 10 // string "PartNumber" - o = append(o, 0x89, 0xaa, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72) + o = append(o, 0x8a, 0xaa, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72) o = msgp.AppendInt(o, z.PartNumber) // string "LastModified" o = append(o, 0xac, 0x4c, 0x61, 0x73, 0x74, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64) @@ -1800,6 +1830,9 @@ func (z *PartInfo) MarshalMsg(b []byte) (o []byte, err error) { // string "ChecksumSHA256" o = append(o, 0xae, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x53, 0x48, 0x41, 0x32, 0x35, 0x36) o = msgp.AppendString(o, z.ChecksumSHA256) + // string "ChecksumCRC64NVME" + o = append(o, 0xb1, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x43, 0x52, 0x43, 0x36, 0x34, 0x4e, 0x56, 0x4d, 0x45) + o = msgp.AppendString(o, z.ChecksumCRC64NVME) return } @@ -1875,6 +1908,12 @@ func (z *PartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "ChecksumSHA256") return } + case "ChecksumCRC64NVME": + z.ChecksumCRC64NVME, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ChecksumCRC64NVME") + return + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -1889,16 +1928,16 @@ func (z *PartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *PartInfo) Msgsize() (s int) { - s = 1 + 11 + msgp.IntSize + 13 + msgp.TimeSize + 5 + msgp.StringPrefixSize + len(z.ETag) + 5 + msgp.Int64Size + 11 + msgp.Int64Size + 14 + msgp.StringPrefixSize + len(z.ChecksumCRC32) + 15 + msgp.StringPrefixSize + len(z.ChecksumCRC32C) + 13 + msgp.StringPrefixSize + len(z.ChecksumSHA1) + 15 + msgp.StringPrefixSize + len(z.ChecksumSHA256) + s = 1 + 11 + msgp.IntSize + 13 + msgp.TimeSize + 5 + msgp.StringPrefixSize + len(z.ETag) + 5 + msgp.Int64Size + 11 + msgp.Int64Size + 14 + msgp.StringPrefixSize + len(z.ChecksumCRC32) + 15 + msgp.StringPrefixSize + len(z.ChecksumCRC32C) + 13 + msgp.StringPrefixSize + len(z.ChecksumSHA1) + 15 + msgp.StringPrefixSize + len(z.ChecksumSHA256) + 18 + msgp.StringPrefixSize + len(z.ChecksumCRC64NVME) return } // MarshalMsg implements msgp.Marshaler func (z *ReplicateObjectInfo) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 25 + // map header, size 26 // string "Name" - o = append(o, 0xde, 0x0, 0x19, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + o = append(o, 0xde, 0x0, 0x1a, 0xa4, 0x4e, 0x61, 0x6d, 0x65) o = msgp.AppendString(o, z.Name) // string 
"Bucket" o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74) @@ -2012,6 +2051,9 @@ func (z *ReplicateObjectInfo) MarshalMsg(b []byte) (o []byte, err error) { // string "ReplicationTimestamp" o = append(o, 0xb4, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70) o = msgp.AppendTime(o, z.ReplicationTimestamp) + // string "Checksum" + o = append(o, 0xa8, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d) + o = msgp.AppendBytes(o, z.Checksum) return } @@ -2175,14 +2217,12 @@ func (z *ReplicateObjectInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.TargetStatuses == nil { z.TargetStatuses = make(map[string]replication.StatusType, zb0002) } else if len(z.TargetStatuses) > 0 { - for key := range z.TargetStatuses { - delete(z.TargetStatuses, key) - } + clear(z.TargetStatuses) } for zb0002 > 0 { - var za0001 string var za0002 replication.StatusType zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "TargetStatuses") @@ -2205,14 +2245,12 @@ func (z *ReplicateObjectInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.TargetPurgeStatuses == nil { z.TargetPurgeStatuses = make(map[string]VersionPurgeStatusType, zb0003) } else if len(z.TargetPurgeStatuses) > 0 { - for key := range z.TargetPurgeStatuses { - delete(z.TargetPurgeStatuses, key) - } + clear(z.TargetPurgeStatuses) } for zb0003 > 0 { - var za0003 string var za0004 VersionPurgeStatusType zb0003-- + var za0003 string za0003, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "TargetPurgeStatuses") @@ -2231,6 +2269,12 @@ func (z *ReplicateObjectInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "ReplicationTimestamp") return } + case "Checksum": + z.Checksum, bts, err = msgp.ReadBytesBytes(bts, z.Checksum) + if err != nil { + err = msgp.WrapError(err, "Checksum") + return + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -2259,7 +2303,7 @@ func (z *ReplicateObjectInfo) Msgsize() (s int) { s += msgp.StringPrefixSize + len(za0003) + za0004.Msgsize() } } - s += 21 + msgp.TimeSize + s += 21 + msgp.TimeSize + 9 + msgp.BytesPrefixSize + len(z.Checksum) return } diff --git a/cmd/object-api-deleteobject_test.go b/cmd/object-api-deleteobject_test.go index eef5754933896..b7e888e4e2494 100644 --- a/cmd/object-api-deleteobject_test.go +++ b/cmd/object-api-deleteobject_test.go @@ -93,14 +93,18 @@ func testDeleteObject(obj ObjectLayer, instanceType string, t TestErrHandler) { md5Bytes := md5.Sum([]byte(object.content)) oi, err := obj.PutObject(context.Background(), testCase.bucketName, object.name, mustGetPutObjReader(t, strings.NewReader(object.content), int64(len(object.content)), hex.EncodeToString(md5Bytes[:]), ""), ObjectOptions{}) - t.Log(oi) if err != nil { + t.Log(oi) t.Fatalf("%s : %s", instanceType, err.Error()) } } oi, err := obj.DeleteObject(context.Background(), testCase.bucketName, testCase.pathToDelete, ObjectOptions{}) - t.Log(oi, err) + if err != nil && !isErrObjectNotFound(err) { + t.Log(oi) + t.Errorf("Test %d: %s: Expected to pass, but failed with: %s", i+1, instanceType, err) + continue + } result, err := obj.ListObjects(context.Background(), testCase.bucketName, "", "", "", 1000) if err != nil { diff --git a/cmd/object-api-errors.go b/cmd/object-api-errors.go index b3b0a1777a9e8..ef372393886c9 100644 --- a/cmd/object-api-errors.go +++ b/cmd/object-api-errors.go @@ -27,13 +27,13 @@ import ( // Converts underlying storage 
error. Convenience function written to // handle all cases where we have known types of errors returned by // underlying storage layer. -func toObjectErr(err error, params ...string) error { - if err == nil { +func toObjectErr(oerr error, params ...string) error { + if oerr == nil { return nil } // Unwarp the error first - err = unwrapAll(err) + err := unwrapAll(oerr) if err == context.Canceled { return context.Canceled @@ -157,6 +157,9 @@ func toObjectErr(err error, params ...string) error { if len(params) >= 2 { apiErr.Object = decodeDirObject(params[1]) } + if v, ok := oerr.(InsufficientReadQuorum); ok { + apiErr.Type = v.Type + } return apiErr case errErasureWriteQuorum.Error(): apiErr := InsufficientWriteQuorum{} @@ -201,8 +204,34 @@ func (e SlowDown) Error() string { return "Please reduce your request rate" } +// RQErrType reason for read quorum error. +type RQErrType int + +const ( + // RQInsufficientOnlineDrives - not enough online drives. + RQInsufficientOnlineDrives RQErrType = 1 << iota + // RQInconsistentMeta - inconsistent metadata. + RQInconsistentMeta +) + +func (t RQErrType) String() string { + switch t { + case RQInsufficientOnlineDrives: + return "InsufficientOnlineDrives" + case RQInconsistentMeta: + return "InconsistentMeta" + default: + return "Unknown" + } +} + // InsufficientReadQuorum storage cannot satisfy quorum for read operation. -type InsufficientReadQuorum GenericError +type InsufficientReadQuorum struct { + Bucket string + Object string + Err error + Type RQErrType +} func (e InsufficientReadQuorum) Error() string { return "Storage resources are insufficient for the read operation " + e.Bucket + "/" + e.Object @@ -566,7 +595,7 @@ type InvalidRange struct { } func (e InvalidRange) Error() string { - return fmt.Sprintf("The requested range \"bytes %d -> %d of %d\" is not satisfiable.", e.OffsetBegin, e.OffsetEnd, e.ResourceSize) + return fmt.Sprintf("The requested range 'bytes=%d-%d' is not satisfiable", e.OffsetBegin, e.OffsetEnd) } // ObjectTooLarge error returned when the size of the object > max object size allowed (5G) per request. @@ -676,6 +705,10 @@ func (e UnsupportedMetadata) Error() string { // isErrBucketNotFound - Check if error type is BucketNotFound. func isErrBucketNotFound(err error) bool { + if errors.Is(err, errVolumeNotFound) { + return true + } + var bkNotFound BucketNotFound return errors.As(err, &bkNotFound) } @@ -694,12 +727,20 @@ func isErrWriteQuorum(err error) bool { // isErrObjectNotFound - Check if error type is ObjectNotFound. func isErrObjectNotFound(err error) bool { + if errors.Is(err, errFileNotFound) { + return true + } + var objNotFound ObjectNotFound return errors.As(err, &objNotFound) } // isErrVersionNotFound - Check if error type is VersionNotFound. func isErrVersionNotFound(err error) bool { + if errors.Is(err, errFileVersionNotFound) { + return true + } + var versionNotFound VersionNotFound return errors.As(err, &versionNotFound) } @@ -729,6 +770,9 @@ func isErrMethodNotAllowed(err error) bool { } func isErrInvalidRange(err error) bool { + if errors.Is(err, errInvalidRange) { + return true + } _, ok := err.(InvalidRange) return ok } @@ -744,3 +788,20 @@ func isReplicationPermissionCheck(err error) bool { _, ok := err.(ReplicationPermissionCheck) return ok } + +// DataMovementOverwriteErr - captures the error when a data movement activity +// like rebalance incorrectly tries to overwrite an object. 
+type DataMovementOverwriteErr GenericError + +func (de DataMovementOverwriteErr) Error() string { + objInfoStr := fmt.Sprintf("bucket=%s object=%s", de.Bucket, de.Object) + if de.VersionID != "" { + objInfoStr = fmt.Sprintf("%s version-id=%s", objInfoStr, de.VersionID) + } + return fmt.Sprintf("invalid data movement operation, source and destination pool are the same for %s", objInfoStr) +} + +func isDataMovementOverWriteErr(err error) bool { + var de DataMovementOverwriteErr + return errors.As(err, &de) +} diff --git a/cmd/object-api-input-checks.go b/cmd/object-api-input-checks.go index 8439a21505ba2..9c8b213e4d822 100644 --- a/cmd/object-api-input-checks.go +++ b/cmd/object-api-input-checks.go @@ -24,7 +24,6 @@ import ( "strings" "github.com/minio/minio-go/v7/pkg/s3utils" - "github.com/minio/minio/internal/logger" ) // Checks on CopyObject arguments, bucket and object. @@ -71,10 +70,6 @@ func checkListObjsArgs(ctx context.Context, bucket, prefix, marker string) error // Validates object prefix validity after bucket exists. if !IsValidObjectPrefix(prefix) { - logger.LogIf(ctx, ObjectNameInvalid{ - Bucket: bucket, - Object: prefix, - }) return ObjectNameInvalid{ Bucket: bucket, Object: prefix, @@ -90,10 +85,6 @@ func checkListMultipartArgs(ctx context.Context, bucket, prefix, keyMarker, uplo } if uploadIDMarker != "" { if HasSuffix(keyMarker, SlashSeparator) { - logger.LogIf(ctx, InvalidUploadIDKeyCombination{ - UploadIDMarker: uploadIDMarker, - KeyMarker: keyMarker, - }) return InvalidUploadIDKeyCombination{ UploadIDMarker: uploadIDMarker, KeyMarker: keyMarker, @@ -101,7 +92,6 @@ func checkListMultipartArgs(ctx context.Context, bucket, prefix, keyMarker, uplo } _, err := base64.RawURLEncoding.DecodeString(uploadIDMarker) if err != nil { - logger.LogIf(ctx, err) return MalformedUploadID{ UploadID: uploadIDMarker, } diff --git a/cmd/object-api-interface.go b/cmd/object-api-interface.go index 0f24c98336a5d..06c73741078d3 100644 --- a/cmd/object-api-interface.go +++ b/cmd/object-api-interface.go @@ -86,9 +86,12 @@ type ObjectOptions struct { WantChecksum *hash.Checksum // x-amz-checksum-XXX checksum sent to PutObject/ CompleteMultipartUpload. + WantServerSideChecksumType hash.ChecksumType // if set, we compute a server-side checksum of this type + NoDecryption bool // indicates if the stream must be decrypted. PreserveETag string // preserves this etag during a PUT call. NoLock bool // indicates to lower layers if the caller is expecting to hold locks. + HasIfMatch bool // indicates if the request has If-Match header ProxyRequest bool // only set for GET/HEAD in active-active replication scenario ProxyHeaderSet bool // only set for GET/HEAD in active-active replication scenario ReplicationRequest bool // true only if replication request @@ -113,6 +116,8 @@ type ObjectOptions struct { // participating in a rebalance operation. Typically set for 'write' operations. 
SkipRebalancing bool + SrcPoolIdx int // set by PutObject/CompleteMultipart operations due to rebalance; used to prevent the rebalance source and destination pools from being the same + DataMovement bool // indicates an ongoing decommissioning or rebalancing PrefixEnabledFn func(prefix string) bool // function which returns true if versioning is enabled on prefix @@ -143,6 +148,7 @@ type WalkOptions struct { LatestOnly bool // returns only latest versions for all matching objects AskDisks string // dictates how many disks are being listed VersionsSort WalkVersionsSortOrder // sort order for versions of the same object; default: Ascending order in ModTime + Limit int // maximum number of items, 0 means no limit } // ExpirationOptions represents object options for object expiration at objectLayer. @@ -179,8 +185,9 @@ type DeleteBucketOptions struct { // BucketOptions provides options for ListBuckets and GetBucketInfo call. type BucketOptions struct { - Deleted bool // true only when site replication is enabled - Cached bool // true only when we are requesting a cached response instead of hitting the disk for example ListBuckets() call. + Deleted bool // true only when site replication is enabled + Cached bool // true only when we are requesting a cached response instead of hitting the disk for example ListBuckets() call. + NoMetadata bool } // SetReplicaStatus sets replica status and timestamp for delete operations in ObjectOptions @@ -204,8 +211,8 @@ func (o *ObjectOptions) SetDeleteReplicationState(dsc ReplicateDecision, vID str o.DeleteReplication = ReplicationState{ ReplicateDecisionStr: dsc.String(), } - switch { - case o.VersionID == "": + switch o.VersionID { + case "": o.DeleteReplication.ReplicationStatusInternal = dsc.PendingStatus() o.DeleteReplication.Targets = replicationStatusesMap(o.DeleteReplication.ReplicationStatusInternal) default: @@ -218,11 +225,11 @@ func (o *ObjectOptions) SetDeleteReplicationState(dsc ReplicateDecision, vID str func (o *ObjectOptions) PutReplicationState() (r ReplicationState) { rstatus, ok := o.UserDefined[ReservedMetadataPrefixLower+ReplicationStatus] if !ok { - return + return r } r.ReplicationStatusInternal = rstatus r.Targets = replicationStatusesMap(rstatus) - return + return r } // SetEvalMetadataFn sets the metadata evaluation function @@ -244,6 +251,7 @@ type ObjectLayer interface { Shutdown(context.Context) error NSScanner(ctx context.Context, updates chan<- DataUsageInfo, wantCycle uint32, scanMode madmin.HealScanMode) error BackendInfo() madmin.BackendInfo + Legacy() bool // Only returns true for deployments which use CRCMOD as their object distribution algorithm. StorageInfo(ctx context.Context, metrics bool) StorageInfo LocalStorageInfo(ctx context.Context, metrics bool) StorageInfo @@ -256,7 +264,7 @@ type ObjectLayer interface { ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (result ListObjectVersionsInfo, err error) // Walk lists all objects including versions, delete markers. - Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts WalkOptions) error + Walk(ctx context.Context, bucket, prefix string, results chan<- itemOrErr[ObjectInfo], opts WalkOptions) error // Object operations.
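// Illustrative sketch (hypothetical, not part of this patch): the Walk signature above now
// streams itemOrErr[ObjectInfo] values, so per-item listing errors can be surfaced instead of
// being dropped. The consumer below is an assumed example within the cmd package; it assumes
// itemOrErr exposes Item and Err fields and that Walk closes the results channel when the
// listing finishes. walkBucketExample and the Limit value of 1000 are made up for illustration.
func walkBucketExample(ctx context.Context, objAPI ObjectLayer, bucket string) error {
	results := make(chan itemOrErr[ObjectInfo])
	// Limit is the WalkOptions field added above; 0 means no limit.
	if err := objAPI.Walk(ctx, bucket, "", results, WalkOptions{Limit: 1000}); err != nil {
		return err
	}
	for res := range results {
		if res.Err != nil {
			return res.Err // surface the first per-item listing error
		}
		_ = res.Item // process each ObjectInfo here
	}
	return nil
}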
diff --git a/cmd/object-api-interface_gen.go b/cmd/object-api-interface_gen.go index c8994be71f86d..919cdb1748a5a 100644 --- a/cmd/object-api-interface_gen.go +++ b/cmd/object-api-interface_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) @@ -9,13 +9,16 @@ import ( // MarshalMsg implements msgp.Marshaler func (z BucketOptions) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 2 + // map header, size 3 // string "Deleted" - o = append(o, 0x82, 0xa7, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64) + o = append(o, 0x83, 0xa7, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64) o = msgp.AppendBool(o, z.Deleted) // string "Cached" o = append(o, 0xa6, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64) o = msgp.AppendBool(o, z.Cached) + // string "NoMetadata" + o = append(o, 0xaa, 0x4e, 0x6f, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61) + o = msgp.AppendBool(o, z.NoMetadata) return } @@ -49,6 +52,12 @@ func (z *BucketOptions) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "Cached") return } + case "NoMetadata": + z.NoMetadata, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "NoMetadata") + return + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -63,7 +72,7 @@ func (z *BucketOptions) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z BucketOptions) Msgsize() (s int) { - s = 1 + 8 + msgp.BoolSize + 7 + msgp.BoolSize + s = 1 + 8 + msgp.BoolSize + 7 + msgp.BoolSize + 11 + msgp.BoolSize return } @@ -210,9 +219,9 @@ func (z *MakeBucketOptions) Msgsize() (s int) { // MarshalMsg implements msgp.Marshaler func (z *WalkOptions) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 4 + // map header, size 5 // string "Marker" - o = append(o, 0x84, 0xa6, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72) + o = append(o, 0x85, 0xa6, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72) o = msgp.AppendString(o, z.Marker) // string "LatestOnly" o = append(o, 0xaa, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79) @@ -223,6 +232,9 @@ func (z *WalkOptions) MarshalMsg(b []byte) (o []byte, err error) { // string "VersionsSort" o = append(o, 0xac, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x53, 0x6f, 0x72, 0x74) o = msgp.AppendUint8(o, uint8(z.VersionsSort)) + // string "Limit" + o = append(o, 0xa5, 0x4c, 0x69, 0x6d, 0x69, 0x74) + o = msgp.AppendInt(o, z.Limit) return } @@ -272,6 +284,12 @@ func (z *WalkOptions) UnmarshalMsg(bts []byte) (o []byte, err error) { } z.VersionsSort = WalkVersionsSortOrder(zb0002) } + case "Limit": + z.Limit, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Limit") + return + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -286,7 +304,7 @@ func (z *WalkOptions) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *WalkOptions) Msgsize() (s int) { - s = 1 + 7 + msgp.StringPrefixSize + len(z.Marker) + 11 + msgp.BoolSize + 9 + msgp.StringPrefixSize + len(z.AskDisks) + 13 + msgp.Uint8Size + s = 1 + 7 + msgp.StringPrefixSize + len(z.Marker) + 11 + msgp.BoolSize + 9 + msgp.StringPrefixSize + len(z.AskDisks) + 13 + msgp.Uint8Size + 6 + msgp.IntSize return } diff --git a/cmd/object-api-listobjects_test.go 
b/cmd/object-api-listobjects_test.go index 28fd824f9049b..58bfc4f84dc4e 100644 --- a/cmd/object-api-listobjects_test.go +++ b/cmd/object-api-listobjects_test.go @@ -26,6 +26,9 @@ import ( "strconv" "strings" "testing" + "time" + + "github.com/minio/minio/internal/bucket/lifecycle" ) func TestListObjectsVersionedFolders(t *testing.T) { @@ -153,7 +156,6 @@ func testListObjectsVersionedFolders(obj ObjectLayer, instanceType string, t1 Te } for i, testCase := range testCases { - testCase := testCase t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) { var err error var resultL ListObjectsInfo @@ -163,14 +165,14 @@ func testListObjectsVersionedFolders(obj ObjectLayer, instanceType string, t1 Te testCase.prefix, "marker:", testCase.marker, "delimiter:", testCase.delimiter, "maxkeys:", testCase.maxKeys) - resultV, err = obj.ListObjectVersions(context.Background(), testCase.bucketName, + resultV, err = obj.ListObjectVersions(t.Context(), testCase.bucketName, testCase.prefix, testCase.marker, "", testCase.delimiter, testCase.maxKeys) } else { t.Log("ListObjects, bucket:", testCase.bucketName, "prefix:", testCase.prefix, "marker:", testCase.marker, "delimiter:", testCase.delimiter, "maxkeys:", testCase.maxKeys) - resultL, err = obj.ListObjects(context.Background(), testCase.bucketName, + resultL, err = obj.ListObjects(t.Context(), testCase.bucketName, testCase.prefix, testCase.marker, testCase.delimiter, testCase.maxKeys) } if err != nil && testCase.shouldPass { @@ -389,7 +391,6 @@ func _testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler, v if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) } - } // Formulating the result data set to be expected from ListObjects call inside the tests, @@ -852,7 +853,7 @@ func _testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler, v {"test-bucket-list-object", "", "", "", -1, resultCases[0], nil, true}, // Testing for very large value of maxKey, this should set maxKeys to listObjectsLimit (20). {"test-bucket-list-object", "", "", "", 1234567890, resultCases[0], nil, true}, - // Testing for trancated value (21-24). + // Testing for truncated value (21-24). {"test-bucket-list-object", "", "", "", 5, resultCases[1], nil, true}, {"test-bucket-list-object", "", "", "", 4, resultCases[2], nil, true}, {"test-bucket-list-object", "", "", "", 3, resultCases[3], nil, true}, @@ -878,7 +879,7 @@ func _testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler, v // Marker being set to a value which is lesser than and all object names when sorted (37). // Expected to send all the objects in the bucket in this case. {"test-bucket-list-object", "", "Abc", "", 10, resultCases[14], nil, true}, - // Marker is to a hierarhical value (38-39). + // Marker is to a hierarchical value (38-39). {"test-bucket-list-object", "", "Asia/India/India-summer-photos-1", "", 10, resultCases[15], nil, true}, {"test-bucket-list-object", "", "Asia/India/Karnataka/Bangalore/Koramangala/pics", "", 10, resultCases[16], nil, true}, // Testing with marker and truncation, but no prefix (40-42). @@ -909,7 +910,7 @@ func _testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler, v {"test-bucket-list-object", "Asia", "", SlashSeparator, 10, resultCases[25], nil, true}, {"test-bucket-list-object", "new", "", SlashSeparator, 10, resultCases[26], nil, true}, {"test-bucket-list-object", "Asia/India/", "", SlashSeparator, 10, resultCases[27], nil, true}, - // Test with marker set as hierarhical value and with delimiter. 
(58-59) + // Test with marker set as hierarchical value and with delimiter. (58-59) {"test-bucket-list-object", "", "Asia/India/India-summer-photos-1", SlashSeparator, 10, resultCases[28], nil, true}, {"test-bucket-list-object", "", "Asia/India/Karnataka/Bangalore/Koramangala/pics", SlashSeparator, 10, resultCases[29], nil, true}, // Test with prefix and delimiter set to '/'. (60) @@ -942,10 +943,9 @@ func _testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler, v } for i, testCase := range testCases { - testCase := testCase t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) { t.Log("ListObjects, bucket:", testCase.bucketName, "prefix:", testCase.prefix, "marker:", testCase.marker, "delimiter:", testCase.delimiter, "maxkeys:", testCase.maxKeys) - result, err := obj.ListObjects(context.Background(), testCase.bucketName, + result, err := obj.ListObjects(t.Context(), testCase.bucketName, testCase.prefix, testCase.marker, testCase.delimiter, int(testCase.maxKeys)) if err != nil && testCase.shouldPass { t.Errorf("Test %d: %s: Expected to pass, but failed with: %s", i+1, instanceType, err.Error()) @@ -1014,7 +1014,6 @@ func _testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler, v t.Errorf("Test %d: %s: Expected NextMarker to be empty since listing is not truncated, but instead found `%v`", i+1, instanceType, result.NextMarker) } } - } }) } @@ -1166,10 +1165,9 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) } - } - // Formualting the result data set to be expected from ListObjects call inside the tests, + // Formulating the result data set to be expected from ListObjects call inside the tests, // This will be used in testCases and used for asserting the correctness of ListObjects output in the tests. resultCases := []ListObjectsInfo{ @@ -1593,7 +1591,7 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand {"test-bucket-list-object", "", "", "", -1, resultCases[0], nil, true}, // Testing for very large value of maxKey, this should set maxKeys to listObjectsLimit (18). {"test-bucket-list-object", "", "", "", 1234567890, resultCases[0], nil, true}, - // Testing for trancated value (19-22). + // Testing for truncated value (19-22). {"test-bucket-list-object", "", "", "", 5, resultCases[1], nil, true}, {"test-bucket-list-object", "", "", "", 4, resultCases[2], nil, true}, {"test-bucket-list-object", "", "", "", 3, resultCases[3], nil, true}, @@ -1618,7 +1616,7 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand // Marker being set to a value which is lesser than and all object names when sorted (35). // Expected to send all the objects in the bucket in this case. {"test-bucket-list-object", "", "Abc", "", 10, resultCases[14], nil, true}, - // Marker is to a hierarhical value (36-37). + // Marker is to a hierarchical value (36-37). {"test-bucket-list-object", "", "Asia/India/India-summer-photos-1", "", 10, resultCases[15], nil, true}, {"test-bucket-list-object", "", "Asia/India/Karnataka/Bangalore/Koramangala/pics", "", 10, resultCases[16], nil, true}, // Testing with marker and truncation, but no prefix (38-40). 
@@ -1649,7 +1647,7 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand {"test-bucket-list-object", "Asia", "", SlashSeparator, 10, resultCases[25], nil, true}, {"test-bucket-list-object", "new", "", SlashSeparator, 10, resultCases[26], nil, true}, {"test-bucket-list-object", "Asia/India/", "", SlashSeparator, 10, resultCases[27], nil, true}, - // Test with marker set as hierarhical value and with delimiter. (56-57) + // Test with marker set as hierarchical value and with delimiter. (56-57) {"test-bucket-list-object", "", "Asia/India/India-summer-photos-1", SlashSeparator, 10, resultCases[28], nil, true}, {"test-bucket-list-object", "", "Asia/India/Karnataka/Bangalore/Koramangala/pics", SlashSeparator, 10, resultCases[29], nil, true}, // Test with prefix and delimiter set to '/'. (58) @@ -1676,9 +1674,8 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand } for i, testCase := range testCases { - testCase := testCase t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) { - result, err := obj.ListObjectVersions(context.Background(), testCase.bucketName, + result, err := obj.ListObjectVersions(t.Context(), testCase.bucketName, testCase.prefix, testCase.marker, "", testCase.delimiter, int(testCase.maxKeys)) if err != nil && testCase.shouldPass { t.Errorf("%s: Expected to pass, but failed with: %s", instanceType, err.Error()) @@ -1785,12 +1782,10 @@ func testListObjectsContinuation(obj ObjectLayer, instanceType string, t1 TestEr if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) } - } // Formulating the result data set to be expected from ListObjects call inside the tests, // This will be used in testCases and used for asserting the correctness of ListObjects output in the tests. - resultCases := []ListObjectsInfo{ { Objects: []ObjectInfo{ @@ -1829,13 +1824,12 @@ func testListObjectsContinuation(obj ObjectLayer, instanceType string, t1 TestEr } for i, testCase := range testCases { - testCase := testCase t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) { var foundObjects []ObjectInfo var foundPrefixes []string marker := "" for { - result, err := obj.ListObjects(context.Background(), testCase.bucketName, + result, err := obj.ListObjects(t.Context(), testCase.bucketName, testCase.prefix, marker, testCase.delimiter, testCase.page) if err != nil { t.Fatalf("Test %d: %s: Expected to pass, but failed with: %s", i+1, instanceType, err.Error()) @@ -1910,27 +1904,143 @@ func BenchmarkListObjects(b *testing.B) { bucket := "ls-benchmark-bucket" // Create a bucket. - err := obj.MakeBucket(context.Background(), bucket, MakeBucketOptions{}) + err := obj.MakeBucket(b.Context(), bucket, MakeBucketOptions{}) if err != nil { b.Fatal(err) } // Insert objects to be listed and benchmarked later. - for i := 0; i < 20000; i++ { + for i := range 20000 { key := "obj" + strconv.Itoa(i) - _, err = obj.PutObject(context.Background(), bucket, key, mustGetPutObjReader(b, bytes.NewBufferString(key), int64(len(key)), "", ""), ObjectOptions{}) + _, err = obj.PutObject(b.Context(), bucket, key, mustGetPutObjReader(b, bytes.NewBufferString(key), int64(len(key)), "", ""), ObjectOptions{}) if err != nil { b.Fatal(err) } } - b.ResetTimer() - // List the buckets over and over and over. 
- for i := 0; i < b.N; i++ { - _, err = obj.ListObjects(context.Background(), bucket, "", "obj9000", "", -1) + for b.Loop() { + _, err = obj.ListObjects(b.Context(), bucket, "", "obj9000", "", -1) if err != nil { b.Fatal(err) } } } + +func TestListObjectsWithILM(t *testing.T) { + ExecObjectLayerTest(t, testListObjectsWithILM) +} + +func testListObjectsWithILM(obj ObjectLayer, instanceType string, t1 TestErrHandler) { + // Prepare lifecycle expiration workers + es := newExpiryState(t1.Context(), obj, 0) + globalExpiryState = es + + t, _ := t1.(*testing.T) + + objContent := "test-content" + objMd5 := md5.Sum([]byte(objContent)) + + uploads := []struct { + bucket string + expired int + notExpired int + }{ + {"test-list-ilm-nothing-expired", 0, 6}, + {"test-list-ilm-all-expired", 6, 0}, + {"test-list-ilm-all-half-expired", 3, 3}, + } + + oneWeekAgo := time.Now().Add(-7 * 24 * time.Hour) + + lifecycleBytes := []byte(` + + + Enabled + + 1 + + + +`) + + lifecycleConfig, err := lifecycle.ParseLifecycleConfig(bytes.NewReader(lifecycleBytes)) + if err != nil { + t.Fatal(err) + } + + for i, upload := range uploads { + err := obj.MakeBucket(context.Background(), upload.bucket, MakeBucketOptions{}) + if err != nil { + t.Fatalf("%s : %s", instanceType, err.Error()) + } + + metadata, err := globalBucketMetadataSys.Get(upload.bucket) + if err != nil { + t.Fatal(err) + } + metadata.lifecycleConfig = lifecycleConfig + globalBucketMetadataSys.Set(upload.bucket, metadata) + defer globalBucketMetadataSys.Remove(upload.bucket) + + // Upload objects which modtime as one week ago, supposed to be expired by ILM + for range upload.expired { + _, err := obj.PutObject(context.Background(), upload.bucket, randString(32), + mustGetPutObjReader(t, + bytes.NewBufferString(objContent), + int64(len(objContent)), + hex.EncodeToString(objMd5[:]), + ""), + ObjectOptions{MTime: oneWeekAgo}, + ) + if err != nil { + t.Fatal(err) + } + } + + // Upload objects which current time as modtime, not expired by ILM + for range upload.notExpired { + _, err := obj.PutObject(context.Background(), upload.bucket, randString(32), + mustGetPutObjReader(t, + bytes.NewBufferString(objContent), + int64(len(objContent)), + hex.EncodeToString(objMd5[:]), + ""), + ObjectOptions{}, + ) + if err != nil { + t.Fatal(err) + } + } + + for _, maxKeys := range []int{1, 10, 49} { + // Test ListObjects V2 + totalObjs, didRuns := 0, 0 + marker := "" + for { + didRuns++ + if didRuns > 1000 { + t.Fatal("too many runs") + return + } + result, err := obj.ListObjectsV2(context.Background(), upload.bucket, "", marker, "", maxKeys, false, "") + if err != nil { + t.Fatalf("Test %d: %s: Expected to pass, but failed with: %s", i, instanceType, err.Error()) + } + totalObjs += len(result.Objects) + if !result.IsTruncated { + break + } + if marker != "" && marker == result.NextContinuationToken { + t.Fatalf("infinite loop marker: %s", result.NextContinuationToken) + } + marker = result.NextContinuationToken + } + + if totalObjs != upload.notExpired { + t.Fatalf("Test %d: %s: max-keys=%d, %d objects are expected to be seen, but %d found instead (didRuns=%d)", + i+1, instanceType, maxKeys, upload.notExpired, totalObjs, didRuns) + } + } + } +} diff --git a/cmd/object-api-multipart_test.go b/cmd/object-api-multipart_test.go index 3a334a7134972..d888df7177c27 100644 --- a/cmd/object-api-multipart_test.go +++ b/cmd/object-api-multipart_test.go @@ -79,7 +79,7 @@ func testObjectNewMultipartUpload(obj ObjectLayer, instanceType string, t TestEr case InvalidUploadID: t.Fatalf("%s: 
New Multipart upload failed to create uuid file.", instanceType) default: - t.Fatalf(err.Error()) + t.Fatal(err.Error()) } } } @@ -369,7 +369,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan // Failed to create newbucket, abort. t.Fatalf("%s : %s", instanceType, err.Error()) } - for i := 0; i < 3; i++ { + for range 3 { // Initiate Multipart Upload on bucketNames[1] for the same object 3 times. // Used to test the listing for the case of multiple uploadID's for a given object. res, err = obj.NewMultipartUpload(context.Background(), bucketNames[1], objectNames[0], opts) @@ -392,7 +392,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan } // Initiate Multipart Upload on bucketNames[2]. // Used to test the listing for the case of multiple objects for a given bucket. - for i := 0; i < 6; i++ { + for i := range 6 { res, err = obj.NewMultipartUpload(context.Background(), bucketNames[2], objectNames[i], opts) if err != nil { // Failed to create NewMultipartUpload, abort. @@ -443,7 +443,6 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan if err != nil { t.Fatalf("%s : %s", instanceType, err.Error()) } - } // Expected Results set for asserting ListObjectMultipart test. @@ -1202,6 +1201,314 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan } } +// Wrapper for calling TestListObjectPartsStale tests for both Erasure multiple disks and single node setup. +func TestListObjectPartsStale(t *testing.T) { + ExecObjectLayerDiskAlteredTest(t, testListObjectPartsStale) +} + +// testListObjectPartsStale - Tests validate listing of object parts when parts are stale +func testListObjectPartsStale(obj ObjectLayer, instanceType string, disks []string, t *testing.T) { + bucketNames := []string{"minio-bucket", "minio-2-bucket"} + objectNames := []string{"minio-object-1.txt"} + uploadIDs := []string{} + + globalStorageClass.Update(storageclass.Config{ + RRS: storageclass.StorageClass{ + Parity: 2, + }, + Standard: storageclass.StorageClass{ + Parity: 4, + }, + }) + + // bucketnames[0]. + // objectNames[0]. + // uploadIds [0]. + // Create bucket before initiating NewMultipartUpload. + err := obj.MakeBucket(context.Background(), bucketNames[0], MakeBucketOptions{}) + if err != nil { + // Failed to create newbucket, abort. + t.Fatalf("%s : %s", instanceType, err.Error()) + } + opts := ObjectOptions{} + // Initiate Multipart Upload on the above created bucket. + res, err := obj.NewMultipartUpload(context.Background(), bucketNames[0], objectNames[0], opts) + if err != nil { + // Failed to create NewMultipartUpload, abort. + t.Fatalf("%s : %s", instanceType, err.Error()) + } + + z := obj.(*erasureServerPools) + er := z.serverPools[0].sets[0] + + uploadIDs = append(uploadIDs, res.UploadID) + + // Create multipart parts. + // Need parts to be uploaded before MultipartLists can be called and tested. + createPartCases := []struct { + bucketName string + objName string + uploadID string + PartID int + inputReaderData string + inputMd5 string + inputDataSize int64 + expectedMd5 string + }{ + // Case 1-4. + // Creating sequence of parts for same uploadID. + // Used to ensure that the ListMultipartResult produces one output for the four parts uploaded below for the given upload ID. 
+ {bucketNames[0], objectNames[0], uploadIDs[0], 1, "abcd", "e2fc714c4727ee9395f324cd2e7f331f", int64(len("abcd")), "e2fc714c4727ee9395f324cd2e7f331f"}, + {bucketNames[0], objectNames[0], uploadIDs[0], 2, "efgh", "1f7690ebdd9b4caf8fab49ca1757bf27", int64(len("efgh")), "1f7690ebdd9b4caf8fab49ca1757bf27"}, + {bucketNames[0], objectNames[0], uploadIDs[0], 3, "ijkl", "09a0877d04abf8759f99adec02baf579", int64(len("abcd")), "09a0877d04abf8759f99adec02baf579"}, + {bucketNames[0], objectNames[0], uploadIDs[0], 4, "mnop", "e132e96a5ddad6da8b07bba6f6131fef", int64(len("abcd")), "e132e96a5ddad6da8b07bba6f6131fef"}, + } + sha256sum := "" + // Iterating over creatPartCases to generate multipart chunks. + for _, testCase := range createPartCases { + _, err := obj.PutObjectPart(context.Background(), testCase.bucketName, testCase.objName, testCase.uploadID, testCase.PartID, mustGetPutObjReader(t, bytes.NewBufferString(testCase.inputReaderData), testCase.inputDataSize, testCase.inputMd5, sha256sum), opts) + if err != nil { + t.Fatalf("%s : %s", instanceType, err.Error()) + } + } + + erasureDisks := er.getDisks() + uploadIDPath := er.getUploadIDDir(bucketNames[0], objectNames[0], uploadIDs[0]) + dataDirs, err := erasureDisks[0].ListDir(context.Background(), minioMetaMultipartBucket, minioMetaMultipartBucket, uploadIDPath, -1) + if err != nil { + t.Fatalf("%s : %s", instanceType, err.Error()) + } + + var dataDir string + for _, folder := range dataDirs { + if strings.HasSuffix(folder, SlashSeparator) { + dataDir = folder + break + } + } + + toDel := (len(erasureDisks) / 2) + 1 + for _, disk := range erasureDisks[:toDel] { + disk.DeleteBulk(context.Background(), minioMetaMultipartBucket, []string{pathJoin(uploadIDPath, dataDir, "part.2")}...) + } + + partInfos := []ListPartsInfo{ + // partinfos - 0. + { + Bucket: bucketNames[0], + Object: objectNames[0], + MaxParts: 10, + UploadID: uploadIDs[0], + Parts: []PartInfo{ + { + PartNumber: 1, + Size: 4, + ETag: "e2fc714c4727ee9395f324cd2e7f331f", + }, + { + PartNumber: 3, + Size: 4, + ETag: "09a0877d04abf8759f99adec02baf579", + }, + { + PartNumber: 4, + Size: 4, + ETag: "e132e96a5ddad6da8b07bba6f6131fef", + }, + }, + }, + // partinfos - 1. + { + Bucket: bucketNames[0], + Object: objectNames[0], + MaxParts: 3, + UploadID: uploadIDs[0], + Parts: []PartInfo{ + { + PartNumber: 1, + Size: 4, + ETag: "e2fc714c4727ee9395f324cd2e7f331f", + }, + { + PartNumber: 3, + Size: 4, + ETag: "09a0877d04abf8759f99adec02baf579", + }, + { + PartNumber: 4, + Size: 4, + ETag: "e132e96a5ddad6da8b07bba6f6131fef", + }, + }, + }, + // partinfos - 2. + { + Bucket: bucketNames[0], + Object: objectNames[0], + MaxParts: 2, + NextPartNumberMarker: 3, + IsTruncated: true, + UploadID: uploadIDs[0], + Parts: []PartInfo{ + { + PartNumber: 1, + Size: 4, + ETag: "e2fc714c4727ee9395f324cd2e7f331f", + }, + { + PartNumber: 3, + Size: 4, + ETag: "09a0877d04abf8759f99adec02baf579", + }, + }, + }, + // partinfos - 3. + { + Bucket: bucketNames[0], + Object: objectNames[0], + MaxParts: 2, + IsTruncated: false, + UploadID: uploadIDs[0], + PartNumberMarker: 3, + Parts: []PartInfo{ + { + PartNumber: 4, + Size: 4, + ETag: "e132e96a5ddad6da8b07bba6f6131fef", + }, + }, + }, + // partinfos - 4. + { + Bucket: bucketNames[0], + Object: objectNames[0], + MaxParts: 2, + IsTruncated: false, + UploadID: uploadIDs[0], + PartNumberMarker: 4, + }, + // partinfos - 5. 
+ { + Bucket: bucketNames[0], + Object: objectNames[0], + MaxParts: 2, + IsTruncated: false, + UploadID: uploadIDs[0], + PartNumberMarker: 100, + }, + } + + // Collection of non-exhaustive ListObjectParts test cases, valid errors + // and success responses. + testCases := []struct { + bucket string + object string + uploadID string + partNumberMarker int + maxParts int + // Expected output of ListPartsInfo. + expectedResult ListPartsInfo + expectedErr error + // Flag indicating whether the test is expected to pass or not. + shouldPass bool + }{ + // Test cases with invalid bucket names (Test number 1-4). + {".test", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: ".test"}, false}, + {"Test", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: "Test"}, false}, + {"---", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: "---"}, false}, + {"ad", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: "ad"}, false}, + // Test cases for listing uploadID with single part. + // Valid bucket names, but they do not exist (Test number 5-7). + {"volatile-bucket-1", "test1", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false}, + {"volatile-bucket-2", "test1", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false}, + {"volatile-bucket-3", "test1", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false}, + // Test case for Asserting for invalid objectName (Test number 8). + {bucketNames[0], "", "", 0, 0, ListPartsInfo{}, ObjectNameInvalid{Bucket: bucketNames[0]}, false}, + // Asserting for Invalid UploadID (Test number 9). + {bucketNames[0], objectNames[0], "abc", 0, 0, ListPartsInfo{}, InvalidUploadID{UploadID: "abc"}, false}, + // Test case for uploadID with multiple parts (Test number 12). + {bucketNames[0], objectNames[0], uploadIDs[0], 0, 10, partInfos[0], nil, true}, + // Test case with maxParts set to less than number of parts (Test number 13). + {bucketNames[0], objectNames[0], uploadIDs[0], 0, 3, partInfos[1], nil, true}, + // Test case with partNumberMarker set (Test number 14). + {bucketNames[0], objectNames[0], uploadIDs[0], 0, 2, partInfos[2], nil, true}, + // Test case with partNumberMarker set (Test number 15). + {bucketNames[0], objectNames[0], uploadIDs[0], 3, 2, partInfos[3], nil, true}, + // Test case with partNumberMarker set (Test number 16). + {bucketNames[0], objectNames[0], uploadIDs[0], 4, 2, partInfos[4], nil, true}, + // Test case with partNumberMarker set (Test number 17). + {bucketNames[0], objectNames[0], uploadIDs[0], 100, 2, partInfos[5], nil, true}, + } + + for i, testCase := range testCases { + actualResult, actualErr := obj.ListObjectParts(context.Background(), testCase.bucket, testCase.object, testCase.uploadID, testCase.partNumberMarker, testCase.maxParts, ObjectOptions{}) + if actualErr != nil && testCase.shouldPass { + t.Errorf("Test %d: %s: Expected to pass, but failed with: %s", i+1, instanceType, actualErr.Error()) + } + if actualErr == nil && !testCase.shouldPass { + t.Errorf("Test %d: %s: Expected to fail with \"%s\", but passed instead", i+1, instanceType, testCase.expectedErr.Error()) + } + // Failed as expected, but does it fail for the expected reason. 
+ if actualErr != nil && !testCase.shouldPass { + if !strings.Contains(actualErr.Error(), testCase.expectedErr.Error()) { + t.Errorf("Test %d: %s: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, instanceType, testCase.expectedErr, actualErr) + } + } + // Passes as expected, but asserting the results. + if actualErr == nil && testCase.shouldPass { + expectedResult := testCase.expectedResult + // Asserting the MaxParts. + if actualResult.MaxParts != expectedResult.MaxParts { + t.Errorf("Test %d: %s: Expected the MaxParts to be %d, but instead found it to be %d", i+1, instanceType, expectedResult.MaxParts, actualResult.MaxParts) + } + // Asserting Object Name. + if actualResult.Object != expectedResult.Object { + t.Errorf("Test %d: %s: Expected Object name to be \"%s\", but instead found it to be \"%s\"", i+1, instanceType, expectedResult.Object, actualResult.Object) + } + // Asserting UploadID. + if actualResult.UploadID != expectedResult.UploadID { + t.Errorf("Test %d: %s: Expected UploadID to be \"%s\", but instead found it to be \"%s\"", i+1, instanceType, expectedResult.UploadID, actualResult.UploadID) + } + // Asserting NextPartNumberMarker. + if actualResult.NextPartNumberMarker != expectedResult.NextPartNumberMarker { + t.Errorf("Test %d: %s: Expected NextPartNumberMarker to be \"%d\", but instead found it to be \"%d\"", i+1, instanceType, expectedResult.NextPartNumberMarker, actualResult.NextPartNumberMarker) + } + // Asserting PartNumberMarker. + if actualResult.PartNumberMarker != expectedResult.PartNumberMarker { + t.Errorf("Test %d: %s: Expected PartNumberMarker to be \"%d\", but instead found it to be \"%d\"", i+1, instanceType, expectedResult.PartNumberMarker, actualResult.PartNumberMarker) + } + // Asserting the BucketName. + if actualResult.Bucket != expectedResult.Bucket { + t.Errorf("Test %d: %s: Expected Bucket to be \"%s\", but instead found it to be \"%s\"", i+1, instanceType, expectedResult.Bucket, actualResult.Bucket) + } + // Asserting IsTruncated. + if actualResult.IsTruncated != testCase.expectedResult.IsTruncated { + t.Errorf("Test %d: %s: Expected IsTruncated to be \"%v\", but found it to \"%v\"", i+1, instanceType, expectedResult.IsTruncated, actualResult.IsTruncated) + } + // Asserting the number of Parts. + if len(expectedResult.Parts) != len(actualResult.Parts) { + t.Errorf("Test %d: %s: Expected the result to contain info of %d Parts, but found %d instead", i+1, instanceType, len(expectedResult.Parts), len(actualResult.Parts)) + } else { + // Iterating over the partInfos and asserting the fields. + for j, actualMetaData := range actualResult.Parts { + // Asserting the PartNumber in the PartInfo. + if actualMetaData.PartNumber != expectedResult.Parts[j].PartNumber { + t.Errorf("Test %d: %s: Part %d: Expected PartNumber to be \"%d\", but instead found \"%d\"", i+1, instanceType, j+1, expectedResult.Parts[j].PartNumber, actualMetaData.PartNumber) + } + // Asserting the Size in the PartInfo. + if actualMetaData.Size != expectedResult.Parts[j].Size { + t.Errorf("Test %d: %s: Part %d: Expected Part Size to be \"%d\", but instead found \"%d\"", i+1, instanceType, j+1, expectedResult.Parts[j].Size, actualMetaData.Size) + } + // Asserting the ETag in the PartInfo. 
+ if actualMetaData.ETag != expectedResult.Parts[j].ETag { + t.Errorf("Test %d: %s: Part %d: Expected Etag to be \"%s\", but instead found \"%s\"", i+1, instanceType, j+1, expectedResult.Parts[j].ETag, actualMetaData.ETag) + } + } + } + } + } +} + // Wrapper for calling TestListObjectPartsDiskNotFound tests for both Erasure multiple disks and single node setup. func TestListObjectPartsDiskNotFound(t *testing.T) { ExecObjectLayerDiskAlteredTest(t, testListObjectPartsDiskNotFound) @@ -1860,10 +2167,9 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T } for _, testCase := range testCases { - testCase := testCase t.(*testing.T).Run("", func(t *testing.T) { opts = ObjectOptions{} - actualResult, actualErr := obj.CompleteMultipartUpload(context.Background(), testCase.bucket, testCase.object, testCase.uploadID, testCase.parts, ObjectOptions{}) + actualResult, actualErr := obj.CompleteMultipartUpload(t.Context(), testCase.bucket, testCase.object, testCase.uploadID, testCase.parts, ObjectOptions{}) if actualErr != nil && testCase.shouldPass { t.Errorf("%s: Expected to pass, but failed with: %s", instanceType, actualErr) } diff --git a/cmd/object-api-options.go b/cmd/object-api-options.go index 3d501c7b9e635..6482f20005aea 100644 --- a/cmd/object-api-options.go +++ b/cmd/object-api-options.go @@ -37,44 +37,46 @@ func getDefaultOpts(header http.Header, copySource bool, metadata map[string]str var sse encrypt.ServerSide opts = ObjectOptions{UserDefined: metadata} + if v, ok := header[xhttp.MinIOSourceProxyRequest]; ok { + opts.ProxyHeaderSet = true + opts.ProxyRequest = strings.Join(v, "") == "true" + } + if _, ok := header[xhttp.MinIOSourceReplicationRequest]; ok { + opts.ReplicationRequest = true + } + opts.Speedtest = header.Get(globalObjectPerfUserMetadata) != "" + if copySource { if crypto.SSECopy.IsRequested(header) { clientKey, err = crypto.SSECopy.ParseHTTP(header) if err != nil { - return + return opts, err } if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil { - return + return opts, err } opts.ServerSideEncryption = encrypt.SSECopy(sse) - return + return opts, err } - return + return opts, err } if crypto.SSEC.IsRequested(header) { clientKey, err = crypto.SSEC.ParseHTTP(header) if err != nil { - return + return opts, err } if sse, err = encrypt.NewSSEC(clientKey[:]); err != nil { - return + return opts, err } opts.ServerSideEncryption = sse - return + return opts, err } if crypto.S3.IsRequested(header) || (metadata != nil && crypto.S3.IsEncrypted(metadata)) { opts.ServerSideEncryption = encrypt.NewSSE() } - if v, ok := header[xhttp.MinIOSourceProxyRequest]; ok { - opts.ProxyHeaderSet = true - opts.ProxyRequest = strings.Join(v, "") == "true" - } - if _, ok := header[xhttp.MinIOSourceReplicationRequest]; ok { - opts.ReplicationRequest = true - } - opts.Speedtest = header.Get(globalObjectPerfUserMetadata) != "" - return + + return opts, err } // get ObjectOptions for GET calls from encryption headers @@ -171,7 +173,7 @@ func getAndValidateAttributesOpts(ctx context.Context, w http.ResponseWriter, r apiErr = toAPIError(ctx, vErr) } valid = false - return + return opts, valid } opts.MaxParts, err = parseIntHeader(bucket, object, r.Header, xhttp.AmzMaxParts) @@ -179,7 +181,7 @@ func getAndValidateAttributesOpts(ctx context.Context, w http.ResponseWriter, r apiErr = toAPIError(ctx, err) argumentName = strings.ToLower(xhttp.AmzMaxParts) valid = false - return + return opts, valid } if opts.MaxParts == 0 { @@ -191,7 +193,7 @@ func 
getAndValidateAttributesOpts(ctx context.Context, w http.ResponseWriter, r apiErr = toAPIError(ctx, err) argumentName = strings.ToLower(xhttp.AmzPartNumberMarker) valid = false - return + return opts, valid } opts.ObjectAttributes = parseObjectAttributes(r.Header) @@ -199,7 +201,7 @@ func getAndValidateAttributesOpts(ctx context.Context, w http.ResponseWriter, r apiErr = errorCodes.ToAPIErr(ErrInvalidAttributeName) argumentName = strings.ToLower(xhttp.AmzObjectAttributes) valid = false - return + return opts, valid } for tag := range opts.ObjectAttributes { @@ -214,28 +216,30 @@ func getAndValidateAttributesOpts(ctx context.Context, w http.ResponseWriter, r argumentName = strings.ToLower(xhttp.AmzObjectAttributes) argumentValue = tag valid = false - return + return opts, valid } } - return + return opts, valid } func parseObjectAttributes(h http.Header) (attributes map[string]struct{}) { attributes = make(map[string]struct{}) - for _, v := range strings.Split(strings.TrimSpace(h.Get(xhttp.AmzObjectAttributes)), ",") { - if v != "" { - attributes[v] = struct{}{} + for _, headerVal := range h.Values(xhttp.AmzObjectAttributes) { + for v := range strings.SplitSeq(strings.TrimSpace(headerVal), ",") { + if v != "" { + attributes[v] = struct{}{} + } } } - return + return attributes } func parseIntHeader(bucket, object string, h http.Header, headerName string) (value int, err error) { stringInt := strings.TrimSpace(h.Get(headerName)) if stringInt == "" { - return + return value, err } value, err = strconv.Atoi(stringInt) if err != nil { @@ -245,7 +249,7 @@ func parseIntHeader(bucket, object string, h http.Header, headerName string) (va Err: fmt.Errorf("Unable to parse %s, value should be an integer", headerName), } } - return + return value, err } func parseBoolHeader(bucket, object string, h http.Header, headerName string) (bool, error) { @@ -400,12 +404,7 @@ func putOptsFromHeaders(ctx context.Context, hdr http.Header, metadata map[strin metadata = make(map[string]string) } - wantCRC, err := hash.GetContentChecksum(hdr) - if err != nil { - return opts, fmt.Errorf("invalid/unknown checksum sent: %v", err) - } etag := strings.TrimSpace(hdr.Get(xhttp.MinIOSourceETag)) - if crypto.S3KMS.IsRequested(hdr) { keyID, context, err := crypto.S3KMS.ParseHTTP(hdr) if err != nil { @@ -415,13 +414,13 @@ func putOptsFromHeaders(ctx context.Context, hdr http.Header, metadata map[strin if err != nil { return ObjectOptions{}, err } - return ObjectOptions{ + op := ObjectOptions{ ServerSideEncryption: sseKms, UserDefined: metadata, MTime: mtime, - WantChecksum: wantCRC, PreserveETag: etag, - }, nil + } + return op, nil } // default case of passing encryption headers and UserDefined metadata to backend opts, err = getDefaultOpts(hdr, false, metadata) @@ -434,7 +433,6 @@ func putOptsFromHeaders(ctx context.Context, hdr http.Header, metadata map[strin opts.ReplicationSourceRetentionTimestamp = retaintimestmp opts.ReplicationSourceTaggingTimestamp = taggingtimestmp opts.PreserveETag = etag - opts.WantChecksum = wantCRC return opts, nil } @@ -470,17 +468,13 @@ func completeMultipartOpts(ctx context.Context, r *http.Request, bucket, object } } } + opts.WantChecksum, err = hash.GetContentChecksum(r.Header) if err != nil { - return opts, InvalidArgument{ - Bucket: bucket, - Object: object, - Err: fmt.Errorf("invalid/unknown checksum sent: %v", err), - } + return opts, err } opts.MTime = mtime opts.UserDefined = make(map[string]string) - // Transfer SSEC key in opts.EncryptFn if crypto.SSEC.IsRequested(r.Header) { key, err 
:= ParseSSECustomerRequest(r) @@ -491,5 +485,12 @@ func completeMultipartOpts(ctx context.Context, r *http.Request, bucket, object } } } + if _, ok := r.Header[xhttp.MinIOSourceReplicationRequest]; ok { + opts.ReplicationRequest = true + opts.UserDefined[ReservedMetadataPrefix+"Actual-Object-Size"] = r.Header.Get(xhttp.MinIOReplicationActualObjectSize) + } + if r.Header.Get(ReplicationSsecChecksumHeader) != "" { + opts.UserDefined[ReplicationSsecChecksumHeader] = r.Header.Get(ReplicationSsecChecksumHeader) + } return opts, nil } diff --git a/cmd/object-api-options_test.go b/cmd/object-api-options_test.go new file mode 100644 index 0000000000000..661372cd4069d --- /dev/null +++ b/cmd/object-api-options_test.go @@ -0,0 +1,78 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "net/http" + "net/http/httptest" + "reflect" + "testing" + + xhttp "github.com/minio/minio/internal/http" +) + +// TestGetAndValidateAttributesOpts is currently minimal and covers a subset of getAndValidateAttributesOpts(), +// it is intended to be expanded when the function is worked on in the future. +func TestGetAndValidateAttributesOpts(t *testing.T) { + globalBucketVersioningSys = &BucketVersioningSys{} + bucket := minioMetaBucket + ctx := t.Context() + testCases := []struct { + name string + headers http.Header + wantObjectAttrs map[string]struct{} + }{ + { + name: "empty header", + headers: http.Header{}, + wantObjectAttrs: map[string]struct{}{}, + }, + { + name: "single header line", + headers: http.Header{ + xhttp.AmzObjectAttributes: []string{"test1,test2"}, + }, + wantObjectAttrs: map[string]struct{}{ + "test1": {}, "test2": {}, + }, + }, + { + name: "multiple header lines with some duplicates", + headers: http.Header{ + xhttp.AmzObjectAttributes: []string{"test1,test2", "test3,test4", "test4,test3"}, + }, + wantObjectAttrs: map[string]struct{}{ + "test1": {}, "test2": {}, "test3": {}, "test4": {}, + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + rec := httptest.NewRecorder() + req := httptest.NewRequest("GET", "/test", nil) + req.Header = testCase.headers + + opts, _ := getAndValidateAttributesOpts(ctx, rec, req, bucket, "testobject") + + if !reflect.DeepEqual(opts.ObjectAttributes, testCase.wantObjectAttrs) { + t.Errorf("want opts %v, got %v", testCase.wantObjectAttrs, opts.ObjectAttributes) + } + }) + } +} diff --git a/cmd/object-api-utils.go b/cmd/object-api-utils.go index d7724f3bbfe2d..5d791ce45192b 100644 --- a/cmd/object-api-utils.go +++ b/cmd/object-api-utils.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2021 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. 
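The parseObjectAttributes change above now reads every x-amz-object-attributes header line via h.Values and splits each value on commas, which is exactly what the duplicate-handling cases in the new test exercise. A minimal standalone sketch of that aggregation, using plain strings.Split rather than the iterator-based strings.SplitSeq from the patch (names here are illustrative, not the server's own helpers):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// collectAttributes gathers every value of a header that may appear on
// multiple lines, splits each value on commas, and deduplicates the result.
func collectAttributes(h http.Header, key string) map[string]struct{} {
	attrs := make(map[string]struct{})
	for _, line := range h.Values(key) {
		for _, v := range strings.Split(strings.TrimSpace(line), ",") {
			if v != "" {
				attrs[v] = struct{}{}
			}
		}
	}
	return attrs
}

func main() {
	h := http.Header{}
	h.Add("X-Amz-Object-Attributes", "ETag,Checksum")
	h.Add("X-Amz-Object-Attributes", "ObjectParts,Checksum") // duplicate value is collapsed
	fmt.Println(collectAttributes(h, "X-Amz-Object-Attributes"))
}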
// // This file is part of MinIO Object Storage stack // @@ -29,11 +29,13 @@ import ( "net/http" "path" "runtime" + "slices" "strconv" "strings" "sync" "time" "unicode/utf8" + "unsafe" "github.com/google/uuid" "github.com/klauspost/compress/s2" @@ -45,13 +47,11 @@ import ( "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/hash" xhttp "github.com/minio/minio/internal/http" - "github.com/minio/minio/internal/ioutil" xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/trie" - "github.com/minio/pkg/v2/wildcard" + "github.com/minio/pkg/v3/trie" + "github.com/minio/pkg/v3/wildcard" "github.com/valyala/bytebufferpool" - "golang.org/x/exp/slices" ) const ( @@ -80,6 +80,18 @@ const ( compMinIndexSize = 8 << 20 ) +// getkeyeparator - returns the separator to be used for +// persisting on drive. +// +// - ":" is used on non-windows platforms +// - "_" is used on windows platforms +func getKeySeparator() string { + if runtime.GOOS == globalWindowsOSName { + return "_" + } + return ":" +} + // isMinioBucket returns true if given bucket is a MinIO internal // bucket and false otherwise. func isMinioMetaBucketName(bucket string) bool { @@ -116,7 +128,7 @@ func IsValidBucketName(bucket string) bool { // 'label' in AWS terminology and if the bucket looks // like an IP address. isNotNumber := false - for i := 0; i < len(piece); i++ { + for i := range len(piece) { switch { case (piece[i] >= 'a' && piece[i] <= 'z' || piece[i] == '-'): @@ -133,7 +145,7 @@ func IsValidBucketName(bucket string) bool { allNumbers = allNumbers && !isNotNumber } // Does the bucket name look like an IP address? - return !(len(pieces) == 4 && allNumbers) + return len(pieces) != 4 || !allNumbers } // IsValidObjectName verifies an object name in accordance with Amazon's @@ -234,6 +246,24 @@ func pathsJoinPrefix(prefix string, elem ...string) (paths []string) { return paths } +// string concat alternative to s1 + s2 with low overhead. +func concat(ss ...string) string { + length := len(ss) + if length == 0 { + return "" + } + // create & allocate the memory in advance. + n := 0 + for i := range length { + n += len(ss[i]) + } + b := make([]byte, 0, n) + for i := range length { + b = append(b, ss[i]...) 
+ } + return unsafe.String(unsafe.SliceData(b), n) +} + // pathJoin - like path.Join() but retains trailing SlashSeparator of the last element func pathJoin(elem ...string) string { sb := bytebufferpool.Get() @@ -523,28 +553,41 @@ func (o ObjectInfo) GetActualSize() (int64, error) { return *o.ActualSize, nil } if o.IsCompressed() { - sizeStr, ok := o.UserDefined[ReservedMetadataPrefix+"actual-size"] - if !ok { - return -1, errInvalidDecompressedSize + sizeStr := o.UserDefined[ReservedMetadataPrefix+"actual-size"] + if sizeStr != "" { + size, err := strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + return -1, errInvalidDecompressedSize + } + return size, nil } - size, err := strconv.ParseInt(sizeStr, 10, 64) - if err != nil { + var actualSize int64 + for _, part := range o.Parts { + actualSize += part.ActualSize + } + if (actualSize == 0) && (actualSize != o.Size) { return -1, errInvalidDecompressedSize } - return size, nil + return actualSize, nil } if _, ok := crypto.IsEncrypted(o.UserDefined); ok { - sizeStr, ok := o.UserDefined[ReservedMetadataPrefix+"actual-size"] - if ok { + sizeStr := o.UserDefined[ReservedMetadataPrefix+"actual-size"] + if sizeStr != "" { size, err := strconv.ParseInt(sizeStr, 10, 64) if err != nil { return -1, errObjectTampered } return size, nil } - return o.DecryptedSize() + actualSize, err := o.DecryptedSize() + if err != nil { + return -1, err + } + if (actualSize == 0) && (actualSize != o.Size) { + return -1, errObjectTampered + } + return actualSize, nil } - return o.Size, nil } @@ -577,22 +620,35 @@ func excludeForCompression(header http.Header, object string, cfg compress.Confi } // Filter compression includes. - exclude := len(cfg.Extensions) > 0 || len(cfg.MimeTypes) > 0 + if len(cfg.Extensions) == 0 && len(cfg.MimeTypes) == 0 { + // Nothing to filter, include everything. + return false + } + if len(cfg.Extensions) > 0 && hasStringSuffixInSlice(objStr, cfg.Extensions) { - exclude = false + // Matched an extension to compress, do not exclude. + return false } if len(cfg.MimeTypes) > 0 && hasPattern(cfg.MimeTypes, contentType) { - exclude = false + // Matched an MIME type to compress, do not exclude. + return false } - return exclude + + // Did not match any inclusion filters, exclude from compression. + return true } // Utility which returns if a string is present in the list. -// Comparison is case insensitive. +// Comparison is case insensitive. Explicit short-circuit if +// the list contains the wildcard "*". func hasStringSuffixInSlice(str string, list []string) bool { str = strings.ToLower(str) for _, v := range list { + if v == "*" { + return true + } + if strings.HasSuffix(str, strings.ToLower(v)) { return true } @@ -742,7 +798,7 @@ type ObjReaderFn func(inputReader io.Reader, h http.Header, cleanupFns ...func() // are called on Close() in FIFO order as passed in ObjReadFn(). NOTE: It is // assumed that clean up functions do not panic (otherwise, they may // not all run!). 
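The concat helper added in this hunk builds the result in a single allocation and then re-labels the byte slice as a string with unsafe.String, avoiding the extra copy that string(b) would make. A self-contained sketch of the same pattern; the safety requirement is that the backing slice is never written to after the conversion, which is why it never escapes the function:

package main

import (
	"fmt"
	"unsafe"
)

// join allocates the destination once, appends all inputs, and converts the
// bytes to a string without a second copy. The slice b must not be modified
// after unsafe.String re-labels it as an immutable string.
func join(ss ...string) string {
	if len(ss) == 0 {
		return ""
	}
	n := 0
	for _, s := range ss {
		n += len(s)
	}
	b := make([]byte, 0, n)
	for _, s := range ss {
		b = append(b, s...)
	}
	return unsafe.String(unsafe.SliceData(b), n)
}

func main() {
	fmt.Println(join("testbucket", "/", "object.txt"))
}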
-func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) ( +func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions, h http.Header) ( fn ObjReaderFn, off, length int64, err error, ) { if opts.CheckPrecondFn != nil && opts.CheckPrecondFn(oi) { @@ -797,7 +853,9 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) ( return b, nil } if isEncrypted { - decrypt = oi.compressionIndexDecrypt + decrypt = func(b []byte) ([]byte, error) { + return oi.compressionIndexDecrypt(b, h) + } } // In case of range based queries on multiparts, the offset and length are reduced. off, decOff, firstPart, decryptSkip, seqNum = getCompressedOffsets(oi, off, decrypt) @@ -826,7 +884,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) ( return nil, err } if decryptSkip > 0 { - inputReader = ioutil.NewSkipReader(inputReader, decryptSkip) + inputReader = xioutil.NewSkipReader(inputReader, decryptSkip) } oi.Size = decLength } @@ -911,7 +969,7 @@ func NewGetObjectReader(rs *HTTPRangeSpec, oi ObjectInfo, opts ObjectOptions) ( // Apply the skipLen and limit on the // decrypted stream - decReader = io.LimitReader(ioutil.NewSkipReader(decReader, skipLen), decRangeLength) + decReader = io.LimitReader(xioutil.NewSkipReader(decReader, skipLen), decRangeLength) // Assemble the GetObjectReader r = &GetObjectReader{ @@ -969,8 +1027,8 @@ func compressionIndexEncrypter(key crypto.ObjectKey, input func() []byte) func() } // compressionIndexDecrypt reverses compressionIndexEncrypter. -func (o *ObjectInfo) compressionIndexDecrypt(input []byte) ([]byte, error) { - return o.metadataDecrypter()("compression-index", input) +func (o *ObjectInfo) compressionIndexDecrypt(input []byte, h http.Header) ([]byte, error) { + return o.metadataDecrypter(h)("compression-index", input) } // SealMD5CurrFn seals md5sum with object encryption key and returns sealed @@ -1038,6 +1096,16 @@ func NewPutObjReader(rawReader *hash.Reader) *PutObjReader { return &PutObjReader{Reader: rawReader, rawReader: rawReader} } +// RawServerSideChecksumResult returns the ServerSideChecksumResult from the +// underlying rawReader, since the PutObjReader might be encrypted data and +// thus any checksum from that would be incorrect. +func (p *PutObjReader) RawServerSideChecksumResult() *hash.Checksum { + if p.rawReader != nil { + return p.rawReader.ServerSideChecksumResult + } + return nil +} + func sealETag(encKey crypto.ObjectKey, md5CurrSum []byte) []byte { var emptyKey [32]byte if bytes.Equal(encKey[:], emptyKey[:]) { @@ -1193,7 +1261,20 @@ func hasSpaceFor(di []*DiskInfo, size int64) (bool, error) { } if nDisks < len(di)/2 || nDisks <= 0 { - return false, fmt.Errorf("not enough online disks to calculate the available space, expected (%d)/(%d)", (len(di)/2)+1, nDisks) + var errs []error + for index, disk := range di { + switch { + case disk == nil: + errs = append(errs, fmt.Errorf("disk[%d]: offline", index)) + case disk.Error != "": + errs = append(errs, fmt.Errorf("disk %s: %s", disk.Endpoint, disk.Error)) + case disk.Total == 0: + errs = append(errs, fmt.Errorf("disk %s: total is zero", disk.Endpoint)) + } + } + // Log disk errors. + peersLogIf(context.Background(), errors.Join(errs...)) + return false, fmt.Errorf("not enough online disks to calculate the available space, need %d, found %d", (len(di)/2)+1, nDisks) } // Check we have enough on each disk, ignoring diskFillFraction. 
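hasSpaceFor now explains why available space could not be computed by collecting one error per unusable drive and joining them into a single value before logging. A minimal sketch of that aggregation pattern; DiskInfo below is a trimmed-down stand-in for the server's type, not the real struct:

package main

import (
	"errors"
	"fmt"
)

// DiskInfo is a simplified stand-in with only the fields needed to show the
// error aggregation.
type DiskInfo struct {
	Endpoint string
	Error    string
	Total    uint64
}

// describeOffline collects one error per unusable disk and joins them into a
// single error value, the same shape of report the patch logs before failing.
func describeOffline(disks []*DiskInfo) error {
	var errs []error
	for i, d := range disks {
		switch {
		case d == nil:
			errs = append(errs, fmt.Errorf("disk[%d]: offline", i))
		case d.Error != "":
			errs = append(errs, fmt.Errorf("disk %s: %s", d.Endpoint, d.Error))
		case d.Total == 0:
			errs = append(errs, fmt.Errorf("disk %s: total is zero", d.Endpoint))
		}
	}
	return errors.Join(errs...) // nil when every disk is healthy
}

func main() {
	disks := []*DiskInfo{
		nil,
		{Endpoint: "http://node2/disk1", Error: "drive faulty"},
		{Endpoint: "http://node3/disk1", Total: 1 << 40},
	}
	fmt.Println(describeOffline(disks))
}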
diff --git a/cmd/object-api-utils_test.go b/cmd/object-api-utils_test.go index 794e0034802a3..53992bf86ac3d 100644 --- a/cmd/object-api-utils_test.go +++ b/cmd/object-api-utils_test.go @@ -20,8 +20,10 @@ package cmd import ( "bytes" "context" + "encoding/hex" "fmt" "io" + "math/rand" "net/http" "net/http/httptest" "path" @@ -34,7 +36,7 @@ import ( "github.com/minio/minio/internal/auth" "github.com/minio/minio/internal/config/compress" "github.com/minio/minio/internal/crypto" - "github.com/minio/pkg/v2/trie" + "github.com/minio/pkg/v3/trie" ) func pathJoinOld(elem ...string) string { @@ -47,12 +49,49 @@ func pathJoinOld(elem ...string) string { return path.Join(elem...) + trailingSlash } +func concatNaive(ss ...string) string { + rs := ss[0] + for i := 1; i < len(ss); i++ { + rs += ss[i] + } + return rs +} + +func benchmark(b *testing.B, data []string) { + b.Run("concat naive", func(b *testing.B) { + b.ResetTimer() + b.ReportAllocs() + for b.Loop() { + concatNaive(data...) + } + }) + b.Run("concat fast", func(b *testing.B) { + b.ResetTimer() + b.ReportAllocs() + for b.Loop() { + concat(data...) + } + }) +} + +func BenchmarkConcatImplementation(b *testing.B) { + data := make([]string, 2) + rng := rand.New(rand.NewSource(0)) + for i := range 2 { + var tmp [16]byte + rng.Read(tmp[:]) + data[i] = hex.EncodeToString(tmp[:]) + } + b.ResetTimer() + benchmark(b, data) +} + func BenchmarkPathJoinOld(b *testing.B) { b.Run("PathJoin", func(b *testing.B) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { pathJoinOld("volume", "path/path/path") } }) @@ -63,7 +102,7 @@ func BenchmarkPathJoin(b *testing.B) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { pathJoin("volume", "path/path/path") } }) @@ -570,7 +609,6 @@ func TestGetActualSize(t *testing.T) { objInfo: ObjectInfo{ UserDefined: map[string]string{ "X-Minio-Internal-compression": "klauspost/compress/s2", - "X-Minio-Internal-actual-size": "100000001", "content-type": "application/octet-stream", "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2", }, @@ -584,6 +622,7 @@ func TestGetActualSize(t *testing.T) { ActualSize: 32891137, }, }, + Size: 100000001, }, result: 100000001, }, @@ -596,6 +635,7 @@ func TestGetActualSize(t *testing.T) { "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2", }, Parts: []ObjectPartInfo{}, + Size: 841, }, result: 841, }, @@ -607,6 +647,7 @@ func TestGetActualSize(t *testing.T) { "etag": "b3ff3ef3789147152fbfbc50efba4bfd-2", }, Parts: []ObjectPartInfo{}, + Size: 100, }, result: -1, }, diff --git a/cmd/object-handlers-common.go b/cmd/object-handlers-common.go index 8ff232f7e03ed..a6febc1225e59 100644 --- a/cmd/object-handlers-common.go +++ b/cmd/object-handlers-common.go @@ -23,6 +23,7 @@ import ( "net/http" "regexp" "strconv" + "strings" "time" "github.com/minio/minio/internal/amztime" @@ -138,7 +139,7 @@ func checkCopyObjectPreconditions(ctx context.Context, w http.ResponseWriter, r // x-minio-source-etag func checkPreconditionsPUT(ctx context.Context, w http.ResponseWriter, r *http.Request, objInfo ObjectInfo, opts ObjectOptions) bool { // Return false for methods other than PUT. - if r.Method != http.MethodPut { + if r.Method != http.MethodPut && r.Method != http.MethodPost { return false } // If the object doesn't have a modtime (IsZero), or the modtime @@ -185,7 +186,7 @@ func checkPreconditionsPUT(ctx context.Context, w http.ResponseWriter, r *http.R if isETagEqual(objInfo.ETag, ifNoneMatchETagHeader) { // If the object ETag matches with the specified ETag. 
writeHeaders() - w.WriteHeader(http.StatusNotModified) + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPreconditionFailed), r.URL) return true } } @@ -247,10 +248,19 @@ func checkPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Requ } // Check if the part number is correct. - if opts.PartNumber > 1 && opts.PartNumber > len(objInfo.Parts) { - // According to S3 we don't need to set any object information here. - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPartNumber), r.URL) - return true + if opts.PartNumber > 1 { + partFound := false + for _, pi := range objInfo.Parts { + if pi.Number == opts.PartNumber { + partFound = true + break + } + } + if !partFound { + // According to S3 we don't need to set any object information here. + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidPartNumber), r.URL) + return true + } } // If-None-Match : Return the object only if its entity tag (ETag) is different from the @@ -258,6 +268,14 @@ func checkPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Requ ifNoneMatchETagHeader := r.Header.Get(xhttp.IfNoneMatch) if ifNoneMatchETagHeader != "" { if isETagEqual(objInfo.ETag, ifNoneMatchETagHeader) { + // Do not care If-Modified-Since, Because: + // 1. If If-Modified-Since condition evaluates to true. + // If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: + // If-None-Match condition evaluates to false , and; + // If-Modified-Since condition evaluates to true ; + // Then Amazon S3 returns the 304 Not Modified response code. + // 2. If If-Modified-Since condition evaluates to false, The following `ifModifiedSinceHeader` judgment will also return 304 + // If the object ETag matches with the specified ETag. writeHeadersPrecondition(w, objInfo) w.WriteHeader(http.StatusNotModified) @@ -294,7 +312,7 @@ func checkPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Requ // If-Unmodified-Since : Return the object only if it has not been modified since the specified // time, otherwise return a 412 (precondition failed). ifUnmodifiedSinceHeader := r.Header.Get(xhttp.IfUnmodifiedSince) - if ifUnmodifiedSinceHeader != "" { + if ifUnmodifiedSinceHeader != "" && ifMatchETagHeader == "" { if givenTime, err := amztime.ParseHeader(ifUnmodifiedSinceHeader); err == nil { if ifModifiedSince(objInfo.ModTime, givenTime) { // If the object is modified since the specified time. @@ -313,7 +331,7 @@ func checkPreconditions(ctx context.Context, w http.ResponseWriter, r *http.Requ func ifModifiedSince(objTime time.Time, givenTime time.Time) bool { // The Date-Modified header truncates sub-second precision, so // use mtime < t+1s instead of mtime <= t to check for unmodified. 
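The comment added above spells out the precedence between If-None-Match and If-Modified-Since on reads: a matching ETag answers 304 regardless of the modification time, and If-Modified-Since is only decisive on its own. A reduced model of just that precedence, not the handler's full logic (which also covers If-Match, If-Unmodified-Since and part numbers):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// evalReadPreconditions models the precedence described above for GET/HEAD:
// a matching If-None-Match yields 304 without consulting If-Modified-Since;
// otherwise If-Modified-Since is evaluated on its own.
func evalReadPreconditions(h http.Header, etag string, modTime time.Time) int {
	if inm := h.Get("If-None-Match"); inm != "" && inm == etag {
		return http.StatusNotModified
	}
	if ims := h.Get("If-Modified-Since"); ims != "" {
		if t, err := http.ParseTime(ims); err == nil && !modTime.After(t) {
			return http.StatusNotModified
		}
	}
	return http.StatusOK
}

func main() {
	h := http.Header{}
	h.Set("If-None-Match", `"aa"`)
	h.Set("If-Modified-Since", time.Now().Add(-time.Hour).Format(http.TimeFormat))
	// ETag matches, so the answer is 304 even though the object changed after If-Modified-Since.
	fmt.Println(evalReadPreconditions(h, `"aa"`, time.Now()))
}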
- return objTime.After(givenTime.Add(1 * time.Second)) + return !objTime.Before(givenTime.Add(1 * time.Second)) } // canonicalizeETag returns ETag with leading and trailing double-quotes removed, @@ -325,17 +343,20 @@ func canonicalizeETag(etag string) string { // isETagEqual return true if the canonical representations of two ETag strings // are equal, false otherwise func isETagEqual(left, right string) bool { + if strings.TrimSpace(right) == "*" { + return true + } return canonicalizeETag(left) == canonicalizeETag(right) } // setPutObjHeaders sets all the necessary headers returned back // upon a success Put/Copy/CompleteMultipart/Delete requests // to activate delete only headers set delete as true -func setPutObjHeaders(w http.ResponseWriter, objInfo ObjectInfo, delete bool) { +func setPutObjHeaders(w http.ResponseWriter, objInfo ObjectInfo, del bool, h http.Header) { // We must not use the http.Header().Set method here because some (broken) // clients expect the ETag header key to be literally "ETag" - not "Etag" (case-sensitive). // Therefore, we have to set the ETag directly as map entry. - if objInfo.ETag != "" && !delete { + if objInfo.ETag != "" && !del { w.Header()[xhttp.ETag] = []string{`"` + objInfo.ETag + `"`} } @@ -343,20 +364,21 @@ func setPutObjHeaders(w http.ResponseWriter, objInfo ObjectInfo, delete bool) { if objInfo.VersionID != "" && objInfo.VersionID != nullVersionID { w.Header()[xhttp.AmzVersionID] = []string{objInfo.VersionID} // If version is a deleted marker, set this header as well - if objInfo.DeleteMarker && delete { // only returned during delete object + if objInfo.DeleteMarker && del { // only returned during delete object w.Header()[xhttp.AmzDeleteMarker] = []string{strconv.FormatBool(objInfo.DeleteMarker)} } } if objInfo.Bucket != "" && objInfo.Name != "" { - if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil && !delete { + if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil && !del { lc.SetPredictionHeaders(w, objInfo.ToLifecycleOpts()) } } - hash.AddChecksumHeader(w, objInfo.decryptChecksums(0)) + cs, _ := objInfo.decryptChecksums(0, h) + hash.AddChecksumHeader(w, cs) } -func deleteObjectVersions(ctx context.Context, o ObjectLayer, bucket string, toDel []ObjectToDelete, lcEvent lifecycle.Event) { +func deleteObjectVersions(ctx context.Context, o ObjectLayer, bucket string, toDel []ObjectToDelete, lcEvent []lifecycle.Event) { for remaining := toDel; len(remaining) > 0; toDel = remaining { if len(toDel) > maxDeleteList { remaining = toDel[maxDeleteList:] @@ -377,8 +399,7 @@ func deleteObjectVersions(ctx context.Context, o ObjectLayer, bucket string, toD VersionID: dobj.VersionID, } traceFn := globalLifecycleSys.trace(oi) - // Note: NewerNoncurrentVersions action is performed only scanner today - tags := newLifecycleAuditEvent(lcEventSrc_Scanner, lcEvent).Tags() + tags := newLifecycleAuditEvent(lcEventSrc_Scanner, lcEvent[i]).Tags() // Send audit for the lifecycle delete operation auditLogLifecycle( diff --git a/cmd/object-handlers-common_test.go b/cmd/object-handlers-common_test.go index 6d190a95e2edd..80555a1a0060e 100644 --- a/cmd/object-handlers-common_test.go +++ b/cmd/object-handlers-common_test.go @@ -18,7 +18,13 @@ package cmd import ( + "bytes" + "net/http" + "net/http/httptest" "testing" + "time" + + xhttp "github.com/minio/minio/internal/http" ) // Tests - canonicalizeETag() @@ -51,3 +57,125 @@ func TestCanonicalizeETag(t *testing.T) { } } } + +// Tests - CheckPreconditions() +func TestCheckPreconditions(t *testing.T) 
{ + objModTime := time.Date(2024, time.August, 26, 0o2, 0o1, 0o1, 0, time.UTC) + objInfo := ObjectInfo{ETag: "aa", ModTime: objModTime} + testCases := []struct { + name string + ifMatch string + ifNoneMatch string + ifModifiedSince string + ifUnmodifiedSince string + objInfo ObjectInfo + expectedFlag bool + expectedCode int + }{ + // If-None-Match(false) and If-Modified-Since(true) + { + name: "If-None-Match1", + ifNoneMatch: "aa", + ifModifiedSince: "Sun, 26 Aug 2024 02:01:00 GMT", + objInfo: objInfo, + expectedFlag: true, + expectedCode: 304, + }, + // If-Modified-Since(false) + { + name: "If-None-Match2", + ifNoneMatch: "aaa", + ifModifiedSince: "Sun, 26 Aug 2024 02:01:01 GMT", + objInfo: objInfo, + expectedFlag: true, + expectedCode: 304, + }, + { + name: "If-None-Match3", + ifNoneMatch: "aaa", + ifModifiedSince: "Sun, 26 Aug 2024 02:01:02 GMT", + objInfo: objInfo, + expectedFlag: true, + expectedCode: 304, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + recorder := httptest.NewRecorder() + request := httptest.NewRequest(http.MethodHead, "/bucket/a", bytes.NewReader([]byte{})) + request.Header.Set(xhttp.IfNoneMatch, tc.ifNoneMatch) + request.Header.Set(xhttp.IfModifiedSince, tc.ifModifiedSince) + request.Header.Set(xhttp.IfMatch, tc.ifMatch) + request.Header.Set(xhttp.IfUnmodifiedSince, tc.ifUnmodifiedSince) + actualFlag := checkPreconditions(t.Context(), recorder, request, tc.objInfo, ObjectOptions{}) + if tc.expectedFlag != actualFlag { + t.Errorf("test: %s, got flag: %v, want: %v", tc.name, actualFlag, tc.expectedFlag) + } + if tc.expectedCode != recorder.Code { + t.Errorf("test: %s, got code: %d, want: %d", tc.name, recorder.Code, tc.expectedCode) + } + }) + } + testCases = []struct { + name string + ifMatch string + ifNoneMatch string + ifModifiedSince string + ifUnmodifiedSince string + objInfo ObjectInfo + expectedFlag bool + expectedCode int + }{ + // If-Match(true) and If-Unmodified-Since(false) + { + name: "If-Match1", + ifMatch: "aa", + ifUnmodifiedSince: "Sun, 26 Aug 2024 02:01:00 GMT", + objInfo: objInfo, + expectedFlag: false, + expectedCode: 200, + }, + // If-Unmodified-Since(true) + { + name: "If-Match2", + ifMatch: "aa", + ifUnmodifiedSince: "Sun, 26 Aug 2024 02:01:01 GMT", + objInfo: objInfo, + expectedFlag: false, + expectedCode: 200, + }, + { + name: "If-Match3", + ifMatch: "aa", + ifUnmodifiedSince: "Sun, 26 Aug 2024 02:01:02 GMT", + objInfo: objInfo, + expectedFlag: false, + expectedCode: 200, + }, + // If-Match(true) + { + name: "If-Match4", + ifMatch: "aa", + objInfo: objInfo, + expectedFlag: false, + expectedCode: 200, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + recorder := httptest.NewRecorder() + request := httptest.NewRequest(http.MethodHead, "/bucket/a", bytes.NewReader([]byte{})) + request.Header.Set(xhttp.IfNoneMatch, tc.ifNoneMatch) + request.Header.Set(xhttp.IfModifiedSince, tc.ifModifiedSince) + request.Header.Set(xhttp.IfMatch, tc.ifMatch) + request.Header.Set(xhttp.IfUnmodifiedSince, tc.ifUnmodifiedSince) + actualFlag := checkPreconditions(t.Context(), recorder, request, tc.objInfo, ObjectOptions{}) + if tc.expectedFlag != actualFlag { + t.Errorf("test: %s, got flag: %v, want: %v", tc.name, actualFlag, tc.expectedFlag) + } + if tc.expectedCode != recorder.Code { + t.Errorf("test: %s, got code: %d, want: %d", tc.name, recorder.Code, tc.expectedCode) + } + }) + } +} diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index 5db412d926ea6..6542c46554f89 100644 --- 
a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -19,13 +19,13 @@ package cmd import ( "archive/tar" - "bytes" "context" "encoding/hex" "encoding/xml" "errors" "fmt" "io" + "maps" "net/http" "net/http/httptest" "net/textproto" @@ -34,9 +34,8 @@ import ( "sort" "strconv" "strings" - "sync" + "sync/atomic" "time" - "unicode" "github.com/google/uuid" "github.com/klauspost/compress/gzhttp" @@ -50,7 +49,6 @@ import ( "github.com/minio/minio/internal/bucket/lifecycle" objectlock "github.com/minio/minio/internal/bucket/object/lock" "github.com/minio/minio/internal/bucket/replication" - "github.com/minio/minio/internal/config/cache" "github.com/minio/minio/internal/config/dns" "github.com/minio/minio/internal/config/storageclass" "github.com/minio/minio/internal/crypto" @@ -64,8 +62,7 @@ import ( "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/s3select" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" - "github.com/valyala/bytebufferpool" + "github.com/minio/pkg/v3/policy" ) // supportedHeadGetReqParams - supported request parameters for GET and HEAD presigned request. @@ -383,92 +380,6 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj } } - cachedResult := globalCacheConfig.Enabled() && opts.VersionID == "" - - var update bool - if cachedResult { - rc := &cache.CondCheck{} - h := r.Header.Clone() - if opts.PartNumber > 0 { - h.Set(xhttp.PartNumber, strconv.Itoa(opts.PartNumber)) - } - rc.Init(bucket, object, h) - - ci, err := globalCacheConfig.Get(rc) - if ci != nil { - tgs, ok := ci.Metadata[xhttp.AmzObjectTagging] - if ok { - // Set this such that authorization policies can be applied on the object tags. - r.Header.Set(xhttp.AmzObjectTagging, tgs) - } - - if s3Error := authorizeRequest(ctx, r, policy.GetObjectAction); s3Error != ErrNone { - writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error)) - return - } - - okSt := (ci.StatusCode == http.StatusOK || ci.StatusCode == http.StatusPartialContent || - ci.StatusCode == http.StatusPreconditionFailed || ci.StatusCode == http.StatusNotModified) - if okSt { - ci.WriteHeaders(w, func() { - // set common headers - setCommonHeaders(w) - }, func() { - okSt := (ci.StatusCode == http.StatusOK || ci.StatusCode == http.StatusPartialContent) - if okSt && len(ci.Data) > 0 { - for k, v := range ci.Metadata { - w.Header().Set(k, v) - } - - if opts.PartNumber > 0 && strings.Contains(ci.ETag, "-") { - w.Header()[xhttp.AmzMpPartsCount] = []string{ - strings.TrimLeftFunc(ci.ETag, func(r rune) bool { - return !unicode.IsNumber(r) - }), - } - } - - // For providing ranged content - start, rangeLen, err := rs.GetOffsetLength(ci.Size) - if err != nil { - start, rangeLen = 0, ci.Size - } - - // Set content length. - w.Header().Set(xhttp.ContentLength, strconv.FormatInt(rangeLen, 10)) - if rs != nil { - contentRange := fmt.Sprintf("bytes %d-%d/%d", start, start+rangeLen-1, ci.Size) - w.Header().Set(xhttp.ContentRange, contentRange) - } - - io.Copy(w, bytes.NewReader(ci.Data)) - return - } - if ci.StatusCode == http.StatusPreconditionFailed { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPreconditionFailed), r.URL) - return - } else if ci.StatusCode == http.StatusNotModified { - w.WriteHeader(ci.StatusCode) - return - } - - // We did not satisfy any requirement from the cache, update the cache. 
- // this basically means that we do not have the Data for the object - // cached yet - update = true - }) - if !update { - // No update is needed means we have written already to the client just return here. - return - } - } - } - - if errors.Is(err, cache.ErrKeyMissing) { - update = true - } - } - // Validate pre-conditions if any. opts.CheckPrecondFn = func(oi ObjectInfo) bool { if _, err := DecryptObjectInfo(&oi, r); err != nil { @@ -476,6 +387,15 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj return true } + if oi.UserTags != "" { + r.Header.Set(xhttp.AmzObjectTagging, oi.UserTags) + } + + if s3Error := authorizeRequest(ctx, r, policy.GetObjectAction); s3Error != ErrNone { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL) + return true + } + return checkPreconditions(ctx, w, r, oi, opts) } @@ -488,21 +408,24 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj reader *GetObjectReader perr error ) - proxytgts := getProxyTargets(ctx, bucket, object, opts) - if !proxytgts.Empty() { - globalReplicationStats.incProxy(bucket, getObjectAPI, false) - // proxy to replication target if active-active replication is in place. - reader, proxy, perr = proxyGetToReplicationTarget(ctx, bucket, object, rs, r.Header, opts, proxytgts) - if perr != nil { - globalReplicationStats.incProxy(bucket, getObjectAPI, true) - proxyGetErr := ErrorRespToObjectError(perr, bucket, object) - if !isErrBucketNotFound(proxyGetErr) && !isErrObjectNotFound(proxyGetErr) && !isErrVersionNotFound(proxyGetErr) && - !isErrPreconditionFailed(proxyGetErr) && !isErrInvalidRange(proxyGetErr) { - logger.LogIf(ctx, fmt.Errorf("Proxying request (replication) failed for %s/%s(%s) - %w", bucket, object, opts.VersionID, perr)) + + if (isErrObjectNotFound(err) || isErrVersionNotFound(err) || isErrReadQuorum(err)) && (gr == nil || !gr.ObjInfo.DeleteMarker) { + proxytgts := getProxyTargets(ctx, bucket, object, opts) + if !proxytgts.Empty() { + globalReplicationStats.Load().incProxy(bucket, getObjectAPI, false) + // proxy to replication target if active-active replication is in place. 
+ reader, proxy, perr = proxyGetToReplicationTarget(ctx, bucket, object, rs, r.Header, opts, proxytgts) + if perr != nil { + globalReplicationStats.Load().incProxy(bucket, getObjectAPI, true) + proxyGetErr := ErrorRespToObjectError(perr, bucket, object) + if !isErrBucketNotFound(proxyGetErr) && !isErrObjectNotFound(proxyGetErr) && !isErrVersionNotFound(proxyGetErr) && + !isErrPreconditionFailed(proxyGetErr) && !isErrInvalidRange(proxyGetErr) { + replLogIf(ctx, fmt.Errorf("Proxying request (replication) failed for %s/%s(%s) - %w", bucket, object, opts.VersionID, perr)) + } + } + if reader != nil && proxy.Proxy && perr == nil { + gr = reader } - } - if reader != nil && proxy.Proxy && perr == nil { - gr = reader } } if reader == nil || !proxy.Proxy { @@ -544,27 +467,26 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj objInfo := gr.ObjInfo - if objInfo.UserTags != "" { - r.Header.Set(xhttp.AmzObjectTagging, objInfo.UserTags) - } - - if s3Error := authorizeRequest(ctx, r, policy.GetObjectAction); s3Error != ErrNone { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL) - return - } - if !proxy.Proxy { // apply lifecycle rules only for local requests // Automatically remove the object/version if an expiry lifecycle rule can be applied if lc, err := globalLifecycleSys.Get(bucket); err == nil { - rcfg, _ := globalBucketObjectLockSys.Get(bucket) - replcfg, _ := getReplicationConfig(ctx, bucket) + rcfg, err := globalBucketObjectLockSys.Get(bucket) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) + return + } + replcfg, err := getReplicationConfig(ctx, bucket) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) + return + } event := evalActionFromLifecycle(ctx, *lc, rcfg, replcfg, objInfo) if event.Action.Delete() { // apply whatever the expiry rule is. applyExpiryRule(event, lcEventSrc_s3GetObject, objInfo) if !event.Action.DeleteRestored() { // If the ILM action is not on restored object return error. - writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(ErrNoSuchKey)) + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNoSuchKey), r.URL) return } } @@ -600,43 +522,11 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj if r.Header.Get(xhttp.AmzChecksumMode) == "ENABLED" && rs == nil { // AWS S3 silently drops checksums on range requests. 
- hash.AddChecksumHeader(w, objInfo.decryptChecksums(opts.PartNumber)) - } - - var buf *bytebufferpool.ByteBuffer - if update { - if globalCacheConfig.MatchesSize(objInfo.Size) { - buf = bytebufferpool.Get() - defer bytebufferpool.Put(buf) - } - defer func() { - var data []byte - if buf != nil { - data = buf.Bytes() - } - - asize, err := objInfo.GetActualSize() - if err != nil { - asize = objInfo.Size - } - - globalCacheConfig.Set(&cache.ObjectInfo{ - Key: objInfo.Name, - Bucket: objInfo.Bucket, - ETag: objInfo.ETag, - ModTime: objInfo.ModTime, - Expires: objInfo.ExpiresStr(), - CacheControl: objInfo.CacheControl, - Metadata: cleanReservedKeys(objInfo.UserDefined), - Range: rangeHeader, - PartNumber: opts.PartNumber, - Size: asize, - Data: data, - }) - }() + cs, _ := objInfo.decryptChecksums(opts.PartNumber, r.Header) + hash.AddChecksumHeader(w, cs) } - if err = setObjectHeaders(w, objInfo, rs, opts); err != nil { + if err = setObjectHeaders(ctx, w, objInfo, rs, opts); err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } @@ -648,11 +538,7 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj setHeadGetRespHeaders(w, r.Form) - var iw io.Writer - iw = w - if buf != nil { - iw = io.MultiWriter(w, buf) - } + var iw io.Writer = w statusCodeWritten := false httpWriter := xioutil.WriteOnClose(iw) @@ -728,7 +614,7 @@ func (api objectAPIHandlers) getObjectAttributesHandler(ctx context.Context, obj } if _, err = DecryptObjectInfo(&objInfo, r); err != nil { - writeErrorResponseHeadersOnly(w, toAPIError(ctx, err)) + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } @@ -747,23 +633,18 @@ func (api objectAPIHandlers) getObjectAttributesHandler(ctx context.Context, obj w.Header().Del(xhttp.ContentType) if _, ok := opts.ObjectAttributes[xhttp.Checksum]; ok { - chkSums := objInfo.decryptChecksums(0) + chkSums, _ := objInfo.decryptChecksums(0, r.Header) // AWS does not appear to append part number on this API call. 
- switch { - case chkSums["CRC32"] != "": - OA.Checksum = new(objectAttributesChecksum) - OA.Checksum.ChecksumCRC32 = strings.Split(chkSums["CRC32"], "-")[0] - case chkSums["CRC32C"] != "": - OA.Checksum = new(objectAttributesChecksum) - OA.Checksum.ChecksumCRC32C = strings.Split(chkSums["CRC32C"], "-")[0] - case chkSums["SHA256"] != "": - OA.Checksum = new(objectAttributesChecksum) - OA.Checksum.ChecksumSHA1 = strings.Split(chkSums["SHA1"], "-")[0] - case chkSums["SHA1"] != "": - OA.Checksum = new(objectAttributesChecksum) - OA.Checksum.ChecksumSHA256 = strings.Split(chkSums["SHA256"], "-")[0] + if len(chkSums) > 0 { + OA.Checksum = &objectAttributesChecksum{ + ChecksumCRC32: strings.Split(chkSums["CRC32"], "-")[0], + ChecksumCRC32C: strings.Split(chkSums["CRC32C"], "-")[0], + ChecksumSHA1: strings.Split(chkSums["SHA1"], "-")[0], + ChecksumSHA256: strings.Split(chkSums["SHA256"], "-")[0], + ChecksumCRC64NVME: strings.Split(chkSums["CRC64NVME"], "-")[0], + ChecksumType: chkSums[xhttp.AmzChecksumType], + } } - } if _, ok := opts.ObjectAttributes[xhttp.ETag]; ok { @@ -775,10 +656,10 @@ func (api objectAPIHandlers) getObjectAttributesHandler(ctx context.Context, obj } if _, ok := opts.ObjectAttributes[xhttp.StorageClass]; ok { - OA.StorageClass = objInfo.StorageClass + OA.StorageClass = filterStorageClass(ctx, objInfo.StorageClass) } - objInfo.decryptPartsChecksums() + objInfo.decryptPartsChecksums(r.Header) if _, ok := opts.ObjectAttributes[xhttp.ObjectParts]; ok { OA.ObjectParts = new(objectAttributesParts) @@ -800,12 +681,13 @@ func (api objectAPIHandlers) getObjectAttributesHandler(ctx context.Context, obj OA.ObjectParts.NextPartNumberMarker = v.Number OA.ObjectParts.Parts = append(OA.ObjectParts.Parts, &objectAttributesPart{ - ChecksumSHA1: objInfo.Parts[i].Checksums["SHA1"], - ChecksumSHA256: objInfo.Parts[i].Checksums["SHA256"], - ChecksumCRC32: objInfo.Parts[i].Checksums["CRC32"], - ChecksumCRC32C: objInfo.Parts[i].Checksums["CRC32C"], - PartNumber: objInfo.Parts[i].Number, - Size: objInfo.Parts[i].Size, + ChecksumSHA1: objInfo.Parts[i].Checksums["SHA1"], + ChecksumSHA256: objInfo.Parts[i].Checksums["SHA256"], + ChecksumCRC32: objInfo.Parts[i].Checksums["CRC32"], + ChecksumCRC32C: objInfo.Parts[i].Checksums["CRC32C"], + ChecksumCRC64NVME: objInfo.Parts[i].Checksums["CRC64NVME"], + PartNumber: objInfo.Parts[i].Number, + Size: objInfo.Parts[i].Size, }) } } @@ -826,8 +708,6 @@ func (api objectAPIHandlers) getObjectAttributesHandler(ctx context.Context, obj UserAgent: r.UserAgent(), Host: handlers.GetSourceIP(r), }) - - return } // GetObjectHandler - GET Object @@ -937,97 +817,22 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob } } - cachedResult := globalCacheConfig.Enabled() && opts.VersionID == "" - - var update bool - if cachedResult { - rc := &cache.CondCheck{} - h := r.Header.Clone() - if opts.PartNumber > 0 { - h.Set(xhttp.PartNumber, strconv.Itoa(opts.PartNumber)) - } - rc.Init(bucket, object, h) - - ci, err := globalCacheConfig.Get(rc) - if ci != nil { - tgs, ok := ci.Metadata[xhttp.AmzObjectTagging] - if ok { - // Set this such that authorization policies can be applied on the object tags. 
- r.Header.Set(xhttp.AmzObjectTagging, tgs) - } - - if s3Error := authorizeRequest(ctx, r, policy.GetObjectAction); s3Error != ErrNone { - writeErrorResponseHeadersOnly(w, errorCodes.ToAPIErr(s3Error)) - return - } - - okSt := (ci.StatusCode == http.StatusOK || ci.StatusCode == http.StatusPartialContent || - ci.StatusCode == http.StatusPreconditionFailed || ci.StatusCode == http.StatusNotModified) - if okSt { - ci.WriteHeaders(w, func() { - // set common headers - setCommonHeaders(w) - }, func() { - okSt := (ci.StatusCode == http.StatusOK || ci.StatusCode == http.StatusPartialContent) - if okSt { - for k, v := range ci.Metadata { - w.Header().Set(k, v) - } - - // For providing ranged content - start, rangeLen, err := rs.GetOffsetLength(ci.Size) - if err != nil { - start, rangeLen = 0, ci.Size - } - - if opts.PartNumber > 0 && strings.Contains(ci.ETag, "-") { - w.Header()[xhttp.AmzMpPartsCount] = []string{ - strings.TrimLeftFunc(ci.ETag, func(r rune) bool { - return !unicode.IsNumber(r) - }), - } - } - - // Set content length for the range. - w.Header().Set(xhttp.ContentLength, strconv.FormatInt(rangeLen, 10)) - if rs != nil { - contentRange := fmt.Sprintf("bytes %d-%d/%d", start, start+rangeLen-1, ci.Size) - w.Header().Set(xhttp.ContentRange, contentRange) - } - - return - } - if ci.StatusCode == http.StatusPreconditionFailed { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPreconditionFailed), r.URL) - return - } - - w.WriteHeader(ci.StatusCode) - }) - return - } - } - if errors.Is(err, cache.ErrKeyMissing) { - update = true - } - } - opts.FastGetObjInfo = true objInfo, err := getObjectInfo(ctx, bucket, object, opts) var proxy proxyResult - if err != nil { + if err != nil && !objInfo.DeleteMarker && (isErrObjectNotFound(err) || isErrVersionNotFound(err) || isErrReadQuorum(err)) { // proxy HEAD to replication target if active-active replication configured on bucket proxytgts := getProxyTargets(ctx, bucket, object, opts) if !proxytgts.Empty() { - globalReplicationStats.incProxy(bucket, headObjectAPI, false) + globalReplicationStats.Load().incProxy(bucket, headObjectAPI, false) var oi ObjectInfo oi, proxy = proxyHeadToReplicationTarget(ctx, bucket, object, rs, opts, proxytgts) if proxy.Proxy { objInfo = oi } if proxy.Err != nil { - globalReplicationStats.incProxy(bucket, headObjectAPI, true) + globalReplicationStats.Load().incProxy(bucket, headObjectAPI, true) writeErrorResponseHeadersOnly(w, toAPIError(ctx, proxy.Err)) return } @@ -1077,8 +882,16 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob if !proxy.Proxy { // apply lifecycle rules only locally not for proxied requests // Automatically remove the object/version if an expiry lifecycle rule can be applied if lc, err := globalLifecycleSys.Get(bucket); err == nil { - rcfg, _ := globalBucketObjectLockSys.Get(bucket) - replcfg, _ := getReplicationConfig(ctx, bucket) + rcfg, err := globalBucketObjectLockSys.Get(bucket) + if err != nil { + writeErrorResponseHeadersOnly(w, toAPIError(ctx, err)) + return + } + replcfg, err := getReplicationConfig(ctx, bucket) + if err != nil { + writeErrorResponseHeadersOnly(w, toAPIError(ctx, err)) + return + } event := evalActionFromLifecycle(ctx, *lc, rcfg, replcfg, objInfo) if event.Action.Delete() { // apply whatever the expiry rule is. 
@@ -1105,24 +918,6 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob return } - if update { - asize, err := objInfo.GetActualSize() - if err != nil { - asize = objInfo.Size - } - - defer globalCacheConfig.Set(&cache.ObjectInfo{ - Key: objInfo.Name, - Bucket: objInfo.Bucket, - ETag: objInfo.ETag, - ModTime: objInfo.ModTime, - Expires: objInfo.ExpiresStr(), - CacheControl: objInfo.CacheControl, - Size: asize, - Metadata: cleanReservedKeys(objInfo.UserDefined), - }) - } - // Validate pre-conditions if any. if checkPreconditions(ctx, w, r, objInfo, opts) { return @@ -1150,11 +945,12 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob if r.Header.Get(xhttp.AmzChecksumMode) == "ENABLED" && rs == nil { // AWS S3 silently drops checksums on range requests. - hash.AddChecksumHeader(w, objInfo.decryptChecksums(opts.PartNumber)) + cs, _ := objInfo.decryptChecksums(opts.PartNumber, r.Header) + hash.AddChecksumHeader(w, cs) } // Set standard object headers. - if err = setObjectHeaders(w, objInfo, rs, opts); err != nil { + if err = setObjectHeaders(ctx, w, objInfo, rs, opts); err != nil { writeErrorResponseHeadersOnly(w, toAPIError(ctx, err)) return } @@ -1291,22 +1087,31 @@ func getCpObjMetadataFromHeader(ctx context.Context, r *http.Request, userMeta m return defaultMeta, nil } -// getRemoteInstanceTransport contains a singleton roundtripper. -var ( - getRemoteInstanceTransport *http.Transport - getRemoteInstanceTransportOnce sync.Once -) +// getRemoteInstanceTransport contains a roundtripper for external (not peers) servers +var remoteInstanceTransport atomic.Value + +func setRemoteInstanceTransport(tr http.RoundTripper) { + remoteInstanceTransport.Store(tr) +} + +func getRemoteInstanceTransport() http.RoundTripper { + rt, ok := remoteInstanceTransport.Load().(http.RoundTripper) + if ok { + return rt + } + return nil +} // Returns a minio-go Client configured to access remote host described by destDNSRecord // Applicable only in a federated deployment var getRemoteInstanceClient = func(r *http.Request, host string) (*miniogo.Core, error) { - cred := getReqAccessCred(r, globalSite.Region) + cred := getReqAccessCred(r, globalSite.Region()) // In a federated deployment, all the instances share config files // and hence expected to have same credentials. 
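The hunk above replaces the sync.Once transport singleton with an atomic.Value plus setter and getter, so the transport used for federated calls can be installed, and later swapped, without a mutex. A standalone sketch of that holder pattern; the names mirror the patch but the surrounding program is illustrative only:

package main

import (
	"fmt"
	"net/http"
	"sync/atomic"
	"time"
)

// remoteTransport holds the shared RoundTripper. Every Store must use the
// same concrete type, or atomic.Value panics; here only *http.Transport is stored.
var remoteTransport atomic.Value

func setRemoteTransport(rt http.RoundTripper) {
	remoteTransport.Store(rt)
}

func getRemoteTransport() http.RoundTripper {
	if rt, ok := remoteTransport.Load().(http.RoundTripper); ok {
		return rt
	}
	return nil // caller falls back to a default when nothing has been installed yet
}

func main() {
	setRemoteTransport(&http.Transport{ResponseHeaderTimeout: 30 * time.Second})
	fmt.Printf("%T\n", getRemoteTransport())
}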
core, err := miniogo.NewCore(host, &miniogo.Options{ Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, ""), Secure: globalIsTLS, - Transport: getRemoteInstanceTransport, + Transport: getRemoteInstanceTransport(), }) if err != nil { return nil, err @@ -1389,6 +1194,9 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re return } + // Sanitize the source object name similar to NewMultipart and PutObject API + srcObject = trimLeadingSlash(srcObject) + if vid != "" && vid != nullVersionID { _, err := uuid.Parse(vid) if err != nil { @@ -1441,9 +1249,10 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re // convert copy src encryption options for GET calls getOpts := ObjectOptions{ - VersionID: srcOpts.VersionID, - Versioned: srcOpts.Versioned, - VersionSuspended: srcOpts.VersionSuspended, + VersionID: srcOpts.VersionID, + Versioned: srcOpts.Versioned, + VersionSuspended: srcOpts.VersionSuspended, + ReplicationRequest: r.Header.Get(xhttp.MinIOSourceReplicationRequest) == "true", } getSSE := encrypt.SSE(srcOpts.ServerSideEncryption) if getSSE != srcOpts.ServerSideEncryption { @@ -1530,16 +1339,12 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re } } - // Check if either the source is encrypted or the destination will be encrypted. - objectEncryption := crypto.Requested(r.Header) - objectEncryption = objectEncryption || crypto.IsSourceEncrypted(srcInfo.UserDefined) - var compressMetadata map[string]string // No need to compress for remote etcd calls // Pass the decompressed stream to such calls. isDstCompressed := isCompressible(r.Header, dstObject) && length > minCompressibleSize && - !isRemoteCopyRequired(ctx, srcBucket, dstBucket, objectAPI) && !cpSrcDstSame && !objectEncryption + !isRemoteCopyRequired(ctx, srcBucket, dstBucket, objectAPI) if isDstCompressed { compressMetadata = make(map[string]string, 2) // Preserving the compression metadata. @@ -1575,7 +1380,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re return } // Encryption parameters not present for this object. - if crypto.SSEC.IsEncrypted(srcInfo.UserDefined) && !crypto.SSECopy.IsRequested(r.Header) { + if crypto.SSEC.IsEncrypted(srcInfo.UserDefined) && !crypto.SSECopy.IsRequested(r.Header) && r.Header.Get(xhttp.MinIOSourceReplicationRequest) != "true" { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidSSECustomerAlgorithm), r.URL) return } @@ -1662,6 +1467,46 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re targetSize, _ = srcInfo.DecryptedSize() } + // Client can request that a different type of checksum is computed server-side for the + // destination object using the x-amz-checksum-algorithm header. + headerChecksumType := hash.NewChecksumHeader(r.Header) + if headerChecksumType.IsSet() { + dstOpts.WantServerSideChecksumType = headerChecksumType.Base() + srcInfo.Reader.AddServerSideChecksumHasher(headerChecksumType) + dstOpts.WantChecksum = nil + } else { + // Check the source object for checksum. + // If Checksum is not encrypted, decryptChecksum will be a no-op and return + // the already unencrypted value. 
+ srcChecksumDecrypted, err := srcInfo.decryptChecksum(r.Header) + if err != nil { + encLogOnceIf(GlobalContext, + fmt.Errorf("Unable to decryptChecksum for object: %s/%s, error: %w", srcBucket, srcObject, err), + "copy-object-decrypt-checksums-"+srcBucket+srcObject) + } + + // The source object has a checksum set, we need the destination to have one too. + if srcChecksumDecrypted != nil { + dstOpts.WantChecksum = hash.ChecksumFromBytes(srcChecksumDecrypted) + + // When an object is being copied from a source that is multipart, the destination will + // no longer be multipart, and thus the checksum becomes full-object instead. Since + // the CopyObject API does not require that the caller send us this final checksum, we need + // to compute it server-side, with the same type as the source object. + if dstOpts.WantChecksum != nil && dstOpts.WantChecksum.Type.IsMultipartComposite() { + dstOpts.WantServerSideChecksumType = dstOpts.WantChecksum.Type.Base() + srcInfo.Reader.AddServerSideChecksumHasher(dstOpts.WantServerSideChecksumType) + dstOpts.WantChecksum = nil + } + } else { + // S3: All copied objects without checksums and specified destination checksum algorithms + // automatically gain a CRC-64NVME checksum algorithm. + dstOpts.WantServerSideChecksumType = hash.ChecksumCRC64NVME + srcInfo.Reader.AddServerSideChecksumHasher(dstOpts.WantServerSideChecksumType) + dstOpts.WantChecksum = nil + } + } + if isTargetEncrypted { var encReader io.Reader kind, _ := crypto.IsRequested(r.Header) @@ -1695,6 +1540,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re if dstOpts.IndexCB != nil { dstOpts.IndexCB = compressionIndexEncrypter(objEncKey, dstOpts.IndexCB) } + dstOpts.EncryptFn = metadataEncrypter(objEncKey) } } @@ -1721,9 +1567,9 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re if dstOpts.ReplicationRequest { srcTimestamp := dstOpts.ReplicationSourceTaggingTimestamp if !srcTimestamp.IsZero() { - ondiskTimestamp, err := time.Parse(lastTaggingTimestamp, time.RFC3339Nano) + ondiskTimestamp, err := time.Parse(time.RFC3339Nano, lastTaggingTimestamp) // update tagging metadata only if replica timestamp is newer than what's on disk - if err != nil || (err == nil && ondiskTimestamp.Before(srcTimestamp)) { + if err != nil || (err == nil && !ondiskTimestamp.After(srcTimestamp)) { srcInfo.UserDefined[ReservedMetadataPrefixLower+TaggingTimestamp] = srcTimestamp.UTC().Format(time.RFC3339Nano) srcInfo.UserDefined[xhttp.AmzObjectTagging] = objTags } @@ -1732,7 +1578,6 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re srcInfo.UserDefined[xhttp.AmzObjectTagging] = objTags srcInfo.UserDefined[ReservedMetadataPrefixLower+TaggingTimestamp] = UTCNow().Format(time.RFC3339Nano) } - } srcInfo.UserDefined = filterReplicationStatusMetadata(srcInfo.UserDefined) @@ -1748,7 +1593,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re if dstOpts.ReplicationRequest { srcTimestamp := dstOpts.ReplicationSourceRetentionTimestamp if !srcTimestamp.IsZero() { - ondiskTimestamp, err := time.Parse(lastretentionTimestamp, time.RFC3339Nano) + ondiskTimestamp, err := time.Parse(time.RFC3339Nano, lastretentionTimestamp) // update retention metadata only if replica timestamp is newer than what's on disk if err != nil || (err == nil && ondiskTimestamp.Before(srcTimestamp)) { srcInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockMode)] = string(retentionMode) @@ -1768,7 +1613,7 @@ func (api 
objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re if dstOpts.ReplicationRequest { srcTimestamp := dstOpts.ReplicationSourceLegalholdTimestamp if !srcTimestamp.IsZero() { - ondiskTimestamp, err := time.Parse(lastLegalHoldTimestamp, time.RFC3339Nano) + ondiskTimestamp, err := time.Parse(time.RFC3339Nano, lastLegalHoldTimestamp) // update legalhold metadata only if replica timestamp is newer than what's on disk if err != nil || (err == nil && ondiskTimestamp.Before(srcTimestamp)) { srcInfo.UserDefined[strings.ToLower(xhttp.AmzObjectLockLegalHold)] = string(legalHold.Status) @@ -1798,15 +1643,11 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re srcInfo.UserDefined[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano) } // Store the preserved compression metadata. - for k, v := range compressMetadata { - srcInfo.UserDefined[k] = v - } + maps.Copy(srcInfo.UserDefined, compressMetadata) // We need to preserve the encryption headers set in EncryptRequest, // so we do not want to override them, copy them instead. - for k, v := range encMetadata { - srcInfo.UserDefined[k] = v - } + maps.Copy(srcInfo.UserDefined, encMetadata) // Ensure that metadata does not contain sensitive information crypto.RemoveSensitiveEntries(srcInfo.UserDefined) @@ -1822,13 +1663,22 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re // if encryption is enabled we do not need explicit "REPLACE" metadata to // be enabled as well - this is to allow for key-rotation. if !isDirectiveReplace(r.Header.Get(xhttp.AmzMetadataDirective)) && !isDirectiveReplace(r.Header.Get(xhttp.AmzTagDirective)) && - srcInfo.metadataOnly && srcOpts.VersionID == "" && !objectEncryption { + srcInfo.metadataOnly && srcOpts.VersionID == "" && + !crypto.Requested(r.Header) && + !crypto.IsSourceEncrypted(srcInfo.UserDefined) { // If x-amz-metadata-directive is not set to REPLACE then we need // to error out if source and destination are same. writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidCopyDest), r.URL) return } + // After we've checked for an invalid copy (above), if a server-side checksum type + // is requested, we need to read the source to recompute the checksum. + if dstOpts.WantServerSideChecksumType.IsSet() { + srcInfo.metadataOnly = false + } + + // Federation only. remoteCallRequired := isRemoteCopyRequired(ctx, srcBucket, dstBucket, objectAPI) var objInfo ObjectInfo @@ -1901,7 +1751,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re scheduleReplication(ctx, objInfo, objectAPI, dsc, replication.ObjectReplicationType) } - setPutObjHeaders(w, objInfo, false) + setPutObjHeaders(w, objInfo, false, r.Header) // We must not use the http.Header().Set method here because some (broken) // clients expect the x-amz-copy-source-version-id header key to be literally // "x-amz-copy-source-version-id"- not in canonicalized form, preserve it. 
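// A minimal sketch of the destination-checksum decision described in the CopyObjectHandler
// comments above, assuming invented type and helper names (the real handler uses internal/hash;
// only the branching is mirrored here).
package main

import "fmt"

type ChecksumKind int

const (
	ChecksumNone ChecksumKind = iota
	ChecksumCRC32
	ChecksumSHA256
	ChecksumCRC64NVME
)

type sourceChecksum struct {
	kind               ChecksumKind
	multipartComposite bool // composite (per-part) checksum carried by a multipart source
}

// pickDestinationChecksum decides what the copied object should carry:
//   - an explicit x-amz-checksum-algorithm header wins and is recomputed server-side;
//   - a composite source checksum cannot be carried over (the copy is single-part), so the
//     same base algorithm is recomputed server-side as a full-object checksum;
//   - a full-object source checksum is carried over as-is;
//   - with neither, the copy defaults to a server-side CRC-64NVME checksum.
func pickDestinationChecksum(headerKind ChecksumKind, src *sourceChecksum) (recompute ChecksumKind, carryOver bool) {
	if headerKind != ChecksumNone {
		return headerKind, false
	}
	if src != nil {
		if src.multipartComposite {
			return src.kind, false
		}
		return ChecksumNone, true
	}
	return ChecksumCRC64NVME, false
}

func main() {
	k, carry := pickDestinationChecksum(ChecksumNone, &sourceChecksum{kind: ChecksumSHA256, multipartComposite: true})
	fmt.Println(k, carry) // the SHA256 base type is recomputed server-side, nothing is carried over
}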
@@ -1923,22 +1773,6 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re Host: handlers.GetSourceIP(r), }) - asize, err := objInfo.GetActualSize() - if err != nil { - asize = objInfo.Size - } - - defer globalCacheConfig.Set(&cache.ObjectInfo{ - Key: objInfo.Name, - Bucket: objInfo.Bucket, - ETag: objInfo.ETag, - ModTime: objInfo.ModTime, - Expires: objInfo.ExpiresStr(), - CacheControl: objInfo.CacheControl, - Size: asize, - Metadata: cleanReservedKeys(objInfo.UserDefined), - }) - if !remoteCallRequired && !globalTierConfigMgr.Empty() { // Schedule object for immediate transition if eligible. objInfo.ETag = origETag @@ -2062,7 +1896,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req } case authTypeStreamingUnsignedTrailer: // Initialize stream chunked reader with optional trailers. - rd, s3Err = newUnsignedV4ChunkedReader(r, true) + rd, s3Err = newUnsignedV4ChunkedReader(r, true, r.Header.Get(xhttp.Authorization) != "") if s3Err != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL) return @@ -2075,7 +1909,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req } case authTypePresigned, authTypeSigned: - if s3Err = reqSignatureV4Verify(r, globalSite.Region, serviceS3); s3Err != ErrNone { + if s3Err = reqSignatureV4Verify(r, globalSite.Region(), serviceS3); s3Err != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL) return } @@ -2101,7 +1935,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req } metadata[ReservedMetadataPrefixLower+ReplicaStatus] = replication.Replica.String() metadata[ReservedMetadataPrefixLower+ReplicaTimestamp] = UTCNow().Format(time.RFC3339Nano) - defer globalReplicationStats.UpdateReplicaStat(bucket, size) + defer globalReplicationStats.Load().UpdateReplicaStat(bucket, size) } // Check if bucket encryption is enabled @@ -2110,16 +1944,14 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req AutoEncrypt: globalAutoEncryption, }) - var buf *bytebufferpool.ByteBuffer - if globalCacheConfig.MatchesSize(size) { - buf = bytebufferpool.Get() - defer bytebufferpool.Put(buf) - } - var reader io.Reader reader = rd - if buf != nil { - reader = io.TeeReader(rd, buf) + + var opts ObjectOptions + opts, err = putOptsFromReq(ctx, r, bucket, object, metadata) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) + return } actualSize := size @@ -2138,6 +1970,8 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL) return } + opts.WantChecksum = actualReader.Checksum() + // Set compression metrics. 
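// A minimal sketch of the replica-timestamp guard used for the tagging, retention and
// legal-hold metadata in CopyObjectHandler above: the on-disk timestamp is parsed as
// RFC3339Nano (note the corrected time.Parse(layout, value) argument order) and the
// replica's metadata is applied only when the on-disk value is missing, unparsable, or
// not newer. Function and variable names here are illustrative only; this mirrors the
// tagging case, while retention and legal hold keep a strict Before comparison.
package main

import (
	"fmt"
	"time"
)

// shouldApplyReplicaMetadata reports whether metadata carried by a replication request
// (stamped srcTimestamp at the source site) should overwrite what is on disk.
func shouldApplyReplicaMetadata(onDiskRFC3339Nano string, srcTimestamp time.Time) bool {
	if srcTimestamp.IsZero() {
		return false // the replication request carried no source timestamp
	}
	onDisk, err := time.Parse(time.RFC3339Nano, onDiskRFC3339Nano)
	if err != nil {
		return true // nothing reliable on disk, accept the replica's value
	}
	return !onDisk.After(srcTimestamp) // apply unless the local copy is strictly newer
}

func main() {
	src := time.Now().UTC()
	fmt.Println(shouldApplyReplicaMetadata(src.Add(-time.Second).Format(time.RFC3339Nano), src)) // true
	fmt.Println(shouldApplyReplicaMetadata(src.Add(time.Second).Format(time.RFC3339Nano), src))  // false
}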
var s2c io.ReadCloser wantEncryption := crypto.Requested(r.Header) @@ -2168,22 +2002,21 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } - if err := hashReader.AddChecksum(r, size < 0); err != nil { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL) - return + if size >= 0 { + if err := hashReader.AddChecksum(r, false); err != nil { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL) + return + } + opts.WantChecksum = hashReader.Checksum() } rawReader := hashReader pReader := NewPutObjReader(rawReader) - - var opts ObjectOptions - opts, err = putOptsFromReq(ctx, r, bucket, object, metadata) - if err != nil { - writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) - return - } opts.IndexCB = idxCb + if r.Header.Get(xhttp.IfMatch) != "" { + opts.HasIfMatch = true + } if opts.PreserveETag != "" || r.Header.Get(xhttp.IfMatch) != "" || r.Header.Get(xhttp.IfNoneMatch) != "" { @@ -2234,11 +2067,6 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req return } - if crypto.SSEC.IsRequested(r.Header) && isReplicationEnabled(ctx, bucket) { - writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParametersSSEC), r.URL) - return - } - reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) @@ -2323,31 +2151,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req scheduleReplication(ctx, objInfo, objectAPI, dsc, replication.ObjectReplicationType) } - setPutObjHeaders(w, objInfo, false) - - defer func() { - var data []byte - if buf != nil { - data = buf.Bytes() - } - - asize, err := objInfo.GetActualSize() - if err != nil { - asize = objInfo.Size - } - - globalCacheConfig.Set(&cache.ObjectInfo{ - Key: objInfo.Name, - Bucket: objInfo.Bucket, - ETag: objInfo.ETag, - ModTime: objInfo.ModTime, - Expires: objInfo.ExpiresStr(), - CacheControl: objInfo.CacheControl, - Size: asize, - Metadata: cleanReservedKeys(objInfo.UserDefined), - Data: data, - }) - }() + setPutObjHeaders(w, objInfo, false, r.Header) // Notify object created event. evt := eventArgs{ @@ -2471,8 +2275,15 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h putObject = objectAPI.PutObject ) - // Check if put is allowed - if s3Err = isPutActionAllowed(ctx, rAuthType, bucket, object, r, policy.PutObjectAction); s3Err != ErrNone { + var opts untarOptions + opts.ignoreDirs = strings.EqualFold(r.Header.Get(xhttp.MinIOSnowballIgnoreDirs), "true") + opts.ignoreErrs = strings.EqualFold(r.Header.Get(xhttp.MinIOSnowballIgnoreErrors), "true") + opts.prefixAll = r.Header.Get(xhttp.MinIOSnowballPrefix) + if opts.prefixAll != "" { + opts.prefixAll = trimLeadingSlash(pathJoin(opts.prefixAll, slashSeparator)) + } + // Check if put is allow for specified prefix. 
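// A minimal sketch of the two-step authorization PutObjectExtractHandler now performs for
// snowball (tar) uploads: the optional X-Minio-Snowball-Prefix is normalized up front and
// checked once, then every extracted entry is checked again against its final object name.
// normalizePrefix and allowPut are invented names, and normalizePrefix only approximates
// trimLeadingSlash(pathJoin(prefix, "/")) with the standard library.
package main

import (
	"fmt"
	"path"
	"strings"
)

// normalizePrefix joins the header value with a trailing slash and drops any leading
// slash so the prefix is bucket-relative.
func normalizePrefix(prefix string) string {
	if prefix == "" {
		return ""
	}
	p := path.Join(prefix, "/") + "/"
	return strings.TrimPrefix(p, "/")
}

// allowPut stands in for the per-object policy check (isPutActionAllowed in the handler).
func allowPut(objectName string) bool { return !strings.HasPrefix(objectName, "private/") }

func main() {
	prefix := normalizePrefix("/backups/2024")
	fmt.Println(prefix) // "backups/2024/"

	for _, entry := range []string{"a.txt", "private/b.txt"} {
		object := prefix + entry
		if !allowPut(object) {
			fmt.Println("skipping unauthorized entry:", object)
			continue
		}
		fmt.Println("would put:", object)
	}
}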
+ if s3Err = isPutActionAllowed(ctx, rAuthType, bucket, opts.prefixAll, r, policy.PutObjectAction); s3Err != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL) return } @@ -2493,7 +2304,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h } case authTypePresigned, authTypeSigned: - if s3Err = reqSignatureV4Verify(r, globalSite.Region, serviceS3); s3Err != ErrNone { + if s3Err = reqSignatureV4Verify(r, globalSite.Region(), serviceS3); s3Err != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL) return } @@ -2540,7 +2351,10 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h putObjectTar := func(reader io.Reader, info os.FileInfo, object string) error { size := info.Size() - + if s3Err = isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, policy.PutObjectAction); s3Err != ErrNone { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL) + return errors.New(errorCodes.ToAPIErr(s3Err).Code) + } metadata := map[string]string{ xhttp.AmzStorageClass: sc, // save same storage-class as incoming stream. } @@ -2576,7 +2390,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h if r.Header.Get(xhttp.AmzBucketReplicationStatus) == replication.Replica.String() { if s3Err = isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, policy.ReplicateObjectAction); s3Err != ErrNone { - return err + return errors.New(errorCodes.ToAPIErr(s3Err).Code) } metadata[ReservedMetadataPrefixLower+ReplicaStatus] = replication.Replica.String() metadata[ReservedMetadataPrefixLower+ReplicaTimestamp] = UTCNow().Format(time.RFC3339Nano) @@ -2594,8 +2408,8 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h if k == "minio.versionId" { continue } - if strings.HasPrefix(k, "minio.metadata.") { - k = strings.TrimPrefix(k, "minio.metadata.") + if after, ok0 := strings.CutPrefix(k, "minio.metadata."); ok0 { + k = after hdrs.Set(k, v) } } @@ -2603,9 +2417,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h if err != nil { return err } - for k, v := range m { - metadata[k] = v - } + maps.Copy(metadata, m) } else { versionID = r.Form.Get(xhttp.VersionID) hdrs = r.Header @@ -2640,7 +2452,6 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h if dsc := mustReplicate(ctx, bucket, object, getMustReplicateOptions(metadata, "", "", replication.ObjectReplicationType, opts)); dsc.ReplicateAny() { metadata[ReservedMetadataPrefixLower+ReplicationTimestamp] = UTCNow().Format(time.RFC3339Nano) metadata[ReservedMetadataPrefixLower+ReplicationStatus] = dsc.PendingStatus() - } var objectEncryptionKey crypto.ObjectKey @@ -2649,10 +2460,6 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h return errInvalidEncryptionParameters } - if crypto.SSEC.IsRequested(r.Header) && isReplicationEnabled(ctx, bucket) { - return errInvalidEncryptionParametersSSEC - } - reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata) if err != nil { return err @@ -2704,22 +2511,6 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h scheduleReplication(ctx, objInfo, objectAPI, dsc, replication.ObjectReplicationType) } - asize, err := objInfo.GetActualSize() - if err != nil { - asize = objInfo.Size - } - - defer globalCacheConfig.Set(&cache.ObjectInfo{ - Key: objInfo.Name, - Bucket: objInfo.Bucket, - ETag: objInfo.ETag, 
- ModTime: objInfo.ModTime, - Expires: objInfo.ExpiresStr(), - CacheControl: objInfo.CacheControl, - Size: asize, - Metadata: cleanReservedKeys(objInfo.UserDefined), - }) - // Notify object created event. evt := eventArgs{ EventName: event.ObjectCreatedPut, @@ -2743,14 +2534,6 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h return nil } - var opts untarOptions - opts.ignoreDirs = strings.EqualFold(r.Header.Get(xhttp.MinIOSnowballIgnoreDirs), "true") - opts.ignoreErrs = strings.EqualFold(r.Header.Get(xhttp.MinIOSnowballIgnoreErrors), "true") - opts.prefixAll = r.Header.Get(xhttp.MinIOSnowballPrefix) - if opts.prefixAll != "" { - opts.prefixAll = trimLeadingSlash(pathJoin(opts.prefixAll, slashSeparator)) - } - if err = untar(ctx, hreader, putObjectTar, opts); err != nil { apiErr := errorCodes.ToAPIErr(s3Err) // If not set, convert or use BadRequest @@ -2830,7 +2613,9 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http. rcfg, _ := globalBucketObjectLockSys.Get(bucket) if rcfg.LockEnabled && opts.DeletePrefix { - writeErrorResponse(ctx, w, toAPIError(ctx, errors.New("force-delete is forbidden in a locked-enabled bucket")), r.URL) + apiErr := toAPIError(ctx, errInvalidArgument) + apiErr.Description = "force-delete is forbidden on Object Locking enabled buckets" + writeErrorResponse(ctx, w, apiErr, r.URL) return } @@ -2877,7 +2662,7 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http. return err } } - return + return err }) deleteObject := objectAPI.DeleteObject @@ -2915,9 +2700,7 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http. return } - defer globalCacheConfig.Delete(bucket, object) - - setPutObjHeaders(w, objInfo, true) + setPutObjHeaders(w, objInfo, true, r.Header) writeSuccessNoContent(w) eventName := event.ObjectRemovedDelete @@ -2936,7 +2719,7 @@ func (api objectAPIHandlers) DeleteObjectHandler(w http.ResponseWriter, r *http. 
Host: handlers.GetSourceIP(r), }) - if objInfo.ReplicationStatus == replication.Pending || objInfo.VersionPurgeStatus == Pending { + if objInfo.ReplicationStatus == replication.Pending || objInfo.VersionPurgeStatus == replication.VersionPurgePending { dmVersionID := "" versionID := "" if objInfo.DeleteMarker { @@ -2995,7 +2778,7 @@ func (api objectAPIHandlers) PutObjectLegalHoldHandler(w http.ResponseWriter, r writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } - if !hasContentMD5(r.Header) { + if !validateLengthAndChecksum(r) { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL) return } @@ -3005,7 +2788,7 @@ func (api objectAPIHandlers) PutObjectLegalHoldHandler(w http.ResponseWriter, r return } - legalHold, err := objectlock.ParseObjectLegalHold(io.LimitReader(r.Body, r.ContentLength)) + legalHold, err := objectlock.ParseObjectLegalHold(r.Body) if err != nil { apiErr := errorCodes.ToAPIErr(ErrMalformedXML) apiErr.Description = err.Error() @@ -3142,9 +2925,9 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r return } - cred, owner, s3Err := validateSignature(getRequestAuthType(r), r) - if s3Err != ErrNone { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL) + // Check permissions to perform this object retention operation + if s3Error := authenticateRequest(ctx, r, policy.PutObjectRetentionAction); s3Error != ErrNone { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL) return } @@ -3153,7 +2936,7 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r return } - if !hasContentMD5(r.Header) { + if !validateLengthAndChecksum(r) { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL) return } @@ -3171,6 +2954,9 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r return } + reqInfo := logger.GetReqInfo(ctx) + reqInfo.SetTags("retention", objRetention.String()) + opts, err := getOpts(ctx, r, bucket, object) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) @@ -3181,7 +2967,7 @@ func (api objectAPIHandlers) PutObjectRetentionHandler(w http.ResponseWriter, r MTime: opts.MTime, VersionID: opts.VersionID, EvalMetadataFn: func(oi *ObjectInfo, gerr error) (dsc ReplicateDecision, err error) { - if err := enforceRetentionBypassForPut(ctx, r, *oi, objRetention, cred, owner); err != nil { + if err := enforceRetentionBypassForPut(ctx, r, *oi, objRetention, reqInfo.Cred, reqInfo.Owner); err != nil { return dsc, err } if objRetention.Mode.Valid() { @@ -3335,11 +3121,11 @@ func (api objectAPIHandlers) GetObjectTaggingHandler(w http.ResponseWriter, r *h if isErrObjectNotFound(err) || isErrVersionNotFound(err) { proxytgts := getProxyTargets(ctx, bucket, object, opts) if !proxytgts.Empty() { - globalReplicationStats.incProxy(bucket, getObjectTaggingAPI, false) + globalReplicationStats.Load().incProxy(bucket, getObjectTaggingAPI, false) // proxy to replication target if site replication is in place. tags, gerr := proxyGetTaggingToRepTarget(ctx, bucket, object, opts, proxytgts) if gerr.Err != nil || tags == nil { - globalReplicationStats.incProxy(bucket, getObjectTaggingAPI, true) + globalReplicationStats.Load().incProxy(bucket, getObjectTaggingAPI, true) writeErrorResponse(ctx, w, toAPIError(ctx, gerr.Err), r.URL) return } // overlay tags from peer site. 
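// The incProxy calls above now go through globalReplicationStats.Load(), which suggests the
// global moved behind an atomic pointer. A minimal sketch of that pattern with invented
// names; whether MinIO uses sync/atomic.Pointer exactly like this is an assumption.
package main

import (
	"fmt"
	"sync/atomic"
)

type ReplicationStats struct {
	proxied atomic.Int64
}

func (r *ReplicationStats) incProxy() {
	if r == nil { // tolerate use before the stats object is published
		return
	}
	r.proxied.Add(1)
}

// globalStats is published once at startup and read lock-free afterwards.
var globalStats atomic.Pointer[ReplicationStats]

func main() {
	globalStats.Load().incProxy() // safe even before Store: Load returns nil
	globalStats.Store(&ReplicationStats{})
	globalStats.Load().incProxy()
	fmt.Println(globalStats.Load().proxied.Load()) // 1
}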
@@ -3438,11 +3224,11 @@ func (api objectAPIHandlers) PutObjectTaggingHandler(w http.ResponseWriter, r *h if isErrObjectNotFound(err) || isErrVersionNotFound(err) { proxytgts := getProxyTargets(ctx, bucket, object, opts) if !proxytgts.Empty() { - globalReplicationStats.incProxy(bucket, putObjectTaggingAPI, false) + globalReplicationStats.Load().incProxy(bucket, putObjectTaggingAPI, false) // proxy to replication target if site replication is in place. perr := proxyTaggingToRepTarget(ctx, bucket, object, tags, opts, proxytgts) if perr.Err != nil { - globalReplicationStats.incProxy(bucket, putObjectTaggingAPI, true) + globalReplicationStats.Load().incProxy(bucket, putObjectTaggingAPI, true) writeErrorResponse(ctx, w, toAPIError(ctx, perr.Err), r.URL) return } @@ -3535,11 +3321,11 @@ func (api objectAPIHandlers) DeleteObjectTaggingHandler(w http.ResponseWriter, r if isErrObjectNotFound(err) || isErrVersionNotFound(err) { proxytgts := getProxyTargets(ctx, bucket, object, opts) if !proxytgts.Empty() { - globalReplicationStats.incProxy(bucket, removeObjectTaggingAPI, false) + globalReplicationStats.Load().incProxy(bucket, removeObjectTaggingAPI, false) // proxy to replication target if active-active replication is in place. perr := proxyTaggingToRepTarget(ctx, bucket, object, nil, opts, proxytgts) if perr.Err != nil { - globalReplicationStats.incProxy(bucket, removeObjectTaggingAPI, true) + globalReplicationStats.Load().incProxy(bucket, removeObjectTaggingAPI, true) writeErrorResponse(ctx, w, toAPIError(ctx, perr.Err), r.URL) return } @@ -3701,6 +3487,7 @@ func (api objectAPIHandlers) PostRestoreObjectHandler(w http.ResponseWriter, r * VersionID: objInfo.VersionID, }, ObjectOptions{ VersionID: objInfo.VersionID, + MTime: objInfo.ModTime, }); err != nil { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidObjectState), r.URL) return @@ -3781,7 +3568,7 @@ func (api objectAPIHandlers) PostRestoreObjectHandler(w http.ResponseWriter, r * VersionID: objInfo.VersionID, } if err := objectAPI.RestoreTransitionedObject(rctx, bucket, object, opts); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to restore transitioned bucket/object %s/%s: %w", bucket, object, err)) + s3LogIf(ctx, fmt.Errorf("Unable to restore transitioned bucket/object %s/%s: %w", bucket, object, err)) return } diff --git a/cmd/object-handlers_test.go b/cmd/object-handlers_test.go index 6588e128588b7..ec71c69239444 100644 --- a/cmd/object-handlers_test.go +++ b/cmd/object-handlers_test.go @@ -29,6 +29,7 @@ import ( "hash" "hash/crc32" "io" + "maps" "net/http" "net/http/httptest" "net/url" @@ -61,7 +62,7 @@ const ( // Wrapper for calling HeadObject API handler tests for both Erasure multiple disks and FS single drive setup. 
func TestAPIHeadObjectHandler(t *testing.T) { - ExecObjectLayerAPITest(t, testAPIHeadObjectHandler, []string{"HeadObject"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testAPIHeadObjectHandler, endpoints: []string{"HeadObject"}}) } func testAPIHeadObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, @@ -207,7 +208,7 @@ func TestAPIHeadObjectHandlerWithEncryption(t *testing.T) { defer func() { globalPolicySys = nil }() defer DetectTestLeak(t)() - ExecObjectLayerAPITest(t, testAPIHeadObjectHandlerWithEncryption, []string{"NewMultipart", "PutObjectPart", "CompleteMultipart", "GetObject", "PutObject", "HeadObject"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testAPIHeadObjectHandlerWithEncryption, endpoints: []string{"NewMultipart", "PutObjectPart", "CompleteMultipart", "GetObject", "PutObject", "HeadObject"}}) } func testAPIHeadObjectHandlerWithEncryption(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, @@ -228,9 +229,7 @@ func testAPIHeadObjectHandlerWithEncryption(obj ObjectLayer, instanceType, bucke } mapCopy = func(m map[string]string) map[string]string { r := make(map[string]string, len(m)) - for k, v := range m { - r[k] = v - } + maps.Copy(r, m) return r } ) @@ -246,7 +245,7 @@ func testAPIHeadObjectHandlerWithEncryption(obj ObjectLayer, instanceType, bucke for _, l := range oi.partLengths { sum += l } - return + return sum } // set of inputs for uploading the objects before tests for @@ -662,9 +661,7 @@ func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName str } mapCopy = func(m map[string]string) map[string]string { r := make(map[string]string, len(m)) - for k, v := range m { - r[k] = v - } + maps.Copy(r, m) return r } ) @@ -680,7 +677,7 @@ func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName str for _, l := range oi.partLengths { sum += l } - return + return sum } // set of inputs for uploading the objects before tests for @@ -692,8 +689,8 @@ func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName str {"small-1", []int64{509}, make(map[string]string)}, {"small-2", []int64{5 * oneMiB}, make(map[string]string)}, // // // cases 4-7: multipart part objects - {"mp-0", []int64{5 * oneMiB, 1}, make(map[string]string)}, - {"mp-1", []int64{5*oneMiB + 1, 1}, make(map[string]string)}, + {"mp-0", []int64{5 * oneMiB, 10}, make(map[string]string)}, + {"mp-1", []int64{5*oneMiB + 1, 10}, make(map[string]string)}, {"mp-2", []int64{5487701, 5487799, 3}, make(map[string]string)}, {"mp-3", []int64{10499807, 10499963, 7}, make(map[string]string)}, // cases 8-11: small single part objects with encryption @@ -702,12 +699,14 @@ func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName str {"enc-small-1", []int64{509}, mapCopy(metaWithSSEC)}, {"enc-small-2", []int64{5 * oneMiB}, mapCopy(metaWithSSEC)}, // cases 12-15: multipart part objects with encryption - {"enc-mp-0", []int64{5 * oneMiB, 1}, mapCopy(metaWithSSEC)}, - {"enc-mp-1", []int64{5*oneMiB + 1, 1}, mapCopy(metaWithSSEC)}, + {"enc-mp-0", []int64{5 * oneMiB, 10}, mapCopy(metaWithSSEC)}, + {"enc-mp-1", []int64{5*oneMiB + 1, 10}, mapCopy(metaWithSSEC)}, {"enc-mp-2", []int64{5487701, 5487799, 3}, mapCopy(metaWithSSEC)}, {"enc-mp-3", []int64{10499807, 10499963, 7}, mapCopy(metaWithSSEC)}, } - + if testing.Short() { + objectInputs = append(objectInputs[0:5], objectInputs[8:11]...) + } // iterate through the above set of inputs and upload the object. 
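// The tests above switch ExecObjectLayerAPITest from positional arguments to an
// ExecObjectLayerAPITestArgs struct. A minimal sketch of that args-struct style with
// invented names: new optional fields can be added later without touching every call site.
package main

import "testing"

type apiTestArgs struct {
	t         *testing.T
	endpoints []string
	run       func(t *testing.T, endpoints []string)
}

func execAPITest(args apiTestArgs) {
	args.t.Helper()
	args.run(args.t, args.endpoints)
}

func TestExample(t *testing.T) {
	execAPITest(apiTestArgs{
		t:         t,
		endpoints: []string{"HeadObject"},
		run: func(t *testing.T, endpoints []string) {
			if len(endpoints) == 0 {
				t.Fatal("no endpoints registered")
			}
		},
	})
}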
for _, input := range objectInputs { uploadTestObject(t, apiRouter, credentials, bucketName, input.objectName, input.partLengths, input.metaData, false) @@ -762,6 +761,7 @@ func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName str readers = append(readers, NewDummyDataGen(p, cumulativeSum)) cumulativeSum += p } + refReader := io.LimitReader(ioutilx.NewSkipReader(io.MultiReader(readers...), off), length) if ok, msg := cmpReaders(refReader, rec.Body); !ok { t.Fatalf("(%s) Object: %s Case %d ByteRange: %s --> data mismatch! (msg: %s)", instanceType, oi.objectName, i+1, byteRange, msg) @@ -811,7 +811,6 @@ func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName str caseNumber++ } } - } // HTTP request for testing when `objectLayer` is set to `nil`. @@ -858,9 +857,7 @@ func testAPIGetObjectWithPartNumberHandler(obj ObjectLayer, instanceType, bucket } mapCopy = func(m map[string]string) map[string]string { r := make(map[string]string, len(m)) - for k, v := range m { - r[k] = v - } + maps.Copy(r, m) return r } ) @@ -898,6 +895,14 @@ func testAPIGetObjectWithPartNumberHandler(obj ObjectLayer, instanceType, bucket {"enc-mp-3", []int64{10499807, 10499963, 7}, mapCopy(metaWithSSEC)}, } + // SSEC can't be used with compression + globalCompressConfigMu.Lock() + compressEnabled := globalCompressConfig.Enabled + globalCompressConfigMu.Unlock() + if compressEnabled { + objectInputs = objectInputs[0:9] + } + // iterate through the above set of inputs and upload the object. for _, input := range objectInputs { uploadTestObject(t, apiRouter, credentials, bucketName, input.objectName, input.partLengths, input.metaData, false) @@ -1237,6 +1242,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam if err != nil { t.Fatalf("Error injecting faults into the request: %v.", err) } + // Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler. // Call the ServeHTTP to execute the handler,`func (api objectAPIHandlers) GetObjectHandler` handles the request. apiRouter.ServeHTTP(rec, req) @@ -1555,8 +1561,8 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a } } } - } + if testCase.expectedRespStatus == http.StatusOK { buffer := new(bytes.Buffer) // Fetch the object to check whether the content is same as the one uploaded via PutObject. @@ -1668,7 +1674,7 @@ func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, a // expected. func TestAPICopyObjectPartHandlerSanity(t *testing.T) { defer DetectTestLeak(t)() - ExecExtendedObjectLayerAPITest(t, testAPICopyObjectPartHandlerSanity, []string{"CopyObjectPart"}) + ExecExtendedObjectLayerAPITest(t, testAPICopyObjectPartHandlerSanity, []string{"NewMultipart", "CompleteMultipart", "CopyObjectPart"}) } func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, @@ -1676,7 +1682,6 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam ) { objectName := "test-object" var err error - opts := ObjectOptions{} // set of byte data for PutObject. // object has to be created before running tests for Copy Object. // this is required even to assert the copied object, @@ -1708,18 +1713,26 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam } } - // Initiate Multipart upload for testing PutObjectPartHandler. 
testObject := "testobject" - // PutObjectPart API HTTP Handler has to be tested in isolation, - // that is without any other handler being registered, - // That's why NewMultipartUpload is initiated using ObjectLayer. - res, err := obj.NewMultipartUpload(context.Background(), bucketName, testObject, opts) + // Initiate Multipart upload for testing CopyObjectPartHandler. + rec := httptest.NewRecorder() + req, err := newTestSignedRequestV4(http.MethodPost, getNewMultipartURL("", bucketName, testObject), + 0, nil, credentials.AccessKey, credentials.SecretKey, nil) if err != nil { - // Failed to create NewMultipartUpload, abort. - t.Fatalf("MinIO %s : %s", instanceType, err) + t.Fatalf("Failed to create HTTP request for NewMultipart Request: %v", err) } - uploadID := res.UploadID + apiRouter.ServeHTTP(rec, req) + if rec.Code != http.StatusOK { + t.Fatalf("%s: Expected the response status to be `%d`, but instead found `%d`", instanceType, http.StatusOK, rec.Code) + } + decoder := xml.NewDecoder(rec.Body) + multipartResponse := &InitiateMultipartUploadResponse{} + err = decoder.Decode(multipartResponse) + if err != nil { + t.Fatalf("Error decoding the recorded response Body") + } + uploadID := multipartResponse.UploadID a := 0 b := globalMinPartSize @@ -1760,18 +1773,34 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam }) } - result, err := obj.CompleteMultipartUpload(context.Background(), bucketName, testObject, uploadID, parts, ObjectOptions{}) + var completeBytes []byte + // Complete multipart upload parts. + completeUploads := &CompleteMultipartUpload{ + Parts: parts, + } + completeBytes, err = xml.Marshal(completeUploads) if err != nil { - t.Fatalf("Test: %s complete multipart upload failed: %v", instanceType, err) + t.Fatalf("Error XML encoding of parts: %s.", err) } - if result.Size != int64(len(bytesData[0].byteData)) { - t.Fatalf("Test: %s expected size not written: expected %d, got %d", instanceType, len(bytesData[0].byteData), result.Size) + // Indicating that all parts are uploaded and initiating CompleteMultipartUpload. + req, err = newTestSignedRequestV4(http.MethodPost, getCompleteMultipartUploadURL("", bucketName, testObject, uploadID), + int64(len(completeBytes)), bytes.NewReader(completeBytes), credentials.AccessKey, credentials.SecretKey, nil) + if err != nil { + t.Fatalf("Failed to create HTTP request for CompleteMultipartUpload: %v", err) + } + + rec = httptest.NewRecorder() + + apiRouter.ServeHTTP(rec, req) + // Assert the response code with the expected status. 
+ if rec.Code != http.StatusOK { + t.Errorf("Test %s: Expected the response status to be `%d`, but instead found `%d`", instanceType, http.StatusOK, rec.Code) } var buf bytes.Buffer r, err := obj.GetObjectNInfo(context.Background(), bucketName, testObject, nil, nil, ObjectOptions{}) if err != nil { - t.Fatalf("Test: %s reading completed file failed: %v", instanceType, err) + t.Fatalf("Test %s: reading completed file failed: %v", instanceType, err) } if _, err = io.Copy(&buf, r); err != nil { r.Close() @@ -1779,7 +1808,14 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam } r.Close() if !bytes.Equal(buf.Bytes(), bytesData[0].byteData) { - t.Fatalf("Test: %s returned data is not expected corruption detected:", instanceType) + t.Fatalf("Test %s: returned data is not expected corruption detected:", instanceType) + } + + globalCompressConfigMu.Lock() + compressEnabled := globalCompressConfig.Enabled + globalCompressConfigMu.Unlock() + if compressEnabled && !r.ObjInfo.IsCompressed() { + t.Errorf("Test %s: object found to be uncompressed though compression was enabled", instanceType) } } @@ -1851,7 +1887,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri // expected output. expectedRespStatus int }{ - // Test case - 1, copy part 1 from from newObject1, ignore request headers. + // Test case - 1, copy part 1 from newObject1, ignore request headers. { bucketName: bucketName, uploadID: uploadID, @@ -2011,7 +2047,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri secretKey: credentials.SecretKey, expectedRespStatus: http.StatusOK, }, - // Test case - 14, copy part 1 from from newObject1 with null versionId + // Test case - 14, copy part 1 from newObject1 with null versionId { bucketName: bucketName, uploadID: uploadID, @@ -2020,7 +2056,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri secretKey: credentials.SecretKey, expectedRespStatus: http.StatusOK, }, - // Test case - 15, copy part 1 from from newObject1 with non null versionId + // Test case - 15, copy part 1 from newObject1 with non null versionId { bucketName: bucketName, uploadID: uploadID, @@ -2118,7 +2154,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri // Wrapper for calling Copy Object API handler tests for both Erasure multiple disks and single node setup. func TestAPICopyObjectHandler(t *testing.T) { defer DetectTestLeak(t)() - ExecExtendedObjectLayerAPITest(t, testAPICopyObjectHandler, []string{"CopyObject"}) + ExecExtendedObjectLayerAPITest(t, testAPICopyObjectHandler, []string{"CopyObject", "PutObject"}) } func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, @@ -2149,7 +2185,10 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, new(bytes.Buffer), new(bytes.Buffer), } - + bucketInfo, err := obj.GetBucketInfo(context.Background(), bucketName, BucketOptions{}) + if err != nil { + t.Fatalf("Test -1: %s: Failed to get bucket info: %s", instanceType, err) + } // set of inputs for uploading the objects before tests for downloading is done. putObjectInputs := []struct { bucketName string @@ -2166,21 +2205,29 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, // used for anonymous HTTP request test. 
{bucketName, anonObject, int64(len(bytesData[0].byteData)), bytesData[0].byteData, bytesData[0].md5sum, make(map[string]string)}, } - // iterate through the above set of inputs and upload the object. for i, input := range putObjectInputs { - // uploading the object. - var objInfo ObjectInfo - objInfo, err = obj.PutObject(context.Background(), input.bucketName, input.objectName, mustGetPutObjReader(t, bytes.NewReader(input.textData), input.contentLength, input.md5sum, ""), ObjectOptions{UserDefined: input.metaData}) - // if object upload fails stop the test. + rec := httptest.NewRecorder() + req, err := newTestSignedRequestV4(http.MethodPut, getPutObjectURL("", input.bucketName, input.objectName), + input.contentLength, bytes.NewReader(input.textData), credentials.AccessKey, credentials.SecretKey, nil) if err != nil { - t.Fatalf("Put Object case %d: Error uploading object: %v", i+1, err) + t.Fatalf("Test %d: Failed to create HTTP request for Put Object: %v", i, err) } - if objInfo.ETag != input.md5sum { - t.Fatalf("Put Object case %d: Checksum mismatched: got %s, expected %s", i+1, input.md5sum, objInfo.ETag) + apiRouter.ServeHTTP(rec, req) + if rec.Code != http.StatusOK { + b, err := io.ReadAll(rec.Body) + if err != nil { + t.Fatal(err) + } + var apiErr APIErrorResponse + err = xml.Unmarshal(b, &apiErr) + if err != nil { + t.Fatal(err) + } + gotErr := apiErr.Code + t.Errorf("test %d: want api got %q", i, gotErr) } } - // test cases with inputs and expected result for Copy Object. testCases := []struct { bucketName string @@ -2433,6 +2480,22 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, } for i, testCase := range testCases { + if bucketInfo.Versioning { + if strings.Contains(testCase.copySourceHeader, "versionId=null") { + testCase.expectedRespStatus = http.StatusNotFound + } + } + values := url.Values{} + if testCase.expectedRespStatus == http.StatusOK { + r, err := obj.GetObjectNInfo(context.Background(), testCase.bucketName, objectName, nil, nil, opts) + if err != nil { + t.Fatalf("Test %d: %s reading completed file failed: %v", i, instanceType, err) + } + r.Close() + if r.ObjInfo.VersionID != "" { + values.Set(xhttp.VersionID, r.ObjInfo.VersionID) + } + } var req *http.Request var reqV2 *http.Request // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler. @@ -2445,7 +2508,11 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, } // "X-Amz-Copy-Source" header contains the information about the source bucket and the object to copied. 
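// The CopyObject tests below append the source version to X-Amz-Copy-Source when the bucket
// is versioned. A minimal sketch of composing that header value; the function name is
// invented for illustration.
package main

import (
	"fmt"
	"net/url"
)

// copySourceHeader builds an X-Amz-Copy-Source value, appending ?versionId=... only when a
// concrete version is known.
func copySourceHeader(bucket, object, versionID string) string {
	src := bucket + "/" + object
	if versionID == "" {
		return src
	}
	v := url.Values{}
	v.Set("versionId", versionID)
	return src + "?" + v.Encode()
}

func main() {
	fmt.Println(copySourceHeader("testbucket", "dir/obj", ""))
	fmt.Println(copySourceHeader("testbucket", "dir/obj", "0d9b4b74-13a9-4b2b-9a2d-1a44b3f2f6f7"))
}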
if testCase.copySourceHeader != "" { - req.Header.Set("X-Amz-Copy-Source", testCase.copySourceHeader) + if values.Encode() != "" && !strings.Contains(testCase.copySourceHeader, "?") { + req.Header.Set("X-Amz-Copy-Source", testCase.copySourceHeader+"?"+values.Encode()) + } else { + req.Header.Set("X-Amz-Copy-Source", testCase.copySourceHeader) + } } if testCase.copyModifiedHeader != "" { req.Header.Set("X-Amz-Copy-Source-If-Modified-Since", testCase.copyModifiedHeader) @@ -2505,6 +2572,13 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, if !bytes.Equal(bytesData[0].byteData, buffers[0].Bytes()) { t.Errorf("Test %d: %s: Data Mismatch: Data fetched back from the copied object doesn't match the original one.", i, instanceType) } + + globalCompressConfigMu.Lock() + compressEnabled := globalCompressConfig.Enabled + globalCompressConfigMu.Unlock() + if compressEnabled && !r.ObjInfo.IsCompressed() { + t.Errorf("Test %d %s: object found to be uncompressed though compression was enabled", i, instanceType) + } } // Verify response of the V2 signed HTTP request. @@ -2551,10 +2625,10 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, if testCase.copySourceSame { // encryption will rotate creds, so fail only for non-encryption scenario. if GlobalKMS == nil { - t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i, instanceType, testCase.expectedRespStatus, rec.Code) + t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i, instanceType, testCase.expectedRespStatus, recV2.Code) } } else { - t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code) + t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code) } } } @@ -2587,7 +2661,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, // The UploadID from the response body is parsed and its existence is asserted with an attempt to ListParts using it. func TestAPINewMultipartHandler(t *testing.T) { defer DetectTestLeak(t)() - ExecObjectLayerAPITest(t, testAPINewMultipartHandler, []string{"NewMultipart"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testAPINewMultipartHandler, endpoints: []string{"NewMultipart"}}) } func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, @@ -2726,7 +2800,7 @@ func testAPINewMultipartHandler(obj ObjectLayer, instanceType, bucketName string // The UploadID from the response body is parsed and its existence is asserted with an attempt to ListParts using it. func TestAPINewMultipartHandlerParallel(t *testing.T) { defer DetectTestLeak(t)() - ExecObjectLayerAPITest(t, testAPINewMultipartHandlerParallel, []string{"NewMultipart"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testAPINewMultipartHandlerParallel, endpoints: []string{"NewMultipart"}}) } func testAPINewMultipartHandlerParallel(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, @@ -2740,7 +2814,7 @@ func testAPINewMultipartHandlerParallel(obj ObjectLayer, instanceType, bucketNam objectName := "test-object-new-multipart-parallel" var wg sync.WaitGroup - for i := 0; i < 10; i++ { + for range 10 { wg.Add(1) // Initiate NewMultipart upload on the same object 10 times concurrrently. 
go func() { @@ -2789,7 +2863,7 @@ func testAPINewMultipartHandlerParallel(obj ObjectLayer, instanceType, bucketNam // The UploadID from the response body is parsed and its existence is asserted with an attempt to ListParts using it. func TestAPICompleteMultipartHandler(t *testing.T) { defer DetectTestLeak(t)() - ExecObjectLayerAPITest(t, testAPICompleteMultipartHandler, []string{"CompleteMultipart"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testAPICompleteMultipartHandler, endpoints: []string{"CompleteMultipart"}}) } func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, @@ -2804,7 +2878,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s // upload IDs collected. var uploadIDs []string - for i := 0; i < 2; i++ { + for range 2 { // initiate new multipart uploadID. res, err := obj.NewMultipartUpload(context.Background(), bucketName, objectName, opts) if err != nil { @@ -2913,7 +2987,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s s3MD5 := getCompleteMultipartMD5(inputParts[3].parts) // generating the response body content for the success case. - successResponse := generateCompleteMultpartUploadResponse(bucketName, objectName, getGetObjectURL("", bucketName, objectName), ObjectInfo{ETag: s3MD5}) + successResponse := generateCompleteMultipartUploadResponse(bucketName, objectName, getGetObjectURL("", bucketName, objectName), ObjectInfo{ETag: s3MD5}, nil) encodedSuccessResponse := encodeResponse(successResponse) ctx := context.Background() @@ -3158,7 +3232,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s // The UploadID from the response body is parsed and its existence is asserted with an attempt to ListParts using it. func TestAPIAbortMultipartHandler(t *testing.T) { defer DetectTestLeak(t)() - ExecObjectLayerAPITest(t, testAPIAbortMultipartHandler, []string{"AbortMultipart"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testAPIAbortMultipartHandler, endpoints: []string{"AbortMultipart"}}) } func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, @@ -3172,7 +3246,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri // upload IDs collected. var uploadIDs []string - for i := 0; i < 2; i++ { + for range 2 { // initiate new multipart uploadID. res, err := obj.NewMultipartUpload(context.Background(), bucketName, objectName, opts) if err != nil { @@ -3318,7 +3392,7 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri // Wrapper for calling Delete Object API handler tests for both Erasure multiple disks and FS single drive setup. func TestAPIDeleteObjectHandler(t *testing.T) { defer DetectTestLeak(t)() - ExecObjectLayerAPITest(t, testAPIDeleteObjectHandler, []string{"DeleteObject"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testAPIDeleteObjectHandler, endpoints: []string{"DeleteObject"}}) } func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, @@ -3440,7 +3514,6 @@ func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string t.Errorf("Case %d: MinIO %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code) } - } // Test for Anonymous/unsigned http request. 
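// Several counting loops in these tests switch to Go 1.22's integer range when the index is
// unused. A one-line illustration of the idiom.
package main

import "fmt"

func main() {
	// `for range 10` runs the body ten times, replacing `for i := 0; i < 10; i++`
	// when the index is never referenced.
	count := 0
	for range 10 {
		count++
	}
	fmt.Println(count) // 10
}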
@@ -3880,8 +3953,7 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin // when signature type of the HTTP request is `Presigned`. func TestAPIListObjectPartsHandlerPreSign(t *testing.T) { defer DetectTestLeak(t)() - ExecObjectLayerAPITest(t, testAPIListObjectPartsHandlerPreSign, - []string{"PutObjectPart", "NewMultipart", "ListObjectParts"}) + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: testAPIListObjectPartsHandlerPreSign, endpoints: []string{"PutObjectPart", "NewMultipart", "ListObjectParts"}}) } func testAPIListObjectPartsHandlerPreSign(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, @@ -4119,7 +4191,6 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str // validate the error response. if test.expectedErr != noAPIErr { - var errBytes []byte // read the response body. errBytes, err = io.ReadAll(rec.Result().Body) diff --git a/cmd/object-lambda-handlers.go b/cmd/object-lambda-handlers.go index 3405d9358c09d..1ced5165d89cf 100644 --- a/cmd/object-lambda-handlers.go +++ b/cmd/object-lambda-handlers.go @@ -19,9 +19,12 @@ package cmd import ( "crypto/subtle" + "encoding/hex" "io" "net/http" "net/url" + "strconv" + "strings" "time" "github.com/klauspost/compress/gzhttp" @@ -29,15 +32,16 @@ import ( miniogo "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" "github.com/minio/minio/internal/auth" levent "github.com/minio/minio/internal/config/lambda/event" + "github.com/minio/minio/internal/hash/sha256" xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" ) -func getLambdaEventData(bucket, object string, cred auth.Credentials, r *http.Request) (levent.Event, error) { +var getLambdaEventData = func(bucket, object string, cred auth.Credentials, r *http.Request) (levent.Event, error) { host := globalLocalNodeName secure := globalIsTLS if globalMinioEndpointURL != nil { @@ -55,7 +59,7 @@ func getLambdaEventData(bucket, object string, cred auth.Credentials, r *http.Re Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken), Secure: secure, Transport: globalRemoteTargetTransport, - Region: globalSite.Region, + Region: globalSite.Region(), }) if err != nil { return levent.Event{}, err @@ -77,16 +81,13 @@ func getLambdaEventData(bucket, object string, cred auth.Credentials, r *http.Re return levent.Event{}, err } - token, err := authenticateNode(cred.AccessKey, cred.SecretKey, u.RawQuery) - if err != nil { - return levent.Event{}, err - } + ckSum := sha256.Sum256([]byte(cred.AccessKey + u.RawQuery)) eventData := levent.Event{ GetObjectContext: &levent.GetObjectContext{ InputS3URL: u.String(), OutputRoute: shortuuid.New(), - OutputToken: token, + OutputToken: hex.EncodeToString(ckSum[:]), }, UserRequest: levent.UserRequest{ URL: r.URL.String(), @@ -94,87 +95,13 @@ func getLambdaEventData(bucket, object string, cred auth.Credentials, r *http.Re }, UserIdentity: levent.Identity{ Type: "IAMUser", - PrincipalID: cred.AccessKey, - AccessKeyID: cred.SecretKey, + PrincipalID: cred.ParentUser, + AccessKeyID: cred.AccessKey, }, } return eventData, nil } -var statusTextToCode = map[string]int{ - "Continue": http.StatusContinue, - "Switching Protocols": http.StatusSwitchingProtocols, - "Processing": http.StatusProcessing, - "Early Hints": http.StatusEarlyHints, - "OK": http.StatusOK, - "Created": http.StatusCreated, - 
"Accepted": http.StatusAccepted, - "Non-Authoritative Information": http.StatusNonAuthoritativeInfo, - "No Content": http.StatusNoContent, - "Reset Content": http.StatusResetContent, - "Partial Content": http.StatusPartialContent, - "Multi-Status": http.StatusMultiStatus, - "Already Reported": http.StatusAlreadyReported, - "IM Used": http.StatusIMUsed, - "Multiple Choices": http.StatusMultipleChoices, - "Moved Permanently": http.StatusMovedPermanently, - "Found": http.StatusFound, - "See Other": http.StatusSeeOther, - "Not Modified": http.StatusNotModified, - "Use Proxy": http.StatusUseProxy, - "Temporary Redirect": http.StatusTemporaryRedirect, - "Permanent Redirect": http.StatusPermanentRedirect, - "Bad Request": http.StatusBadRequest, - "Unauthorized": http.StatusUnauthorized, - "Payment Required": http.StatusPaymentRequired, - "Forbidden": http.StatusForbidden, - "Not Found": http.StatusNotFound, - "Method Not Allowed": http.StatusMethodNotAllowed, - "Not Acceptable": http.StatusNotAcceptable, - "Proxy Authentication Required": http.StatusProxyAuthRequired, - "Request Timeout": http.StatusRequestTimeout, - "Conflict": http.StatusConflict, - "Gone": http.StatusGone, - "Length Required": http.StatusLengthRequired, - "Precondition Failed": http.StatusPreconditionFailed, - "Request Entity Too Large": http.StatusRequestEntityTooLarge, - "Request URI Too Long": http.StatusRequestURITooLong, - "Unsupported Media Type": http.StatusUnsupportedMediaType, - "Requested Range Not Satisfiable": http.StatusRequestedRangeNotSatisfiable, - "Expectation Failed": http.StatusExpectationFailed, - "I'm a teapot": http.StatusTeapot, - "Misdirected Request": http.StatusMisdirectedRequest, - "Unprocessable Entity": http.StatusUnprocessableEntity, - "Locked": http.StatusLocked, - "Failed Dependency": http.StatusFailedDependency, - "Too Early": http.StatusTooEarly, - "Upgrade Required": http.StatusUpgradeRequired, - "Precondition Required": http.StatusPreconditionRequired, - "Too Many Requests": http.StatusTooManyRequests, - "Request Header Fields Too Large": http.StatusRequestHeaderFieldsTooLarge, - "Unavailable For Legal Reasons": http.StatusUnavailableForLegalReasons, - "Internal Server Error": http.StatusInternalServerError, - "Not Implemented": http.StatusNotImplemented, - "Bad Gateway": http.StatusBadGateway, - "Service Unavailable": http.StatusServiceUnavailable, - "Gateway Timeout": http.StatusGatewayTimeout, - "HTTP Version Not Supported": http.StatusHTTPVersionNotSupported, - "Variant Also Negotiates": http.StatusVariantAlsoNegotiates, - "Insufficient Storage": http.StatusInsufficientStorage, - "Loop Detected": http.StatusLoopDetected, - "Not Extended": http.StatusNotExtended, - "Network Authentication Required": http.StatusNetworkAuthenticationRequired, -} - -// StatusCode returns a HTTP Status code for the HTTP text. It returns -1 -// if the text is unknown. 
-func StatusCode(text string) int { - if code, ok := statusTextToCode[text]; ok { - return code - } - return -1 -} - func fwdHeadersToS3(h http.Header, w http.ResponseWriter) { const trim = "x-amz-fwd-header-" for k, v := range h { @@ -184,22 +111,29 @@ func fwdHeadersToS3(h http.Header, w http.ResponseWriter) { } } -func fwdStatusToAPIError(resp *http.Response) *APIError { - if status := resp.Header.Get(xhttp.AmzFwdStatus); status != "" && StatusCode(status) > -1 { - apiErr := &APIError{ - HTTPStatusCode: StatusCode(status), - Description: resp.Header.Get(xhttp.AmzFwdErrorMessage), - Code: resp.Header.Get(xhttp.AmzFwdErrorCode), - } - if apiErr.HTTPStatusCode == http.StatusOK { - return nil - } - return apiErr +func fwdStatusToAPIError(statusCode int, resp *http.Response) *APIError { + if statusCode < http.StatusBadRequest { + return nil + } + desc := resp.Header.Get(xhttp.AmzFwdErrorMessage) + if strings.TrimSpace(desc) == "" { + apiErr := errorCodes.ToAPIErr(ErrInvalidRequest) + return &apiErr + } + code := resp.Header.Get(xhttp.AmzFwdErrorCode) + if strings.TrimSpace(code) == "" { + apiErr := errorCodes.ToAPIErr(ErrInvalidRequest) + apiErr.Description = desc + return &apiErr + } + return &APIError{ + HTTPStatusCode: statusCode, + Description: desc, + Code: code, } - return nil } -// GetObjectLamdbaHandler - GET Object with transformed data via lambda functions +// GetObjectLambdaHandler - GET Object with transformed data via lambda functions // ---------- // This implementation of the GET operation applies lambda functions and returns the // response generated via the lambda functions. To use this API, you must have READ access @@ -263,26 +197,31 @@ func (api objectAPIHandlers) GetObjectLambdaHandler(w http.ResponseWriter, r *ht return } + statusCode := resp.StatusCode + if status := resp.Header.Get(xhttp.AmzFwdStatus); status != "" { + statusCode, err = strconv.Atoi(status) + if err != nil { + writeErrorResponse(ctx, w, APIError{ + Code: "LambdaFunctionStatusError", + HTTPStatusCode: http.StatusBadRequest, + Description: err.Error(), + }, r.URL) + return + } + } + // Set all the relevant lambda forward headers if found. fwdHeadersToS3(resp.Header, w) - if apiErr := fwdStatusToAPIError(resp); apiErr != nil { + if apiErr := fwdStatusToAPIError(statusCode, resp); apiErr != nil { writeErrorResponse(ctx, w, *apiErr, r.URL) return } - if resp.StatusCode != http.StatusOK { - writeErrorResponse(ctx, w, APIError{ - Code: "LambdaFunctionError", - HTTPStatusCode: resp.StatusCode, - Description: "unexpected failure reported from lambda function", - }, r.URL) - return - } - if !globalAPIConfig.shouldGzipObjects() { w.Header().Set(gzhttp.HeaderNoCompression, "true") } + w.WriteHeader(statusCode) io.Copy(w, resp.Body) } diff --git a/cmd/object-lambda-handlers_test.go b/cmd/object-lambda-handlers_test.go new file mode 100644 index 0000000000000..458034fec8a87 --- /dev/null +++ b/cmd/object-lambda-handlers_test.go @@ -0,0 +1,174 @@ +// Copyright (c) 2015-2025 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "io" + "net/http" + "net/http/httptest" + "net/url" + "strconv" + "testing" + "time" + + "github.com/minio/minio-go/v7/pkg/signer" + "github.com/minio/minio/internal/auth" + "github.com/minio/minio/internal/config" + "github.com/minio/minio/internal/config/lambda" + levent "github.com/minio/minio/internal/config/lambda/event" + xhttp "github.com/minio/minio/internal/http" +) + +func TestGetObjectLambdaHandler(t *testing.T) { + testCases := []struct { + name string + statusCode int + body string + contentType string + expectStatus int + }{ + { + name: "Success 206 Partial Content", + statusCode: 206, + body: "partial-object-data", + contentType: "text/plain", + expectStatus: 206, + }, + { + name: "Success 200 OK", + statusCode: 200, + body: "full-object-data", + contentType: "application/json", + expectStatus: 200, + }, + { + name: "Client Error 400", + statusCode: 400, + body: "bad-request", + contentType: "application/xml", + expectStatus: 400, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + runObjectLambdaTest(t, tc.statusCode, tc.body, tc.contentType, tc.expectStatus) + }) + } +} + +func runObjectLambdaTest(t *testing.T, lambdaStatus int, lambdaBody, contentType string, expectStatus int) { + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{ + t: t, + objAPITest: func(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, credentials auth.Credentials, t *testing.T) { + objectName := "dummy-object" + functionID := "lambda1" + functionToken := "token123" + + // Lambda mock server + lambdaServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set(xhttp.AmzRequestRoute, functionID) + w.Header().Set(xhttp.AmzRequestToken, functionToken) + w.Header().Set(xhttp.AmzFwdHeaderContentType, contentType) + w.Header().Set(xhttp.AmzFwdStatus, strconv.Itoa(lambdaStatus)) + w.WriteHeader(lambdaStatus) + w.Write([]byte(lambdaBody)) + })) + defer lambdaServer.Close() + + lambdaARN := "arn:minio:s3-object-lambda::lambda1:webhook" + + cfg := config.New() + cfg[config.LambdaWebhookSubSys] = map[string]config.KVS{ + functionID: { + {Key: "endpoint", Value: lambdaServer.URL}, + {Key: "enable", Value: config.EnableOn}, + }, + } + cfg[config.APISubSys] = map[string]config.KVS{ + "api": { + {Key: "gzip", Value: config.EnableOff}, + }, + } + + var err error + globalLambdaTargetList, err = lambda.FetchEnabledTargets(context.Background(), cfg, http.DefaultTransport.(*http.Transport)) + if err != nil { + t.Fatalf("failed to load lambda targets: %v", err) + } + + getLambdaEventData = func(_, _ string, _ auth.Credentials, _ *http.Request) (levent.Event, error) { + return levent.Event{ + GetObjectContext: &levent.GetObjectContext{ + OutputRoute: functionID, + OutputToken: functionToken, + InputS3URL: "http://localhost/dummy", + }, + UserRequest: levent.UserRequest{ + Headers: map[string][]string{}, + }, + UserIdentity: levent.Identity{ + PrincipalID: "test-user", + }, + }, nil + } + + body := []byte{} + req := httptest.NewRequest("GET", "/objectlambda/"+bucketName+"/"+objectName+"?lambdaArn="+url.QueryEscape(lambdaARN), bytes.NewReader(body)) + req.Form = url.Values{"lambdaArn": []string{lambdaARN}} + req.Header.Set("Host", "localhost") + 
req.Header.Set("X-Amz-Date", time.Now().UTC().Format("20060102T150405Z")) + sum := sha256.Sum256(body) + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum[:])) + req = signer.SignV4(*req, credentials.AccessKey, credentials.SecretKey, "", "us-east-1") + + rec := httptest.NewRecorder() + api := objectAPIHandlers{ + ObjectAPI: func() ObjectLayer { + return obj + }, + } + api.GetObjectLambdaHandler(rec, req) + + res := rec.Result() + defer res.Body.Close() + respBody, _ := io.ReadAll(res.Body) + + if res.StatusCode != expectStatus { + t.Errorf("Expected status %d, got %d", expectStatus, res.StatusCode) + } + + if contentType != "" { + if ct := res.Header.Get("Content-Type"); ct != contentType { + t.Errorf("Expected Content-Type %q, got %q", contentType, ct) + } + } + + if res.StatusCode < 400 { + if string(respBody) != lambdaBody { + t.Errorf("Expected body %q, got %q", lambdaBody, string(respBody)) + } + } + }, + endpoints: []string{"GetObject"}, + }) +} diff --git a/cmd/object-multipart-handlers.go b/cmd/object-multipart-handlers.go index d7fb862133e59..12a99b312ea56 100644 --- a/cmd/object-multipart-handlers.go +++ b/cmd/object-multipart-handlers.go @@ -20,7 +20,9 @@ package cmd import ( "bufio" "context" + "fmt" "io" + "maps" "net/http" "net/url" "sort" @@ -36,19 +38,18 @@ import ( sse "github.com/minio/minio/internal/bucket/encryption" objectlock "github.com/minio/minio/internal/bucket/object/lock" "github.com/minio/minio/internal/bucket/replication" - "github.com/minio/minio/internal/config/cache" "github.com/minio/minio/internal/config/dns" "github.com/minio/minio/internal/config/storageclass" "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/etag" "github.com/minio/minio/internal/event" - "github.com/minio/minio/internal/fips" "github.com/minio/minio/internal/handlers" "github.com/minio/minio/internal/hash" + "github.com/minio/minio/internal/hash/sha256" xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" "github.com/minio/sio" ) @@ -116,14 +117,24 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r return } - if crypto.SSEC.IsRequested(r.Header) && isReplicationEnabled(ctx, bucket) { - writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParametersSSEC), r.URL) - return + _, sourceReplReq := r.Header[xhttp.MinIOSourceReplicationRequest] + ssecRepHeaders := []string{ + "X-Minio-Replication-Server-Side-Encryption-Seal-Algorithm", + "X-Minio-Replication-Server-Side-Encryption-Sealed-Key", + "X-Minio-Replication-Server-Side-Encryption-Iv", } - - if err = setEncryptionMetadata(r, bucket, object, encMetadata); err != nil { - writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) - return + ssecRep := false + for _, header := range ssecRepHeaders { + if val := r.Header.Get(header); val != "" { + ssecRep = true + break + } + } + if !ssecRep || !sourceReplReq { + if err = setEncryptionMetadata(r, bucket, object, encMetadata); err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) + return + } } // Set this for multipart only operations, we need to differentiate during // decryption if the file was actually multipart or not. @@ -173,9 +184,7 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r // We need to preserve the encryption headers set in EncryptRequest, // so we do not want to override them, copy them instead. 
- for k, v := range encMetadata { - metadata[k] = v - } + maps.Copy(metadata, encMetadata) // Ensure that metadata does not contain sensitive information crypto.RemoveSensitiveEntries(metadata) @@ -191,6 +200,9 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r return } + if r.Header.Get(xhttp.IfMatch) != "" { + opts.HasIfMatch = true + } if opts.PreserveETag != "" || r.Header.Get(xhttp.IfMatch) != "" || r.Header.Get(xhttp.IfNoneMatch) != "" { @@ -203,14 +215,18 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r } } - checksumType := hash.NewChecksumType(r.Header.Get(xhttp.AmzChecksumAlgo)) + checksumType := hash.NewChecksumHeader(r.Header) if checksumType.Is(hash.ChecksumInvalid) { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequestParameter), r.URL) + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL) return } else if checksumType.IsSet() && !checksumType.Is(hash.ChecksumTrailing) { opts.WantChecksum = &hash.Checksum{Type: checksumType} } + if opts.WantChecksum != nil { + opts.WantChecksum.Type |= hash.ChecksumMultipart | hash.ChecksumIncludesMultipart + } + newMultipartUpload := objectAPI.NewMultipartUpload res, err := newMultipartUpload(ctx, bucket, object, opts) @@ -222,6 +238,9 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r response := generateInitiateMultipartUploadResponse(bucket, object, res.UploadID) if res.ChecksumAlgo != "" { w.Header().Set(xhttp.AmzChecksumAlgo, res.ChecksumAlgo) + if res.ChecksumType != "" { + w.Header().Set(xhttp.AmzChecksumType, res.ChecksumType) + } } encodedSuccessResponse := encodeResponse(response) @@ -507,8 +526,15 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt } copy(objectEncryptionKey[:], key) + var nonce [12]byte + tmp := sha256.Sum256(fmt.Append(nil, uploadID, partID)) + copy(nonce[:], tmp[:12]) + partEncryptionKey := objectEncryptionKey.DerivePartKey(uint32(partID)) - encReader, err := sio.EncryptReader(reader, sio.Config{Key: partEncryptionKey[:], CipherSuites: fips.DARECiphers()}) + encReader, err := sio.EncryptReader(reader, sio.Config{ + Key: partEncryptionKey[:], + Nonce: &nonce, + }) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return @@ -660,7 +686,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http } case authTypeStreamingUnsignedTrailer: // Initialize stream signature verifier. 
- reader, s3Error = newUnsignedV4ChunkedReader(r, true) + reader, s3Error = newUnsignedV4ChunkedReader(r, true, r.Header.Get(xhttp.Authorization) != "") if s3Error != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL) return @@ -671,7 +697,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http return } case authTypePresigned, authTypeSigned: - if s3Error = reqSignatureV4Verify(r, globalSite.Region, serviceS3); s3Error != ErrNone { + if s3Error = reqSignatureV4Verify(r, globalSite.Region(), serviceS3); s3Error != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL) return } @@ -757,9 +783,11 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http pReader := NewPutObjReader(hashReader) _, isEncrypted := crypto.IsEncrypted(mi.UserDefined) + _, replicationStatus := mi.UserDefined[xhttp.AmzBucketReplicationStatus] + _, sourceReplReq := r.Header[xhttp.MinIOSourceReplicationRequest] var objectEncryptionKey crypto.ObjectKey if isEncrypted { - if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(mi.UserDefined) { + if !crypto.SSEC.IsRequested(r.Header) && crypto.SSEC.IsEncrypted(mi.UserDefined) && !replicationStatus { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrSSEMultipartEncrypted), r.URL) return } @@ -779,55 +807,66 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http } } - // Calculating object encryption key - key, err = decryptObjectMeta(key, bucket, object, mi.UserDefined) - if err != nil { - writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) - return - } - copy(objectEncryptionKey[:], key) + if !sourceReplReq || !crypto.SSEC.IsEncrypted(mi.UserDefined) { + // Calculating object encryption key + key, err = decryptObjectMeta(key, bucket, object, mi.UserDefined) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) + return + } + copy(objectEncryptionKey[:], key) + + partEncryptionKey := objectEncryptionKey.DerivePartKey(uint32(partID)) + in := io.Reader(hashReader) + if size > encryptBufferThreshold { + // The encryption reads in blocks of 64KB. + // We add a buffer on bigger files to reduce the number of syscalls upstream. + in = bufio.NewReaderSize(hashReader, encryptBufferSize) + } - partEncryptionKey := objectEncryptionKey.DerivePartKey(uint32(partID)) - in := io.Reader(hashReader) - if size > encryptBufferThreshold { - // The encryption reads in blocks of 64KB. - // We add a buffer on bigger files to reduce the number of syscalls upstream. 
- in = bufio.NewReaderSize(hashReader, encryptBufferSize) - } - reader, err = sio.EncryptReader(in, sio.Config{Key: partEncryptionKey[:], CipherSuites: fips.DARECiphers()}) - if err != nil { - writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) - return - } - wantSize := int64(-1) - if size >= 0 { - info := ObjectInfo{Size: size} - wantSize = info.EncryptedSize() - } - // do not try to verify encrypted content - hashReader, err = hash.NewReader(ctx, etag.Wrap(reader, hashReader), wantSize, "", "", actualSize) - if err != nil { - writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) - return - } - if err := hashReader.AddChecksum(r, true); err != nil { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL) - return - } + var nonce [12]byte + tmp := sha256.Sum256(fmt.Append(nil, uploadID, partID)) + copy(nonce[:], tmp[:12]) - pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey) - if err != nil { - writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) - return - } + reader, err = sio.EncryptReader(in, sio.Config{ + Key: partEncryptionKey[:], + Nonce: &nonce, + }) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) + return + } + wantSize := int64(-1) + if size >= 0 { + info := ObjectInfo{Size: size} + wantSize = info.EncryptedSize() + } + // do not try to verify encrypted content + hashReader, err = hash.NewReader(ctx, etag.Wrap(reader, hashReader), wantSize, "", "", actualSize) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) + return + } + if err := hashReader.AddChecksum(r, true); err != nil { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL) + return + } + + pReader, err = pReader.WithEncryption(hashReader, &objectEncryptionKey) + if err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) + return + } - if idxCb != nil { - idxCb = compressionIndexEncrypter(objectEncryptionKey, idxCb) + if idxCb != nil { + idxCb = compressionIndexEncrypter(objectEncryptionKey, idxCb) + } + opts.EncryptFn = metadataEncrypter(objectEncryptionKey) } - opts.EncryptFn = metadataEncrypter(objectEncryptionKey) } opts.IndexCB = idxCb + opts.ReplicationRequest = sourceReplReq putObjectPart := objectAPI.PutObjectPart partInfo, err := putObjectPart(ctx, bucket, object, uploadID, partID, pReader, opts) @@ -978,6 +1017,21 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite multipartETag := etag.Multipart(completeETags...) 
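	// etag.Multipart above follows the usual S3 multipart convention: an MD5 computed
	// over the concatenated binary part MD5s, suffixed with "-<part count>" (so the
	// stored value looks like "<hex>-3" for a three-part upload). This is a rough
	// description of the convention; the exact behavior lives in internal/etag.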
opts.UserDefined["etag"] = multipartETag.String() + if r.Header.Get(xhttp.IfMatch) != "" { + opts.HasIfMatch = true + } + if opts.PreserveETag != "" || + r.Header.Get(xhttp.IfMatch) != "" || + r.Header.Get(xhttp.IfNoneMatch) != "" { + opts.CheckPrecondFn = func(oi ObjectInfo) bool { + if _, err := DecryptObjectInfo(&oi, r); err != nil { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) + return true + } + return checkPreconditionsPUT(ctx, w, r, oi, opts) + } + } + objInfo, err := completeMultiPartUpload(ctx, bucket, object, uploadID, complMultipartUpload.Parts, opts) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) @@ -997,19 +1051,19 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite } } - setPutObjHeaders(w, objInfo, false) + setPutObjHeaders(w, objInfo, false, r.Header) if dsc := mustReplicate(ctx, bucket, object, objInfo.getMustReplicateOptions(replication.ObjectReplicationType, opts)); dsc.ReplicateAny() { scheduleReplication(ctx, objInfo, objectAPI, dsc, replication.ObjectReplicationType) } if _, ok := r.Header[xhttp.MinIOSourceReplicationRequest]; ok { actualSize, _ := objInfo.GetActualSize() - defer globalReplicationStats.UpdateReplicaStat(bucket, actualSize) + defer globalReplicationStats.Load().UpdateReplicaStat(bucket, actualSize) } // Get object location. location := getObjectLocation(r, globalDomainNames, bucket, object) // Generate complete multipart response. - response := generateCompleteMultpartUploadResponse(bucket, object, location, objInfo) + response := generateCompleteMultipartUploadResponse(bucket, object, location, objInfo, r.Header) encodedSuccessResponse := encodeResponse(response) // Write success response. @@ -1027,22 +1081,6 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite } sendEvent(evt) - asize, err := objInfo.GetActualSize() - if err != nil { - asize = objInfo.Size - } - - defer globalCacheConfig.Set(&cache.ObjectInfo{ - Key: objInfo.Name, - Bucket: objInfo.Bucket, - ETag: objInfo.ETag, - ModTime: objInfo.ModTime, - Expires: objInfo.ExpiresStr(), - CacheControl: objInfo.CacheControl, - Size: asize, - Metadata: cleanReservedKeys(objInfo.UserDefined), - }) - if objInfo.NumVersions > int(scannerExcessObjectVersions.Load()) { evt.EventName = event.ObjectManyVersions sendEvent(evt) @@ -1175,6 +1213,10 @@ func (api objectAPIHandlers) ListObjectPartsHandler(w http.ResponseWriter, r *ht listPartsInfo.Parts[i].ETag = tryDecryptETag(objectEncryptionKey, p.ETag, kind == crypto.S3) listPartsInfo.Parts[i].Size = p.ActualSize } + } else if _, ok := listPartsInfo.UserDefined[ReservedMetadataPrefix+"compression"]; ok { + for i, p := range listPartsInfo.Parts { + listPartsInfo.Parts[i].Size = p.ActualSize + } } response := generateListPartsResponse(listPartsInfo, encodingType) diff --git a/cmd/object_api_suite_test.go b/cmd/object_api_suite_test.go index d184ebec0b894..3377ad208d2f5 100644 --- a/cmd/object_api_suite_test.go +++ b/cmd/object_api_suite_test.go @@ -20,10 +20,13 @@ package cmd import ( "bytes" "context" + "fmt" "io" "math/rand" "strconv" + "strings" "testing" + "time" "github.com/dustin/go-humanize" "github.com/minio/minio/internal/kms" @@ -188,7 +191,7 @@ func testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH if err != nil { t.Fatalf("%s: %s", instanceType, err) } - for i := 0; i < 10; i++ { + for i := range 10 { randomPerm := rand.Perm(100) randomString := "" for _, num := range randomPerm { @@ -228,7 +231,6 @@ func 
testMultipleObjectCreation(obj ObjectLayer, instanceType string, t TestErrH if objInfo.Size != int64(len(value)) { t.Errorf("%s: Size mismatch of the GetObject data.", instanceType) } - } } @@ -254,7 +256,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) { uploadContent := "The specified multipart upload does not exist. The upload ID might be invalid, or the multipart upload might have been aborted or completed." var opts ObjectOptions // check before paging occurs. - for i := 0; i < 5; i++ { + for i := range 5 { key := "obj" + strconv.Itoa(i) _, err = obj.PutObject(context.Background(), "bucket", key, mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), opts) if err != nil { @@ -384,7 +386,6 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) { // check results with Marker. { - result, err = obj.ListObjects(context.Background(), "bucket", "", "newPrefix", "", 3) if err != nil { t.Fatalf("%s: %s", instanceType, err) @@ -434,6 +435,46 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) { t.Errorf("%s: Expected the object name to be `%s`, but instead found `%s`", instanceType, "newPrefix2", result.Objects[0].Name) } } + + // check paging works. + ag := []string{"a", "b", "c", "d", "e", "f", "g"} + checkObjCount := make(map[string]int) + for i := range 7 { + dirName := strings.Repeat(ag[i], 3) + key := fmt.Sprintf("testPrefix/%s/obj%s", dirName, dirName) + checkObjCount[key]++ + _, err = obj.PutObject(context.Background(), "bucket", key, mustGetPutObjReader(t, bytes.NewBufferString(uploadContent), int64(len(uploadContent)), "", ""), opts) + if err != nil { + t.Fatalf("%s: %s", instanceType, err) + } + } + { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + token := "" + for ctx.Err() == nil { + result, err := obj.ListObjectsV2(ctx, "bucket", "testPrefix", token, "", 2, false, "") + if err != nil { + t.Fatalf("%s: %s", instanceType, err) + } + token = result.NextContinuationToken + if len(result.Objects) == 0 { + break + } + for _, obj := range result.Objects { + checkObjCount[obj.Name]-- + } + if token == "" { + break + } + } + for key, value := range checkObjCount { + if value != 0 { + t.Errorf("%s: Expected value of objects to be %d, instead found to be %d", instanceType, 0, value) + } + delete(checkObjCount, key) + } + } } // Wrapper for calling testObjectOverwriteWorks for both Erasure and FS. @@ -511,21 +552,21 @@ func testBucketRecreateFails(obj ObjectLayer, instanceType string, t TestErrHand } } -func enableCompression(t *testing.T, encrypt bool) { +func enableCompression(t *testing.T, encrypt bool, mimeTypes []string, extensions []string) { // Enable compression and exec... 
globalCompressConfigMu.Lock() globalCompressConfig.Enabled = true - globalCompressConfig.MimeTypes = nil - globalCompressConfig.Extensions = nil + globalCompressConfig.MimeTypes = mimeTypes + globalCompressConfig.Extensions = extensions globalCompressConfig.AllowEncrypted = encrypt globalCompressConfigMu.Unlock() if encrypt { globalAutoEncryption = encrypt - var err error - GlobalKMS, err = kms.Parse("my-minio-key:5lF+0pJM0OWwlQrvK2S/I7W9mO4a6rJJI7wzj7v09cw=") + KMS, err := kms.ParseSecretKey("my-minio-key:5lF+0pJM0OWwlQrvK2S/I7W9mO4a6rJJI7wzj7v09cw=") if err != nil { t.Fatal(err) } + GlobalKMS = KMS } } @@ -536,11 +577,11 @@ func enableEncryption(t *testing.T) { globalCompressConfigMu.Unlock() globalAutoEncryption = true - var err error - GlobalKMS, err = kms.Parse("my-minio-key:5lF+0pJM0OWwlQrvK2S/I7W9mO4a6rJJI7wzj7v09cw=") + KMS, err := kms.ParseSecretKey("my-minio-key:5lF+0pJM0OWwlQrvK2S/I7W9mO4a6rJJI7wzj7v09cw=") if err != nil { t.Fatal(err) } + GlobalKMS = KMS } func resetCompressEncryption() { @@ -553,40 +594,66 @@ func resetCompressEncryption() { GlobalKMS = nil } -func execExtended(t *testing.T, fn func(t *testing.T)) { +func execExtended(t *testing.T, fn func(t *testing.T, init func(), bucketOptions MakeBucketOptions)) { // Exec with default settings... resetCompressEncryption() t.Run("default", func(t *testing.T) { - fn(t) + fn(t, nil, MakeBucketOptions{}) + }) + t.Run("default+versioned", func(t *testing.T) { + fn(t, nil, MakeBucketOptions{VersioningEnabled: true}) }) - - if testing.Short() { - return - } t.Run("compressed", func(t *testing.T) { - resetCompressEncryption() - enableCompression(t, false) - fn(t) + fn(t, func() { + resetCompressEncryption() + enableCompression(t, false, []string{"*"}, []string{"*"}) + }, MakeBucketOptions{}) + }) + t.Run("compressed+versioned", func(t *testing.T) { + fn(t, func() { + resetCompressEncryption() + enableCompression(t, false, []string{"*"}, []string{"*"}) + }, MakeBucketOptions{ + VersioningEnabled: true, + }) }) t.Run("encrypted", func(t *testing.T) { - resetCompressEncryption() - enableEncryption(t) - fn(t) + fn(t, func() { + resetCompressEncryption() + enableEncryption(t) + }, MakeBucketOptions{}) + }) + t.Run("encrypted+versioned", func(t *testing.T) { + fn(t, func() { + resetCompressEncryption() + enableEncryption(t) + }, MakeBucketOptions{ + VersioningEnabled: true, + }) }) t.Run("compressed+encrypted", func(t *testing.T) { - resetCompressEncryption() - enableCompression(t, true) - fn(t) + fn(t, func() { + resetCompressEncryption() + enableCompression(t, true, []string{"*"}, []string{"*"}) + }, MakeBucketOptions{}) + }) + t.Run("compressed+encrypted+versioned", func(t *testing.T) { + fn(t, func() { + resetCompressEncryption() + enableCompression(t, true, []string{"*"}, []string{"*"}) + }, MakeBucketOptions{ + VersioningEnabled: true, + }) }) } // ExecExtendedObjectLayerTest will execute the tests with combinations of encrypted & compressed. // This can be used to test functionality when reading and writing data. 
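// After this change the matrix run by execExtended covers the default, compressed,
// encrypted and compressed+encrypted configurations, each with and without bucket
// versioning. A caller simply hands over its object-layer test; as a sketch (using
// testObjectOverwriteWorks, one of the existing objTestType funcs, as the example):
//
//	func TestObjectOverwriteWorks(t *testing.T) {
//		ExecExtendedObjectLayerTest(t, testObjectOverwriteWorks)
//	}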
func ExecExtendedObjectLayerTest(t *testing.T, objTest objTestType) { - execExtended(t, func(t *testing.T) { + execExtended(t, func(t *testing.T, init func(), bucketOptions MakeBucketOptions) { ExecObjectLayerTest(t, objTest) }) } diff --git a/cmd/os-instrumented.go b/cmd/os-instrumented.go index d9b92d4a49f5d..9a369f8661d11 100644 --- a/cmd/os-instrumented.go +++ b/cmd/os-instrumented.go @@ -213,7 +213,7 @@ func (o *osMetrics) report() madmin.OSMetrics { var m madmin.OSMetrics m.CollectedAt = time.Now() m.LifeTimeOps = make(map[string]uint64, osMetricLast) - for i := osMetric(0); i < osMetricLast; i++ { + for i := range osMetricLast { if n := atomic.LoadUint64(&o.operations[i]); n > 0 { m.LifeTimeOps[i.String()] = n } @@ -223,7 +223,7 @@ func (o *osMetrics) report() madmin.OSMetrics { } m.LastMinute.Operations = make(map[string]madmin.TimedAction, osMetricLast) - for i := osMetric(0); i < osMetricLast; i++ { + for i := range osMetricLast { lm := o.latency[i].total() if lm.N > 0 { m.LastMinute.Operations[i.String()] = lm.asTimedAction() diff --git a/cmd/os-readdir_test.go b/cmd/os-readdir_test.go index 9fac2b7d0de06..5649b5391012f 100644 --- a/cmd/os-readdir_test.go +++ b/cmd/os-readdir_test.go @@ -77,7 +77,7 @@ func setupTestReadDirEmpty(t *testing.T) (testResults []result) { func setupTestReadDirFiles(t *testing.T) (testResults []result) { dir := t.TempDir() entries := []string{} - for i := 0; i < 10; i++ { + for i := range 10 { name := fmt.Sprintf("file-%d", i) if err := os.WriteFile(filepath.Join(dir, name), []byte{}, os.ModePerm); err != nil { // For cleanup, its required to add these entries into test results. @@ -102,7 +102,7 @@ func setupTestReadDirGeneric(t *testing.T) (testResults []result) { t.Fatalf("Unable to create prefix directory \"mydir\", %s", err) } entries := []string{"mydir/"} - for i := 0; i < 10; i++ { + for i := range 10 { name := fmt.Sprintf("file-%d", i) if err := os.WriteFile(filepath.Join(dir, "mydir", name), []byte{}, os.ModePerm); err != nil { // For cleanup, its required to add these entries into test results. @@ -126,7 +126,7 @@ func setupTestReadDirSymlink(t *testing.T) (testResults []result) { } dir := t.TempDir() entries := []string{} - for i := 0; i < 10; i++ { + for i := range 10 { name1 := fmt.Sprintf("file-%d", i) name2 := fmt.Sprintf("file-%d", i+10) if err := os.WriteFile(filepath.Join(dir, name1), []byte{}, os.ModePerm); err != nil { diff --git a/cmd/os-reliable.go b/cmd/os-reliable.go index 9ab77939a49aa..3561cbd332eeb 100644 --- a/cmd/os-reliable.go +++ b/cmd/os-reliable.go @@ -108,6 +108,16 @@ func reliableMkdirAll(dirPath string, mode os.FileMode, baseDir string) (err err // Retry only for the first retryable error. if osIsNotExist(err) && i == 0 { i++ + // Determine if os.NotExist error is because of + // baseDir's parent being present, retry it once such + // that the MkdirAll is retried once for the parent + // of dirPath. + // Because it is worth a retry to skip a different + // baseDir which is slightly higher up the depth. 
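	// Illustrative example (paths are hypothetical): for dirPath
	// "/disk1/bucket/prefix/obj" with baseDir "/disk1/bucket/prefix", the retry
	// runs with baseDir one level up ("/disk1/bucket"), so the retried MkdirAll
	// also covers "/disk1/bucket/prefix" instead of assuming it already exists.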
+ nbaseDir := path.Dir(baseDir) + if baseDir != "" && nbaseDir != "" && nbaseDir != SlashSeparator { + baseDir = nbaseDir + } continue } } diff --git a/cmd/os_unix.go b/cmd/os_unix.go index 55d3631976442..9728493f644c5 100644 --- a/cmd/os_unix.go +++ b/cmd/os_unix.go @@ -25,10 +25,10 @@ import ( "fmt" "os" "strings" - "sync" "syscall" "unsafe" + "github.com/minio/minio/internal/bpool" "golang.org/x/sys/unix" ) @@ -106,15 +106,15 @@ const blockSize = 8 << 10 // 8192 // By default at least 128 entries in single getdents call (1MiB buffer) var ( - direntPool = sync.Pool{ - New: func() interface{} { + direntPool = bpool.Pool[*[]byte]{ + New: func() *[]byte { buf := make([]byte, blockSize*128) return &buf }, } - direntNamePool = sync.Pool{ - New: func() interface{} { + direntNamePool = bpool.Pool[*[]byte]{ + New: func() *[]byte { buf := make([]byte, blockSize) return &buf }, @@ -136,7 +136,7 @@ func parseDirEnt(buf []byte) (consumed int, name []byte, typ os.FileMode, err er } consumed = int(dirent.Reclen) if direntInode(dirent) == 0 { // File absent in directory. - return + return consumed, name, typ, err } switch dirent.Type { case syscall.DT_REG: @@ -183,11 +183,10 @@ func readDirFn(dirPath string, fn func(name string, typ os.FileMode) error) erro } return osErrToFileErr(err) } - } defer syscall.Close(fd) - bufp := direntPool.Get().(*[]byte) + bufp := direntPool.Get() defer direntPool.Put(bufp) buf := *bufp @@ -273,11 +272,11 @@ func readDirWithOpts(dirPath string, opts readDirOpts) (entries []string, err er } defer syscall.Close(fd) - bufp := direntPool.Get().(*[]byte) + bufp := direntPool.Get() defer direntPool.Put(bufp) buf := *bufp - nameTmp := direntNamePool.Get().(*[]byte) + nameTmp := direntNamePool.Get() defer direntNamePool.Put(nameTmp) tmp := *nameTmp @@ -350,7 +349,7 @@ func readDirWithOpts(dirPath string, opts readDirOpts) (entries []string, err er entries = append(entries, nameStr) } - return + return entries, err } func globalSync() { diff --git a/cmd/os_windows.go b/cmd/os_windows.go index 758f3856aa1fb..bf03f2b906dff 100644 --- a/cmd/os_windows.go +++ b/cmd/os_windows.go @@ -60,20 +60,19 @@ func readDirFn(dirPath string, filter func(name string, typ os.FileMode) error) if err != nil { if err == syscall.ERROR_NO_MORE_FILES { break - } else { - if isSysErrPathNotFound(err) { - return nil - } - err = osErrToFileErr(&os.PathError{ - Op: "FindNextFile", - Path: dirPath, - Err: err, - }) - if err == errFileNotFound { - return nil - } - return err } + if isSysErrPathNotFound(err) { + return nil + } + err = osErrToFileErr(&os.PathError{ + Op: "FindNextFile", + Path: dirPath, + Err: err, + }) + if err == errFileNotFound { + return nil + } + return err } name := syscall.UTF16ToString(data.FileName[0:]) if name == "" || name == "." || name == ".." 
{ // Useless names @@ -136,13 +135,12 @@ func readDirWithOpts(dirPath string, opts readDirOpts) (entries []string, err er if err != nil { if err == syscall.ERROR_NO_MORE_FILES { break - } else { - return nil, osErrToFileErr(&os.PathError{ - Op: "FindNextFile", - Path: dirPath, - Err: err, - }) } + return nil, osErrToFileErr(&os.PathError{ + Op: "FindNextFile", + Path: dirPath, + Err: err, + }) } name := syscall.UTF16ToString(data.FileName[0:]) @@ -175,7 +173,6 @@ func readDirWithOpts(dirPath string, opts readDirOpts) (entries []string, err er count-- entries = append(entries, name) - } return entries, nil diff --git a/cmd/peer-rest-client.go b/cmd/peer-rest-client.go index 16c22935ad8ac..eb65a40a02b2b 100644 --- a/cmd/peer-rest-client.go +++ b/cmd/peer-rest-client.go @@ -36,7 +36,7 @@ import ( xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/rest" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" ) // client to talk to peer Nodes. @@ -83,7 +83,7 @@ func newPeerRESTClient(peer *xnet.Host, gridHost string) *peerRESTClient { // Lazy initialization of grid connection. // When we create this peer client, the grid connection is likely not yet initialized. if gridHost == "" { - logger.LogOnceIf(context.Background(), fmt.Errorf("gridHost is empty for peer %s", peer.String()), peer.String()+":gridHost") + bugLogIf(context.Background(), fmt.Errorf("gridHost is empty for peer %s", peer.String()), peer.String()+":gridHost") return nil } gc := gridConn.Load() @@ -96,7 +96,7 @@ func newPeerRESTClient(peer *xnet.Host, gridHost string) *peerRESTClient { } gc = gm.Connection(gridHost) if gc == nil { - logger.LogOnceIf(context.Background(), fmt.Errorf("gridHost %q not found for peer %s", gridHost, peer.String()), peer.String()+":gridHost") + bugLogIf(context.Background(), fmt.Errorf("gridHost %q not found for peer %s", gridHost, peer.String()), peer.String()+":gridHost") return nil } gridConn.Store(gc) @@ -105,18 +105,11 @@ func newPeerRESTClient(peer *xnet.Host, gridHost string) *peerRESTClient { } } -// Wrapper to restClient.Call to handle network errors, in case of network error the connection is marked disconnected -// permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints() -// after verifying format.json -func (client *peerRESTClient) call(method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) { - return client.callWithContext(GlobalContext, method, values, body, length) -} - // Wrapper to restClient.Call to handle network errors, in case of network error the connection is marked disconnected // permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints() // after verifying format.json func (client *peerRESTClient) callWithContext(ctx context.Context, method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) { - if client == nil || !client.IsOnline() { + if client == nil { return nil, errPeerNotReachable } @@ -129,6 +122,10 @@ func (client *peerRESTClient) callWithContext(ctx context.Context, method string return respBody, nil } + if xnet.IsNetworkOrHostDown(err, true) { + return nil, errPeerNotReachable + } + return nil, err } @@ -139,7 +136,11 @@ func (client *peerRESTClient) String() string { // IsOnline returns true if the peer client is online. 
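// With the grid transport wired in, "online" now means the legacy REST client reports
// a live connection or the grid connection state is grid.StateConnected; a nil grid
// connection is treated as offline. In short (mirroring the code below):
//
//	online := restClient.IsOnline() || gridConn.State() == grid.StateConnected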
func (client *peerRESTClient) IsOnline() bool { - return client.restClient.IsOnline() + conn := client.gridConn() + if conn == nil { + return false + } + return client.restClient.IsOnline() || conn.State() == grid.StateConnected } // Close - marks the client as closed. @@ -149,8 +150,8 @@ func (client *peerRESTClient) Close() error { } // GetLocks - fetch older locks for a remote node. -func (client *peerRESTClient) GetLocks() (lockMap map[string][]lockRequesterInfo, err error) { - resp, err := getLocksRPC.Call(context.Background(), client.gridConn(), grid.NewMSS()) +func (client *peerRESTClient) GetLocks(ctx context.Context) (lockMap map[string][]lockRequesterInfo, err error) { + resp, err := getLocksRPC.Call(ctx, client.gridConn(), grid.NewMSS()) if err != nil || resp == nil { return nil, err } @@ -158,16 +159,16 @@ func (client *peerRESTClient) GetLocks() (lockMap map[string][]lockRequesterInfo } // LocalStorageInfo - fetch server information for a remote node. -func (client *peerRESTClient) LocalStorageInfo(metrics bool) (info StorageInfo, err error) { - resp, err := localStorageInfoRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{ +func (client *peerRESTClient) LocalStorageInfo(ctx context.Context, metrics bool) (info StorageInfo, err error) { + resp, err := localStorageInfoRPC.Call(ctx, client.gridConn(), grid.NewMSSWith(map[string]string{ peerRESTMetrics: strconv.FormatBool(metrics), })) return resp.ValueOrZero(), err } // ServerInfo - fetch server information for a remote node. -func (client *peerRESTClient) ServerInfo(metrics bool) (info madmin.ServerProperties, err error) { - resp, err := serverInfoRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{peerRESTMetrics: strconv.FormatBool(metrics)})) +func (client *peerRESTClient) ServerInfo(ctx context.Context, metrics bool) (info madmin.ServerProperties, err error) { + resp, err := serverInfoRPC.Call(ctx, client.gridConn(), grid.NewMSSWith(map[string]string{peerRESTMetrics: strconv.FormatBool(metrics)})) return resp.ValueOrZero(), err } @@ -249,10 +250,10 @@ func (client *peerRESTClient) GetProcInfo(ctx context.Context) (info madmin.Proc } // StartProfiling - Issues profiling command on the peer node. -func (client *peerRESTClient) StartProfiling(profiler string) error { +func (client *peerRESTClient) StartProfiling(ctx context.Context, profiler string) error { values := make(url.Values) values.Set(peerRESTProfiler, profiler) - respBody, err := client.call(peerRESTMethodStartProfiling, values, nil, -1) + respBody, err := client.callWithContext(ctx, peerRESTMethodStartProfiling, values, nil, -1) if err != nil { return err } @@ -261,10 +262,10 @@ func (client *peerRESTClient) StartProfiling(profiler string) error { } // DownloadProfileData - download profiled data from a remote node. 
-func (client *peerRESTClient) DownloadProfileData() (data map[string][]byte, err error) { - respBody, err := client.call(peerRESTMethodDownloadProfilingData, nil, nil, -1) +func (client *peerRESTClient) DownloadProfileData(ctx context.Context) (data map[string][]byte, err error) { + respBody, err := client.callWithContext(ctx, peerRESTMethodDownloadProfilingData, nil, nil, -1) if err != nil { - return + return data, err } defer xhttp.DrainBody(respBody) err = gob.NewDecoder(respBody).Decode(&data) @@ -272,8 +273,8 @@ func (client *peerRESTClient) DownloadProfileData() (data map[string][]byte, err } // GetBucketStats - load bucket statistics -func (client *peerRESTClient) GetBucketStats(bucket string) (BucketStats, error) { - resp, err := getBucketStatsRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{ +func (client *peerRESTClient) GetBucketStats(ctx context.Context, bucket string) (BucketStats, error) { + resp, err := getBucketStatsRPC.Call(ctx, client.gridConn(), grid.NewMSSWith(map[string]string{ peerRESTBucket: bucket, })) if err != nil || resp == nil { @@ -283,8 +284,8 @@ func (client *peerRESTClient) GetBucketStats(bucket string) (BucketStats, error) } // GetSRMetrics loads site replication metrics, optionally for a specific bucket -func (client *peerRESTClient) GetSRMetrics() (SRMetricsSummary, error) { - resp, err := getSRMetricsRPC.Call(context.Background(), client.gridConn(), grid.NewMSS()) +func (client *peerRESTClient) GetSRMetrics(ctx context.Context) (SRMetricsSummary, error) { + resp, err := getSRMetricsRPC.Call(ctx, client.gridConn(), grid.NewMSS()) if err != nil || resp == nil { return SRMetricsSummary{}, err } @@ -292,8 +293,8 @@ func (client *peerRESTClient) GetSRMetrics() (SRMetricsSummary, error) { } // GetAllBucketStats - load replication stats for all buckets -func (client *peerRESTClient) GetAllBucketStats() (BucketStatsMap, error) { - resp, err := getAllBucketStatsRPC.Call(context.Background(), client.gridConn(), grid.NewMSS()) +func (client *peerRESTClient) GetAllBucketStats(ctx context.Context) (BucketStatsMap, error) { + resp, err := getAllBucketStatsRPC.Call(ctx, client.gridConn(), grid.NewMSS()) if err != nil || resp == nil { return BucketStatsMap{}, err } @@ -301,40 +302,40 @@ func (client *peerRESTClient) GetAllBucketStats() (BucketStatsMap, error) { } // LoadBucketMetadata - load bucket metadata -func (client *peerRESTClient) LoadBucketMetadata(bucket string) error { - _, err := loadBucketMetadataRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{ +func (client *peerRESTClient) LoadBucketMetadata(ctx context.Context, bucket string) error { + _, err := loadBucketMetadataRPC.Call(ctx, client.gridConn(), grid.NewMSSWith(map[string]string{ peerRESTBucket: bucket, })) return err } // DeleteBucketMetadata - Delete bucket metadata -func (client *peerRESTClient) DeleteBucketMetadata(bucket string) error { - _, err := deleteBucketMetadataRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{ +func (client *peerRESTClient) DeleteBucketMetadata(ctx context.Context, bucket string) error { + _, err := deleteBucketMetadataRPC.Call(ctx, client.gridConn(), grid.NewMSSWith(map[string]string{ peerRESTBucket: bucket, })) return err } // DeletePolicy - delete a specific canned policy. 
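// Like the other peer RPC wrappers converted in this file, the methods below now take an
// explicit context.Context instead of always using context.Background(), so callers can
// cancel or bound the remote call. Usage sketch:
//
//	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
//	defer cancel()
//	err := client.DeletePolicy(ctx, policyName)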
-func (client *peerRESTClient) DeletePolicy(policyName string) (err error) { - _, err = deletePolicyRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{ +func (client *peerRESTClient) DeletePolicy(ctx context.Context, policyName string) (err error) { + _, err = deletePolicyRPC.Call(ctx, client.gridConn(), grid.NewMSSWith(map[string]string{ peerRESTPolicy: policyName, })) return err } // LoadPolicy - reload a specific canned policy. -func (client *peerRESTClient) LoadPolicy(policyName string) (err error) { - _, err = loadPolicyRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{ +func (client *peerRESTClient) LoadPolicy(ctx context.Context, policyName string) (err error) { + _, err = loadPolicyRPC.Call(ctx, client.gridConn(), grid.NewMSSWith(map[string]string{ peerRESTPolicy: policyName, })) return err } // LoadPolicyMapping - reload a specific policy mapping -func (client *peerRESTClient) LoadPolicyMapping(userOrGroup string, userType IAMUserType, isGroup bool) error { - _, err := loadPolicyMappingRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{ +func (client *peerRESTClient) LoadPolicyMapping(ctx context.Context, userOrGroup string, userType IAMUserType, isGroup bool) error { + _, err := loadPolicyMappingRPC.Call(ctx, client.gridConn(), grid.NewMSSWith(map[string]string{ peerRESTUserOrGroup: userOrGroup, peerRESTUserType: strconv.Itoa(int(userType)), peerRESTIsGroup: strconv.FormatBool(isGroup), @@ -343,24 +344,24 @@ func (client *peerRESTClient) LoadPolicyMapping(userOrGroup string, userType IAM } // DeleteUser - delete a specific user. -func (client *peerRESTClient) DeleteUser(accessKey string) (err error) { - _, err = deleteUserRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{ +func (client *peerRESTClient) DeleteUser(ctx context.Context, accessKey string) (err error) { + _, err = deleteUserRPC.Call(ctx, client.gridConn(), grid.NewMSSWith(map[string]string{ peerRESTUser: accessKey, })) return err } // DeleteServiceAccount - delete a specific service account. -func (client *peerRESTClient) DeleteServiceAccount(accessKey string) (err error) { - _, err = deleteSvcActRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{ +func (client *peerRESTClient) DeleteServiceAccount(ctx context.Context, accessKey string) (err error) { + _, err = deleteSvcActRPC.Call(ctx, client.gridConn(), grid.NewMSSWith(map[string]string{ peerRESTUser: accessKey, })) return err } // LoadUser - reload a specific user. -func (client *peerRESTClient) LoadUser(accessKey string, temp bool) (err error) { - _, err = loadUserRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{ +func (client *peerRESTClient) LoadUser(ctx context.Context, accessKey string, temp bool) (err error) { + _, err = loadUserRPC.Call(ctx, client.gridConn(), grid.NewMSSWith(map[string]string{ peerRESTUser: accessKey, peerRESTUserTemp: strconv.FormatBool(temp), })) @@ -368,16 +369,16 @@ func (client *peerRESTClient) LoadUser(accessKey string, temp bool) (err error) } // LoadServiceAccount - reload a specific service account. 
-func (client *peerRESTClient) LoadServiceAccount(accessKey string) (err error) { - _, err = loadSvcActRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{ +func (client *peerRESTClient) LoadServiceAccount(ctx context.Context, accessKey string) (err error) { + _, err = loadSvcActRPC.Call(ctx, client.gridConn(), grid.NewMSSWith(map[string]string{ peerRESTUser: accessKey, })) return err } // LoadGroup - send load group command to peers. -func (client *peerRESTClient) LoadGroup(group string) error { - _, err := loadGroupRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{ +func (client *peerRESTClient) LoadGroup(ctx context.Context, group string) error { + _, err := loadGroupRPC.Call(ctx, client.gridConn(), grid.NewMSSWith(map[string]string{ peerRESTGroup: group, })) return err @@ -419,17 +420,20 @@ func (client *peerRESTClient) CommitBinary(ctx context.Context) error { } // SignalService - sends signal to peer nodes. -func (client *peerRESTClient) SignalService(sig serviceSignal, subSys string, dryRun bool) error { +func (client *peerRESTClient) SignalService(sig serviceSignal, subSys string, dryRun bool, execAt *time.Time) error { values := grid.NewMSS() values.Set(peerRESTSignal, strconv.Itoa(int(sig))) values.Set(peerRESTDryRun, strconv.FormatBool(dryRun)) values.Set(peerRESTSubSys, subSys) + if execAt != nil { + values.Set(peerRESTExecAt, execAt.Format(time.RFC3339Nano)) + } _, err := signalServiceRPC.Call(context.Background(), client.gridConn(), values) return err } -func (client *peerRESTClient) BackgroundHealStatus() (madmin.BgHealState, error) { - resp, err := getBackgroundHealStatusRPC.Call(context.Background(), client.gridConn(), grid.NewMSS()) +func (client *peerRESTClient) BackgroundHealStatus(ctx context.Context) (madmin.BgHealState, error) { + resp, err := getBackgroundHealStatusRPC.Call(ctx, client.gridConn(), grid.NewMSS()) return resp.ValueOrZero(), err } @@ -463,6 +467,17 @@ func (client *peerRESTClient) ReloadPoolMeta(ctx context.Context) error { return err } +func (client *peerRESTClient) DeleteUploadID(ctx context.Context, uploadID string) error { + conn := client.gridConn() + if conn == nil { + return nil + } + _, err := cleanupUploadIDCacheMetaRPC.Call(ctx, conn, grid.NewMSSWith(map[string]string{ + peerRESTUploadID: uploadID, + })) + return err +} + func (client *peerRESTClient) StopRebalance(ctx context.Context) error { conn := client.gridConn() if conn == nil { @@ -500,7 +515,7 @@ func (client *peerRESTClient) doTrace(ctx context.Context, traceCh chan<- []byte payload, err := json.Marshal(traceOpts) if err != nil { - logger.LogIf(ctx, err) + bugLogIf(ctx, err) return } @@ -628,7 +643,7 @@ func newPeerRestClients(endpoints EndpointServerPools) (remote, all []*peerRESTC remote = append(remote, all[i]) } if len(all) != len(remote)+1 { - logger.LogIf(context.Background(), fmt.Errorf("WARNING: Expected number of all hosts (%v) to be remote +1 (%v)", len(all), len(remote))) + peersLogIf(context.Background(), fmt.Errorf("Expected number of all hosts (%v) to be remote +1 (%v)", len(all), len(remote)), logger.WarningKind) } return remote, all } @@ -641,13 +656,13 @@ func (client *peerRESTClient) MonitorBandwidth(ctx context.Context, buckets []st return getBandwidthRPC.Call(ctx, client.gridConn(), values) } -func (client *peerRESTClient) GetResourceMetrics(ctx context.Context) (<-chan Metric, error) { +func (client *peerRESTClient) GetResourceMetrics(ctx context.Context) (<-chan MetricV2, error) { resp, err := 
getResourceMetricsRPC.Call(ctx, client.gridConn(), grid.NewMSS()) if err != nil { return nil, err } - ch := make(chan Metric) - go func(ch chan<- Metric) { + ch := make(chan MetricV2) + go func(ch chan<- MetricV2) { defer close(ch) for _, m := range resp.Value() { if m == nil { @@ -663,12 +678,12 @@ func (client *peerRESTClient) GetResourceMetrics(ctx context.Context) (<-chan Me return ch, nil } -func (client *peerRESTClient) GetPeerMetrics(ctx context.Context) (<-chan Metric, error) { +func (client *peerRESTClient) GetPeerMetrics(ctx context.Context) (<-chan MetricV2, error) { resp, err := getPeerMetricsRPC.Call(ctx, client.gridConn(), grid.NewMSS()) if err != nil { return nil, err } - ch := make(chan Metric) + ch := make(chan MetricV2) go func() { defer close(ch) for _, m := range resp.Value() { @@ -685,12 +700,12 @@ func (client *peerRESTClient) GetPeerMetrics(ctx context.Context) (<-chan Metric return ch, nil } -func (client *peerRESTClient) GetPeerBucketMetrics(ctx context.Context) (<-chan Metric, error) { +func (client *peerRESTClient) GetPeerBucketMetrics(ctx context.Context) (<-chan MetricV2, error) { resp, err := getPeerBucketMetricsRPC.Call(ctx, client.gridConn(), grid.NewMSS()) if err != nil { return nil, err } - ch := make(chan Metric) + ch := make(chan MetricV2) go func() { defer close(ch) for _, m := range resp.Value() { @@ -715,6 +730,8 @@ func (client *peerRESTClient) SpeedTest(ctx context.Context, opts speedTestOpts) values.Set(peerRESTStorageClass, opts.storageClass) values.Set(peerRESTBucket, opts.bucketName) values.Set(peerRESTEnableSha256, strconv.FormatBool(opts.enableSha256)) + values.Set(peerRESTEnableMultipart, strconv.FormatBool(opts.enableMultipart)) + values.Set(peerRESTAccessKey, opts.creds.AccessKey) respBody, err := client.callWithContext(context.Background(), peerRESTMethodSpeedTest, values, nil, -1) if err != nil { diff --git a/cmd/peer-rest-common.go b/cmd/peer-rest-common.go index 26914adcd8d6a..b643d68a16c1d 100644 --- a/cmd/peer-rest-common.go +++ b/cmd/peer-rest-common.go @@ -17,8 +17,10 @@ package cmd +import "time" + const ( - peerRESTVersion = "v38" // Convert RPC calls + peerRESTVersion = "v39" // add more flags to speedtest API peerRESTVersionPrefix = SlashSeparator + peerRESTVersion peerRESTPrefix = minioReservedBucketPath + "/peer" peerRESTPath = peerRESTPrefix + peerRESTVersionPrefix @@ -38,35 +40,39 @@ const ( ) const ( - peerRESTBucket = "bucket" - peerRESTBuckets = "buckets" - peerRESTUser = "user" - peerRESTGroup = "group" - peerRESTUserTemp = "user-temp" - peerRESTPolicy = "policy" - peerRESTUserOrGroup = "user-or-group" - peerRESTUserType = "user-type" - peerRESTIsGroup = "is-group" - peerRESTSignal = "signal" - peerRESTSubSys = "sub-sys" - peerRESTProfiler = "profiler" - peerRESTSize = "size" - peerRESTConcurrent = "concurrent" - peerRESTDuration = "duration" - peerRESTStorageClass = "storage-class" - peerRESTEnableSha256 = "enableSha256" - peerRESTMetricsTypes = "types" - peerRESTDisk = "disk" - peerRESTHost = "host" - peerRESTJobID = "job-id" - peerRESTDepID = "depID" - peerRESTStartRebalance = "start-rebalance" - peerRESTMetrics = "metrics" - peerRESTDryRun = "dry-run" + peerRESTBucket = "bucket" + peerRESTBuckets = "buckets" + peerRESTUser = "user" + peerRESTGroup = "group" + peerRESTUserTemp = "user-temp" + peerRESTPolicy = "policy" + peerRESTUserOrGroup = "user-or-group" + peerRESTUserType = "user-type" + peerRESTIsGroup = "is-group" + peerRESTSignal = "signal" + peerRESTSubSys = "sub-sys" + peerRESTProfiler = "profiler" + 
peerRESTSize = "size" + peerRESTConcurrent = "concurrent" + peerRESTDuration = "duration" + peerRESTStorageClass = "storage-class" + peerRESTEnableSha256 = "enableSha256" + peerRESTEnableMultipart = "enableMultipart" + peerRESTAccessKey = "access-key" + peerRESTMetricsTypes = "types" + peerRESTDisk = "disk" + peerRESTHost = "host" + peerRESTJobID = "job-id" + peerRESTDepID = "depID" + peerRESTStartRebalance = "start-rebalance" + peerRESTMetrics = "metrics" + peerRESTDryRun = "dry-run" + peerRESTUploadID = "up-id" peerRESTURL = "url" peerRESTSha256Sum = "sha256sum" peerRESTReleaseInfo = "releaseinfo" + peerRESTExecAt = "exec-at" peerRESTListenBucket = "bucket" peerRESTListenPrefix = "prefix" @@ -74,3 +80,5 @@ const ( peerRESTListenEvents = "events" peerRESTLogMask = "log-mask" ) + +const restartUpdateDelay = 250 * time.Millisecond diff --git a/cmd/peer-rest-server.go b/cmd/peer-rest-server.go index 98a3fe6833cd3..9335b7613e548 100644 --- a/cmd/peer-rest-server.go +++ b/cmd/peer-rest-server.go @@ -37,15 +37,14 @@ import ( "github.com/dustin/go-humanize" "github.com/klauspost/compress/zstd" "github.com/minio/madmin-go/v3" + "github.com/minio/madmin-go/v3/logger/log" "github.com/minio/minio/internal/bucket/bandwidth" - b "github.com/minio/minio/internal/bucket/bandwidth" "github.com/minio/minio/internal/event" "github.com/minio/minio/internal/grid" xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/pubsub" "github.com/minio/mux" - "github.com/minio/pkg/v2/logger/message/log" ) // To abstract a node over network. @@ -54,8 +53,9 @@ type peerRESTServer struct{} var ( // Types & Wrappers aoBucketInfo = grid.NewArrayOf[*BucketInfo](func() *BucketInfo { return &BucketInfo{} }) - aoMetricsGroup = grid.NewArrayOf[*Metric](func() *Metric { return &Metric{} }) + aoMetricsGroup = grid.NewArrayOf[*MetricV2](func() *MetricV2 { return &MetricV2{} }) madminBgHealState = grid.NewJSONPool[madmin.BgHealState]() + madminHealResultItem = grid.NewJSONPool[madmin.HealResultItem]() madminCPUs = grid.NewJSONPool[madmin.CPUs]() madminMemInfo = grid.NewJSONPool[madmin.MemInfo]() madminNetInfo = grid.NewJSONPool[madmin.NetInfo]() @@ -88,16 +88,16 @@ var ( getNetInfoRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.NetInfo]](grid.HandlerGetNetInfo, grid.NewMSS, madminNetInfo.NewJSON) getOSInfoRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.OSInfo]](grid.HandlerGetOSInfo, grid.NewMSS, madminOSInfo.NewJSON) getPartitionsRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.Partitions]](grid.HandlerGetPartitions, grid.NewMSS, madminPartitions.NewJSON) - getPeerBucketMetricsRPC = grid.NewSingleHandler[*grid.MSS, *grid.Array[*Metric]](grid.HandlerGetPeerBucketMetrics, grid.NewMSS, aoMetricsGroup.New) - getPeerMetricsRPC = grid.NewSingleHandler[*grid.MSS, *grid.Array[*Metric]](grid.HandlerGetPeerMetrics, grid.NewMSS, aoMetricsGroup.New) - getResourceMetricsRPC = grid.NewSingleHandler[*grid.MSS, *grid.Array[*Metric]](grid.HandlerGetResourceMetrics, grid.NewMSS, aoMetricsGroup.New) + getPeerBucketMetricsRPC = grid.NewSingleHandler[*grid.MSS, *grid.Array[*MetricV2]](grid.HandlerGetPeerBucketMetrics, grid.NewMSS, aoMetricsGroup.New) + getPeerMetricsRPC = grid.NewSingleHandler[*grid.MSS, *grid.Array[*MetricV2]](grid.HandlerGetPeerMetrics, grid.NewMSS, aoMetricsGroup.New) + getResourceMetricsRPC = grid.NewSingleHandler[*grid.MSS, *grid.Array[*MetricV2]](grid.HandlerGetResourceMetrics, grid.NewMSS, aoMetricsGroup.New) getProcInfoRPC = 
grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.ProcInfo]](grid.HandlerGetProcInfo, grid.NewMSS, madminProcInfo.NewJSON) getSRMetricsRPC = grid.NewSingleHandler[*grid.MSS, *SRMetricsSummary](grid.HandlerGetSRMetrics, grid.NewMSS, func() *SRMetricsSummary { return &SRMetricsSummary{} }) getSysConfigRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.SysConfig]](grid.HandlerGetSysConfig, grid.NewMSS, madminSysConfig.NewJSON) getSysErrorsRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.SysErrors]](grid.HandlerGetSysErrors, grid.NewMSS, madminSysErrors.NewJSON) getSysServicesRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.SysServices]](grid.HandlerGetSysServices, grid.NewMSS, madminSysServices.NewJSON) headBucketRPC = grid.NewSingleHandler[*grid.MSS, *VolInfo](grid.HandlerHeadBucket, grid.NewMSS, func() *VolInfo { return &VolInfo{} }) - healBucketRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerHealBucket, grid.NewMSS, grid.NewNoPayload) + healBucketRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.HealResultItem]](grid.HandlerHealBucket, grid.NewMSS, madminHealResultItem.NewJSON) listBucketsRPC = grid.NewSingleHandler[*BucketOptions, *grid.Array[*BucketInfo]](grid.HandlerListBuckets, func() *BucketOptions { return &BucketOptions{} }, aoBucketInfo.New) loadBucketMetadataRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadBucketMetadata, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn() loadGroupRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadGroup, grid.NewMSS, grid.NewNoPayload) @@ -115,6 +115,7 @@ var ( signalServiceRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerSignalService, grid.NewMSS, grid.NewNoPayload) stopRebalanceRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerStopRebalance, grid.NewMSS, grid.NewNoPayload) updateMetacacheListingRPC = grid.NewSingleHandler[*metacache, *metacache](grid.HandlerUpdateMetacacheListing, func() *metacache { return &metacache{} }, func() *metacache { return &metacache{} }) + cleanupUploadIDCacheMetaRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerClearUploadID, grid.NewMSS, grid.NewNoPayload) // STREAMS // Set an output capacity of 100 for consoleLog and listenRPC @@ -145,7 +146,7 @@ func (s *peerRESTServer) DeletePolicyHandler(mss *grid.MSS) (np grid.NoPayload, return np, grid.NewRemoteErr(err) } - return + return np, nerr } // LoadPolicyHandler - reloads a policy on the server. @@ -164,7 +165,7 @@ func (s *peerRESTServer) LoadPolicyHandler(mss *grid.MSS) (np grid.NoPayload, ne return np, grid.NewRemoteErr(err) } - return + return np, nerr } // LoadPolicyMappingHandler - reloads a policy mapping on the server. @@ -188,7 +189,7 @@ func (s *peerRESTServer) LoadPolicyMappingHandler(mss *grid.MSS) (np grid.NoPayl return np, grid.NewRemoteErr(err) } - return + return np, nerr } // DeleteServiceAccountHandler - deletes a service account on the server. @@ -207,7 +208,7 @@ func (s *peerRESTServer) DeleteServiceAccountHandler(mss *grid.MSS) (np grid.NoP return np, grid.NewRemoteErr(err) } - return + return np, nerr } // LoadServiceAccountHandler - reloads a service account on the server. @@ -226,7 +227,7 @@ func (s *peerRESTServer) LoadServiceAccountHandler(mss *grid.MSS) (np grid.NoPay return np, grid.NewRemoteErr(err) } - return + return np, nerr } // DeleteUserHandler - deletes a user on the server. 
@@ -245,7 +246,7 @@ func (s *peerRESTServer) DeleteUserHandler(mss *grid.MSS) (np grid.NoPayload, ne return np, grid.NewRemoteErr(err) } - return + return np, nerr } // LoadUserHandler - reloads a user on the server. @@ -274,7 +275,7 @@ func (s *peerRESTServer) LoadUserHandler(mss *grid.MSS) (np grid.NoPayload, nerr return np, grid.NewRemoteErr(err) } - return + return np, nerr } // LoadGroupHandler - reloads group along with members list. @@ -294,7 +295,7 @@ func (s *peerRESTServer) LoadGroupHandler(mss *grid.MSS) (np grid.NoPayload, ner return np, grid.NewRemoteErr(err) } - return + return np, nerr } // StartProfilingHandler - Issues the start profiling command. @@ -349,7 +350,7 @@ func (s *peerRESTServer) DownloadProfilingDataHandler(w http.ResponseWriter, r * s.writeErrorResponse(w, err) return } - logger.LogIf(ctx, gob.NewEncoder(w).Encode(profileData)) + peersLogIf(ctx, gob.NewEncoder(w).Encode(profileData)) } func (s *peerRESTServer) LocalStorageInfoHandler(mss *grid.MSS) (*grid.JSON[madmin.StorageInfo], *grid.RemoteErr) { @@ -368,7 +369,7 @@ func (s *peerRESTServer) LocalStorageInfoHandler(mss *grid.MSS) (*grid.JSON[madm // ServerInfoHandler - returns Server Info func (s *peerRESTServer) ServerInfoHandler(params *grid.MSS) (*grid.JSON[madmin.ServerProperties], *grid.RemoteErr) { - r := http.Request{Host: globalMinioHost} + r := http.Request{Host: globalLocalNodeName} metrics, err := strconv.ParseBool(params.Get(peerRESTMetrics)) if err != nil { return nil, grid.NewRemoteErr(err) @@ -379,37 +380,37 @@ func (s *peerRESTServer) ServerInfoHandler(params *grid.MSS) (*grid.JSON[madmin. // GetCPUsHandler - returns CPU info. func (s *peerRESTServer) GetCPUsHandler(_ *grid.MSS) (*grid.JSON[madmin.CPUs], *grid.RemoteErr) { - info := madmin.GetCPUs(context.Background(), globalMinioHost) + info := madmin.GetCPUs(context.Background(), globalLocalNodeName) return madminCPUs.NewJSONWith(&info), nil } // GetNetInfoHandler - returns network information. func (s *peerRESTServer) GetNetInfoHandler(_ *grid.MSS) (*grid.JSON[madmin.NetInfo], *grid.RemoteErr) { - info := madmin.GetNetInfo(globalMinioHost, globalInternodeInterface) + info := madmin.GetNetInfo(globalLocalNodeName, globalInternodeInterface) return madminNetInfo.NewJSONWith(&info), nil } // GetPartitionsHandler - returns disk partition information. func (s *peerRESTServer) GetPartitionsHandler(_ *grid.MSS) (*grid.JSON[madmin.Partitions], *grid.RemoteErr) { - info := madmin.GetPartitions(context.Background(), globalMinioHost) + info := madmin.GetPartitions(context.Background(), globalLocalNodeName) return madminPartitions.NewJSONWith(&info), nil } // GetOSInfoHandler - returns operating system's information. func (s *peerRESTServer) GetOSInfoHandler(_ *grid.MSS) (*grid.JSON[madmin.OSInfo], *grid.RemoteErr) { - info := madmin.GetOSInfo(context.Background(), globalMinioHost) + info := madmin.GetOSInfo(context.Background(), globalLocalNodeName) return madminOSInfo.NewJSONWith(&info), nil } // GetProcInfoHandler - returns this MinIO process information. func (s *peerRESTServer) GetProcInfoHandler(_ *grid.MSS) (*grid.JSON[madmin.ProcInfo], *grid.RemoteErr) { - info := madmin.GetProcInfo(context.Background(), globalMinioHost) + info := madmin.GetProcInfo(context.Background(), globalLocalNodeName) return madminProcInfo.NewJSONWith(&info), nil } // GetMemInfoHandler - returns memory information. 
func (s *peerRESTServer) GetMemInfoHandler(_ *grid.MSS) (*grid.JSON[madmin.MemInfo], *grid.RemoteErr) { - info := madmin.GetMemInfo(context.Background(), globalMinioHost) + info := madmin.GetMemInfo(context.Background(), globalLocalNodeName) return madminMemInfo.NewJSONWith(&info), nil } @@ -445,20 +446,20 @@ func (s *peerRESTServer) GetMetricsHandler(v *grid.URLValues) (*grid.JSON[madmin // GetSysConfigHandler - returns system config information. // (only the config that are of concern to minio) func (s *peerRESTServer) GetSysConfigHandler(_ *grid.MSS) (*grid.JSON[madmin.SysConfig], *grid.RemoteErr) { - info := madmin.GetSysConfig(context.Background(), globalMinioHost) + info := madmin.GetSysConfig(context.Background(), globalLocalNodeName) return madminSysConfig.NewJSONWith(&info), nil } // GetSysServicesHandler - returns system services information. // (only the services that are of concern to minio) func (s *peerRESTServer) GetSysServicesHandler(_ *grid.MSS) (*grid.JSON[madmin.SysServices], *grid.RemoteErr) { - info := madmin.GetSysServices(context.Background(), globalMinioHost) + info := madmin.GetSysServices(context.Background(), globalLocalNodeName) return madminSysServices.NewJSONWith(&info), nil } // GetSysErrorsHandler - returns system level errors func (s *peerRESTServer) GetSysErrorsHandler(_ *grid.MSS) (*grid.JSON[madmin.SysErrors], *grid.RemoteErr) { - info := madmin.GetSysErrors(context.Background(), globalMinioHost) + info := madmin.GetSysErrors(context.Background(), globalLocalNodeName) return madminSysErrors.NewJSONWith(&info), nil } @@ -469,7 +470,7 @@ func (s *peerRESTServer) DeleteBucketMetadataHandler(mss *grid.MSS) (np grid.NoP return np, grid.NewRemoteErr(errors.New("Bucket name is missing")) } - globalReplicationStats.Delete(bucketName) + globalReplicationStats.Load().Delete(bucketName) globalBucketMetadataSys.Remove(bucketName) globalBucketTargetSys.Delete(bucketName) globalEventNotifier.RemoveNotification(bucketName) @@ -478,17 +479,17 @@ func (s *peerRESTServer) DeleteBucketMetadataHandler(mss *grid.MSS) (np grid.NoP if localMetacacheMgr != nil { localMetacacheMgr.deleteBucketCache(bucketName) } - return + return np, nerr } // GetAllBucketStatsHandler - fetches bucket replication stats for all buckets from this peer. 
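// globalReplicationStats is read via Load() in the handlers below; the ones that can run
// before replication stats are initialized (GetBucketStatsHandler, GetSRMetricsHandler)
// guard against a nil value and return an empty result, roughly:
//
//	st := globalReplicationStats.Load()
//	if st == nil {
//		return &BucketStats{}, nil // not initialized yet
//	}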
func (s *peerRESTServer) GetAllBucketStatsHandler(mss *grid.MSS) (*BucketStatsMap, *grid.RemoteErr) { - replicationStats := globalReplicationStats.GetAll() + replicationStats := globalReplicationStats.Load().GetAll() bucketStatsMap := make(map[string]BucketStats, len(replicationStats)) for k, v := range replicationStats { bucketStatsMap[k] = BucketStats{ ReplicationStats: v, - ProxyStats: globalReplicationStats.getProxyStats(k), + ProxyStats: globalReplicationStats.Load().getProxyStats(k), } } return &BucketStatsMap{Stats: bucketStatsMap, Timestamp: time.Now()}, nil @@ -501,11 +502,14 @@ func (s *peerRESTServer) GetBucketStatsHandler(vars *grid.MSS) (*BucketStats, *g if bucketName == "" { return nil, grid.NewRemoteErrString("Bucket name is missing") } - + st := globalReplicationStats.Load() + if st == nil { + return &BucketStats{}, nil + } bs := BucketStats{ - ReplicationStats: globalReplicationStats.Get(bucketName), - QueueStats: ReplicationQueueStats{Nodes: []ReplQNodeStats{globalReplicationStats.getNodeQueueStats(bucketName)}}, - ProxyStats: globalReplicationStats.getProxyStats(bucketName), + ReplicationStats: st.Get(bucketName), + QueueStats: ReplicationQueueStats{Nodes: []ReplQNodeStats{st.getNodeQueueStats(bucketName)}}, + ProxyStats: st.getProxyStats(bucketName), } return &bs, nil } @@ -516,9 +520,11 @@ func (s *peerRESTServer) GetSRMetricsHandler(mss *grid.MSS) (*SRMetricsSummary, if objAPI == nil { return nil, grid.NewRemoteErr(errServerNotInitialized) } - - sm := globalReplicationStats.getSRMetricsForNode() - return &sm, nil + if st := globalReplicationStats.Load(); st != nil { + sm := st.getSRMetricsForNode() + return &sm, nil + } + return &SRMetricsSummary{}, nil } // LoadBucketMetadataHandler - reloads in memory bucket metadata @@ -548,7 +554,7 @@ func (s *peerRESTServer) LoadBucketMetadataHandler(mss *grid.MSS) (np grid.NoPay globalBucketTargetSys.UpdateAllTargets(bucketName, meta.bucketTargetConfig) } - return + return np, nerr } func (s *peerRESTServer) GetMetacacheListingHandler(opts *listPathOptions) (*metacache, *grid.RemoteErr) { @@ -630,7 +636,7 @@ func (s *peerRESTServer) VerifyBinaryHandler(w http.ResponseWriter, r *http.Requ } if lrTime.Sub(currentReleaseTime) <= 0 { - s.writeErrorResponse(w, fmt.Errorf("server is already running the latest version: %s", Version)) + s.writeErrorResponse(w, fmt.Errorf("server is running the latest version: %s", Version)) return } @@ -664,7 +670,7 @@ var errUnsupportedSignal = fmt.Errorf("unsupported signal") func waitingDrivesNode() map[string]madmin.DiskMetrics { globalLocalDrivesMu.RLock() - localDrives := cloneDrives(globalLocalDrives) + localDrives := cloneDrives(globalLocalDrivesMap) globalLocalDrivesMu.RUnlock() errs := make([]error, len(localDrives)) @@ -693,6 +699,18 @@ func (s *peerRESTServer) SignalServiceHandler(vars *grid.MSS) (np grid.NoPayload if err != nil { return np, grid.NewRemoteErr(err) } + + // Wait until the specified time before executing the signal. 
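	// The client side (SignalService in peer-rest-client.go) may pass peerRESTExecAt as an
	// RFC3339Nano timestamp so every peer applies a restart/stop signal at roughly the same
	// wall-clock time; if the value does not parse, the server falls back to a short
	// restartUpdateDelay from now. Sender-side sketch, assuming a caller that already holds
	// its peer clients (peerClients is a hypothetical []*peerRESTClient):
	//
	//	execAt := time.Now().Add(restartUpdateDelay)
	//	for _, client := range peerClients {
	//		_ = client.SignalService(serviceRestart, "", false, &execAt)
	//	}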
+ if t := vars.Get(peerRESTExecAt); t != "" { + execAt, err := time.Parse(time.RFC3339Nano, vars.Get(peerRESTExecAt)) + if err != nil { + logger.LogIf(GlobalContext, "signalservice", err) + execAt = time.Now().Add(restartUpdateDelay) + } + if d := time.Until(execAt); d > 0 { + time.Sleep(d) + } + } signal := serviceSignal(si) switch signal { case serviceRestart, serviceStop: @@ -815,7 +833,7 @@ func (s *peerRESTServer) ListenHandler(ctx context.Context, v *grid.URLValues, o buf.Reset() tmpEvt.Records[0] = ev if err := enc.Encode(tmpEvt); err != nil { - logger.LogOnceIf(ctx, err, "event: Encode failed") + peersLogOnceIf(ctx, err, "event: Encode failed") continue } out <- grid.NewBytesWithCopyOf(buf.Bytes()) @@ -866,8 +884,8 @@ func (s *peerRESTServer) ReloadSiteReplicationConfigHandler(mss *grid.MSS) (np g return np, grid.NewRemoteErr(errServerNotInitialized) } - logger.LogIf(context.Background(), globalSiteReplicationSys.Init(context.Background(), objAPI)) - return + peersLogIf(context.Background(), globalSiteReplicationSys.Init(context.Background(), objAPI)) + return np, nerr } func (s *peerRESTServer) ReloadPoolMetaHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) { @@ -878,14 +896,34 @@ func (s *peerRESTServer) ReloadPoolMetaHandler(mss *grid.MSS) (np grid.NoPayload pools, ok := objAPI.(*erasureServerPools) if !ok { - return + return np, nerr } if err := pools.ReloadPoolMeta(context.Background()); err != nil { return np, grid.NewRemoteErr(err) } - return + return np, nerr +} + +func (s *peerRESTServer) HandlerClearUploadID(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) { + objAPI := newObjectLayerFn() + if objAPI == nil { + return np, grid.NewRemoteErr(errServerNotInitialized) + } + + pools, ok := objAPI.(*erasureServerPools) + if !ok { + return np, nerr + } + + // No need to return errors, this is not a highly strict operation. + uploadID := mss.Get(peerRESTUploadID) + if uploadID != "" { + pools.ClearUploadID(uploadID) + } + + return np, nerr } func (s *peerRESTServer) StopRebalanceHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) { @@ -900,7 +938,7 @@ func (s *peerRESTServer) StopRebalanceHandler(mss *grid.MSS) (np grid.NoPayload, } pools.StopRebalance() - return + return np, nerr } func (s *peerRESTServer) LoadRebalanceMetaHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) { @@ -927,7 +965,7 @@ func (s *peerRESTServer) LoadRebalanceMetaHandler(mss *grid.MSS) (np grid.NoPayl go pools.StartRebalance() } - return + return np, nerr } func (s *peerRESTServer) LoadTransitionTierConfigHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) { @@ -939,11 +977,11 @@ func (s *peerRESTServer) LoadTransitionTierConfigHandler(mss *grid.MSS) (np grid go func() { err := globalTierConfigMgr.Reload(context.Background(), newObjectLayerFn()) if err != nil { - logger.LogIf(context.Background(), fmt.Errorf("Failed to reload remote tier config %s", err)) + peersLogIf(context.Background(), fmt.Errorf("Failed to reload remote tier config %s", err)) } }() - return + return np, nerr } // ConsoleLogHandler sends console logs of this node back to peer rest client @@ -996,13 +1034,13 @@ func (s *peerRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool { // GetBandwidth gets the bandwidth for the buckets requested. 
func (s *peerRESTServer) GetBandwidth(params *grid.URLValues) (*bandwidth.BucketBandwidthReport, *grid.RemoteErr) { buckets := params.Values().Get("buckets") - selectBuckets := b.SelectBuckets(buckets) + selectBuckets := bandwidth.SelectBuckets(buckets) return globalBucketMonitor.GetReport(selectBuckets), nil } -func (s *peerRESTServer) GetResourceMetrics(_ *grid.MSS) (*grid.Array[*Metric], *grid.RemoteErr) { - res := make([]*Metric, 0, len(resourceMetricsGroups)) - populateAndPublish(resourceMetricsGroups, func(m Metric) bool { +func (s *peerRESTServer) GetResourceMetrics(_ *grid.MSS) (*grid.Array[*MetricV2], *grid.RemoteErr) { + res := make([]*MetricV2, 0, len(resourceMetricsGroups)) + populateAndPublish(resourceMetricsGroups, func(m MetricV2) bool { if m.VariableLabels == nil { m.VariableLabels = make(map[string]string, 1) } @@ -1014,9 +1052,9 @@ func (s *peerRESTServer) GetResourceMetrics(_ *grid.MSS) (*grid.Array[*Metric], } // GetPeerMetrics gets the metrics to be federated across peers. -func (s *peerRESTServer) GetPeerMetrics(_ *grid.MSS) (*grid.Array[*Metric], *grid.RemoteErr) { - res := make([]*Metric, 0, len(peerMetricsGroups)) - populateAndPublish(peerMetricsGroups, func(m Metric) bool { +func (s *peerRESTServer) GetPeerMetrics(_ *grid.MSS) (*grid.Array[*MetricV2], *grid.RemoteErr) { + res := make([]*MetricV2, 0, len(peerMetricsGroups)) + populateAndPublish(peerMetricsGroups, func(m MetricV2) bool { if m.VariableLabels == nil { m.VariableLabels = make(map[string]string, 1) } @@ -1028,9 +1066,9 @@ func (s *peerRESTServer) GetPeerMetrics(_ *grid.MSS) (*grid.Array[*Metric], *gri } // GetPeerBucketMetrics gets the metrics to be federated across peers. -func (s *peerRESTServer) GetPeerBucketMetrics(_ *grid.MSS) (*grid.Array[*Metric], *grid.RemoteErr) { - res := make([]*Metric, 0, len(bucketPeerMetricsGroups)) - populateAndPublish(bucketPeerMetricsGroups, func(m Metric) bool { +func (s *peerRESTServer) GetPeerBucketMetrics(_ *grid.MSS) (*grid.Array[*MetricV2], *grid.RemoteErr) { + res := make([]*MetricV2, 0, len(bucketPeerMetricsGroups)) + populateAndPublish(bucketPeerMetricsGroups, func(m MetricV2) bool { if m.VariableLabels == nil { m.VariableLabels = make(map[string]string, 1) } @@ -1059,6 +1097,13 @@ func (s *peerRESTServer) SpeedTestHandler(w http.ResponseWriter, r *http.Request storageClass := r.Form.Get(peerRESTStorageClass) bucketName := r.Form.Get(peerRESTBucket) enableSha256 := r.Form.Get(peerRESTEnableSha256) == "true" + enableMultipart := r.Form.Get(peerRESTEnableMultipart) == "true" + + u, ok := globalIAMSys.GetUser(r.Context(), r.Form.Get(peerRESTAccessKey)) + if !ok { + s.writeErrorResponse(w, errAuthentication) + return + } size, err := strconv.Atoi(sizeStr) if err != nil { @@ -1078,19 +1123,21 @@ func (s *peerRESTServer) SpeedTestHandler(w http.ResponseWriter, r *http.Request done := keepHTTPResponseAlive(w) result, err := selfSpeedTest(r.Context(), speedTestOpts{ - objectSize: size, - concurrency: concurrent, - duration: duration, - storageClass: storageClass, - bucketName: bucketName, - enableSha256: enableSha256, + objectSize: size, + concurrency: concurrent, + duration: duration, + storageClass: storageClass, + bucketName: bucketName, + enableSha256: enableSha256, + enableMultipart: enableMultipart, + creds: u.Credentials, }) if err != nil { result.Error = err.Error() } done(nil) - logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result)) + peersLogIf(r.Context(), gob.NewEncoder(w).Encode(result)) } // GetLastDayTierStatsHandler - returns per-tier stats in the 
last 24hrs for this server @@ -1139,7 +1186,7 @@ func (s *peerRESTServer) DriveSpeedTestHandler(w http.ResponseWriter, r *http.Re result := driveSpeedTest(r.Context(), opts) done(nil) - logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result)) + peersLogIf(r.Context(), gob.NewEncoder(w).Encode(result)) } // GetReplicationMRFHandler - returns replication MRF for bucket @@ -1152,7 +1199,7 @@ func (s *peerRESTServer) GetReplicationMRFHandler(w http.ResponseWriter, r *http vars := mux.Vars(r) bucketName := vars[peerRESTBucket] ctx := newContext(r, w, "GetReplicationMRF") - re, err := globalReplicationPool.getMRF(ctx, bucketName) + re, err := globalReplicationPool.Get().getMRF(ctx, bucketName) if err != nil { s.writeErrorResponse(w, err) return @@ -1186,7 +1233,7 @@ func (s *peerRESTServer) DevNull(w http.ResponseWriter, r *http.Request) { // If there is a disconnection before globalNetPerfMinDuration (we give a margin of error of 1 sec) // would mean the network is not stable. Logging here will help in debugging network issues. if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) { - logger.LogIf(ctx, err) + peersLogIf(ctx, err) } } if err != nil { @@ -1208,24 +1255,24 @@ func (s *peerRESTServer) NetSpeedTestHandler(w http.ResponseWriter, r *http.Requ duration = time.Second * 10 } result := netperf(r.Context(), duration.Round(time.Second)) - logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result)) + peersLogIf(r.Context(), gob.NewEncoder(w).Encode(result)) } -func (s *peerRESTServer) HealBucketHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) { +func (s *peerRESTServer) HealBucketHandler(mss *grid.MSS) (np *grid.JSON[madmin.HealResultItem], nerr *grid.RemoteErr) { bucket := mss.Get(peerS3Bucket) if isMinioMetaBucket(bucket) { return np, grid.NewRemoteErr(errInvalidArgument) } bucketDeleted := mss.Get(peerS3BucketDeleted) == "true" - _, err := healBucketLocal(context.Background(), bucket, madmin.HealOpts{ + res, err := healBucketLocal(context.Background(), bucket, madmin.HealOpts{ Remove: bucketDeleted, }) if err != nil { return np, grid.NewRemoteErr(err) } - return np, nil + return madminHealResultItem.NewJSONWith(&res), nil } func (s *peerRESTServer) ListBucketsHandler(opts *BucketOptions) (*grid.Array[*BucketInfo], *grid.RemoteErr) { @@ -1260,6 +1307,7 @@ func (s *peerRESTServer) HeadBucketHandler(mss *grid.MSS) (info *VolInfo, nerr * return &VolInfo{ Name: bucketInfo.Name, Created: bucketInfo.Created, + Deleted: bucketInfo.Deleted, // needed for site replication }, nil } @@ -1365,6 +1413,7 @@ func registerPeerRESTHandlers(router *mux.Router, gm *grid.Manager) { logger.FatalIf(signalServiceRPC.Register(gm, server.SignalServiceHandler), "unable to register handler") logger.FatalIf(stopRebalanceRPC.Register(gm, server.StopRebalanceHandler), "unable to register handler") logger.FatalIf(updateMetacacheListingRPC.Register(gm, server.UpdateMetacacheListingHandler), "unable to register handler") + logger.FatalIf(cleanupUploadIDCacheMetaRPC.Register(gm, server.HandlerClearUploadID), "unable to register handler") logger.FatalIf(gm.RegisterStreamingHandler(grid.HandlerTrace, grid.StreamHandler{ Handle: server.TraceHandler, diff --git a/cmd/peer-s3-client.go b/cmd/peer-s3-client.go index 9841baae73905..438b4d3360cbb 100644 --- a/cmd/peer-s3-client.go +++ b/cmd/peer-s3-client.go @@ -21,6 +21,7 @@ import ( "context" "errors" "fmt" + "slices" "sort" "strconv" "sync/atomic" @@ -28,9 +29,7 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/grid" 
- "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/sync/errgroup" - "golang.org/x/exp/slices" + "github.com/minio/pkg/v3/sync/errgroup" ) var errPeerOffline = errors.New("peer is offline") @@ -114,7 +113,6 @@ func (sys *S3PeerSys) HealBucket(ctx context.Context, bucket string, opts madmin g := errgroup.WithNErrs(len(sys.peerClients)) for idx, client := range sys.peerClients { - idx := idx client := client g.Go(func() error { if client == nil { @@ -139,13 +137,16 @@ func (sys *S3PeerSys) HealBucket(ctx context.Context, bucket string, opts madmin poolErrs = append(poolErrs, reduceWriteQuorumErrs(ctx, perPoolErrs, bucketOpIgnoredErrs, quorum)) } - opts.Remove = isAllBucketsNotFound(poolErrs) - opts.Recreate = !opts.Remove + if !opts.Recreate { + // when there is no force recreate look for pool + // errors to recreate the bucket on all pools. + opts.Remove = isAllBucketsNotFound(poolErrs) + opts.Recreate = !opts.Remove + } g = errgroup.WithNErrs(len(sys.peerClients)) healBucketResults := make([]madmin.HealResultItem, len(sys.peerClients)) for idx, client := range sys.peerClients { - idx := idx client := client g.Go(func() error { if client == nil { @@ -175,13 +176,24 @@ func (sys *S3PeerSys) HealBucket(ctx context.Context, bucket string, opts madmin } } + if healBucketErr := reduceWriteQuorumErrs(ctx, errs, bucketOpIgnoredErrs, len(errs)/2+1); healBucketErr != nil { + return madmin.HealResultItem{}, toObjectErr(healBucketErr, bucket) + } + + res := madmin.HealResultItem{ + Type: madmin.HealItemBucket, + Bucket: bucket, + SetCount: -1, // explicitly set an invalid value -1, for bucket heal scenario + } + for i, err := range errs { if err == nil { - return healBucketResults[i], nil + res.Before.Drives = append(res.Before.Drives, healBucketResults[i].Before.Drives...) + res.After.Drives = append(res.After.Drives, healBucketResults[i].After.Drives...) 
} } - return madmin.HealResultItem{}, toObjectErr(errVolumeNotFound, bucket) + return res, nil } // ListBuckets lists buckets across all nodes and returns a consistent view: @@ -193,7 +205,6 @@ func (sys *S3PeerSys) ListBuckets(ctx context.Context, opts BucketOptions) ([]Bu nodeBuckets := make([][]BucketInfo, len(sys.peerClients)) for idx, client := range sys.peerClients { - idx := idx client := client g.Go(func() error { if client == nil { @@ -255,9 +266,9 @@ func (sys *S3PeerSys) ListBuckets(ctx context.Context, opts BucketOptions) ([]Bu for bktName, count := range bucketsMap { if count < quorum { // Queue a bucket heal task - globalMRFState.addPartialOp(partialOperation{ - bucket: bktName, - queued: time.Now(), + globalMRFState.addPartialOp(PartialOperation{ + Bucket: bktName, + Queued: time.Now(), }) } } @@ -281,7 +292,6 @@ func (sys *S3PeerSys) GetBucketInfo(ctx context.Context, bucket string, opts Buc bucketInfos := make([]BucketInfo, len(sys.peerClients)) for idx, client := range sys.peerClients { - idx := idx client := client g.Go(func() error { if client == nil { @@ -321,6 +331,9 @@ func (sys *S3PeerSys) GetBucketInfo(ctx context.Context, bucket string, opts Buc } func (client *remotePeerS3Client) ListBuckets(ctx context.Context, opts BucketOptions) ([]BucketInfo, error) { + ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout()) + defer cancel() + bi, err := listBucketsRPC.Call(ctx, client.gridConn(), &opts) if err != nil { return nil, toStorageErr(err) @@ -346,14 +359,11 @@ func (client *remotePeerS3Client) HealBucket(ctx context.Context, bucket string, peerS3BucketDeleted: strconv.FormatBool(opts.Remove), }) - _, err := healBucketRPC.Call(ctx, conn, mss) + ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout()) + defer cancel() - // Initialize heal result info - return madmin.HealResultItem{ - Type: madmin.HealItemBucket, - Bucket: bucket, - SetCount: -1, // explicitly set an invalid value -1, for bucket heal scenario - }, toStorageErr(err) + resp, err := healBucketRPC.Call(ctx, conn, mss) + return resp.ValueOrZero(), toStorageErr(err) } // GetBucketInfo returns bucket stat info from a peer @@ -368,6 +378,9 @@ func (client *remotePeerS3Client) GetBucketInfo(ctx context.Context, bucket stri peerS3BucketDeleted: strconv.FormatBool(opts.Deleted), }) + ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout()) + defer cancel() + volInfo, err := headBucketRPC.Call(ctx, conn, mss) if err != nil { return BucketInfo{}, toStorageErr(err) @@ -376,6 +389,7 @@ func (client *remotePeerS3Client) GetBucketInfo(ctx context.Context, bucket stri return BucketInfo{ Name: volInfo.Name, Created: volInfo.Created, + Deleted: volInfo.Deleted, }, nil } @@ -383,7 +397,6 @@ func (client *remotePeerS3Client) GetBucketInfo(ctx context.Context, bucket stri func (sys *S3PeerSys) MakeBucket(ctx context.Context, bucket string, opts MakeBucketOptions) error { g := errgroup.WithNErrs(len(sys.peerClients)) for idx, client := range sys.peerClients { - client := client g.Go(func() error { if client == nil { return errPeerOffline @@ -419,6 +432,9 @@ func (client *remotePeerS3Client) MakeBucket(ctx context.Context, bucket string, peerS3BucketForceCreate: strconv.FormatBool(opts.ForceCreate), }) + ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout()) + defer cancel() + _, err := makeBucketRPC.Call(ctx, conn, mss) return toStorageErr(err) } @@ -427,7 +443,6 @@ func (client *remotePeerS3Client) MakeBucket(ctx context.Context, bucket string, func 
(sys *S3PeerSys) DeleteBucket(ctx context.Context, bucket string, opts DeleteBucketOptions) error { g := errgroup.WithNErrs(len(sys.peerClients)) for idx, client := range sys.peerClients { - client := client g.Go(func() error { if client == nil { return errPeerOffline @@ -468,6 +483,9 @@ func (client *remotePeerS3Client) DeleteBucket(ctx context.Context, bucket strin peerS3BucketForceDelete: strconv.FormatBool(opts.Force), }) + ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout()) + defer cancel() + _, err := deleteBucketRPC.Call(ctx, conn, mss) return toStorageErr(err) } @@ -511,7 +529,7 @@ func newPeerS3Client(node Node) peerS3Client { // Lazy initialization of grid connection. // When we create this peer client, the grid connection is likely not yet initialized. if node.GridHost == "" { - logger.LogOnceIf(context.Background(), fmt.Errorf("gridHost is empty for peer %s", node.Host), node.Host+":gridHost") + bugLogIf(context.Background(), fmt.Errorf("gridHost is empty for peer %s", node.Host), node.Host+":gridHost") return nil } gc := gridConn.Load() @@ -524,7 +542,7 @@ func newPeerS3Client(node Node) peerS3Client { } gc = gm.Connection(node.GridHost) if gc == nil { - logger.LogOnceIf(context.Background(), fmt.Errorf("gridHost %s not found for peer %s", node.GridHost, node.Host), node.Host+":gridHost") + bugLogIf(context.Background(), fmt.Errorf("gridHost %s not found for peer %s", node.GridHost, node.Host), node.Host+":gridHost") return nil } gridConn.Store(gc) diff --git a/cmd/peer-s3-server.go b/cmd/peer-s3-server.go index a41d139c09a2e..227aebe8d2f2e 100644 --- a/cmd/peer-s3-server.go +++ b/cmd/peer-s3-server.go @@ -22,7 +22,8 @@ import ( "errors" "github.com/minio/madmin-go/v3" - "github.com/minio/pkg/v2/sync/errgroup" + "github.com/minio/pkg/v3/sync/errgroup" + "github.com/puzpuzpuz/xsync/v3" ) const ( @@ -34,7 +35,7 @@ const ( func healBucketLocal(ctx context.Context, bucket string, opts madmin.HealOpts) (res madmin.HealResultItem, err error) { globalLocalDrivesMu.RLock() - localDrives := cloneDrives(globalLocalDrives) + localDrives := cloneDrives(globalLocalDrivesMap) globalLocalDrivesMu.RUnlock() // Initialize sync waitgroup. @@ -46,7 +47,6 @@ func healBucketLocal(ctx context.Context, bucket string, opts madmin.HealOpts) ( // Make a volume entry on all underlying storage disks. for index := range localDrives { - index := index g.Go(func() (serr error) { if localDrives[index] == nil { beforeState[index] = madmin.DriveStateOffline @@ -101,7 +101,7 @@ func healBucketLocal(ctx context.Context, bucket string, opts madmin.HealOpts) ( for i := range beforeState { res.Before.Drives = append(res.Before.Drives, madmin.HealDriveInfo{ UUID: "", - Endpoint: localDrives[i].String(), + Endpoint: localDrives[i].Endpoint().String(), State: beforeState[i], }) } @@ -110,7 +110,6 @@ func healBucketLocal(ctx context.Context, bucket string, opts madmin.HealOpts) ( if !isMinioMetaBucketName(bucket) && !isAllBucketsNotFound(errs) && opts.Remove { g := errgroup.WithNErrs(len(localDrives)) for index := range localDrives { - index := index g.Go(func() error { if localDrives[index] == nil { return errDiskNotFound @@ -123,14 +122,13 @@ func healBucketLocal(ctx context.Context, bucket string, opts madmin.HealOpts) ( g.Wait() } - // Create the quorum lost volume only if its nor makred for delete + // Create the lost volume only if its not marked for delete if !opts.Remove { // Initialize sync waitgroup. 
g = errgroup.WithNErrs(len(localDrives)) // Make a volume entry on all underlying storage disks. for index := range localDrives { - index := index g.Go(func() error { if beforeState[index] == madmin.DriveStateMissing { err := localDrives[index].MakeVol(ctx, bucket) @@ -149,7 +147,7 @@ func healBucketLocal(ctx context.Context, bucket string, opts madmin.HealOpts) ( for i := range afterState { res.After.Drives = append(res.After.Drives, madmin.HealDriveInfo{ UUID: "", - Endpoint: localDrives[i].String(), + Endpoint: localDrives[i].Endpoint().String(), State: afterState[i], }) } @@ -158,13 +156,13 @@ func healBucketLocal(ctx context.Context, bucket string, opts madmin.HealOpts) ( func listBucketsLocal(ctx context.Context, opts BucketOptions) (buckets []BucketInfo, err error) { globalLocalDrivesMu.RLock() - localDrives := cloneDrives(globalLocalDrives) + localDrives := cloneDrives(globalLocalDrivesMap) globalLocalDrivesMu.RUnlock() quorum := (len(localDrives) / 2) buckets = make([]BucketInfo, 0, 32) - healBuckets := map[string]VolInfo{} + healBuckets := xsync.NewMapOf[string, VolInfo]() // lists all unique buckets across drives. if err := listAllBuckets(ctx, localDrives, healBuckets, quorum); err != nil { @@ -172,7 +170,7 @@ func listBucketsLocal(ctx context.Context, opts BucketOptions) (buckets []Bucket } // include deleted buckets in listBuckets output - deletedBuckets := map[string]VolInfo{} + deletedBuckets := xsync.NewMapOf[string, VolInfo]() if opts.Deleted { // lists all deleted buckets across drives. @@ -181,38 +179,42 @@ func listBucketsLocal(ctx context.Context, opts BucketOptions) (buckets []Bucket } } - for _, v := range healBuckets { + healBuckets.Range(func(_ string, volInfo VolInfo) bool { bi := BucketInfo{ - Name: v.Name, - Created: v.Created, + Name: volInfo.Name, + Created: volInfo.Created, } - if vi, ok := deletedBuckets[v.Name]; ok { + if vi, ok := deletedBuckets.Load(volInfo.Name); ok { bi.Deleted = vi.Created } buckets = append(buckets, bi) - } + return true + }) - for _, v := range deletedBuckets { - if _, ok := healBuckets[v.Name]; !ok { + deletedBuckets.Range(func(_ string, v VolInfo) bool { + if _, ok := healBuckets.Load(v.Name); !ok { buckets = append(buckets, BucketInfo{ Name: v.Name, Deleted: v.Created, }) } - } + return true + }) return buckets, nil } -func cloneDrives(drives []StorageAPI) []StorageAPI { - newDrives := make([]StorageAPI, len(drives)) - copy(newDrives, drives) - return newDrives +func cloneDrives(drives map[string]StorageAPI) []StorageAPI { + copyDrives := make([]StorageAPI, 0, len(drives)) + for _, drive := range drives { + copyDrives = append(copyDrives, drive) + } + return copyDrives } func getBucketInfoLocal(ctx context.Context, bucket string, opts BucketOptions) (BucketInfo, error) { globalLocalDrivesMu.RLock() - localDrives := cloneDrives(globalLocalDrives) + localDrives := cloneDrives(globalLocalDrivesMap) globalLocalDrivesMu.RUnlock() g := errgroup.WithNErrs(len(localDrives)).WithConcurrency(32) @@ -220,7 +222,6 @@ func getBucketInfoLocal(ctx context.Context, bucket string, opts BucketOptions) // Make a volume entry on all underlying storage disks. 
for index := range localDrives { - index := index g.Go(func() error { if localDrives[index] == nil { return errDiskNotFound @@ -261,14 +262,13 @@ func getBucketInfoLocal(ctx context.Context, bucket string, opts BucketOptions) func deleteBucketLocal(ctx context.Context, bucket string, opts DeleteBucketOptions) error { globalLocalDrivesMu.RLock() - localDrives := cloneDrives(globalLocalDrives) + localDrives := cloneDrives(globalLocalDrivesMap) globalLocalDrivesMu.RUnlock() g := errgroup.WithNErrs(len(localDrives)).WithConcurrency(32) // Make a volume entry on all underlying storage disks. for index := range localDrives { - index := index g.Go(func() error { if localDrives[index] == nil { return errDiskNotFound @@ -277,36 +277,18 @@ func deleteBucketLocal(ctx context.Context, bucket string, opts DeleteBucketOpti }, index) } - var recreate bool - errs := g.Wait() - for index, err := range errs { - if errors.Is(err, errVolumeNotEmpty) { - recreate = true - } - if err == nil && recreate { - // ignore any errors - localDrives[index].MakeVol(ctx, bucket) - } - } - - // Since we recreated buckets and error was `not-empty`, return not-empty. - if recreate { - return errVolumeNotEmpty - } // for all other errors reduce by write quorum. - - return reduceWriteQuorumErrs(ctx, errs, bucketOpIgnoredErrs, (len(localDrives)/2)+1) + return reduceWriteQuorumErrs(ctx, g.Wait(), bucketOpIgnoredErrs, (len(localDrives)/2)+1) } func makeBucketLocal(ctx context.Context, bucket string, opts MakeBucketOptions) error { globalLocalDrivesMu.RLock() - localDrives := cloneDrives(globalLocalDrives) + localDrives := cloneDrives(globalLocalDrivesMap) globalLocalDrivesMu.RUnlock() g := errgroup.WithNErrs(len(localDrives)).WithConcurrency(32) // Make a volume entry on all underlying storage disks. for index := range localDrives { - index := index g.Go(func() error { if localDrives[index] == nil { return errDiskNotFound diff --git a/cmd/perf-tests.go b/cmd/perf-tests.go index 5a9bcf3807402..f9b4663f45c25 100644 --- a/cmd/perf-tests.go +++ b/cmd/perf-tests.go @@ -33,9 +33,10 @@ import ( "github.com/dustin/go-humanize" "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" xhttp "github.com/minio/minio/internal/http" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/pkg/v2/randreader" + "github.com/minio/pkg/v3/randreader" ) // SpeedTestResult return value of the speedtest function @@ -72,7 +73,7 @@ func (f *firstByteRecorder) Read(p []byte) (n int, err error) { } // Runs the speedtest on local MinIO process. 
-func selfSpeedTest(ctx context.Context, opts speedTestOpts) (SpeedTestResult, error) { +func selfSpeedTest(ctx context.Context, opts speedTestOpts) (res SpeedTestResult, err error) { objAPI := newObjectLayerFn() if objAPI == nil { return SpeedTestResult{}, errServerNotInitialized @@ -96,7 +97,24 @@ func selfSpeedTest(ctx context.Context, opts speedTestOpts) (SpeedTestResult, er popts := minio.PutObjectOptions{ UserMetadata: userMetadata, DisableContentSha256: !opts.enableSha256, - DisableMultipart: true, + DisableMultipart: !opts.enableMultipart, + } + + clnt := globalMinioClient + if !globalAPIConfig.permitRootAccess() { + region := globalSite.Region() + if region == "" { + region = "us-east-1" + } + clnt, err = minio.New(globalLocalNodeName, &minio.Options{ + Creds: credentials.NewStaticV4(opts.creds.AccessKey, opts.creds.SecretKey, opts.creds.SessionToken), + Secure: globalIsTLS, + Transport: globalRemoteTargetTransport, + Region: region, + }) + if err != nil { + return res, err + } } var mu sync.Mutex @@ -109,7 +127,7 @@ func selfSpeedTest(ctx context.Context, opts speedTestOpts) (SpeedTestResult, er t := time.Now() reader := newRandomReader(opts.objectSize) tmpObjName := pathJoin(objNamePrefix, fmt.Sprintf("%d/%d", i, objCountPerThread[i])) - info, err := globalMinioClient.PutObject(uploadsCtx, opts.bucketName, tmpObjName, reader, int64(opts.objectSize), popts) + info, err := clnt.PutObject(uploadsCtx, opts.bucketName, tmpObjName, reader, int64(opts.objectSize), popts) if err != nil { if !contextCanceled(uploadsCtx) && !errors.Is(err, context.Canceled) { errOnce.Do(func() { @@ -150,7 +168,7 @@ func selfSpeedTest(ctx context.Context, opts speedTestOpts) (SpeedTestResult, er var downloadTTFB madmin.TimeDurations wg.Add(opts.concurrency) - c := minio.Core{Client: globalMinioClient} + c := minio.Core{Client: clnt} for i := 0; i < opts.concurrency; i++ { go func(i int) { defer wg.Done() @@ -320,10 +338,7 @@ func netperf(ctx context.Context, duration time.Duration) madmin.NetperfNodeResu time.Sleep(duration) xioutil.SafeClose(r.eof) wg.Wait() - for { - if globalNetPerfRX.ActiveConnections() == 0 { - break - } + for globalNetPerfRX.ActiveConnections() != 0 { time.Sleep(time.Second) } rx := float64(globalNetPerfRX.RXSample) @@ -360,7 +375,7 @@ func siteNetperf(ctx context.Context, duration time.Duration) madmin.SiteNetPerf } info := info wg.Add(connectionsPerPeer) - for i := 0; i < connectionsPerPeer; i++ { + for range connectionsPerPeer { go func() { defer wg.Done() ctx, cancel := context.WithTimeout(ctx, duration+10*time.Second) @@ -378,10 +393,7 @@ func siteNetperf(ctx context.Context, duration time.Duration) madmin.SiteNetPerf time.Sleep(duration) xioutil.SafeClose(r.eof) wg.Wait() - for { - if globalSiteNetPerfRX.ActiveConnections() == 0 || contextCanceled(ctx) { - break - } + for globalSiteNetPerfRX.ActiveConnections() != 0 && !contextCanceled(ctx) { time.Sleep(time.Second) } rx := float64(globalSiteNetPerfRX.RXSample) @@ -410,7 +422,7 @@ func perfNetRequest(ctx context.Context, deploymentID, reqPath string, reader io cli, err := globalSiteReplicationSys.getAdminClient(ctx, deploymentID) if err != nil { result.Error = err.Error() - return + return result } rp := cli.GetEndpointURL() reqURL := &url.URL{ @@ -422,7 +434,7 @@ func perfNetRequest(ctx context.Context, deploymentID, reqPath string, reader io req, err := http.NewRequestWithContext(ctx, http.MethodPost, reqURL.String(), reader) if err != nil { result.Error = err.Error() - return + return result } client := &http.Client{ 
Transport: globalRemoteTargetTransport, @@ -430,7 +442,7 @@ func perfNetRequest(ctx context.Context, deploymentID, reqPath string, reader io resp, err := client.Do(req) if err != nil { result.Error = err.Error() - return + return result } defer xhttp.DrainBody(resp.Body) err = gob.NewDecoder(resp.Body).Decode(&result) @@ -439,5 +451,5 @@ func perfNetRequest(ctx context.Context, deploymentID, reqPath string, reader io if err != nil { result.Error = err.Error() } - return + return result } diff --git a/cmd/policy_test.go b/cmd/policy_test.go index b4da5943bb436..bd9c9add5bdd5 100644 --- a/cmd/policy_test.go +++ b/cmd/policy_test.go @@ -23,8 +23,8 @@ import ( miniogopolicy "github.com/minio/minio-go/v7/pkg/policy" "github.com/minio/minio-go/v7/pkg/set" - "github.com/minio/pkg/v2/policy" - "github.com/minio/pkg/v2/policy/condition" + "github.com/minio/pkg/v3/policy" + "github.com/minio/pkg/v3/policy/condition" ) func TestPolicySysIsAllowed(t *testing.T) { diff --git a/cmd/post-policy-fan-out.go b/cmd/post-policy-fan-out.go index 500818bacf47b..23a118986e40a 100644 --- a/cmd/post-policy-fan-out.go +++ b/cmd/post-policy-fan-out.go @@ -20,10 +20,11 @@ package cmd import ( "bytes" "context" + "maps" "sync" "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/s3utils" + "github.com/minio/minio-go/v7/pkg/tags" "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/hash" xhttp "github.com/minio/minio/internal/http" @@ -78,10 +79,15 @@ func fanOutPutObject(ctx context.Context, bucket string, objectAPI ObjectLayer, }() userDefined := make(map[string]string, len(req.UserMetadata)) - for k, v := range req.UserMetadata { - userDefined[k] = v + maps.Copy(userDefined, req.UserMetadata) + + tgs, err := tags.NewTags(req.UserTags, true) + if err != nil { + errs[idx] = err + return } - userDefined[xhttp.AmzObjectTagging] = s3utils.TagEncode(req.UserTags) + + userDefined[xhttp.AmzObjectTagging] = tgs.String() if opts.Kind != nil { encrd, objectEncryptionKey, err := newEncryptReader(ctx, hr, opts.Kind, opts.KeyID, opts.Key, bucket, req.Key, userDefined, opts.KmsCtx) diff --git a/cmd/post-policy_test.go b/cmd/post-policy_test.go index a694fef86d568..9a02ca8464f0e 100644 --- a/cmd/post-policy_test.go +++ b/cmd/post-policy_test.go @@ -23,6 +23,7 @@ import ( "encoding/base64" "fmt" "io" + "maps" "mime/multipart" "net/http" "net/http/httptest" @@ -56,10 +57,12 @@ func newPostPolicyBytesV4WithContentRange(credential, bucketName, objectKey stri credentialConditionStr := fmt.Sprintf(`["eq", "$x-amz-credential", "%s"]`, credential) // Add the meta-uuid string, set to 1234 uuidConditionStr := fmt.Sprintf(`["eq", "$x-amz-meta-uuid", "%s"]`, "1234") + // Add the content-encoding string, set to gzip. + contentEncodingConditionStr := fmt.Sprintf(`["eq", "$content-encoding", "%s"]`, "gzip") // Combine all conditions into one string. 
- conditionStr := fmt.Sprintf(`"conditions":[%s, %s, %s, %s, %s, %s, %s]`, bucketConditionStr, - keyConditionStr, contentLengthCondStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr) + conditionStr := fmt.Sprintf(`"conditions":[%s, %s, %s, %s, %s, %s, %s, %s]`, bucketConditionStr, + keyConditionStr, contentLengthCondStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr, contentEncodingConditionStr) retStr := "{" retStr = retStr + expirationStr + "," retStr += conditionStr @@ -85,9 +88,11 @@ func newPostPolicyBytesV4(credential, bucketName, objectKey string, expiration t credentialConditionStr := fmt.Sprintf(`["eq", "$x-amz-credential", "%s"]`, credential) // Add the meta-uuid string, set to 1234 uuidConditionStr := fmt.Sprintf(`["eq", "$x-amz-meta-uuid", "%s"]`, "1234") + // Add the content-encoding string, set to gzip + contentEncodingConditionStr := fmt.Sprintf(`["eq", "$content-encoding", "%s"]`, "gzip") // Combine all conditions into one string. - conditionStr := fmt.Sprintf(`"conditions":[%s, %s, %s, %s, %s, %s]`, bucketConditionStr, keyConditionStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr) + conditionStr := fmt.Sprintf(`"conditions":[%s, %s, %s, %s, %s, %s, %s]`, bucketConditionStr, keyConditionStr, algorithmConditionStr, dateConditionStr, credentialConditionStr, uuidConditionStr, contentEncodingConditionStr) retStr := "{" retStr = retStr + expirationStr + "," retStr += conditionStr @@ -210,18 +215,23 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr // Test cases for signature-V2. testCasesV2 := []struct { expectedStatus int - accessKey string secretKey string + formData map[string]string }{ - {http.StatusForbidden, "invalidaccesskey", credentials.SecretKey}, - {http.StatusForbidden, credentials.AccessKey, "invalidsecretkey"}, - {http.StatusNoContent, credentials.AccessKey, credentials.SecretKey}, + {http.StatusForbidden, credentials.SecretKey, map[string]string{"AWSAccessKeyId": "invalidaccesskey"}}, + {http.StatusForbidden, "invalidsecretkey", map[string]string{"AWSAccessKeyId": credentials.AccessKey}}, + {http.StatusNoContent, credentials.SecretKey, map[string]string{"AWSAccessKeyId": credentials.AccessKey}}, + {http.StatusForbidden, credentials.SecretKey, map[string]string{"Awsaccesskeyid": "invalidaccesskey"}}, + {http.StatusForbidden, "invalidsecretkey", map[string]string{"Awsaccesskeyid": credentials.AccessKey}}, + {http.StatusNoContent, credentials.SecretKey, map[string]string{"Awsaccesskeyid": credentials.AccessKey}}, + // Forbidden with key not in policy.conditions for signed requests V2. + {http.StatusForbidden, credentials.SecretKey, map[string]string{"Awsaccesskeyid": credentials.AccessKey, "AnotherKey": "AnotherContent"}}, } for i, test := range testCasesV2 { // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler. rec := httptest.NewRecorder() - req, perr := newPostRequestV2("", bucketName, "testobject", test.accessKey, test.secretKey) + req, perr := newPostRequestV2("", bucketName, "testobject", test.secretKey, test.formData) if perr != nil { t.Fatalf("Test %d: %s: Failed to create HTTP request for PostPolicyHandler: %v", i+1, instanceType, perr) } @@ -229,7 +239,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr // Call the ServeHTTP to execute the handler. 
apiRouter.ServeHTTP(rec, req) if rec.Code != test.expectedStatus { - t.Fatalf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, test.expectedStatus, rec.Code) + t.Fatalf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`, Resp: %s", i+1, instanceType, test.expectedStatus, rec.Code, rec.Body) } } @@ -312,7 +322,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr expectedRespStatus int accessKey string secretKey string - dates []interface{} + dates []any policy string noFilename bool corruptedBase64 bool @@ -325,8 +335,8 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr expectedRespStatus: http.StatusNoContent, accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - dates: []interface{}{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, - policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"],["eq", "$x-amz-meta-uuid", "1234"]]}`, + dates: []any{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, + policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"],["eq", "$x-amz-meta-uuid", "1234"],["eq", "$content-encoding", "gzip"]]}`, }, // Success case, no multipart filename. { @@ -335,8 +345,8 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr expectedRespStatus: http.StatusNoContent, accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - dates: []interface{}{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, - policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"],["eq", "$x-amz-meta-uuid", "1234"]]}`, + dates: []any{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, + policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"],["eq", "$x-amz-meta-uuid", "1234"],["eq", "$content-encoding", "gzip"]]}`, noFilename: true, }, // Success case, big body. 
@@ -346,8 +356,8 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr expectedRespStatus: http.StatusNoContent, accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - dates: []interface{}{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, - policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"],["eq", "$x-amz-meta-uuid", "1234"]]}`, + dates: []any{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, + policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"],["eq", "$x-amz-meta-uuid", "1234"],["eq", "$content-encoding", "gzip"]]}`, }, // Corrupted Base 64 result { @@ -356,7 +366,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr expectedRespStatus: http.StatusBadRequest, accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - dates: []interface{}{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, + dates: []any{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"]]}`, corruptedBase64: true, }, @@ -367,7 +377,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr expectedRespStatus: http.StatusBadRequest, accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - dates: []interface{}{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, + dates: []any{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"]]}`, corruptedMultipart: true, }, @@ -379,7 +389,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr expectedRespStatus: http.StatusForbidden, accessKey: "", secretKey: "", - dates: []interface{}{}, + dates: []any{}, policy: ``, }, // Expired document @@ -389,7 +399,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr expectedRespStatus: http.StatusForbidden, accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - dates: []interface{}{curTime.Add(-1 * time.Minute * 5).Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, + dates: []any{curTime.Add(-1 * time.Minute * 5).Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, policy: `{"expiration": "%s","conditions":[["eq", "$bucket", "` + 
bucketName + `"], ["starts-with", "$key", "test/"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"]]}`, }, // Corrupted policy document @@ -399,7 +409,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr expectedRespStatus: http.StatusForbidden, accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, - dates: []interface{}{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, + dates: []any{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)}, policy: `{"3/aws4_request"]]}`, }, } @@ -442,7 +452,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr malformedBody: false, ignoreContentLength: false, }, - // Failed with Content-Length not specified. + // Success with Content-Length not specified. { objectName: "test", data: bytes.Repeat([]byte("a"), 1025), @@ -541,8 +551,8 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t // initialize HTTP NewRecorder, this records any mutations to response writer inside the handler. rec := httptest.NewRecorder() - dates := []interface{}{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)} - policy := `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], {"success_action_redirect":"` + redirectURL.String() + `"},["starts-with", "$key", "test/"], ["eq", "$x-amz-meta-uuid", "1234"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"]]}` + dates := []any{curTimePlus5Min.Format(iso8601TimeFormat), curTime.Format(iso8601DateFormat), curTime.Format(yyyymmdd)} + policy := `{"expiration": "%s","conditions":[["eq", "$bucket", "` + bucketName + `"], {"success_action_redirect":"` + redirectURL.String() + `"},["starts-with", "$key", "test/"], ["eq", "$x-amz-meta-uuid", "1234"], ["eq", "$x-amz-algorithm", "AWS4-HMAC-SHA256"], ["eq", "$x-amz-date", "%s"], ["eq", "$x-amz-credential", "` + credentials.AccessKey + `/%s/us-east-1/s3/aws4_request"],["eq", "$content-encoding", "gzip"]]}` // Generate the final policy document policy = fmt.Sprintf(policy, dates...) @@ -593,7 +603,7 @@ func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l return signature } -func newPostRequestV2(endPoint, bucketName, objectName string, accessKey, secretKey string) (*http.Request, error) { +func newPostRequestV2(endPoint, bucketName, objectName string, secretKey string, formInputData map[string]string) (*http.Request, error) { // Expire the request five minutes from now. expirationTime := UTCNow().Add(time.Minute * 5) // Create a new post policy. @@ -605,15 +615,14 @@ func newPostRequestV2(endPoint, bucketName, objectName string, accessKey, secret signature := calculateSignatureV2(encodedPolicy, secretKey) formData := map[string]string{ - "AWSAccessKeyId": accessKey, - "bucket": bucketName, - "key": objectName + "/${filename}", - "policy": encodedPolicy, - "signature": signature, - "X-Amz-Ignore-signature": "", - "X-Amz-Ignore-AWSAccessKeyId": "", + "bucket": bucketName, + "key": objectName + "/${filename}", + "policy": encodedPolicy, + "signature": signature, } + maps.Copy(formData, formInputData) + // Create the multipart form. 
var buf bytes.Buffer w := multipart.NewWriter(&buf) @@ -695,9 +704,7 @@ func newPostRequestV4Generic(endPoint, bucketName, objectName string, objData [] } // Add form data - for k, v := range addFormData { - formData[k] = v - } + maps.Copy(formData, addFormData) // Create the multipart form. var buf bytes.Buffer diff --git a/cmd/postpolicyform.go b/cmd/postpolicyform.go index eab146d173fcc..1c2eeaf40c562 100644 --- a/cmd/postpolicyform.go +++ b/cmd/postpolicyform.go @@ -29,10 +29,10 @@ import ( "strings" "time" - "github.com/bcicen/jstream" "github.com/minio/minio-go/v7/pkg/encrypt" "github.com/minio/minio-go/v7/pkg/set" xhttp "github.com/minio/minio/internal/http" + "github.com/minio/minio/internal/s3select/jstream" ) // startWithConds - map which indicates if a given condition supports starts-with policy operator @@ -51,18 +51,7 @@ var startsWithConds = map[string]bool{ "$x-amz-algorithm": false, "$x-amz-credential": false, "$x-amz-date": false, -} - -var postPolicyIgnoreKeys = map[string]bool{ - "Policy": true, - xhttp.AmzSignature: true, - xhttp.ContentEncoding: true, - http.CanonicalHeaderKey(xhttp.AmzChecksumAlgo): true, - http.CanonicalHeaderKey(xhttp.AmzChecksumCRC32): true, - http.CanonicalHeaderKey(xhttp.AmzChecksumCRC32C): true, - http.CanonicalHeaderKey(xhttp.AmzChecksumSHA1): true, - http.CanonicalHeaderKey(xhttp.AmzChecksumSHA256): true, - http.CanonicalHeaderKey(xhttp.AmzChecksumMode): true, + "$tagging": false, } // Add policy conditionals. @@ -73,7 +62,7 @@ const ( ) // toString - Safely convert interface to string without causing panic. -func toString(val interface{}) string { +func toString(val any) string { switch v := val.(type) { case string: return v @@ -83,12 +72,12 @@ func toString(val interface{}) string { } // toLowerString - safely convert interface to lower string -func toLowerString(val interface{}) string { +func toLowerString(val any) string { return strings.ToLower(toString(val)) } // toInteger _ Safely convert interface to integer without causing panic. -func toInteger(val interface{}) (int64, error) { +func toInteger(val any) (int64, error) { switch v := val.(type) { case float64: return int64(v), nil @@ -105,7 +94,7 @@ func toInteger(val interface{}) (int64, error) { } // isString - Safely check if val is of type string without causing panic. -func isString(val interface{}) bool { +func isString(val any) bool { _, ok := val.(string) return ok } @@ -140,19 +129,20 @@ type PostPolicyForm struct { func sanitizePolicy(r io.Reader) (io.Reader, error) { var buf bytes.Buffer e := json.NewEncoder(&buf) - d := jstream.NewDecoder(r, 0).ObjectAsKVS() + d := jstream.NewDecoder(r, 0).ObjectAsKVS().MaxDepth(10) sset := set.NewStringSet() for mv := range d.Stream() { - var kvs jstream.KVS if mv.ValueType == jstream.Object { // This is a JSON object type (that preserves key order) - kvs = mv.Value.(jstream.KVS) - for _, kv := range kvs { - if sset.Contains(kv.Key) { - // Reject duplicate conditions or expiration. - return nil, fmt.Errorf("input policy has multiple %s, please fix your client code", kv.Key) + kvs, ok := mv.Value.(jstream.KVS) + if ok { + for _, kv := range kvs { + if sset.Contains(kv.Key) { + // Reject duplicate conditions or expiration. 
+ return nil, fmt.Errorf("input policy has multiple %s, please fix your client code", kv.Key) + } + sset.Add(kv.Key) } - sset.Add(kv.Key) } e.Encode(kvs) } @@ -172,8 +162,8 @@ func parsePostPolicyForm(r io.Reader) (PostPolicyForm, error) { // Convert po into interfaces and // perform strict type conversion using reflection. var rawPolicy struct { - Expiration string `json:"expiration"` - Conditions []interface{} `json:"conditions"` + Expiration string `json:"expiration"` + Conditions []any `json:"conditions"` } d.DisallowUnknownFields() @@ -192,7 +182,7 @@ func parsePostPolicyForm(r io.Reader) (PostPolicyForm, error) { // Parse conditions. for _, val := range rawPolicy.Conditions { switch condt := val.(type) { - case map[string]interface{}: // Handle key:value map types. + case map[string]any: // Handle key:value map types. for k, v := range condt { if !isString(v) { // Pre-check value type. // All values must be of type string. @@ -208,7 +198,7 @@ func parsePostPolicyForm(r io.Reader) (PostPolicyForm, error) { policyCondEqual, "$" + strings.ToLower(k), toString(v), }) } - case []interface{}: // Handle array types. + case []any: // Handle array types. if len(condt) != 3 { // Return error if we have insufficient elements. return parsedPolicy, fmt.Errorf("Malformed conditional fields %s of type %s found in POST policy form", condt, reflect.TypeOf(condt).String()) } @@ -232,19 +222,19 @@ func parsePostPolicyForm(r io.Reader) (PostPolicyForm, error) { operator, matchType, value, }) case policyCondContentLength: - min, err := toInteger(condt[1]) + minLen, err := toInteger(condt[1]) if err != nil { return parsedPolicy, err } - max, err := toInteger(condt[2]) + maxLen, err := toInteger(condt[2]) if err != nil { return parsedPolicy, err } parsedPolicy.Conditions.ContentLengthRange = contentLengthRange{ - Min: min, - Max: max, + Min: minLen, + Max: maxLen, Valid: true, } default: @@ -272,60 +262,57 @@ func checkPolicyCond(op string, input1, input2 string) bool { return false } +// S3 docs: "Each form field that you specify in a form (except x-amz-signature, file, policy, and field names +// that have an x-ignore- prefix) must appear in the list of conditions." +// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html +// keyInPolicyExceptions - list of keys that, when present in the form, can be missing in the conditions of the policy. +var keyInPolicyExceptions = map[string]bool{ + xhttp.AmzSignature: true, + "File": true, + "Policy": true, + + // MinIO specific exceptions to the general S3 rule above. + encrypt.SseKmsKeyID: true, + encrypt.SseEncryptionContext: true, + encrypt.SseCustomerAlgorithm: true, + encrypt.SseCustomerKey: true, + encrypt.SseCustomerKeyMD5: true, +} + // checkPostPolicy - apply policy conditions and validate input values. -// (http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html) +// Note that content-length-range is checked in the API handler function PostPolicyBucketHandler. +// formValues is the already-canonicalized form values from the POST request. func checkPostPolicy(formValues http.Header, postPolicyForm PostPolicyForm) error { // Check if policy document expiry date is still not reached if !postPolicyForm.Expiration.After(UTCNow()) { return fmt.Errorf("Invalid according to Policy: Policy expired") } - // check all formValues appear in postPolicyForm or return error. 
#https://github.com/minio/minio/issues/17391 - checkHeader := map[string][]string{} - ignoreKeys := map[string]bool{} - for key, value := range formValues { - switch { - case ignoreKeys[key], postPolicyIgnoreKeys[key], strings.HasPrefix(key, encrypt.SseGenericHeader): + + // mustFindInPolicy is a map to list all the keys that we must find in the policy as + // we process it below. At the end of checkPostPolicy function, if any key is left in + // this map, that's an error. + mustFindInPolicy := make(map[string][]string, len(formValues)) + for key, values := range formValues { + if keyInPolicyExceptions[key] || strings.HasPrefix(key, "X-Ignore-") { continue - case strings.HasPrefix(key, "X-Amz-Ignore-"): - ignoreKey := strings.Replace(key, "X-Amz-Ignore-", "", 1) - ignoreKeys[ignoreKey] = true - // if it have already - delete(checkHeader, ignoreKey) - default: - checkHeader[key] = value - } - } - // map to store the metadata - metaMap := make(map[string]string) - for _, policy := range postPolicyForm.Conditions.Policies { - if strings.HasPrefix(policy.Key, "$x-amz-meta-") { - formCanonicalName := http.CanonicalHeaderKey(strings.TrimPrefix(policy.Key, "$")) - metaMap[formCanonicalName] = policy.Value - } - } - // Check if any extra metadata field is passed as input - for key := range formValues { - if strings.HasPrefix(key, "X-Amz-Meta-") { - if _, ok := metaMap[key]; !ok { - return fmt.Errorf("Invalid according to Policy: Extra input fields: %s", key) - } } + mustFindInPolicy[key] = values } - // Flag to indicate if all policies conditions are satisfied - var condPassed bool - // Iterate over policy conditions and check them against received form fields for _, policy := range postPolicyForm.Conditions.Policies { // Form fields names are in canonical format, convert conditions names // to canonical for simplification purpose, so `$key` will become `Key` formCanonicalName := http.CanonicalHeaderKey(strings.TrimPrefix(policy.Key, "$")) + // Operator for the current policy condition op := policy.Operator - // Multiple values should not occur - if len(checkHeader[formCanonicalName]) >= 2 { - return fmt.Errorf("Invalid according to Policy: Policy Condition failed: [%s, %s, %s]. FormValues have multiple values: [%s]", op, policy.Key, policy.Value, strings.Join(checkHeader[formCanonicalName], ", ")) + + // Multiple values are not allowed for a single form field + if len(mustFindInPolicy[formCanonicalName]) >= 2 { + return fmt.Errorf("Invalid according to Policy: Policy Condition failed: [%s, %s, %s]. 
FormValues have multiple values: [%s]", op, policy.Key, policy.Value, strings.Join(mustFindInPolicy[formCanonicalName], ", ")) } + // If the current policy condition is known if startsWithSupported, condFound := startsWithConds[policy.Key]; condFound { // Check if the current condition supports starts-with operator @@ -333,27 +320,38 @@ func checkPostPolicy(formValues http.Header, postPolicyForm PostPolicyForm) erro return fmt.Errorf("Invalid according to Policy: Policy Condition failed") } // Check if current policy condition is satisfied - condPassed = checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value) - if !condPassed { + if !checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value) { return fmt.Errorf("Invalid according to Policy: Policy Condition failed") } } else if strings.HasPrefix(policy.Key, "$x-amz-meta-") || strings.HasPrefix(policy.Key, "$x-amz-") { // This covers all conditions X-Amz-Meta-* and X-Amz-* // Check if policy condition is satisfied - condPassed = checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value) - if !condPassed { + if !checkPolicyCond(op, formValues.Get(formCanonicalName), policy.Value) { return fmt.Errorf("Invalid according to Policy: Policy Condition failed: [%s, %s, %s]", op, policy.Key, policy.Value) } } - delete(checkHeader, formCanonicalName) + delete(mustFindInPolicy, formCanonicalName) + } + + // For SignV2 - Signature/AWSAccessKeyId fields do not need to be in the policy + if _, ok := formValues[xhttp.AmzSignatureV2]; ok { + delete(mustFindInPolicy, xhttp.AmzSignatureV2) + for k := range mustFindInPolicy { + // case-insensitivity for AWSAccessKeyId + if strings.EqualFold(k, xhttp.AmzAccessKeyID) { + delete(mustFindInPolicy, k) + break + } + } } - if len(checkHeader) != 0 { - logKeys := make([]string, 0, len(checkHeader)) - for key := range checkHeader { + // Check mustFindInPolicy to see if any key is left, if so, it was not found in policy and we return an error. + if len(mustFindInPolicy) != 0 { + logKeys := make([]string, 0, len(mustFindInPolicy)) + for key := range mustFindInPolicy { logKeys = append(logKeys, key) } - return fmt.Errorf("Each form field that you specify in a form (except %s) must appear in the list of conditions.", strings.Join(logKeys, ", ")) + return fmt.Errorf("Each form field that you specify in a form must appear in the list of policy conditions. 
%q not specified in the policy.", strings.Join(logKeys, ", ")) } return nil diff --git a/cmd/postpolicyform_test.go b/cmd/postpolicyform_test.go index 8095c4d0f9bed..721718bca27f2 100644 --- a/cmd/postpolicyform_test.go +++ b/cmd/postpolicyform_test.go @@ -20,12 +20,12 @@ package cmd import ( "bytes" "encoding/base64" - "fmt" "net/http" "strings" "testing" minio "github.com/minio/minio-go/v7" + xhttp "github.com/minio/minio/internal/http" ) func TestParsePostPolicyForm(t *testing.T) { @@ -65,7 +65,6 @@ func TestParsePostPolicyForm(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { _, err := parsePostPolicyForm(strings.NewReader(testCase.policy)) if testCase.success && err != nil { @@ -78,6 +77,28 @@ func TestParsePostPolicyForm(t *testing.T) { } } +type formValues struct { + http.Header +} + +func newFormValues() formValues { + return formValues{make(http.Header)} +} + +func (f formValues) Set(key, value string) formValues { + f.Header.Set(key, value) + return f +} + +func (f formValues) Add(key, value string) formValues { + f.Header.Add(key, value) + return f +} + +func (f formValues) Clone() formValues { + return formValues{f.Header.Clone()} +} + // Test Post Policy parsing and checking conditions func TestPostPolicyForm(t *testing.T) { pp := minio.NewPostPolicy() @@ -85,76 +106,193 @@ func TestPostPolicyForm(t *testing.T) { pp.SetContentType("image/jpeg") pp.SetUserMetadata("uuid", "14365123651274") pp.SetKeyStartsWith("user/user1/filename") - pp.SetContentLengthRange(1048579, 10485760) + pp.SetContentLengthRange(100, 999999) // not testable from this layer, condition is checked in the API handler. pp.SetSuccessStatusAction("201") + pp.SetCondition("eq", "X-Amz-Credential", "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request") + pp.SetCondition("eq", "X-Amz-Algorithm", "AWS4-HMAC-SHA256") + pp.SetCondition("eq", xhttp.AmzDate, "20160727T000000Z") + + defaultFormVals := newFormValues() + defaultFormVals.Set("Bucket", "testbucket") + defaultFormVals.Set("Content-Type", "image/jpeg") + defaultFormVals.Set(xhttp.AmzMetaUUID, "14365123651274") + defaultFormVals.Set("Key", "user/user1/filename/${filename}/myfile.txt") + defaultFormVals.Set("X-Amz-Credential", "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request") + defaultFormVals.Set("X-Amz-Algorithm", "AWS4-HMAC-SHA256") + defaultFormVals.Set(xhttp.AmzDate, "20160727T000000Z") + defaultFormVals.Set("Success_action_status", "201") + + policyCondFailedErr := "Invalid according to Policy: Policy Condition failed" type testCase struct { - Bucket string - Key string - XAmzDate string - XAmzAlgorithm string - XAmzCredential string - XAmzMetaUUID string - ContentType string - SuccessActionStatus string - Policy string - Expired bool - expectedErr error + name string + fv formValues + expired bool + wantErr string } + // Test case just contains fields we override from defaultFormVals. 
testCases := []testCase{ - // Everything is fine with this test - {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", SuccessActionStatus: "201", XAmzCredential: "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: nil}, - // Expired policy document - {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", SuccessActionStatus: "201", XAmzCredential: "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", Expired: true, expectedErr: fmt.Errorf("Invalid according to Policy: Policy expired")}, - // Different AMZ date - {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "2017T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")}, - // Key which doesn't start with user/user1/filename - {Bucket: "testbucket", Key: "myfile.txt", XAmzDate: "20160727T000000Z", XAmzMetaUUID: "14365123651274", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")}, - // Incorrect bucket name. - {Bucket: "incorrect", Key: "user/user1/filename/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")}, - // Incorrect key name - {Bucket: "testbucket", Key: "incorrect", XAmzDate: "20160727T000000Z", XAmzMetaUUID: "14365123651274", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")}, - // Incorrect date - {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "incorrect", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")}, - // Incorrect ContentType - {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "14365123651274", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "incorrect", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed")}, - // Incorrect Metadata - {Bucket: "testbucket", Key: "user/user1/filename/${filename}/myfile.txt", XAmzMetaUUID: "151274", SuccessActionStatus: "201", XAmzCredential: "KVGKMDUQ23TCZXTLTHLP/20160727/us-east-1/s3/aws4_request", XAmzDate: "20160727T000000Z", XAmzAlgorithm: "AWS4-HMAC-SHA256", ContentType: "image/jpeg", expectedErr: fmt.Errorf("Invalid according to Policy: Policy Condition failed: [eq, $x-amz-meta-uuid, 14365123651274]")}, + { + name: "happy path no errors", + fv: defaultFormVals.Clone(), + wantErr: "", + }, + { + name: "expired policy document", + fv: defaultFormVals.Clone(), + expired: true, + wantErr: "Invalid according to Policy: Policy expired", + }, + { + name: "different AMZ date", + fv: defaultFormVals.Clone().Set(xhttp.AmzDate, "2017T000000Z"), + wantErr: policyCondFailedErr, + }, + { + name: "incorrect date", + fv: defaultFormVals.Clone().Set(xhttp.AmzDate, "incorrect"), + wantErr: policyCondFailedErr, + }, + { + name: "key which 
doesn't start with user/user1/filename", + fv: defaultFormVals.Clone().Set("Key", "myfile.txt"), + wantErr: policyCondFailedErr, + }, + { + name: "incorrect key name", + fv: defaultFormVals.Clone().Set("Key", "incorrect"), + wantErr: policyCondFailedErr, + }, + { + name: "incorrect bucket name", + fv: defaultFormVals.Clone().Set("Bucket", "incorrect"), + wantErr: policyCondFailedErr, + }, + { + name: "incorrect ContentType", + fv: defaultFormVals.Clone().Set(xhttp.ContentType, "incorrect"), + wantErr: policyCondFailedErr, + }, + { + name: "incorrect X-Amz-Algorithm", + fv: defaultFormVals.Clone().Set(xhttp.AmzAlgorithm, "incorrect"), + wantErr: policyCondFailedErr, + }, + { + name: "incorrect X-Amz-Credential", + fv: defaultFormVals.Clone().Set(xhttp.AmzCredential, "incorrect"), + wantErr: policyCondFailedErr, + }, + { + name: "incorrect metadata uuid", + fv: defaultFormVals.Clone().Set(xhttp.AmzMetaUUID, "151274"), + wantErr: "Invalid according to Policy: Policy Condition failed: [eq, $x-amz-meta-uuid, 14365123651274]", + }, + { + name: "unknown key XAmzMetaName is error as it does not appear in policy", + fv: defaultFormVals.Clone().Set(xhttp.AmzMetaName, "my-name"), + wantErr: `Each form field that you specify in a form must appear in the list of policy conditions. "X-Amz-Meta-Name" not specified in the policy.`, + }, + { + name: "unknown key XAmzChecksumAlgo is error as it does not appear in policy", + fv: defaultFormVals.Clone().Set(http.CanonicalHeaderKey(xhttp.AmzChecksumAlgo), "algo-val"), + wantErr: `Each form field that you specify in a form must appear in the list of policy conditions. "X-Amz-Checksum-Algorithm" not specified in the policy.`, + }, + { + name: "unknown key XAmzChecksumCRC32 is error as it does not appear in policy", + fv: defaultFormVals.Clone().Set(http.CanonicalHeaderKey(xhttp.AmzChecksumCRC32), "crc32-val"), + wantErr: `Each form field that you specify in a form must appear in the list of policy conditions. "X-Amz-Checksum-Crc32" not specified in the policy.`, + }, + { + name: "unknown key XAmzChecksumCRC32C is error as it does not appear in policy", + fv: defaultFormVals.Clone().Set(http.CanonicalHeaderKey(xhttp.AmzChecksumCRC32C), "crc32c-val"), + wantErr: `Each form field that you specify in a form must appear in the list of policy conditions. "X-Amz-Checksum-Crc32c" not specified in the policy.`, + }, + { + name: "unknown key XAmzChecksumSHA1 is error as it does not appear in policy", + fv: defaultFormVals.Clone().Set(http.CanonicalHeaderKey(xhttp.AmzChecksumSHA1), "sha1-val"), + wantErr: `Each form field that you specify in a form must appear in the list of policy conditions. "X-Amz-Checksum-Sha1" not specified in the policy.`, + }, + { + name: "unknown key XAmzChecksumSHA256 is error as it does not appear in policy", + fv: defaultFormVals.Clone().Set(http.CanonicalHeaderKey(xhttp.AmzChecksumSHA256), "sha256-val"), + wantErr: `Each form field that you specify in a form must appear in the list of policy conditions. "X-Amz-Checksum-Sha256" not specified in the policy.`, + }, + { + name: "unknown key XAmzChecksumMode is error as it does not appear in policy", + fv: defaultFormVals.Clone().Set(http.CanonicalHeaderKey(xhttp.AmzChecksumMode), "mode-val"), + wantErr: `Each form field that you specify in a form must appear in the list of policy conditions. 
"X-Amz-Checksum-Mode" not specified in the policy.`, + }, + { + name: "unknown key Content-Encoding is error as it does not appear in policy", + fv: defaultFormVals.Clone().Set(http.CanonicalHeaderKey(xhttp.ContentEncoding), "encoding-val"), + wantErr: `Each form field that you specify in a form must appear in the list of policy conditions. "Content-Encoding" not specified in the policy.`, + }, + { + name: "many bucket values", + fv: defaultFormVals.Clone().Add("Bucket", "anotherbucket"), + wantErr: "Invalid according to Policy: Policy Condition failed: [eq, $bucket, testbucket]. FormValues have multiple values: [testbucket, anotherbucket]", + }, + { + name: "XAmzSignature does not have to appear in policy", + fv: defaultFormVals.Clone().Set(xhttp.AmzSignature, "my-signature"), + }, + { + name: "XIgnoreFoo does not have to appear in policy", + fv: defaultFormVals.Clone().Set("X-Ignore-Foo", "my-foo-value"), + }, + { + name: "File does not have to appear in policy", + fv: defaultFormVals.Clone().Set("File", "file-value"), + }, + { + name: "Signature does not have to appear in policy", + fv: defaultFormVals.Clone().Set(xhttp.AmzSignatureV2, "signature-value"), + }, + { + name: "AWSAccessKeyID does not have to appear in policy", + fv: defaultFormVals.Clone().Set(xhttp.AmzAccessKeyID, "access").Set(xhttp.AmzSignatureV2, "signature-value"), + }, + { + name: "any form value starting with X-Amz-Server-Side-Encryption- does not have to appear in policy", + fv: defaultFormVals.Clone(). + Set(xhttp.AmzServerSideEncryptionKmsContext, "context-val"). + Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, "algo-val"), + }, } - // Validate all the test cases. - for i, tt := range testCases { - formValues := make(http.Header) - formValues.Set("Bucket", tt.Bucket) - formValues.Set("Key", tt.Key) - formValues.Set("Content-Type", tt.ContentType) - formValues.Set("X-Amz-Date", tt.XAmzDate) - formValues.Set("X-Amz-Meta-Uuid", tt.XAmzMetaUUID) - formValues.Set("X-Amz-Algorithm", tt.XAmzAlgorithm) - formValues.Set("X-Amz-Credential", tt.XAmzCredential) - if tt.Expired { - // Expired already. - pp.SetExpires(UTCNow().AddDate(0, 0, -10)) - } else { - // Expires in 10 days. - pp.SetExpires(UTCNow().AddDate(0, 0, 10)) - } - - formValues.Set("Policy", base64.StdEncoding.EncodeToString([]byte(pp.String()))) - formValues.Set("Success_action_status", tt.SuccessActionStatus) - policyBytes, err := base64.StdEncoding.DecodeString(base64.StdEncoding.EncodeToString([]byte(pp.String()))) - if err != nil { - t.Fatal(err) - } - - postPolicyForm, err := parsePostPolicyForm(bytes.NewReader(policyBytes)) - if err != nil { - t.Fatal(err) - } - - err = checkPostPolicy(formValues, postPolicyForm) - if err != nil && tt.expectedErr != nil && err.Error() != tt.expectedErr.Error() { - t.Fatalf("Test %d:, Expected %s, got %s", i+1, tt.expectedErr.Error(), err.Error()) - } + + // Run tests + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + if tt.expired { + // Expired already. + pp.SetExpires(UTCNow().AddDate(0, 0, -10)) + } else { + // Expires in 10 days. 
+ pp.SetExpires(UTCNow().AddDate(0, 0, 10)) + } + + tt.fv.Set("Policy", base64.StdEncoding.EncodeToString([]byte(pp.String()))) + + policyBytes, err := base64.StdEncoding.DecodeString(base64.StdEncoding.EncodeToString([]byte(pp.String()))) + if err != nil { + t.Fatal(err) + } + + postPolicyForm, err := parsePostPolicyForm(bytes.NewReader(policyBytes)) + if err != nil { + t.Fatal(err) + } + + errStr := "" + err = checkPostPolicy(tt.fv.Header, postPolicyForm) + if err != nil { + errStr = err.Error() + } + if errStr != tt.wantErr { + t.Errorf("test: '%s', want error: '%s', got error: '%s'", tt.name, tt.wantErr, errStr) + } + }) } } diff --git a/cmd/prepare-storage.go b/cmd/prepare-storage.go index e0ad6a942b3a2..578a5fadeedd9 100644 --- a/cmd/prepare-storage.go +++ b/cmd/prepare-storage.go @@ -48,7 +48,7 @@ var printEndpointError = func() func(Endpoint, error, bool) { printOnce[endpoint] = m if once { m[err.Error()]++ - logger.LogAlwaysIf(ctx, err) + peersLogAlwaysIf(ctx, err) return } } @@ -60,7 +60,7 @@ var printEndpointError = func() func(Endpoint, error, bool) { // once not set, check if same error occurred 3 times in // a row, then make sure we print it to call attention. if m[err.Error()] > 2 { - logger.LogAlwaysIf(ctx, fmt.Errorf("Following error has been printed %d times.. %w", m[err.Error()], err)) + peersLogAlwaysIf(ctx, fmt.Errorf("Following error has been printed %d times.. %w", m[err.Error()], err)) // Reduce the count to introduce further delay in printing // but let it again print after the 2th attempt m[err.Error()]-- @@ -86,14 +86,14 @@ func bgFormatErasureCleanupTmp(diskPath string) { tmpOld := pathJoin(diskPath, minioMetaTmpBucket+"-old", tmpID) if err := renameAll(pathJoin(diskPath, minioMetaTmpBucket), tmpOld, diskPath); err != nil && !errors.Is(err, errFileNotFound) { - logger.LogIf(GlobalContext, fmt.Errorf("unable to rename (%s -> %s) %w, drive may be faulty please investigate", + storageLogIf(GlobalContext, fmt.Errorf("unable to rename (%s -> %s) %w, drive may be faulty, please investigate", pathJoin(diskPath, minioMetaTmpBucket), tmpOld, osErrToFileErr(err))) } if err := mkdirAll(pathJoin(diskPath, minioMetaTmpDeletedBucket), 0o777, diskPath); err != nil { - logger.LogIf(GlobalContext, fmt.Errorf("unable to create (%s) %w, drive may be faulty please investigate", + storageLogIf(GlobalContext, fmt.Errorf("unable to create (%s) %w, drive may be faulty, please investigate", pathJoin(diskPath, minioMetaTmpBucket), err)) } @@ -154,27 +154,25 @@ func isServerResolvable(endpoint Endpoint, timeout time.Duration) error { // connect to list of endpoints and load all Erasure disk formats, validate the formats are correct // and are in quorum, if no formats are found attempt to initialize all of them for the first // time. additionally make sure to close all the disks used in this attempt. -func connectLoadInitFormats(verboseLogging bool, firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID string) (storageDisks []StorageAPI, format *formatErasureV3, err error) { - // Initialize all storage disks - storageDisks, errs := initStorageDisksWithErrors(endpoints, storageOpts{cleanUp: true, healthCheck: true}) +func connectLoadInitFormats(verboseLogging bool, firstDisk bool, storageDisks []StorageAPI, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID string) (format *formatErasureV3, err error) { + // Attempt to load all `format.json` from all disks. 
+ formatConfigs, sErrs := loadFormatErasureAll(storageDisks, false) - defer func(storageDisks []StorageAPI) { - if err != nil { - closeStorageDisks(storageDisks...) - } - }(storageDisks) + if err := checkDiskFatalErrs(sErrs); err != nil { + return nil, err + } - for i, err := range errs { - if err != nil && !errors.Is(err, errXLBackend) { + for i, err := range sErrs { + if err != nil && !errors.Is(err, errXLBackend) && !errors.Is(err, errUnformattedDisk) { if errors.Is(err, errDiskNotFound) && verboseLogging { if globalEndpoints.NEndpoints() > 1 { - logger.Error("Unable to connect to %s: %v", endpoints[i], isServerResolvable(endpoints[i], time.Second)) + logger.Info("Unable to connect to %s: %v, will be retried", endpoints[i], isServerResolvable(endpoints[i], time.Second)) } else { logger.Fatal(err, "Unable to connect to %s: %v", endpoints[i], isServerResolvable(endpoints[i], time.Second)) } } else { if globalEndpoints.NEndpoints() > 1 { - logger.Error("Unable to use the drive %s: %v", endpoints[i], err) + logger.Info("Unable to use the drive %s: %v, will be retried", endpoints[i], err) } else { logger.Fatal(errInvalidArgument, "Unable to use the drive %s: %v", endpoints[i], err) } @@ -182,30 +180,13 @@ func connectLoadInitFormats(verboseLogging bool, firstDisk bool, endpoints Endpo } } - if err := checkDiskFatalErrs(errs); err != nil { - return nil, nil, err - } - - // Attempt to load all `format.json` from all disks. - formatConfigs, sErrs := loadFormatErasureAll(storageDisks, false) - - // Check if we have - for i, sErr := range sErrs { - // print the error, nonetheless, which is perhaps unhandled - if !errors.Is(sErr, errUnformattedDisk) && !errors.Is(sErr, errDiskNotFound) && verboseLogging { - if sErr != nil { - logger.Error("Unable to read 'format.json' from %s: %v\n", endpoints[i], sErr) - } - } - } - // Pre-emptively check if one of the formatted disks // is invalid. This function returns success for the // most part unless one of the formats is not consistent // with expected Erasure format. For example if a user is // trying to pool FS backend into an Erasure set. if err = checkFormatErasureValues(formatConfigs, storageDisks, setDriveCount); err != nil { - return nil, nil, err + return nil, err } // All disks report unformatted we should initialized everyone. @@ -216,57 +197,46 @@ func connectLoadInitFormats(verboseLogging bool, firstDisk bool, endpoints Endpo // Initialize erasure code format on disks format, err = initFormatErasure(GlobalContext, storageDisks, setCount, setDriveCount, deploymentID, sErrs) if err != nil { - return nil, nil, err + return nil, err } - // Assign globalDeploymentID() on first run for the - // minio server managing the first disk - globalDeploymentIDPtr.Store(&format.ID) - return storageDisks, format, nil + return format, nil } // Return error when quorum unformatted disks - indicating we are // waiting for first server to be online. unformattedDisks := quorumUnformattedDisks(sErrs) if unformattedDisks && !firstDisk { - return nil, nil, errNotFirstDisk + return nil, errNotFirstDisk } // Return error when quorum unformatted disks but waiting for rest // of the servers to be online. 
if unformattedDisks && firstDisk { - return nil, nil, errFirstDiskWait + return nil, errFirstDiskWait } format, err = getFormatErasureInQuorum(formatConfigs) if err != nil { - logger.LogIf(GlobalContext, err) - return nil, nil, err - } - - if format.ID == "" { - // Not a first disk, wait until first disk fixes deploymentID - if !firstDisk { - return nil, nil, errNotFirstDisk - } - if err = formatErasureFixDeploymentID(endpoints, storageDisks, format, formatConfigs); err != nil { - logger.LogIf(GlobalContext, err) - return nil, nil, err + var drivesNotFound int + for _, format := range formatConfigs { + if format != nil { + continue + } + drivesNotFound++ } + return nil, fmt.Errorf("%w (offline-drives=%d/%d)", err, drivesNotFound, len(formatConfigs)) } - globalDeploymentIDPtr.Store(&format.ID) - - if err = formatErasureFixLocalDeploymentID(endpoints, storageDisks, format); err != nil { - logger.LogIf(GlobalContext, err) - return nil, nil, err + if format.ID == "" { + return nil, errors.New("deployment ID missing from disk format, unable to start the server") } - return storageDisks, format, nil + return format, nil } // Format disks before initialization of object layer. -func waitForFormatErasure(firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID string) ([]StorageAPI, *formatErasureV3, error) { +func waitForFormatErasure(firstDisk bool, endpoints Endpoints, poolCount, setCount, setDriveCount int, deploymentID string) (storageDisks []StorageAPI, format *formatErasureV3, err error) { if len(endpoints) == 0 || setCount == 0 || setDriveCount == 0 { return nil, nil, errInvalidArgument } @@ -283,7 +253,26 @@ func waitForFormatErasure(firstDisk bool, endpoints Endpoints, poolCount, setCou verbose bool ) - storageDisks, format, err := connectLoadInitFormats(verbose, firstDisk, endpoints, poolCount, setCount, setDriveCount, deploymentID) + // Initialize all storage disks + storageDisks, errs := initStorageDisksWithErrors(endpoints, storageOpts{cleanUp: true, healthCheck: true}) + + if err := checkDiskFatalErrs(errs); err != nil { + return nil, nil, err + } + + defer func() { + if err == nil && format != nil { + // Assign globalDeploymentID() on first run for the + // minio server managing the first disk + globalDeploymentIDPtr.Store(&format.ID) + + // Set the deployment ID here to avoid races. + xhttp.SetDeploymentID(format.ID) + xhttp.SetMinIOVersion(Version) + } + }() + + format, err = connectLoadInitFormats(verbose, firstDisk, storageDisks, endpoints, poolCount, setCount, setDriveCount, deploymentID) if err == nil { return storageDisks, format, nil } @@ -301,28 +290,28 @@ func waitForFormatErasure(firstDisk bool, endpoints Endpoints, poolCount, setCou tries = 1 } - storageDisks, format, err := connectLoadInitFormats(verbose, firstDisk, endpoints, poolCount, setCount, setDriveCount, deploymentID) + format, err = connectLoadInitFormats(verbose, firstDisk, storageDisks, endpoints, poolCount, setCount, setDriveCount, deploymentID) if err == nil { return storageDisks, format, nil } tries++ - switch err { - case errNotFirstDisk: + switch { + case errors.Is(err, errNotFirstDisk): // Fresh setup, wait for first server to be up. logger.Info("Waiting for the first server to format the drives (elapsed %s)\n", getElapsedTime()) - case errFirstDiskWait: + case errors.Is(err, errFirstDiskWait): // Fresh setup, wait for other servers to come up. 
logger.Info("Waiting for all other servers to be online to format the drives (elapses %s)\n", getElapsedTime()) - case errErasureReadQuorum: + case errors.Is(err, errErasureReadQuorum): // no quorum available continue to wait for minimum number of servers. logger.Info("Waiting for a minimum of %d drives to come online (elapsed %s)\n", len(endpoints)/2, getElapsedTime()) - case errErasureWriteQuorum: + case errors.Is(err, errErasureWriteQuorum): // no quorum available continue to wait for minimum number of servers. logger.Info("Waiting for a minimum of %d drives to come online (elapsed %s)\n", (len(endpoints)/2)+1, getElapsedTime()) - case errErasureV3ThisEmpty: + case errors.Is(err, errErasureV3ThisEmpty): // need to wait for this error to be healed, so continue. default: // For all other unhandled errors we exit and fail. diff --git a/cmd/rebalance-admin.go b/cmd/rebalance-admin.go index 3cd831d1591ed..4fa372ceba227 100644 --- a/cmd/rebalance-admin.go +++ b/cmd/rebalance-admin.go @@ -33,17 +33,17 @@ type rebalPoolProgress struct { } type rebalancePoolStatus struct { - ID int `json:"id"` // Pool index (zero-based) - Status string `json:"status"` // Active if rebalance is running, empty otherwise - Used float64 `json:"used"` // Percentage used space - Progress rebalPoolProgress `json:"progress,omitempty"` // is empty when rebalance is not running + ID int `json:"id"` // Pool index (zero-based) + Status string `json:"status"` // Active if rebalance is running, empty otherwise + Used float64 `json:"used"` // Percentage used space + Progress rebalPoolProgress `json:"progress"` // is empty when rebalance is not running } // rebalanceAdminStatus holds rebalance status related information exported to mc, console, etc. type rebalanceAdminStatus struct { ID string // identifies the ongoing rebalance operation by a uuid Pools []rebalancePoolStatus `json:"pools"` // contains all pools, including inactive - StoppedAt time.Time `json:"stoppedAt,omitempty"` + StoppedAt time.Time `json:"stoppedAt"` } func rebalanceStatus(ctx context.Context, z *erasureServerPools) (r rebalanceAdminStatus, err error) { diff --git a/cmd/routers.go b/cmd/routers.go index d5e77ddf2e709..c0bfc91df6b7b 100644 --- a/cmd/routers.go +++ b/cmd/routers.go @@ -26,20 +26,28 @@ import ( // Composed function registering routers for only distributed Erasure setup. func registerDistErasureRouters(router *mux.Router, endpointServerPools EndpointServerPools) { + var ( + lockGrid = globalLockGrid.Load() + commonGrid = globalGrid.Load() + ) + // Register storage REST router only if its a distributed setup. - registerStorageRESTHandlers(router, endpointServerPools, globalGrid.Load()) + registerStorageRESTHandlers(router, endpointServerPools, commonGrid) // Register peer REST router only if its a distributed setup. - registerPeerRESTHandlers(router, globalGrid.Load()) + registerPeerRESTHandlers(router, commonGrid) // Register bootstrap REST router for distributed setups. - registerBootstrapRESTHandlers(globalGrid.Load()) + registerBootstrapRESTHandlers(commonGrid) // Register distributed namespace lock routers. 
- registerLockRESTHandlers() + registerLockRESTHandlers(lockGrid) + + // Add lock grid to router + router.Handle(grid.RouteLockPath, adminMiddleware(lockGrid.Handler(storageServerRequestValidate), noGZFlag, noObjLayerFlag)) // Add grid to router - router.Handle(grid.RoutePath, adminMiddleware(globalGrid.Load().Handler(), noGZFlag, noObjLayerFlag)) + router.Handle(grid.RoutePath, adminMiddleware(commonGrid.Handler(storageServerRequestValidate), noGZFlag, noObjLayerFlag)) } // List of some generic middlewares which are applied for all incoming requests. diff --git a/cmd/s3-zip-handlers.go b/cmd/s3-zip-handlers.go index 6cb2c892e153c..e2c91226c0f1b 100644 --- a/cmd/s3-zip-handlers.go +++ b/cmd/s3-zip-handlers.go @@ -22,7 +22,9 @@ import ( "context" "errors" "io" + "mime" "net/http" + "path/filepath" "sort" "strings" @@ -30,7 +32,7 @@ import ( "github.com/minio/minio/internal/crypto" xhttp "github.com/minio/minio/internal/http" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" "github.com/minio/zipindex" ) @@ -140,7 +142,7 @@ func (api objectAPIHandlers) getObjectInArchiveFileHandler(ctx context.Context, return } - zipInfo := zipObjInfo.ArchiveInfo() + zipInfo := zipObjInfo.ArchiveInfo(r.Header) if len(zipInfo) == 0 { opts.EncryptFn, err = zipObjInfo.metadataEncryptFn(r.Header) if err != nil { @@ -166,10 +168,11 @@ func (api objectAPIHandlers) getObjectInArchiveFileHandler(ctx context.Context, // New object info fileObjInfo := ObjectInfo{ - Bucket: bucket, - Name: object, - Size: int64(file.UncompressedSize64), - ModTime: zipObjInfo.ModTime, + Bucket: bucket, + Name: object, + Size: int64(file.UncompressedSize64), + ModTime: zipObjInfo.ModTime, + ContentType: mime.TypeByExtension(filepath.Ext(object)), } var rc io.ReadCloser @@ -177,10 +180,7 @@ func (api objectAPIHandlers) getObjectInArchiveFileHandler(ctx context.Context, if file.UncompressedSize64 > 0 { // There may be number of header bytes before the content. // Reading 64K extra. This should more than cover name and any "extra" details. - end := file.Offset + int64(file.CompressedSize64) + 64<<10 - if end > zipObjInfo.Size { - end = zipObjInfo.Size - } + end := min(file.Offset+int64(file.CompressedSize64)+64<<10, zipObjInfo.Size) rs := &HTTPRangeSpec{Start: file.Offset, End: end} gr, err := objectAPI.GetObjectNInfo(ctx, bucket, zipPath, rs, nil, opts) if err != nil { @@ -199,7 +199,7 @@ func (api objectAPIHandlers) getObjectInArchiveFileHandler(ctx context.Context, defer rc.Close() - if err = setObjectHeaders(w, fileObjInfo, nil, opts); err != nil { + if err = setObjectHeaders(ctx, w, fileObjInfo, nil, opts); err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } @@ -230,7 +230,7 @@ func (api objectAPIHandlers) getObjectInArchiveFileHandler(ctx context.Context, } // listObjectsV2InArchive generates S3 listing result ListObjectsV2Info from zip file, all parameters are already validated by the caller. 
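// Editorial aside, not part of the patch: the zip-handler change above starts
// deriving ContentType from the object name via the standard library, i.e.
// mime.TypeByExtension(filepath.Ext(object)). A minimal illustration of that
// behavior (values shown are the Go builtin defaults; the host's mime tables
// may extend them):
//
//	fmt.Println(mime.TypeByExtension(filepath.Ext("photos/archive.zip/cat.jpeg"))) // "image/jpeg"
//	fmt.Println(mime.TypeByExtension(filepath.Ext("notes.txt")))                   // "text/plain; charset=utf-8"
//	fmt.Println(mime.TypeByExtension(filepath.Ext("blob.unknownext")))             // "" (no Content-Type set)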
-func listObjectsV2InArchive(ctx context.Context, objectAPI ObjectLayer, bucket, prefix, token, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (ListObjectsV2Info, error) { +func listObjectsV2InArchive(ctx context.Context, objectAPI ObjectLayer, bucket, prefix, token, delimiter string, maxKeys int, startAfter string, h http.Header) (ListObjectsV2Info, error) { zipPath, _, err := splitZipExtensionPath(prefix) if err != nil { // Return empty listing @@ -243,7 +243,7 @@ func listObjectsV2InArchive(ctx context.Context, objectAPI ObjectLayer, bucket, return ListObjectsV2Info{}, nil } - zipInfo := zipObjInfo.ArchiveInfo() + zipInfo := zipObjInfo.ArchiveInfo(h) if len(zipInfo) == 0 { // Always update the latest version zipInfo, err = updateObjectMetadataWithZipInfo(ctx, objectAPI, bucket, zipPath, ObjectOptions{}) @@ -435,7 +435,7 @@ func (api objectAPIHandlers) headObjectInArchiveFileHandler(ctx context.Context, return } - zipInfo := zipObjInfo.ArchiveInfo() + zipInfo := zipObjInfo.ArchiveInfo(r.Header) if len(zipInfo) == 0 { opts.EncryptFn, err = zipObjInfo.metadataEncryptFn(r.Header) if err != nil { @@ -467,7 +467,7 @@ func (api objectAPIHandlers) headObjectInArchiveFileHandler(ctx context.Context, } // Set standard object headers. - if err = setObjectHeaders(w, objInfo, nil, opts); err != nil { + if err = setObjectHeaders(ctx, w, objInfo, nil, opts); err != nil { writeErrorResponseHeadersOnly(w, toAPIError(ctx, err)) return } diff --git a/cmd/server-main.go b/cmd/server-main.go index 9268d000e5a3f..ade86cca19d4a 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2023 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. // // This file is part of MinIO Object Storage stack // @@ -18,6 +18,7 @@ package cmd import ( + "bytes" "context" "encoding/hex" "errors" @@ -28,12 +29,15 @@ import ( "net" "os" "os/signal" + "path/filepath" "runtime" + "slices" "strings" "syscall" "time" "github.com/coreos/go-systemd/v22/daemon" + "github.com/dustin/go-humanize" "github.com/minio/cli" "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7" @@ -43,14 +47,14 @@ import ( "github.com/minio/minio/internal/bucket/bandwidth" "github.com/minio/minio/internal/color" "github.com/minio/minio/internal/config" + "github.com/minio/minio/internal/config/api" "github.com/minio/minio/internal/handlers" "github.com/minio/minio/internal/hash/sha256" xhttp "github.com/minio/minio/internal/http" xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/certs" - "github.com/minio/pkg/v2/env" - "golang.org/x/exp/slices" + "github.com/minio/pkg/v3/certs" + "github.com/minio/pkg/v3/env" "gopkg.in/yaml.v2" ) @@ -81,11 +85,12 @@ var ServerFlags = []cli.Flag{ }, cli.DurationFlag{ Name: "shutdown-timeout", - Value: xhttp.DefaultShutdownTimeout, - Usage: "shutdown timeout to gracefully shutdown server", + Value: time.Second * 30, + Usage: "shutdown timeout to gracefully shutdown server (DEPRECATED)", EnvVar: "MINIO_SHUTDOWN_TIMEOUT", Hidden: true, }, + cli.DurationFlag{ Name: "idle-timeout", Value: xhttp.DefaultIdleTimeout, @@ -100,32 +105,6 @@ var ServerFlags = []cli.Flag{ EnvVar: "MINIO_READ_HEADER_TIMEOUT", Hidden: true, }, - cli.DurationFlag{ - Name: "conn-client-read-deadline", - Usage: "custom connection READ deadline for incoming requests", - Hidden: true, - EnvVar: "MINIO_CONN_CLIENT_READ_DEADLINE", - }, - cli.DurationFlag{ - Name: "conn-client-write-deadline", - Usage: "custom connection WRITE deadline 
for outgoing requests", - Hidden: true, - EnvVar: "MINIO_CONN_CLIENT_WRITE_DEADLINE", - }, - cli.DurationFlag{ - Name: "conn-read-deadline", - Usage: "custom connection READ deadline", - Hidden: true, - Value: 10 * time.Minute, - EnvVar: "MINIO_CONN_READ_DEADLINE", - }, - cli.DurationFlag{ - Name: "conn-write-deadline", - Usage: "custom connection WRITE deadline", - Hidden: true, - Value: 10 * time.Minute, - EnvVar: "MINIO_CONN_WRITE_DEADLINE", - }, cli.DurationFlag{ Name: "conn-user-timeout", Usage: "custom TCP_USER_TIMEOUT for socket buffers", @@ -141,9 +120,14 @@ var ServerFlags = []cli.Flag{ }, cli.DurationFlag{ Name: "dns-cache-ttl", - Usage: "custom DNS cache TTL for baremetal setups", + Usage: "custom DNS cache TTL", Hidden: true, - Value: 10 * time.Minute, + Value: func() time.Duration { + if orchestrated { + return 30 * time.Second + } + return 10 * time.Minute + }(), EnvVar: "MINIO_DNS_CACHE_TTL", }, cli.IntFlag{ @@ -161,20 +145,55 @@ var ServerFlags = []cli.Flag{ Name: "sftp", Usage: "enable and configure an SFTP server", }, -} - -var gatewayCmd = cli.Command{ - Name: "gateway", - Usage: "start object storage gateway", - Hidden: true, - Flags: append(ServerFlags, GlobalFlags...), - HideHelpCommand: true, - Action: gatewayMain, -} - -func gatewayMain(ctx *cli.Context) error { - logger.Fatal(errInvalidArgument, "Gateway is deprecated, To continue to use Gateway please use releases no later than 'RELEASE.2022-10-24T18-35-07Z'. We recommend all our users to migrate from gateway mode to server mode. Please read https://blog.min.io/deprecation-of-the-minio-gateway/") - return nil + cli.StringFlag{ + Name: "crossdomain-xml", + Usage: "provide a custom crossdomain-xml configuration to report at http://endpoint/crossdomain.xml", + Hidden: true, + EnvVar: "MINIO_CROSSDOMAIN_XML", + }, + cli.StringFlag{ + Name: "memlimit", + Usage: "set global memory limit per server via GOMEMLIMIT", + Hidden: true, + EnvVar: "MINIO_MEMLIMIT", + }, + cli.IntFlag{ + Name: "send-buf-size", + Value: 4 * humanize.MiByte, + EnvVar: "MINIO_SEND_BUF_SIZE", + Hidden: true, + }, + cli.IntFlag{ + Name: "recv-buf-size", + Value: 4 * humanize.MiByte, + EnvVar: "MINIO_RECV_BUF_SIZE", + Hidden: true, + }, + cli.StringFlag{ + Name: "log-dir", + Usage: "specify the directory to save the server log", + EnvVar: "MINIO_LOG_DIR", + Hidden: true, + }, + cli.IntFlag{ + Name: "log-size", + Usage: "specify the maximum server log file size in bytes before its rotated", + Value: 10 * humanize.MiByte, + EnvVar: "MINIO_LOG_SIZE", + Hidden: true, + }, + cli.BoolFlag{ + Name: "log-compress", + Usage: "specify if we want the rotated logs to be gzip compressed or not", + EnvVar: "MINIO_LOG_COMPRESS", + Hidden: true, + }, + cli.StringFlag{ + Name: "log-prefix", + Usage: "specify the log prefix name for the server log", + EnvVar: "MINIO_LOG_PREFIX", + Hidden: true, + }, } var serverCmd = cli.Command{ @@ -251,23 +270,7 @@ func serverCmdArgs(ctx *cli.Context) []string { return strings.Fields(v) } -func mergeServerCtxtFromConfigFile(configFile string, ctxt *serverCtxt) error { - rd, err := Open(configFile) - if err != nil { - return err - } - defer rd.Close() - - cf := &config.ServerConfig{} - dec := yaml.NewDecoder(rd) - dec.SetStrict(true) - if err = dec.Decode(cf); err != nil { - return err - } - if cf.Version != "v1" { - return fmt.Errorf("unexpected version: %s", cf.Version) - } - +func configCommonToSrvCtx(cf config.ServerConfigCommon, ctxt *serverCtxt) { ctxt.RootUser = cf.RootUser ctxt.RootPwd = cf.RootPwd @@ -295,8 +298,75 @@ func 
mergeServerCtxtFromConfigFile(configFile string, ctxt *serverCtxt) error { if cf.Options.SFTP.SSHPrivateKey != "" { ctxt.SFTP = append(ctxt.SFTP, fmt.Sprintf("ssh-private-key=%s", cf.Options.SFTP.SSHPrivateKey)) } +} + +func mergeServerCtxtFromConfigFile(configFile string, ctxt *serverCtxt) error { + rd, err := xioutil.ReadFile(configFile) + if err != nil { + return err + } - ctxt.Layout, err = buildDisksLayoutFromConfFile(cf.Pools) + cfReader := bytes.NewReader(rd) + + cv := config.ServerConfigVersion{} + if err = yaml.Unmarshal(rd, &cv); err != nil { + return err + } + + switch cv.Version { + case "v1", "v2": + default: + return fmt.Errorf("unexpected version: %s", cv.Version) + } + + cfCommon := config.ServerConfigCommon{} + if err = yaml.Unmarshal(rd, &cfCommon); err != nil { + return err + } + + configCommonToSrvCtx(cfCommon, ctxt) + + v, err := env.GetInt(EnvErasureSetDriveCount, 0) + if err != nil { + return err + } + setDriveCount := uint64(v) + + var pools []poolArgs + switch cv.Version { + case "v1": + cfV1 := config.ServerConfigV1{} + if err = yaml.Unmarshal(rd, &cfV1); err != nil { + return err + } + + pools = make([]poolArgs, 0, len(cfV1.Pools)) + for _, list := range cfV1.Pools { + pools = append(pools, poolArgs{ + args: list, + setDriveCount: setDriveCount, + }) + } + case "v2": + cf := config.ServerConfig{} + cfReader.Seek(0, io.SeekStart) + if err = yaml.Unmarshal(rd, &cf); err != nil { + return err + } + + pools = make([]poolArgs, 0, len(cf.Pools)) + for _, list := range cf.Pools { + driveCount := list.SetDriveCount + if setDriveCount > 0 { + driveCount = setDriveCount + } + pools = append(pools, poolArgs{ + args: list.Args, + setDriveCount: driveCount, + }) + } + } + ctxt.Layout, err = buildDisksLayoutFromConfFile(pools) return err } @@ -345,37 +415,36 @@ func serverHandleCmdArgs(ctxt serverCtxt) { // Initialize, see which NIC the service is running on, and save it as global value setGlobalInternodeInterface(ctxt.Interface) + globalTCPOptions = xhttp.TCPOptions{ + UserTimeout: int(ctxt.UserTimeout.Milliseconds()), + // FIXME: Bring this back when we have valid way to handle deadlines + // DriveOPTimeout: globalDriveConfig.GetOPTimeout, + Interface: ctxt.Interface, + SendBufSize: ctxt.SendBufSize, + RecvBufSize: ctxt.RecvBufSize, + IdleTimeout: ctxt.IdleTimeout, + } + // allow transport to be HTTP/1.1 for proxying. 
- globalProxyTransport = NewCustomHTTPProxyTransport()() - globalProxyEndpoints = GetProxyEndpoints(globalEndpoints) globalInternodeTransport = NewInternodeHTTPTransport(ctxt.MaxIdleConnsPerHost)() globalRemoteTargetTransport = NewRemoteTargetHTTPTransport(false)() - globalHealthChkTransport = NewHTTPTransport() + globalProxyEndpoints = GetProxyEndpoints(globalEndpoints, globalRemoteTargetTransport) + globalForwarder = handlers.NewForwarder(&handlers.Forwarder{ PassHost: true, - RoundTripper: NewHTTPTransportWithTimeout(1 * time.Hour), + RoundTripper: globalRemoteTargetTransport, Logger: func(err error) { if err != nil && !errors.Is(err, context.Canceled) { - logger.LogIf(GlobalContext, err) + proxyLogIf(GlobalContext, err) } }, }) - globalTCPOptions = xhttp.TCPOptions{ - UserTimeout: int(ctxt.UserTimeout.Milliseconds()), - ClientReadTimeout: ctxt.ConnClientReadDeadline, - ClientWriteTimeout: ctxt.ConnClientWriteDeadline, - Interface: ctxt.Interface, - } - // On macOS, if a process already listens on LOCALIPADDR:PORT, net.Listen() falls back // to IPv6 address ie minio will start listening on IPv6 address whereas another // (non-)minio process is listening on IPv4 of given port. // To avoid this error situation we check for port availability. logger.FatalIf(xhttp.CheckPortAvailability(globalMinioHost, globalMinioPort, globalTCPOptions), "Unable to start the server") - - globalConnReadDeadline = ctxt.ConnReadDeadline - globalConnWriteDeadline = ctxt.ConnWriteDeadline } func initAllSubsystems(ctx context.Context) { @@ -383,7 +452,9 @@ func initAllSubsystems(ctx context.Context) { globalNotificationSys = NewNotificationSys(globalEndpoints) // Create new notification system - globalEventNotifier = NewEventNotifier(GlobalContext) + if globalEventNotifier == nil { + globalEventNotifier = NewEventNotifier(GlobalContext) + } // Create new bucket metadata system. if globalBucketMetadataSys == nil { @@ -566,7 +637,7 @@ func initConfigSubsystem(ctx context.Context, newObject ObjectLayer) error { } // Any other config errors we simply print a message and proceed forward. 
- logger.LogIf(ctx, fmt.Errorf("Unable to initialize config, some features may be missing: %w", err)) + configLogIf(ctx, fmt.Errorf("Unable to initialize config, some features may be missing: %w", err)) } return nil @@ -587,12 +658,7 @@ func setGlobalInternodeInterface(interfaceName string) { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - lookupHost := globalDNSCache.LookupHost - if IsKubernetes() || IsDocker() { - lookupHost = net.DefaultResolver.LookupHost - } - - haddrs, err := lookupHost(ctx, host) + haddrs, err := globalDNSCache.LookupHost(ctx, host) if err == nil { ip = haddrs[0] } @@ -620,22 +686,15 @@ func getServerListenAddrs() []string { // Use a string set to avoid duplication addrs := set.NewStringSet() // Listen on local interface to receive requests from Console - for _, ip := range mustGetLocalIPs() { - if ip != nil && ip.IsLoopback() { - addrs.Add(net.JoinHostPort(ip.String(), globalMinioPort)) - } + for _, ip := range localLoopbacks.ToSlice() { + addrs.Add(net.JoinHostPort(ip, globalMinioPort)) } host, _ := mustSplitHostPort(globalMinioAddr) if host != "" { ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) defer cancel() - lookupHost := globalDNSCache.LookupHost - if IsKubernetes() || IsDocker() { - lookupHost = net.DefaultResolver.LookupHost - } - - haddrs, err := lookupHost(ctx, host) + haddrs, err := globalDNSCache.LookupHost(ctx, host) if err == nil { for _, addr := range haddrs { addrs.Add(net.JoinHostPort(addr, globalMinioPort)) @@ -650,8 +709,43 @@ func getServerListenAddrs() []string { return addrs.ToSlice() } +var globalLoggerOutput io.WriteCloser + +func initializeLogRotate(ctx *cli.Context) (io.WriteCloser, error) { + lgDir := ctx.String("log-dir") + if lgDir == "" { + return os.Stderr, nil + } + lgDirAbs, err := filepath.Abs(lgDir) + if err != nil { + return nil, err + } + lgSize := ctx.Int("log-size") + + var fileNameFunc func() string + if ctx.IsSet("log-prefix") { + fileNameFunc = func() string { + return fmt.Sprintf("%s-%s.log", ctx.String("log-prefix"), fmt.Sprintf("%X", time.Now().UTC().UnixNano())) + } + } + + output, err := logger.NewDir(logger.Options{ + Directory: lgDirAbs, + MaximumFileSize: int64(lgSize), + Compress: ctx.Bool("log-compress"), + FileNameFunc: fileNameFunc, + }) + if err != nil { + return nil, err + } + logger.EnableJSON() + return output, nil +} + // serverMain handler called for 'minio server' command. func serverMain(ctx *cli.Context) { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + var warnings []string signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT) @@ -662,16 +756,31 @@ func serverMain(ctx *cli.Context) { // Initialize globalConsoleSys system bootstrapTrace("newConsoleLogger", func() { - globalConsoleSys = NewConsoleLogger(GlobalContext) + output, err := initializeLogRotate(ctx) + if err == nil { + logger.Output = output + globalConsoleSys = NewConsoleLogger(GlobalContext, output) + globalLoggerOutput = output + } else { + logger.Output = os.Stderr + globalConsoleSys = NewConsoleLogger(GlobalContext, os.Stderr) + } logger.AddSystemTarget(GlobalContext, globalConsoleSys) // Set node name, only set for distributed setup. globalConsoleSys.SetNodeName(globalLocalNodeName) + if err != nil { + // We can only log here since we need globalConsoleSys initialized + logger.Fatal(err, "invalid --logrorate-dir option") + } }) // Always load ENV variables from files first. 
loadEnvVarsFromFiles() + // Handle early server environment vars + serverHandleEarlyEnvVars() + // Handle all server command args and build the disks layout bootstrapTrace("serverHandleCmdArgs", func() { err := buildServerCtxt(ctx, &globalServerCtxt) @@ -686,10 +795,6 @@ func serverMain(ctx *cli.Context) { // Handle all server environment vars. serverHandleEnvVars() - // Load the root credentials from the shell environment or from - // the config file if not defined, set the default one. - loadRootCredentials() - // Perform any self-tests bootstrapTrace("selftests", func() { bitrotSelfTest() @@ -700,6 +805,29 @@ func serverMain(ctx *cli.Context) { // Initialize KMS configuration bootstrapTrace("handleKMSConfig", handleKMSConfig) + // Load the root credentials from the shell environment or from + // the config file if not defined, set the default one. + bootstrapTrace("rootCredentials", func() { + cred := loadRootCredentials() + if !cred.IsValid() && (env.Get(api.EnvAPIRootAccess, config.EnableOn) == config.EnableOff) { + // Generate KMS based credentials if root access is disabled + // and no ENV is set. + cred = autoGenerateRootCredentials() + } + + if !cred.IsValid() { + cred = auth.DefaultCredentials + } + + var err error + globalNodeAuthToken, err = authenticateNode(cred.AccessKey, cred.SecretKey) + if err != nil { + logger.Fatal(err, "Unable to generate internode credentials") + } + + globalActiveCred = cred + }) + // Initialize all help bootstrapTrace("initHelp", initHelp) @@ -718,6 +846,11 @@ func serverMain(ctx *cli.Context) { } } + var getCert certs.GetCertificateFunc + if globalTLSCerts != nil { + getCert = globalTLSCerts.GetCertificate + } + // Check for updates in non-blocking manner. go func() { if !globalServerCtxt.Quiet && !globalInplaceUpdateDisabled { @@ -730,30 +863,31 @@ func serverMain(ctx *cli.Context) { // Set system resources to maximum. bootstrapTrace("setMaxResources", func() { - _ = setMaxResources() + _ = setMaxResources(globalServerCtxt) }) // Verify kernel release and version. if oldLinux() { - warnings = append(warnings, color.YellowBold("- Detected Linux kernel version older than 4.0.0 release, there are some known potential performance problems with this kernel version. MinIO recommends a minimum of 4.x.x linux kernel version for best performance")) + warnings = append(warnings, color.YellowBold("Detected Linux kernel version older than 4.0 release, there are some known potential performance problems with this kernel version. 
MinIO recommends a minimum of 4.x linux kernel version for best performance")) } maxProcs := runtime.GOMAXPROCS(0) cpuProcs := runtime.NumCPU() if maxProcs < cpuProcs { - warnings = append(warnings, color.YellowBold("- Detected GOMAXPROCS(%d) < NumCPU(%d), please make sure to provide all PROCS to MinIO for optimal performance", maxProcs, cpuProcs)) + warnings = append(warnings, color.YellowBold("Detected GOMAXPROCS(%d) < NumCPU(%d), please make sure to provide all PROCS to MinIO for optimal performance", + maxProcs, cpuProcs)) } - var getCert certs.GetCertificateFunc - if globalTLSCerts != nil { - getCert = globalTLSCerts.GetCertificate - } - - // Initialize gridn + // Initialize grid bootstrapTrace("initGrid", func() { logger.FatalIf(initGlobalGrid(GlobalContext, globalEndpoints), "Unable to configure server grid RPC services") }) + // Initialize lock grid + bootstrapTrace("initLockGrid", func() { + logger.FatalIf(initGlobalLockGrid(GlobalContext, globalEndpoints), "Unable to configure server lock grid RPC services") + }) + // Configure server. bootstrapTrace("configureServer", func() { handler, err := configureServerHandler(globalEndpoints) @@ -761,13 +895,15 @@ func serverMain(ctx *cli.Context) { logger.Fatal(config.ErrUnexpectedError(err), "Unable to configure one of server's RPC services") } // Allow grid to start after registering all services. - xioutil.SafeClose(globalGridStart) + close(globalGridStart) + close(globalLockGridStart) httpServer := xhttp.NewServer(getServerListenAddrs()). UseHandler(setCriticalErrorHandler(corsHandler(handler))). UseTLSConfig(newTLSConfig(getCert)). - UseShutdownTimeout(globalServerCtxt.ShutdownTimeout). UseIdleTimeout(globalServerCtxt.IdleTimeout). + UseReadTimeout(globalServerCtxt.IdleTimeout). + UseWriteTimeout(globalServerCtxt.IdleTimeout). UseReadHeaderTimeout(globalServerCtxt.ReadHeaderTimeout). UseBaseContext(GlobalContext). UseCustomLogger(log.New(io.Discard, "", 0)). // Turn-off random logging by Go stdlib @@ -776,7 +912,7 @@ func serverMain(ctx *cli.Context) { httpServer.TCPOptions.Trace = bootstrapTraceMsg go func() { serveFn, err := httpServer.Init(GlobalContext, func(listenAddr string, err error) { - logger.LogIf(GlobalContext, fmt.Errorf("Unable to listen on `%s`: %v", listenAddr, err)) + bootLogIf(GlobalContext, fmt.Errorf("Unable to listen on `%s`: %v", listenAddr, err)) }) if err != nil { globalHTTPServerErrorCh <- err @@ -797,7 +933,7 @@ func serverMain(ctx *cli.Context) { }) } - if !globalDisableFreezeOnBoot { + if globalEnableSyncBoot { // Freeze the services until the bucket notification subsystem gets initialized. bootstrapTrace("freezeServices", freezeServices) } @@ -811,9 +947,6 @@ func serverMain(ctx *cli.Context) { } }) - xhttp.SetDeploymentID(globalDeploymentID()) - xhttp.SetMinIOVersion(Version) - for _, n := range globalNodes { nodeName := n.Host if n.IsLocal { @@ -823,6 +956,22 @@ func serverMain(ctx *cli.Context) { globalNodeNamesHex[hex.EncodeToString(nodeNameSum[:])] = struct{}{} } + bootstrapTrace("waitForQuorum", func() { + result := newObject.Health(context.Background(), HealthOptions{NoLogging: true}) + for !result.HealthyRead { + if debugNoExit { + logger.Info("Not waiting for quorum since we are debugging.. possible cause unhealthy sets") + logger.Info(result.String()) + break + } + d := time.Duration(r.Float64() * float64(time.Second)) + logger.Info("Waiting for quorum READ healthcheck to succeed retrying in %s.. 
possible cause unhealthy sets", d) + logger.Info(result.String()) + time.Sleep(d) + result = newObject.Health(context.Background(), HealthOptions{NoLogging: true}) + } + }) + var err error bootstrapTrace("initServerConfig", func() { if err = initServerConfig(GlobalContext, newObject); err != nil { @@ -838,15 +987,15 @@ func serverMain(ctx *cli.Context) { logger.FatalIf(err, "Server startup canceled upon user request") } - logger.LogIf(GlobalContext, err) + bootLogIf(GlobalContext, err) } if !globalServerCtxt.StrictS3Compat { - warnings = append(warnings, color.YellowBold("- Strict AWS S3 compatible incoming PUT, POST content payload validation is turned off, caution is advised do not use in production")) + warnings = append(warnings, color.YellowBold("Strict AWS S3 compatible incoming PUT, POST content payload validation is turned off, caution is advised do not use in production")) } }) if globalActiveCred.Equal(auth.DefaultCredentials) { - msg := fmt.Sprintf("- Detected default credentials '%s', we recommend that you change these values with 'MINIO_ROOT_USER' and 'MINIO_ROOT_PASSWORD' environment variables", + msg := fmt.Sprintf("Detected default credentials '%s', we recommend that you change these values with 'MINIO_ROOT_USER' and 'MINIO_ROOT_PASSWORD' environment variables", globalActiveCred) warnings = append(warnings, color.YellowBold(msg)) } @@ -889,12 +1038,11 @@ func serverMain(ctx *cli.Context) { }() go func() { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - if !globalDisableFreezeOnBoot { + if globalEnableSyncBoot { defer bootstrapTrace("unfreezeServices", unfreezeServices) t := time.AfterFunc(5*time.Minute, func() { - warnings = append(warnings, color.YellowBold("- Initializing the config subsystem is taking longer than 5 minutes. Please set '_MINIO_DISABLE_API_FREEZE_ON_BOOT=true' to not freeze the APIs")) + warnings = append(warnings, + color.YellowBold("- Initializing the config subsystem is taking longer than 5 minutes. Please remove 'MINIO_SYNC_BOOT=on' to not freeze the APIs")) }) defer t.Stop() } @@ -920,44 +1068,38 @@ func serverMain(ctx *cli.Context) { globalTransitionState.Init(newObject) }) - // Initialize batch job pool. - bootstrapTrace("newBatchJobPool", func() { - globalBatchJobPool = newBatchJobPool(GlobalContext, newObject, 100) - }) - - // Initialize the license update job - bootstrapTrace("initLicenseUpdateJob", func() { - initLicenseUpdateJob(GlobalContext, newObject) - }) - go func() { // Initialize transition tier configuration manager bootstrapTrace("globalTierConfigMgr.Init", func() { if err := globalTierConfigMgr.Init(GlobalContext, newObject); err != nil { - logger.LogIf(GlobalContext, err) + bootLogIf(GlobalContext, err) } }) }() // Initialize bucket notification system. bootstrapTrace("initBucketTargets", func() { - logger.LogIf(GlobalContext, globalEventNotifier.InitBucketTargets(GlobalContext, newObject)) + bootLogIf(GlobalContext, globalEventNotifier.InitBucketTargets(GlobalContext, newObject)) }) - var buckets []BucketInfo + var buckets []string // List buckets to initialize bucket metadata sub-sys. bootstrapTrace("listBuckets", func() { for { - buckets, err = newObject.ListBuckets(GlobalContext, BucketOptions{}) + bucketsList, err := newObject.ListBuckets(GlobalContext, BucketOptions{NoMetadata: true}) if err != nil { if configRetriableErrors(err) { logger.Info("Waiting for list buckets to succeed to initialize buckets.. 
possible cause (%v)", err) time.Sleep(time.Duration(r.Float64() * float64(time.Second))) continue } - logger.LogIf(GlobalContext, fmt.Errorf("Unable to list buckets to initialize bucket metadata sub-system: %w", err)) + bootLogIf(GlobalContext, fmt.Errorf("Unable to list buckets to initialize bucket metadata sub-system: %w", err)) } + buckets = make([]string, len(bucketsList)) + for i := range bucketsList { + buckets[i] = bucketsList[i].Name + } break } }) @@ -969,7 +1111,7 @@ func serverMain(ctx *cli.Context) { // initialize replication resync state. bootstrapTrace("initResync", func() { - globalReplicationPool.initResync(GlobalContext, buckets, newObject) + globalReplicationPool.Get().initResync(GlobalContext, buckets, newObject) }) // Initialize site replication manager after bucket metadata @@ -977,11 +1119,6 @@ func serverMain(ctx *cli.Context) { globalSiteReplicationSys.Init(GlobalContext, newObject) }) - // Initialize quota manager. - bootstrapTrace("globalBucketQuotaSys.Init", func() { - globalBucketQuotaSys.Init(newObject) - }) - // Populate existing buckets to the etcd backend if globalDNSConfig != nil { // Background this operation. @@ -990,26 +1127,30 @@ func serverMain(ctx *cli.Context) { }) } + // Initialize batch job pool. + bootstrapTrace("newBatchJobPool", func() { + globalBatchJobPool = newBatchJobPool(GlobalContext, newObject, 100) + globalBatchJobsMetrics = batchJobMetrics{ + metrics: make(map[string]*batchJobInfo), + } + go globalBatchJobsMetrics.init(GlobalContext, newObject) + go globalBatchJobsMetrics.purgeJobMetrics() + }) + // Prints the formatted startup message, if err is not nil then it prints additional information as well. printStartupMessage(getAPIEndpoints(), err) // Print a warning at the end of the startup banner so it is more noticeable - if newObject.BackendInfo().StandardSCParity == 0 { - warnings = append(warnings, color.YellowBold("- The standard parity is set to 0. This can lead to data loss.")) - } - objAPI := newObjectLayerFn() - if objAPI != nil { - printStorageInfo(objAPI.StorageInfo(GlobalContext, true)) + if newObject.BackendInfo().StandardSCParity == 0 && !globalIsErasureSD { + warnings = append(warnings, color.YellowBold("The standard parity is set to 0. This can lead to data loss.")) } - if len(warnings) > 0 { - logger.Info(color.Yellow("STARTUP WARNINGS:")) - for _, warn := range warnings { - logger.Info(warn) - } + + for _, warn := range warnings { + logger.Warning(warn) } }() - region := globalSite.Region + region := globalSite.Region() if region == "" { region = "us-east-1" } @@ -1017,7 +1158,7 @@ func serverMain(ctx *cli.Context) { globalMinioClient, err = minio.New(globalLocalNodeName, &minio.Options{ Creds: credentials.NewStaticV4(globalActiveCred.AccessKey, globalActiveCred.SecretKey, ""), Secure: globalIsTLS, - Transport: globalProxyTransport, + Transport: globalRemoteTargetTransport, Region: region, }) logger.FatalIf(err, "Unable to initialize MinIO client") diff --git a/cmd/server-main_test.go b/cmd/server-main_test.go index 6ed8506503986..fd397b9f437b9 100644 --- a/cmd/server-main_test.go +++ b/cmd/server-main_test.go @@ -52,7 +52,6 @@ func TestServerConfigFile(t *testing.T) { expectedErr: true, }, } { - testcase := testcase t.Run(testcase.config, func(t *testing.T) { sctx := &serverCtxt{} err := mergeServerCtxtFromConfigFile(testcase.config, sctx) @@ -76,7 +75,7 @@ func TestServerConfigFile(t *testing.T) { // Tests initializing new object layer. 
func TestNewObjectLayer(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() // Tests for ErasureSD object layer. nDisks := 1 diff --git a/cmd/server-rlimit.go b/cmd/server-rlimit.go index 6a3611b4ef60e..ecb779e17e236 100644 --- a/cmd/server-rlimit.go +++ b/cmd/server-rlimit.go @@ -21,9 +21,10 @@ import ( "runtime" "runtime/debug" + "github.com/dustin/go-humanize" "github.com/minio/madmin-go/v3/kernel" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/sys" + "github.com/minio/pkg/v3/sys" ) func oldLinux() bool { @@ -43,7 +44,7 @@ func oldLinux() bool { return currentKernel < kernel.Version(4, 0, 0) } -func setMaxResources() (err error) { +func setMaxResources(ctx serverCtxt) (err error) { // Set the Go runtime max threads threshold to 90% of kernel setting. sysMaxThreads, err := sys.GetMaxThreads() if err == nil { @@ -70,11 +71,24 @@ func setMaxResources() (err error) { return err } - // Set max memory limit as current memory limit. - if _, maxLimit, err = sys.GetMaxMemoryLimit(); err != nil { + _, vssLimit, err := sys.GetMaxMemoryLimit() + if err != nil { return err } - err = sys.SetMaxMemoryLimit(maxLimit, maxLimit) - return err + if vssLimit > 0 && vssLimit < humanize.GiByte { + logger.Info("WARNING: maximum virtual memory limit (%s) is too small for 'go runtime', please consider setting `ulimit -v` to unlimited", + humanize.IBytes(vssLimit)) + } + + if ctx.MemLimit > 0 { + debug.SetMemoryLimit(int64(ctx.MemLimit)) + } + + // Do not use RLIMIT_AS as that is not useful and at times on systems < 4Gi + // this can crash the Go runtime if the value is smaller refer + // - https://github.com/golang/go/issues/38010 + // - https://github.com/golang/go/issues/43699 + // So do not add `sys.SetMaxMemoryLimit()` this is not useful for any practical purposes. + return nil } diff --git a/cmd/server-startup-msg.go b/cmd/server-startup-msg.go index ee02b08a54105..ce98d0086b96c 100644 --- a/cmd/server-startup-msg.go +++ b/cmd/server-startup-msg.go @@ -23,10 +23,10 @@ import ( "net/url" "strings" - "github.com/minio/madmin-go/v3" + xnet "github.com/minio/pkg/v3/net" + "github.com/minio/minio/internal/color" "github.com/minio/minio/internal/logger" - xnet "github.com/minio/pkg/v2/net" ) // generates format string depending on the string length and padding. @@ -37,7 +37,11 @@ func getFormatStr(strLen int, padding int) string { // Prints the formatted startup message. func printStartupMessage(apiEndpoints []string, err error) { - logger.Info(color.Bold(MinioBannerName)) + banner := strings.Repeat("-", len(MinioBannerName)) + if globalIsDistErasure { + logger.Startup(color.Bold(banner)) + } + logger.Startup(color.Bold(MinioBannerName)) if err != nil { if globalConsoleSys != nil { globalConsoleSys.Send(GlobalContext, fmt.Sprintf("Server startup failed with '%v', some features may be missing", err)) @@ -47,7 +51,7 @@ func printStartupMessage(apiEndpoints []string, err error) { if !globalSubnetConfig.Registered() { var builder strings.Builder startupBanner(&builder) - logger.Info(builder.String()) + logger.Startup(builder.String()) } strippedAPIEndpoints := stripStandardPorts(apiEndpoints, globalMinioHost) @@ -61,6 +65,9 @@ func printStartupMessage(apiEndpoints []string, err error) { // Prints documentation message. 
printObjectAPIMsg() + if globalIsDistErasure { + logger.Startup(color.Bold(banner)) + } } // Returns true if input is IPv6 @@ -109,25 +116,25 @@ func printServerCommonMsg(apiEndpoints []string) { cred := globalActiveCred // Get saved region. - region := globalSite.Region + region := globalSite.Region() apiEndpointStr := strings.TrimSpace(strings.Join(apiEndpoints, " ")) // Colorize the message and print. - logger.Info(color.Blue("API: ") + color.Bold(fmt.Sprintf("%s ", apiEndpointStr))) + logger.Startup(color.Blue("API: ") + color.Bold(fmt.Sprintf("%s ", apiEndpointStr))) if color.IsTerminal() && (!globalServerCtxt.Anonymous && !globalServerCtxt.JSON && globalAPIConfig.permitRootAccess()) { - logger.Info(color.Blue(" RootUser: ") + color.Bold("%s ", cred.AccessKey)) - logger.Info(color.Blue(" RootPass: ") + color.Bold("%s \n", cred.SecretKey)) + logger.Startup(color.Blue(" RootUser: ") + color.Bold("%s ", cred.AccessKey)) + logger.Startup(color.Blue(" RootPass: ") + color.Bold("%s \n", cred.SecretKey)) if region != "" { - logger.Info(color.Blue(" Region: ") + color.Bold("%s", fmt.Sprintf(getFormatStr(len(region), 2), region))) + logger.Startup(color.Blue(" Region: ") + color.Bold("%s", fmt.Sprintf(getFormatStr(len(region), 2), region))) } } if globalBrowserEnabled { consoleEndpointStr := strings.Join(stripStandardPorts(getConsoleEndpoints(), globalMinioConsoleHost), " ") - logger.Info(color.Blue("WebUI: ") + color.Bold(fmt.Sprintf("%s ", consoleEndpointStr))) + logger.Startup(color.Blue("WebUI: ") + color.Bold(fmt.Sprintf("%s ", consoleEndpointStr))) if color.IsTerminal() && (!globalServerCtxt.Anonymous && !globalServerCtxt.JSON && globalAPIConfig.permitRootAccess()) { - logger.Info(color.Blue(" RootUser: ") + color.Bold("%s ", cred.AccessKey)) - logger.Info(color.Blue(" RootPass: ") + color.Bold("%s ", cred.SecretKey)) + logger.Startup(color.Blue(" RootUser: ") + color.Bold("%s ", cred.AccessKey)) + logger.Startup(color.Blue(" RootPass: ") + color.Bold("%s ", cred.SecretKey)) } } @@ -137,7 +144,7 @@ func printServerCommonMsg(apiEndpoints []string) { // Prints startup message for Object API access, prints link to our SDK documentation. func printObjectAPIMsg() { - logger.Info(color.Blue("\nDocs: ") + "https://min.io/docs/minio/linux/index.html") + logger.Startup(color.Blue("\nDocs: ") + "https://docs.min.io") } func printLambdaTargets() { @@ -146,10 +153,10 @@ func printLambdaTargets() { } arnMsg := color.Blue("Object Lambda ARNs: ") - for _, arn := range globalLambdaTargetList.List(globalSite.Region) { + for _, arn := range globalLambdaTargetList.List(globalSite.Region()) { arnMsg += color.Bold(fmt.Sprintf("%s ", arn)) } - logger.Info(arnMsg + "\n") + logger.Startup(arnMsg + "\n") } // Prints bucket notification configurations. @@ -158,7 +165,7 @@ func printEventNotifiers() { return } - arns := globalEventNotifier.GetARNList(true) + arns := globalEventNotifier.GetARNList() if len(arns) == 0 { return } @@ -168,7 +175,7 @@ func printEventNotifiers() { arnMsg += color.Bold(fmt.Sprintf("%s ", arn)) } - logger.Info(arnMsg + "\n") + logger.Startup(arnMsg + "\n") } // Prints startup message for command line access. Prints link to our documentation @@ -177,39 +184,13 @@ func printCLIAccessMsg(endPoint string, alias string) { // Get saved credentials. 
cred := globalActiveCred - const mcQuickStartGuide = "https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart" + const mcQuickStartGuide = "https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart" // Configure 'mc', following block prints platform specific information for minio client. if color.IsTerminal() && (!globalServerCtxt.Anonymous && globalAPIConfig.permitRootAccess()) { - logger.Info(color.Blue("\nCLI: ") + mcQuickStartGuide) + logger.Startup(color.Blue("\nCLI: ") + mcQuickStartGuide) mcMessage := fmt.Sprintf("$ mc alias set '%s' '%s' '%s' '%s'", alias, endPoint, cred.AccessKey, cred.SecretKey) - logger.Info(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage)) - } -} - -// Get formatted disk/storage info message. -func getStorageInfoMsg(storageInfo StorageInfo) string { - var msg string - var mcMessage string - onlineDisks, offlineDisks := getOnlineOfflineDisksStats(storageInfo.Disks) - if storageInfo.Backend.Type == madmin.Erasure { - if offlineDisks.Sum() > 0 { - mcMessage = "Use `mc admin info` to look for latest server/drive info\n" - } - - diskInfo := fmt.Sprintf(" %d Online, %d Offline. ", onlineDisks.Sum(), offlineDisks.Sum()) - msg += color.Blue("Status:") + fmt.Sprintf(getFormatStr(len(diskInfo), 8), diskInfo) - if len(mcMessage) > 0 { - msg = fmt.Sprintf("%s %s", mcMessage, msg) - } - } - return msg -} - -// Prints startup message of storage capacity and erasure information. -func printStorageInfo(storageInfo StorageInfo) { - if msg := getStorageInfoMsg(storageInfo); msg != "" { - logger.Info(msg) + logger.Startup(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage)) } } diff --git a/cmd/server-startup-msg_test.go b/cmd/server-startup-msg_test.go index 019477f849550..08b451827f31d 100644 --- a/cmd/server-startup-msg_test.go +++ b/cmd/server-startup-msg_test.go @@ -21,32 +21,9 @@ import ( "context" "os" "reflect" - "strings" "testing" - - "github.com/minio/madmin-go/v3" ) -// Tests if we generate storage info. -func TestStorageInfoMsg(t *testing.T) { - infoStorage := StorageInfo{} - infoStorage.Disks = []madmin.Disk{ - {Endpoint: "http://127.0.0.1:9000/data/1/", State: madmin.DriveStateOk}, - {Endpoint: "http://127.0.0.1:9000/data/2/", State: madmin.DriveStateOk}, - {Endpoint: "http://127.0.0.1:9000/data/3/", State: madmin.DriveStateOk}, - {Endpoint: "http://127.0.0.1:9000/data/4/", State: madmin.DriveStateOk}, - {Endpoint: "http://127.0.0.1:9001/data/1/", State: madmin.DriveStateOk}, - {Endpoint: "http://127.0.0.1:9001/data/2/", State: madmin.DriveStateOk}, - {Endpoint: "http://127.0.0.1:9001/data/3/", State: madmin.DriveStateOk}, - {Endpoint: "http://127.0.0.1:9001/data/4/", State: madmin.DriveStateOffline}, - } - infoStorage.Backend.Type = madmin.Erasure - - if msg := getStorageInfoMsg(infoStorage); !strings.Contains(msg, "7 Online, 1 Offline") { - t.Fatal("Unexpected storage info message, found:", msg) - } -} - // Tests stripping standard ports from apiEndpoints. func TestStripStandardPorts(t *testing.T) { apiEndpoints := []string{"http://127.0.0.1:9000", "http://127.0.0.2:80", "https://127.0.0.3:443"} @@ -72,7 +49,7 @@ func TestStripStandardPorts(t *testing.T) { // Test printing server common message. func TestPrintServerCommonMessage(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() obj, fsDir, err := prepareFS(ctx) @@ -90,7 +67,7 @@ func TestPrintServerCommonMessage(t *testing.T) { // Tests print cli access message. 
func TestPrintCLIAccessMsg(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() obj, fsDir, err := prepareFS(ctx) @@ -108,7 +85,7 @@ func TestPrintCLIAccessMsg(t *testing.T) { // Test print startup message. func TestPrintStartupMessage(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() obj, fsDir, err := prepareFS(ctx) diff --git a/cmd/server_test.go b/cmd/server_test.go index ef1c5d311d178..e69117351544b 100644 --- a/cmd/server_test.go +++ b/cmd/server_test.go @@ -35,9 +35,11 @@ import ( "time" "github.com/dustin/go-humanize" + jwtgo "github.com/golang-jwt/jwt/v4" "github.com/minio/minio-go/v7/pkg/set" + "github.com/minio/minio-go/v7/pkg/signer" xhttp "github.com/minio/minio/internal/http" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) // API suite container common to both ErasureSD and Erasure. @@ -58,7 +60,7 @@ type check struct { } // Assert - checks if gotValue is same as expectedValue, if not fails the test. -func (c *check) Assert(gotValue interface{}, expectedValue interface{}) { +func (c *check) Assert(gotValue any, expectedValue any) { c.Helper() if !reflect.DeepEqual(gotValue, expectedValue) { c.Fatalf("Test %s expected %v, got %v", c.testType, expectedValue, gotValue) @@ -107,6 +109,7 @@ func runAllTests(suite *TestSuiteCommon, c *check) { suite.TestListObjectsHandler(c) suite.TestListObjectVersionsOutputOrderHandler(c) suite.TestListObjectsHandlerErrors(c) + suite.TestListObjectsV2HadoopUAHandler(c) suite.TestPutBucketErrors(c) suite.TestGetObjectLarge10MiB(c) suite.TestGetObjectLarge11MiB(c) @@ -121,6 +124,10 @@ func runAllTests(suite *TestSuiteCommon, c *check) { suite.TestObjectMultipartListError(c) suite.TestObjectValidMD5(c) suite.TestObjectMultipart(c) + suite.TestMetricsV3Handler(c) + suite.TestBucketSQSNotificationWebHook(c) + suite.TestBucketSQSNotificationAMQP(c) + suite.TestUnsignedCVE(c) suite.TearDownSuite(c) } @@ -188,6 +195,36 @@ func (s *TestSuiteCommon) TearDownSuite(c *check) { s.testServer.Stop() } +const ( + defaultPrometheusJWTExpiry = 100 * 365 * 24 * time.Hour +) + +func (s *TestSuiteCommon) TestMetricsV3Handler(c *check) { + jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.StandardClaims{ + ExpiresAt: time.Now().UTC().Add(defaultPrometheusJWTExpiry).Unix(), + Subject: s.accessKey, + Issuer: "prometheus", + }) + + token, err := jwt.SignedString([]byte(s.secretKey)) + c.Assert(err, nil) + + for _, cpath := range globalMetricsV3CollectorPaths { + request, err := newTestSignedRequest(http.MethodGet, s.endPoint+minioReservedBucketPath+metricsV3Path+string(cpath), + 0, nil, s.accessKey, s.secretKey, s.signer) + c.Assert(err, nil) + + request.Header.Set("Authorization", "Bearer "+token) + + // execute the request. + response, err := s.client.Do(request) + c.Assert(err, nil) + + // assert the http response status code. + c.Assert(response.StatusCode, http.StatusOK) + } +} + func (s *TestSuiteCommon) TestBucketSQSNotificationWebHook(c *check) { // Sample bucket notification. bucketNotificationBuf := `s3:ObjectCreated:Putprefiximages/1arn:minio:sqs:us-east-1:444455556666:webhook` @@ -319,6 +356,59 @@ func (s *TestSuiteCommon) TestObjectDir(c *check) { c.Assert(response.StatusCode, http.StatusNoContent) } +func (s *TestSuiteCommon) TestUnsignedCVE(c *check) { + c.Helper() + + // generate a random bucket Name. 
+ bucketName := getRandomBucketName() + + // HTTP request to create the bucket. + request, err := newTestSignedRequest(http.MethodPut, getMakeBucketURL(s.endPoint, bucketName), + 0, nil, s.accessKey, s.secretKey, s.signer) + c.Assert(err, nil) + + // execute the request. + response, err := s.client.Do(request) + c.Assert(err, nil) + + // assert the http response status code. + c.Assert(response.StatusCode, http.StatusOK) + + req, err := http.NewRequest(http.MethodPut, getPutObjectURL(s.endPoint, bucketName, "test-cve-object.txt"), nil) + c.Assert(err, nil) + + req.Body = io.NopCloser(bytes.NewReader([]byte("foobar!\n"))) + req.Trailer = http.Header{} + req.Trailer.Set("x-amz-checksum-crc32", "rK0DXg==") + + now := UTCNow() + + req = signer.StreamingUnsignedV4(req, "", 8, now) + + maliciousHeaders := http.Header{ + "Authorization": []string{fmt.Sprintf("AWS4-HMAC-SHA256 Credential=%s/%s/us-east-1/s3/aws4_request, SignedHeaders=invalidheader, Signature=deadbeefdeadbeefdeadbeeddeadbeeddeadbeefdeadbeefdeadbeefdeadbeef", s.accessKey, now.Format(yyyymmdd))}, + "User-Agent": []string{"A malicious request"}, + "X-Amz-Decoded-Content-Length": []string{"8"}, + "Content-Encoding": []string{"aws-chunked"}, + "X-Amz-Trailer": []string{"x-amz-checksum-crc32"}, + "x-amz-content-sha256": []string{unsignedPayloadTrailer}, + } + + for k, v := range maliciousHeaders { + req.Header.Set(k, v[0]) + } + + // execute the request. + response, err = s.client.Do(req) + c.Assert(err, nil) + + // out, err = httputil.DumpResponse(response, true) + // fmt.Println("RESPONSE ===\n", string(out), err) + + // assert the http response status code. + c.Assert(response.StatusCode, http.StatusBadRequest) +} + func (s *TestSuiteCommon) TestBucketSQSNotificationAMQP(c *check) { // Sample bucket notification. bucketNotificationBuf := `s3:ObjectCreated:Putprefiximages/1arn:minio:sqs:us-east-1:444455556666:amqp` @@ -563,7 +653,7 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *check) { delObjReq := DeleteObjectsRequest{ Quiet: false, } - for i := 0; i < 10; i++ { + for i := range 10 { // Obtain http request to upload object. // object Name contains a prefix. 
objName := fmt.Sprintf("%d/%s", i, objectName) @@ -600,7 +690,7 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *check) { c.Assert(err, nil) err = xml.Unmarshal(delRespBytes, &deleteResp) c.Assert(err, nil) - for i := 0; i < 10; i++ { + for i := range 10 { // All the objects should be under deleted list (including non-existent object) c.Assert(deleteResp.DeletedObjects[i], DeletedObject{ ObjectName: delObjReq.Objects[i].ObjectName, @@ -624,7 +714,7 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *check) { err = xml.Unmarshal(delRespBytes, &deleteResp) c.Assert(err, nil) c.Assert(len(deleteResp.DeletedObjects), len(delObjReq.Objects)) - for i := 0; i < 10; i++ { + for i := range 10 { c.Assert(deleteResp.DeletedObjects[i], DeletedObject{ ObjectName: delObjReq.Objects[i].ObjectName, VersionID: delObjReq.Objects[i].VersionID, @@ -964,7 +1054,7 @@ func (s *TestSuiteCommon) TestPutBucket(c *check) { // The purpose this block is not to check for correctness of functionality // Run the test with -race flag to utilize this var wg sync.WaitGroup - for i := 0; i < testConcurrencyLevel; i++ { + for range testConcurrencyLevel { wg.Add(1) go func() { defer wg.Done() @@ -1586,7 +1676,7 @@ func (s *TestSuiteCommon) TestListObjectsHandler(c *check) { c.Assert(err, nil) c.Assert(response.StatusCode, http.StatusOK) - for _, objectName := range []string{"foo bar 1", "foo bar 2"} { + for _, objectName := range []string{"foo bar 1", "foo bar 2", "obj2", "obj2/"} { buffer := bytes.NewReader([]byte("Hello World")) request, err = newTestSignedRequest(http.MethodPut, getPutObjectURL(s.endPoint, bucketName, objectName), int64(buffer.Len()), buffer, s.accessKey, s.secretKey, s.signer) @@ -1604,26 +1694,189 @@ func (s *TestSuiteCommon) TestListObjectsHandler(c *check) { {getListObjectsV1URL(s.endPoint, bucketName, "", "1000", ""), []string{"foo bar 1", "foo bar 2"}}, {getListObjectsV1URL(s.endPoint, bucketName, "", "1000", "url"), []string{"foo+bar+1", "foo+bar+2"}}, { - getListObjectsV2URL(s.endPoint, bucketName, "", "1000", "", ""), + getListObjectsV2URL(s.endPoint, bucketName, "", "1000", "", "", ""), []string{ "foo bar 1", "foo bar 2", }, }, { - getListObjectsV2URL(s.endPoint, bucketName, "", "1000", "true", ""), + getListObjectsV2URL(s.endPoint, bucketName, "", "1000", "true", "", ""), []string{ "foo bar 1", "foo bar 2", fmt.Sprintf("%sminio", globalMinioDefaultOwnerID), }, }, - {getListObjectsV2URL(s.endPoint, bucketName, "", "1000", "", "url"), []string{"foo+bar+1", "foo+bar+2"}}, + {getListObjectsV2URL(s.endPoint, bucketName, "", "1000", "", "url", ""), []string{"foo+bar+1", "foo+bar+2"}}, + { + getListObjectsV2URL(s.endPoint, bucketName, "", "1000", "", "", ""), + []string{ + "obj2", + "obj2/", + }, + }, + } + + for _, testCase := range testCases { + // create listObjectsV1 request with valid parameters + request, err = newTestSignedRequest(http.MethodGet, testCase.getURL, 0, nil, s.accessKey, s.secretKey, s.signer) + c.Assert(err, nil) + // execute the HTTP request. + response, err = s.client.Do(request) + c.Assert(err, nil) + c.Assert(response.StatusCode, http.StatusOK) + + getContent, err := io.ReadAll(response.Body) + c.Assert(err, nil) + + for _, expectedStr := range testCase.expectedStrings { + c.Assert(strings.Contains(string(getContent), expectedStr), true) + } } +} + +// TestListObjectsV2HadoopUAHandler - Test ListObjectsV2 call with max-keys=2 and Hadoop User-Agent +func (s *TestSuiteCommon) TestListObjectsV2HadoopUAHandler(c *check) { + // generate a random bucket name. 
+ bucketName := getRandomBucketName() + // HTTP request to create the bucket. + request, err := newTestSignedRequest(http.MethodPut, getMakeBucketURL(s.endPoint, bucketName), + 0, nil, s.accessKey, s.secretKey, s.signer) + c.Assert(err, nil) + // execute the HTTP request to create bucket. + response, err := s.client.Do(request) + c.Assert(err, nil) + c.Assert(response.StatusCode, http.StatusOK) + + // enable versioning on the bucket. + enableVersioningBody := []byte("Enabled") + enableVersioningBucketRequest, err := newTestSignedRequest(http.MethodPut, getBucketVersioningConfigURL(s.endPoint, bucketName), + int64(len(enableVersioningBody)), bytes.NewReader(enableVersioningBody), s.accessKey, s.secretKey, s.signer) + c.Assert(err, nil) + // execute the HTTP request to create bucket. + response, err = s.client.Do(enableVersioningBucketRequest) + c.Assert(err, nil) + c.Assert(response.StatusCode, http.StatusOK) + + for _, objectName := range []string{"pfx/a/1.txt", "pfx/b/2.txt", "pfx/", "pfx2/c/3.txt", "pfx2/d/3.txt", "pfx1/1.txt", "pfx2/", "pfx3/", "pfx4/"} { + buffer := bytes.NewReader([]byte("")) + request, err = newTestSignedRequest(http.MethodPut, getPutObjectURL(s.endPoint, bucketName, objectName), + int64(buffer.Len()), buffer, s.accessKey, s.secretKey, s.signer) + c.Assert(err, nil) + response, err = s.client.Do(request) + c.Assert(err, nil) + c.Assert(response.StatusCode, http.StatusOK) + } + for _, objectName := range []string{"pfx2/c/3.txt", "pfx2/d/3.txt", "pfx2/", "pfx3/"} { + delRequest, err := newTestSignedRequest(http.MethodDelete, getDeleteObjectURL(s.endPoint, bucketName, objectName), + 0, nil, s.accessKey, s.secretKey, s.signer) + c.Assert(err, nil) + response, err = s.client.Do(delRequest) + c.Assert(err, nil) + c.Assert(response.StatusCode, http.StatusNoContent) + } + testCases := []struct { + getURL string + expectedStrings []string + userAgent string + }{ + { + getListObjectsV2URL(s.endPoint, bucketName, "pfx/a/", "2", "", "", "/"), + []string{ + "pfx/a/1.txt", + }, + "Hadoop 3.3.2, aws-sdk-java/1.12.262 Linux/5.14.0-362.24.1.el9_3.x86_64 OpenJDK_64-Bit_Server_VM/11.0.22+7 java/11.0.22 scala/2.12.15 vendor/Eclipse_Adoptium cfg/retry-mode/legacy", + }, + { + getListObjectsV2URL(s.endPoint, bucketName, "pfx/a/", "2", "", "", "/"), + []string{ + "pfx/a/", + "pfx/a/1.txt", + }, + "", + }, + { + getListObjectsV2URL(s.endPoint, bucketName, "pfx2/c", "2", "true", "", "/"), + []string{ + "pfx2/c12/falsepfx2/c/", + }, + "", + }, + + { + getListObjectsV2URL(s.endPoint, bucketName, "pfx2/c", "2", "true", "", "/"), + []string{ + "pfx2/c12/false", + "pfx2/c/", + }, + "Hadoop 3.3.2, aws-sdk-java/1.12.262 Linux/5.14.0-362.24.1.el9_3.x86_64 OpenJDK_64-Bit_Server_VM/11.0.22+7 java/11.0.22 scala/2.12.15 vendor/Eclipse_Adoptium cfg/retry-mode/legacy", + }, + { + getListObjectsV2URL(s.endPoint, bucketName, "pfx2/c", "2", "true", "", "/"), + []string{ + "pfx2/c12/false", + "pfx2/c/", + }, + "Hadoop 3.3.2, aws-sdk-java/1.12.262 Linux/5.14.0-362.24.1.el9_3.x86_64 OpenJDK_64-Bit_Server_VM/11.0.22+7 java/11.0.22 scala/2.12.15 vendor/Eclipse_Adoptium cfg/retry-mode/legacy", + }, + { + getListObjectsV2URL(s.endPoint, bucketName, "pfx2/", "2", "false", "", "/"), + []string{ + "pfx2/22/false", + "pfx2/c/pfx2/d/", + }, + "Hadoop 3.3.2, aws-sdk-java/1.12.262 Linux/5.14.0-362.24.1.el9_3.x86_64 OpenJDK_64-Bit_Server_VM/11.0.22+7 java/11.0.22 scala/2.12.15 vendor/Eclipse_Adoptium cfg/retry-mode/legacy", + }, + { + getListObjectsV2URL(s.endPoint, bucketName, "pfx2/", "2", "false", "", "/"), + []string{ 
+ "pfx2/c/", + }, + "", + }, + { + getListObjectsV2URL(s.endPoint, bucketName, "pfx2/", "2", "false", "", "/"), + []string{ + "pfx2/22/false", + "pfx2/c/pfx2/d/", + }, + "Hadoop 3.3.2, aws-sdk-java/1.12.262 Linux/5.14.0-362.24.1.el9_3.x86_64 OpenJDK_64-Bit_Server_VM/11.0.22+7 java/11.0.22 scala/2.12.15 vendor/Eclipse_Adoptium cfg/retry-mode/legacy", + }, + { + getListObjectsV2URL(s.endPoint, bucketName, "pfx3/", "2", "false", "", "/"), + []string{ + "pfx3/02/false", + }, + "Hadoop 3.3.2, aws-sdk-java/1.12.262 Linux/5.14.0-362.24.1.el9_3.x86_64 OpenJDK_64-Bit_Server_VM/11.0.22+7 java/11.0.22 scala/2.12.15 vendor/Eclipse_Adoptium cfg/retry-mode/legacy", + }, + { + getListObjectsV2URL(s.endPoint, bucketName, "pfx4/", "2", "false", "", "/"), + []string{ + "pfx4/12/falsepfx4/", + }, + "Hadoop 3.3.2, aws-sdk-java/1.12.262 Linux/5.14.0-362.24.1.el9_3.x86_64 OpenJDK_64-Bit_Server_VM/11.0.22+7 java/11.0.22 scala/2.12.15 vendor/Eclipse_Adoptium cfg/retry-mode/legacy", + }, + { + getListObjectsV2URL(s.endPoint, bucketName, "pfx3/", "2", "false", "", "/"), + []string{ + "pfx3/02/false", + }, + "", + }, + { + getListObjectsV2URL(s.endPoint, bucketName, "pfx4/", "2", "false", "", "/"), + []string{ + "pfx4/12/falsepfx4/", + }, + "", + }, + } for _, testCase := range testCases { // create listObjectsV1 request with valid parameters request, err = newTestSignedRequest(http.MethodGet, testCase.getURL, 0, nil, s.accessKey, s.secretKey, s.signer) + request.Header.Set("User-Agent", testCase.userAgent) c.Assert(err, nil) // execute the HTTP request. response, err = s.client.Do(request) @@ -1740,7 +1993,7 @@ func (s *TestSuiteCommon) TestListObjectsSpecialCharactersHandler(c *check) { {getListObjectsV1URL(s.endPoint, bucketName, "", "1000", ""), []string{"foo bar 1", "foo bar 2", "foo  bar"}}, {getListObjectsV1URL(s.endPoint, bucketName, "", "1000", "url"), []string{"foo+bar+1", "foo+bar+2", "foo+%01+bar"}}, { - getListObjectsV2URL(s.endPoint, bucketName, "", "1000", "", ""), + getListObjectsV2URL(s.endPoint, bucketName, "", "1000", "", "", ""), []string{ "foo bar 1", "foo bar 2", @@ -1749,7 +2002,7 @@ func (s *TestSuiteCommon) TestListObjectsSpecialCharactersHandler(c *check) { }, }, { - getListObjectsV2URL(s.endPoint, bucketName, "", "1000", "true", ""), + getListObjectsV2URL(s.endPoint, bucketName, "", "1000", "true", "", ""), []string{ "foo bar 1", "foo bar 2", @@ -1757,7 +2010,7 @@ func (s *TestSuiteCommon) TestListObjectsSpecialCharactersHandler(c *check) { fmt.Sprintf("%sminio", globalMinioDefaultOwnerID), }, }, - {getListObjectsV2URL(s.endPoint, bucketName, "", "1000", "", "url"), []string{"foo+bar+1", "foo+bar+2", "foo+%01+bar"}}, + {getListObjectsV2URL(s.endPoint, bucketName, "", "1000", "", "url", ""), []string{"foo+bar+1", "foo+bar+2", "foo+%01+bar"}}, } for _, testCase := range testCases { @@ -1804,7 +2057,7 @@ func (s *TestSuiteCommon) TestListObjectsHandlerErrors(c *check) { verifyError(c, response, "InvalidArgument", "Argument maxKeys must be an integer between 0 and 2147483647", http.StatusBadRequest) // create listObjectsV2 request with invalid value of max-keys parameter. max-keys is set to -2. - request, err = newTestSignedRequest(http.MethodGet, getListObjectsV2URL(s.endPoint, bucketName, "", "-2", "", ""), + request, err = newTestSignedRequest(http.MethodGet, getListObjectsV2URL(s.endPoint, bucketName, "", "-2", "", "", ""), 0, nil, s.accessKey, s.secretKey, s.signer) c.Assert(err, nil) // execute the HTTP request. 
@@ -1874,7 +2127,7 @@ func (s *TestSuiteCommon) TestGetObjectLarge10MiB(c *check) { 1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890, 1234567890,1234567890,1234567890,1234567890,1234567890,123"` // Create 10MiB content where each line contains 1024 characters. - for i := 0; i < 10*1024; i++ { + for i := range 10 * 1024 { buffer.WriteString(fmt.Sprintf("[%05d] %s\n", i, line)) } putContent := buffer.String() @@ -1936,7 +2189,7 @@ func (s *TestSuiteCommon) TestGetObjectLarge11MiB(c *check) { 1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890, 1234567890,1234567890,1234567890,123` // Create 11MiB content where each line contains 1024 characters. - for i := 0; i < 11*1024; i++ { + for i := range 11 * 1024 { buffer.WriteString(fmt.Sprintf("[%05d] %s\n", i, line)) } putMD5 := getMD5Hash(buffer.Bytes()) @@ -2087,7 +2340,7 @@ func (s *TestSuiteCommon) TestGetPartialObjectLarge11MiB(c *check) { 1234567890,1234567890,1234567890,123` // Create 11MiB content where each line contains 1024 // characters. - for i := 0; i < 11*1024; i++ { + for i := range 11 * 1024 { buffer.WriteString(fmt.Sprintf("[%05d] %s\n", i, line)) } putContent := buffer.String() @@ -2153,7 +2406,7 @@ func (s *TestSuiteCommon) TestGetPartialObjectLarge10MiB(c *check) { 1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890,1234567890, 1234567890,1234567890,1234567890,123` // Create 10MiB content where each line contains 1024 characters. - for i := 0; i < 10*1024; i++ { + for i := range 10 * 1024 { buffer.WriteString(fmt.Sprintf("[%05d] %s\n", i, line)) } diff --git a/cmd/sftp-server-driver.go b/cmd/sftp-server-driver.go index a713c30e38d1b..3ce7c0b43adf1 100644 --- a/cmd/sftp-server-driver.go +++ b/cmd/sftp-server-driver.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2023 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. // // This file is part of MinIO Object Storage stack // @@ -23,7 +23,9 @@ import ( "errors" "fmt" "io" + "net/http" "os" + "path" "strings" "sync" "time" @@ -31,16 +33,20 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" - "github.com/minio/minio/internal/auth" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" + "github.com/minio/pkg/v3/mimedb" "github.com/pkg/sftp" "golang.org/x/crypto/ssh" ) +// Maximum write offset for incoming SFTP blocks. +// Set to 100MiB to prevent hostile DOS attacks. 
+const ftpMaxWriteOffset = 100 << 20 + type sftpDriver struct { permissions *ssh.Permissions endpoint string + remoteIP string } //msgp:ignore sftpMetrics @@ -48,7 +54,7 @@ type sftpMetrics struct{} var globalSftpMetrics sftpMetrics -func sftpTrace(s *sftp.Request, startTime time.Time, source string, user string, err error) madmin.TraceInfo { +func sftpTrace(s *sftp.Request, startTime time.Time, source string, user string, err error, sz int64) madmin.TraceInfo { var errStr string if err != nil { errStr = err.Error() @@ -57,18 +63,25 @@ func sftpTrace(s *sftp.Request, startTime time.Time, source string, user string, TraceType: madmin.TraceFTP, Time: startTime, NodeName: globalLocalNodeName, - FuncName: fmt.Sprintf("sftp USER=%s COMMAND=%s PARAM=%s, Source=%s", user, s.Method, s.Filepath, source), + FuncName: s.Method, Duration: time.Since(startTime), Path: s.Filepath, Error: errStr, + Bytes: sz, + Custom: map[string]string{ + "user": user, + "cmd": s.Method, + "param": s.Filepath, + "source": source, + }, } } -func (m *sftpMetrics) log(s *sftp.Request, user string) func(err error) { +func (m *sftpMetrics) log(s *sftp.Request, user string) func(sz int64, err error) { startTime := time.Now() source := getSource(2) - return func(err error) { - globalTrace.Publish(sftpTrace(s, startTime, source, user, err)) + return func(sz int64, err error) { + globalTrace.Publish(sftpTrace(s, startTime, source, user, err, sz)) } } @@ -78,8 +91,12 @@ func (m *sftpMetrics) log(s *sftp.Request, user string) func(err error) { // - sftp.Filewrite // - sftp.Filelist // - sftp.Filecmd -func NewSFTPDriver(perms *ssh.Permissions) sftp.Handlers { - handler := &sftpDriver{endpoint: fmt.Sprintf("127.0.0.1:%s", globalMinioPort), permissions: perms} +func NewSFTPDriver(perms *ssh.Permissions, remoteIP string) sftp.Handlers { + handler := &sftpDriver{ + endpoint: fmt.Sprintf("127.0.0.1:%s", globalMinioPort), + permissions: perms, + remoteIP: remoteIP, + } return sftp.Handlers{ FileGet: handler, FilePut: handler, @@ -88,101 +105,43 @@ func NewSFTPDriver(perms *ssh.Permissions) sftp.Handlers { } } -func (f *sftpDriver) getMinIOClient() (*minio.Client, error) { - ui, ok := globalIAMSys.GetUser(context.Background(), f.AccessKey()) - if !ok && !globalIAMSys.LDAPConfig.Enabled() { - return nil, errNoSuchUser - } - if !ok && globalIAMSys.LDAPConfig.Enabled() { - sa, _, err := globalIAMSys.getServiceAccount(context.Background(), f.AccessKey()) - if err != nil && !errors.Is(err, errNoSuchServiceAccount) { - return nil, err - } - var mcreds *credentials.Credentials - if errors.Is(err, errNoSuchServiceAccount) { - targetUser, targetGroups, err := globalIAMSys.LDAPConfig.LookupUserDN(f.AccessKey()) - if err != nil { - return nil, err - } - expiryDur, err := globalIAMSys.LDAPConfig.GetExpiryDuration("") - if err != nil { - return nil, err - } - claims := make(map[string]interface{}) - claims[expClaim] = UTCNow().Add(expiryDur).Unix() - for k, v := range f.permissions.CriticalOptions { - claims[k] = v - } - - cred, err := auth.GetNewCredentialsWithMetadata(claims, globalActiveCred.SecretKey) - if err != nil { - return nil, err - } - - // Set the parent of the temporary access key, this is useful - // in obtaining service accounts by this cred. 
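The ftpMaxWriteOffset constant above backs the WriteAt guard added further down in this file: SFTP clients may deliver packets out of order, so the driver streams bytes that arrive at the expected offset, parks anything ahead of it in memory, and refuses offsets more than 100 MiB past the cursor so a hostile client cannot grow that buffer without bound. A simplified standalone model of the idea (not the driver's actual writerAt type):

```go
package main

import (
	"errors"
	"fmt"
)

// reorderBuffer streams bytes arriving at the expected offset, parks
// out-of-order chunks, and rejects offsets too far ahead of the cursor.
type reorderBuffer struct {
	next     int64
	maxAhead int64
	parked   map[int64][]byte
	out      []byte // stands in for the streaming upload pipe
}

func (r *reorderBuffer) WriteAt(b []byte, off int64) error {
	if off > r.next+r.maxAhead {
		return errors.New("write offset too far ahead of next offset")
	}
	if off != r.next {
		cp := make([]byte, len(b))
		copy(cp, b)
		r.parked[off] = cp
		return nil
	}
	r.out = append(r.out, b...)
	r.next += int64(len(b))
	// Drain parked chunks that have become contiguous.
	for nb, ok := r.parked[r.next]; ok; nb, ok = r.parked[r.next] {
		delete(r.parked, r.next)
		r.out = append(r.out, nb...)
		r.next += int64(len(nb))
	}
	return nil
}

func main() {
	r := &reorderBuffer{maxAhead: 8, parked: map[int64][]byte{}}
	fmt.Println(r.WriteAt([]byte("cd"), 2))   // parked, arrives early
	fmt.Println(r.WriteAt([]byte("ab"), 0))   // streams "abcd"
	fmt.Println(r.WriteAt([]byte("zz"), 100)) // rejected, too far ahead
	fmt.Println(string(r.out), r.next)
}
```

Parked chunks are drained as soon as the gap before them is filled, which is what lets the upload remain a single streaming PutObject on the server side.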
- cred.ParentUser = targetUser - - // Set this value to LDAP groups, LDAP user can be part - // of large number of groups - cred.Groups = targetGroups - - // Set the newly generated credentials, policyName is empty on purpose - // LDAP policies are applied automatically using their ldapUser, ldapGroups - // mapping. - updatedAt, err := globalIAMSys.SetTempUser(context.Background(), cred.AccessKey, cred, "") - if err != nil { - return nil, err - } - - // Call hook for site replication. - logger.LogIf(context.Background(), globalSiteReplicationSys.IAMChangeHook(context.Background(), madmin.SRIAMItem{ - Type: madmin.SRIAMItemSTSAcc, - STSCredential: &madmin.SRSTSCredential{ - AccessKey: cred.AccessKey, - SecretKey: cred.SecretKey, - SessionToken: cred.SessionToken, - ParentUser: cred.ParentUser, - }, - UpdatedAt: updatedAt, - })) - - mcreds = credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken) - } else { - mcreds = credentials.NewStaticV4(sa.Credentials.AccessKey, sa.Credentials.SecretKey, "") - } - - return minio.New(f.endpoint, &minio.Options{ - Creds: mcreds, - Secure: globalIsTLS, - Transport: globalRemoteFTPClientTransport, - }) - } +type forwardForTransport struct { + tr http.RoundTripper + fwd string +} - // ok == true - at this point +func (f forwardForTransport) RoundTrip(r *http.Request) (*http.Response, error) { + r.Header.Set("X-Forwarded-For", f.fwd) + return f.tr.RoundTrip(r) +} - if ui.Credentials.IsTemp() { - // Temporary credentials are not allowed. - return nil, errAuthentication +func (f *sftpDriver) getMinIOClient() (*minio.Client, error) { + mcreds := credentials.NewStaticV4( + f.permissions.CriticalOptions["AccessKey"], + f.permissions.CriticalOptions["SecretKey"], + f.permissions.CriticalOptions["SessionToken"], + ) + // Set X-Forwarded-For on all requests. + tr := http.RoundTripper(globalRemoteFTPClientTransport) + if f.remoteIP != "" { + tr = forwardForTransport{tr: tr, fwd: f.remoteIP} } - return minio.New(f.endpoint, &minio.Options{ - Creds: credentials.NewStaticV4(ui.Credentials.AccessKey, ui.Credentials.SecretKey, ""), - Secure: globalIsTLS, - Transport: globalRemoteFTPClientTransport, + TrailingHeaders: true, + Creds: mcreds, + Secure: globalIsTLS, + Transport: tr, }) } func (f *sftpDriver) AccessKey() string { - if _, ok := f.permissions.CriticalOptions["accessKey"]; !ok { - return f.permissions.CriticalOptions[ldapUserN] - } - return f.permissions.CriticalOptions["accessKey"] + return f.permissions.CriticalOptions["AccessKey"] } func (f *sftpDriver) Fileread(r *sftp.Request) (ra io.ReaderAt, err error) { + // This is not timing the actual read operation, but the time it takes to prepare the reader. stopFn := globalSftpMetrics.log(r, f.AccessKey()) - defer stopFn(err) + defer stopFn(0, err) flags := r.Pflags() if !flags.Read { @@ -260,6 +219,9 @@ func (w *writerAt) WriteAt(b []byte, offset int64) (n int, err error) { n, err = w.w.Write(b) w.nextOffset += int64(n) } else { + if offset > w.nextOffset+ftpMaxWriteOffset { + return 0, fmt.Errorf("write offset %d is too far ahead of next offset %d", offset, w.nextOffset) + } w.buffer[offset] = make([]byte, len(b)) copy(w.buffer[offset], b) n = len(b) @@ -285,7 +247,12 @@ again: func (f *sftpDriver) Filewrite(r *sftp.Request) (w io.WriterAt, err error) { stopFn := globalSftpMetrics.log(r, f.AccessKey()) - defer stopFn(err) + defer func() { + if err != nil { + // If there is an error, we never started the goroutine. 
+ stopFn(0, err) + } + }() flags := r.Pflags() if !flags.Write { @@ -320,7 +287,12 @@ func (f *sftpDriver) Filewrite(r *sftp.Request) (w io.WriterAt, err error) { } wa.wg.Add(1) go func() { - _, err := clnt.PutObject(r.Context(), bucket, object, pr, -1, minio.PutObjectOptions{SendContentMd5: true}) + oi, err := clnt.PutObject(r.Context(), bucket, object, pr, -1, minio.PutObjectOptions{ + ContentType: mimedb.TypeByExtension(path.Ext(object)), + DisableContentSha256: true, + Checksum: minio.ChecksumFullObjectCRC32C, + }) + stopFn(oi.Size, err) pr.CloseWithError(err) wa.wg.Done() }() @@ -329,7 +301,7 @@ func (f *sftpDriver) Filewrite(r *sftp.Request) (w io.WriterAt, err error) { func (f *sftpDriver) Filecmd(r *sftp.Request) (err error) { stopFn := globalSftpMetrics.log(r, f.AccessKey()) - defer stopFn(err) + defer stopFn(0, err) clnt, err := f.getMinIOClient() if err != nil { @@ -338,7 +310,7 @@ func (f *sftpDriver) Filecmd(r *sftp.Request) (err error) { switch r.Method { case "Setstat", "Rename", "Link", "Symlink": - return NotImplemented{} + return sftp.ErrSSHFxOpUnsupported case "Rmdir": bucket, prefix := path2BucketObject(r.Filepath) @@ -394,16 +366,13 @@ func (f *sftpDriver) Filecmd(r *sftp.Request) (err error) { } if prefix == "" { - return clnt.MakeBucket(context.Background(), bucket, minio.MakeBucketOptions{Region: globalSite.Region}) + return clnt.MakeBucket(context.Background(), bucket, minio.MakeBucketOptions{Region: globalSite.Region()}) } dirPath := buildMinioDir(prefix) _, err = clnt.PutObject(context.Background(), bucket, dirPath, bytes.NewReader([]byte("")), 0, - // Always send Content-MD5 to succeed with bucket with - // locking enabled. There is no performance hit since - // this is always an empty object - minio.PutObjectOptions{SendContentMd5: true}, + minio.PutObjectOptions{DisableContentSha256: true}, ) return err } @@ -428,7 +397,7 @@ func (f listerAt) ListAt(ls []os.FileInfo, offset int64) (int, error) { func (f *sftpDriver) Filelist(r *sftp.Request) (la sftp.ListerAt, err error) { stopFn := globalSftpMetrics.log(r, f.AccessKey()) - defer stopFn(err) + defer stopFn(0, err) clnt, err := f.getMinIOClient() if err != nil { diff --git a/cmd/sftp-server.go b/cmd/sftp-server.go index ee9331aa5e432..0a0164a4b35fd 100644 --- a/cmd/sftp-server.go +++ b/cmd/sftp-server.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2023 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. 
// // This file is part of MinIO Object Storage stack // @@ -28,12 +28,325 @@ import ( "strings" "time" + "github.com/minio/madmin-go/v3" + "github.com/minio/minio/internal/auth" "github.com/minio/minio/internal/logger" - xsftp "github.com/minio/pkg/v2/sftp" + xldap "github.com/minio/pkg/v3/ldap" + xsftp "github.com/minio/pkg/v3/sftp" "github.com/pkg/sftp" "golang.org/x/crypto/ssh" ) +const ( + kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" + kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" + kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256" + kexAlgoDH16SHA512 = "diffie-hellman-group16-sha512" + kexAlgoECDH256 = "ecdh-sha2-nistp256" + kexAlgoECDH384 = "ecdh-sha2-nistp384" + kexAlgoECDH521 = "ecdh-sha2-nistp521" + kexAlgoCurve25519SHA256LibSSH = "curve25519-sha256@libssh.org" + kexAlgoCurve25519SHA256 = "curve25519-sha256" + + chacha20Poly1305ID = "chacha20-poly1305@openssh.com" + gcm256CipherID = "aes256-gcm@openssh.com" + aes128cbcID = "aes128-cbc" + tripledescbcID = "3des-cbc" +) + +var ( + errSFTPPublicKeyBadFormat = errors.New("the public key provided could not be parsed") + errSFTPUserHasNoPolicies = errors.New("no policies present on this account") + errSFTPLDAPNotEnabled = errors.New("ldap authentication is not enabled") +) + +// if the sftp parameter --trusted-user-ca-key is set, then +// the final form of the key file will be set as this variable. +var globalSFTPTrustedCAPubkey ssh.PublicKey + +// https://cs.opensource.google/go/x/crypto/+/refs/tags/v0.22.0:ssh/common.go;l=46 +// preferredKexAlgos specifies the default preference for key-exchange +// algorithms in preference order. The diffie-hellman-group16-sha512 algorithm +// is disabled by default because it is a bit slower than the others. +var preferredKexAlgos = []string{ + kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, + kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, + kexAlgoDH14SHA256, kexAlgoDH14SHA1, +} + +// supportedKexAlgos specifies the supported key-exchange algorithms in +// preference order. +// https://cs.opensource.google/go/x/crypto/+/refs/tags/v0.22.0:ssh/common.go;l=44 +var supportedKexAlgos = []string{ + kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH, + // P384 and P521 are not constant-time yet, but since we don't + // reuse ephemeral keys, using them for ECDH should be OK. + kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, + kexAlgoDH14SHA256, kexAlgoDH16SHA512, kexAlgoDH14SHA1, + kexAlgoDH1SHA1, +} + +// supportedPubKeyAuthAlgos specifies the supported client public key +// authentication algorithms. Note that this doesn't include certificate types +// since those use the underlying algorithm. This list is sent to the client if +// it supports the server-sig-algs extension. Order is irrelevant. +// https://cs.opensource.google/go/x/crypto/+/refs/tags/v0.22.0:ssh/common.go;l=142 +var supportedPubKeyAuthAlgos = []string{ + ssh.KeyAlgoED25519, + ssh.KeyAlgoSKED25519, ssh.KeyAlgoSKECDSA256, + ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521, + ssh.KeyAlgoRSASHA256, ssh.KeyAlgoRSASHA512, ssh.KeyAlgoRSA, + ssh.KeyAlgoDSA, +} + +// supportedCiphers lists ciphers we support but might not recommend. +// https://cs.opensource.google/go/x/crypto/+/refs/tags/v0.22.0:ssh/common.go;l=28 +var supportedCiphers = []string{ + "aes128-ctr", "aes192-ctr", "aes256-ctr", + "aes128-gcm@openssh.com", gcm256CipherID, + chacha20Poly1305ID, + "arcfour256", "arcfour128", "arcfour", + aes128cbcID, + tripledescbcID, +} + +// preferredCiphers specifies the default preference for ciphers. 
+// https://cs.opensource.google/go/x/crypto/+/refs/tags/v0.22.0:ssh/common.go;l=37 +var preferredCiphers = []string{ + "aes128-gcm@openssh.com", gcm256CipherID, + chacha20Poly1305ID, + "aes128-ctr", "aes192-ctr", "aes256-ctr", +} + +// supportedMACs specifies a default set of MAC algorithms in preference order. +// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed +// because they have reached the end of their useful life. +// https://cs.opensource.google/go/x/crypto/+/refs/tags/v0.22.0:ssh/common.go;l=85 +var supportedMACs = []string{ + "hmac-sha2-256-etm@openssh.com", "hmac-sha2-512-etm@openssh.com", "hmac-sha2-256", "hmac-sha2-512", "hmac-sha1", "hmac-sha1-96", +} + +func sshPubKeyAuth(c ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) { + return authenticateSSHConnection(c, key, nil) +} + +func sshPasswordAuth(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { + return authenticateSSHConnection(c, nil, pass) +} + +func authenticateSSHConnection(c ssh.ConnMetadata, key ssh.PublicKey, pass []byte) (*ssh.Permissions, error) { + user, found := strings.CutSuffix(c.User(), "=ldap") + if found { + if !globalIAMSys.LDAPConfig.Enabled() { + return nil, errSFTPLDAPNotEnabled + } + return processLDAPAuthentication(key, pass, user) + } + + user, found = strings.CutSuffix(c.User(), "=svc") + if found { + goto internalAuth + } + + if globalIAMSys.LDAPConfig.Enabled() { + perms, _ := processLDAPAuthentication(key, pass, user) + if perms != nil { + return perms, nil + } + } + +internalAuth: + ui, ok := globalIAMSys.GetUser(context.Background(), user) + if !ok { + return nil, errNoSuchUser + } + + if globalSFTPTrustedCAPubkey != nil && pass == nil { + err := validateClientKeyIsTrusted(c, key) + if err != nil { + return nil, errAuthentication + } + } else { + // Temporary credentials are not allowed. + if ui.Credentials.IsTemp() { + return nil, errAuthentication + } + if subtle.ConstantTimeCompare([]byte(ui.Credentials.SecretKey), pass) != 1 { + return nil, errAuthentication + } + } + + copts := map[string]string{ + "AccessKey": ui.Credentials.AccessKey, + "SecretKey": ui.Credentials.SecretKey, + } + if ui.Credentials.IsTemp() { + copts["SessionToken"] = ui.Credentials.SessionToken + } + + return &ssh.Permissions{ + CriticalOptions: copts, + Extensions: make(map[string]string), + }, nil +} + +func processLDAPAuthentication(key ssh.PublicKey, pass []byte, user string) (perms *ssh.Permissions, err error) { + var lookupResult *xldap.DNSearchResult + var targetGroups []string + + if pass == nil && key == nil { + return nil, errAuthentication + } + + if pass != nil { + sa, _, err := globalIAMSys.getServiceAccount(context.Background(), user) + if err == nil { + if subtle.ConstantTimeCompare([]byte(sa.Credentials.SecretKey), pass) != 1 { + return nil, errAuthentication + } + + return &ssh.Permissions{ + CriticalOptions: map[string]string{ + "AccessKey": sa.Credentials.AccessKey, + "SecretKey": sa.Credentials.SecretKey, + }, + Extensions: make(map[string]string), + }, nil + } + + if !errors.Is(err, errNoSuchServiceAccount) { + return nil, err + } + + lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.Bind(user, string(pass)) + if err != nil { + return nil, err + } + } else if key != nil { + lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(user) + if err != nil { + return nil, err + } + } + + if lookupResult == nil { + return nil, errNoSuchUser + } + + ldapPolicies, _ := globalIAMSys.PolicyDBGet(lookupResult.NormDN, targetGroups...) 
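The PolicyDBGet lookup just above feeds the errSFTPUserHasNoPolicies check that follows: an LDAP identity can log in over SFTP only if at least one policy is already mapped to its DN or groups. A sketch of preparing such a mapping with the madmin client, as the SFTP tests later in this patch do via AttachPolicyLDAP; the endpoint, admin credentials and DN below are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/minio/madmin-go/v3"
)

func main() {
	// Placeholders: point this at a real deployment and admin credentials.
	adm, err := madmin.New("localhost:9000", "minioadmin", "minioadmin", false)
	if err != nil {
		log.Fatal(err)
	}

	// Map an existing canned policy to the LDAP user's DN so the
	// zero-policy check in processLDAPAuthentication passes.
	if _, err := adm.AttachPolicyLDAP(context.Background(), madmin.PolicyAssociationReq{
		Policies: []string{"readwrite"},
		User:     "uid=dillon,ou=people,ou=swengg,dc=min,dc=io",
	}); err != nil {
		log.Fatal(err)
	}
}
```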
+ if len(ldapPolicies) == 0 { + return nil, errSFTPUserHasNoPolicies + } + + claims := make(map[string]any) + for attribKey, attribValue := range lookupResult.Attributes { + // we skip multi-value attributes here, as they cannot + // be stored in the critical options. + if len(attribValue) != 1 { + continue + } + + if attribKey == "sshPublicKey" && key != nil { + key2, _, _, _, err := ssh.ParseAuthorizedKey([]byte(attribValue[0])) + if err != nil { + return nil, errSFTPPublicKeyBadFormat + } + + if subtle.ConstantTimeCompare(key2.Marshal(), key.Marshal()) != 1 { + return nil, errAuthentication + } + } + // Save each attribute to claims. + claims[ldapAttribPrefix+attribKey] = attribValue[0] + } + + if key != nil { + // If a key was provided, we expect the user to have an sshPublicKey + // attribute. + if _, ok := claims[ldapAttribPrefix+"sshPublicKey"]; !ok { + return nil, errAuthentication + } + } + + expiryDur, err := globalIAMSys.LDAPConfig.GetExpiryDuration("") + if err != nil { + return nil, err + } + + claims[expClaim] = UTCNow().Add(expiryDur).Unix() + claims[ldapUserN] = user + claims[ldapUser] = lookupResult.NormDN + + cred, err := auth.GetNewCredentialsWithMetadata(claims, globalActiveCred.SecretKey) + if err != nil { + return nil, err + } + + // Set the parent of the temporary access key, this is useful + // in obtaining service accounts by this cred. + cred.ParentUser = lookupResult.NormDN + + // Set this value to LDAP groups, LDAP user can be part + // of large number of groups + cred.Groups = targetGroups + + // Set the newly generated credentials, policyName is empty on purpose + // LDAP policies are applied automatically using their ldapUser, ldapGroups + // mapping. + updatedAt, err := globalIAMSys.SetTempUser(context.Background(), cred.AccessKey, cred, "") + if err != nil { + return nil, err + } + + replLogIf(context.Background(), globalSiteReplicationSys.IAMChangeHook(context.Background(), madmin.SRIAMItem{ + Type: madmin.SRIAMItemSTSAcc, + STSCredential: &madmin.SRSTSCredential{ + AccessKey: cred.AccessKey, + SecretKey: cred.SecretKey, + SessionToken: cred.SessionToken, + ParentUser: cred.ParentUser, + }, + UpdatedAt: updatedAt, + })) + + return &ssh.Permissions{ + CriticalOptions: map[string]string{ + "AccessKey": cred.AccessKey, + "SecretKey": cred.SecretKey, + "SessionToken": cred.SessionToken, + }, + Extensions: make(map[string]string), + }, nil +} + +func validateClientKeyIsTrusted(c ssh.ConnMetadata, clientKey ssh.PublicKey) (err error) { + if globalSFTPTrustedCAPubkey == nil { + return errors.New("public key authority validation requested but no ca public key specified.") + } + + cert, ok := clientKey.(*ssh.Certificate) + if !ok { + return errSftpPublicKeyWithoutCert + } + + // ssh.CheckCert called by ssh.Authenticate accepts certificates + // with empty principles list so we block those in here. + if len(cert.ValidPrincipals) == 0 { + return errSftpCertWithoutPrincipals + } + + // Verify that certificate provided by user is issued by trusted CA, + // username in authentication request matches to identities in certificate + // and that certificate type is correct. 
+ checker := ssh.CertChecker{} + checker.IsUserAuthority = func(k ssh.PublicKey) bool { + return subtle.ConstantTimeCompare(globalSFTPTrustedCAPubkey.Marshal(), k.Marshal()) == 1 + } + + _, err = checker.Authenticate(c, clientKey) + return err +} + type sftpLogger struct{} func (s *sftpLogger) Info(tag xsftp.LogType, msg string) { @@ -43,24 +356,56 @@ func (s *sftpLogger) Info(tag xsftp.LogType, msg string) { func (s *sftpLogger) Error(tag xsftp.LogType, err error) { switch tag { case xsftp.AcceptNetworkError: - logger.LogOnceIf(context.Background(), err, "accept-limit-sftp") + sftpLogOnceIf(context.Background(), err, "accept-limit-sftp") case xsftp.AcceptChannelError: - logger.LogOnceIf(context.Background(), err, "accept-channel-sftp") + sftpLogOnceIf(context.Background(), err, "accept-channel-sftp") case xsftp.SSHKeyExchangeError: - logger.LogOnceIf(context.Background(), err, "key-exchange-sftp") + sftpLogOnceIf(context.Background(), err, "key-exchange-sftp") default: - logger.LogOnceIf(context.Background(), err, "unknown-error-sftp") + sftpLogOnceIf(context.Background(), err, "unknown-error-sftp") + } +} + +func filterAlgos(arg string, want []string, allowed []string) []string { + var filteredAlgos []string + found := false + for _, algo := range want { + if len(algo) == 0 { + continue + } + for _, allowedAlgo := range allowed { + algo := strings.ToLower(strings.TrimSpace(algo)) + if algo == allowedAlgo { + filteredAlgos = append(filteredAlgos, algo) + found = true + break + } + } + if !found { + logger.Fatal(fmt.Errorf("unknown algorithm %q passed to --sftp=%s\nValid algorithms: %v", algo, arg, strings.Join(allowed, ", ")), "unable to start SFTP server") + } } + if len(filteredAlgos) == 0 { + logger.Fatal(fmt.Errorf("no valid algorithms passed to --sftp=%s\nValid algorithms: %v", arg, strings.Join(allowed, ", ")), "unable to start SFTP server") + } + return filteredAlgos } func startSFTPServer(args []string) { var ( - port int - publicIP string - sshPrivateKey string + port int + publicIP string + sshPrivateKey string + userCaKeyFile string + disablePassAuth bool ) + allowPubKeys := supportedPubKeyAuthAlgos + allowKexAlgos := preferredKexAlgos + allowCiphers := preferredCiphers + allowMACs := supportedMACs var err error + for _, arg := range args { tokens := strings.SplitN(arg, "=", 2) if len(tokens) != 2 { @@ -82,6 +427,18 @@ func startSFTPServer(args []string) { publicIP = host case "ssh-private-key": sshPrivateKey = tokens[1] + case "pub-key-algos": + allowPubKeys = filterAlgos(arg, strings.Split(tokens[1], ","), supportedPubKeyAuthAlgos) + case "kex-algos": + allowKexAlgos = filterAlgos(arg, strings.Split(tokens[1], ","), supportedKexAlgos) + case "cipher-algos": + allowCiphers = filterAlgos(arg, strings.Split(tokens[1], ","), supportedCiphers) + case "mac-algos": + allowMACs = filterAlgos(arg, strings.Split(tokens[1], ","), supportedMACs) + case "trusted-user-ca-key": + userCaKeyFile = tokens[1] + case "disable-password-auth": + disablePassAuth, _ = strconv.ParseBool(tokens[1]) } } @@ -103,64 +460,45 @@ func startSFTPServer(args []string) { logger.Fatal(fmt.Errorf("invalid arguments passed, private key file is not parseable: %v", err), "unable to start SFTP server") } + if userCaKeyFile != "" { + keyBytes, err := os.ReadFile(userCaKeyFile) + if err != nil { + logger.Fatal(fmt.Errorf("invalid arguments passed, trusted user certificate authority public key file is not accessible: %v", err), "unable to start SFTP server") + } + + globalSFTPTrustedCAPubkey, _, _, _, err = 
ssh.ParseAuthorizedKey(keyBytes) + if err != nil { + logger.Fatal(fmt.Errorf("invalid arguments passed, trusted user certificate authority public key file is not parseable: %v", err), "unable to start SFTP server") + } + } + // An SSH server is represented by a ServerConfig, which holds // certificate details and handles authentication of ServerConns. sshConfig := &ssh.ServerConfig{ - PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { - if globalIAMSys.LDAPConfig.Enabled() { - sa, _, err := globalIAMSys.getServiceAccount(context.Background(), c.User()) - if err != nil && !errors.Is(err, errNoSuchServiceAccount) { - return nil, err - } - if errors.Is(err, errNoSuchServiceAccount) { - targetUser, targetGroups, err := globalIAMSys.LDAPConfig.Bind(c.User(), string(pass)) - if err != nil { - return nil, err - } - ldapPolicies, _ := globalIAMSys.PolicyDBGet(targetUser, targetGroups...) - if len(ldapPolicies) == 0 { - return nil, errAuthentication - } - return &ssh.Permissions{ - CriticalOptions: map[string]string{ - ldapUser: targetUser, - ldapUserN: c.User(), - }, - Extensions: make(map[string]string), - }, nil - } - if subtle.ConstantTimeCompare([]byte(sa.Credentials.SecretKey), pass) == 1 { - return &ssh.Permissions{ - CriticalOptions: map[string]string{ - "accessKey": c.User(), - }, - Extensions: make(map[string]string), - }, nil - } - return nil, errAuthentication - } - - ui, ok := globalIAMSys.GetUser(context.Background(), c.User()) - if !ok { - return nil, errNoSuchUser - } - - if subtle.ConstantTimeCompare([]byte(ui.Credentials.SecretKey), pass) == 1 { - return &ssh.Permissions{ - CriticalOptions: map[string]string{ - "accessKey": c.User(), - }, - Extensions: make(map[string]string), - }, nil - } - return nil, errAuthentication + Config: ssh.Config{ + KeyExchanges: allowKexAlgos, + Ciphers: allowCiphers, + MACs: allowMACs, }, + PublicKeyAuthAlgorithms: allowPubKeys, + PublicKeyCallback: sshPubKeyAuth, + } + + if !disablePassAuth { + sshConfig.PasswordCallback = sshPasswordAuth + } else { + sshConfig.PasswordCallback = nil } sshConfig.AddHostKey(private) handleSFTPSession := func(channel ssh.Channel, sconn *ssh.ServerConn) { - server := sftp.NewRequestServer(channel, NewSFTPDriver(sconn.Permissions), sftp.WithRSAllocator()) + var remoteIP string + + if host, _, err := net.SplitHostPort(sconn.RemoteAddr().String()); err == nil { + remoteIP = host + } + server := sftp.NewRequestServer(channel, NewSFTPDriver(sconn.Permissions, remoteIP), sftp.WithRSAllocator()) defer server.Close() server.Serve() } diff --git a/cmd/sftp-server_test.go b/cmd/sftp-server_test.go new file mode 100644 index 0000000000000..0064230809ee3 --- /dev/null +++ b/cmd/sftp-server_test.go @@ -0,0 +1,349 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
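With the ssh.ServerConfig built in startSFTPServer above, the key-exchange, cipher, MAC and public-key algorithm sets are whatever survived filterAlgos, so a connecting client has to offer at least one algorithm from each allow-list. An illustrative client-side configuration; the host, user and password are placeholders, and the algorithm names assume the server was started with matching --sftp="kex-algos=...", "cipher-algos=..." and "mac-algos=..." options:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	cfg := &ssh.ClientConfig{
		User: "minioadmin",
		Auth: []ssh.AuthMethod{ssh.Password("minioadmin")},
		// Acceptable for a local sketch; verify the host key in real deployments.
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
		Config: ssh.Config{
			KeyExchanges: []string{"curve25519-sha256"},
			Ciphers:      []string{"aes256-gcm@openssh.com"},
			MACs:         []string{"hmac-sha2-256-etm@openssh.com"},
		},
	}
	fmt.Printf("offering kex=%v ciphers=%v macs=%v\n", cfg.KeyExchanges, cfg.Ciphers, cfg.MACs)
	// ssh.Dial("tcp", "localhost:8022", cfg) would then negotiate only within the
	// server's allow-lists; the handshake fails if the two sets do not overlap.
}
```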
+ +package cmd + +import ( + "context" + "errors" + "fmt" + "net" + "os" + "testing" + + "github.com/minio/madmin-go/v3" + "golang.org/x/crypto/ssh" +) + +type MockConnMeta struct { + username string +} + +func (m *MockConnMeta) User() string { + return m.username +} + +func (m *MockConnMeta) SessionID() []byte { + return []byte{} +} + +func (m *MockConnMeta) ClientVersion() []byte { + return []byte{} +} + +func (m *MockConnMeta) ServerVersion() []byte { + return []byte{} +} + +func (m *MockConnMeta) RemoteAddr() net.Addr { + return nil +} + +func (m *MockConnMeta) LocalAddr() net.Addr { + return nil +} + +func newSSHConnMock(username string) ssh.ConnMetadata { + return &MockConnMeta{username: username} +} + +func TestSFTPAuthentication(t *testing.T) { + for i, testCase := range iamTestSuites { + t.Run( + fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.ServerTypeDescription), + func(t *testing.T) { + c := &check{t, testCase.serverType} + suite := testCase + + suite.SetUpSuite(c) + + suite.SFTPServiceAccountLogin(c) + suite.SFTPInvalidServiceAccountPassword(c) + + // LDAP tests + ldapServer := os.Getenv(EnvTestLDAPServer) + if ldapServer == "" { + c.Skipf("Skipping LDAP test as no LDAP server is provided via %s", EnvTestLDAPServer) + } + + suite.SetUpLDAP(c, ldapServer) + + suite.SFTPFailedAuthDueToMissingPolicy(c) + suite.SFTPFailedAuthDueToInvalidUser(c) + suite.SFTPFailedForcedServiceAccountAuthOnLDAPUser(c) + suite.SFTPFailedAuthDueToInvalidPassword(c) + + suite.SFTPValidLDAPLoginWithPassword(c) + + suite.SFTPPublicKeyAuthentication(c) + suite.SFTPFailedPublicKeyAuthenticationInvalidKey(c) + suite.SFTPPublicKeyAuthNoPubKey(c) + + suite.TearDownSuite(c) + }, + ) + } +} + +func (s *TestSuiteIAM) SFTPFailedPublicKeyAuthenticationInvalidKey(c *check) { + keyBytes, err := os.ReadFile("./testdata/invalid_test_key.pub") + if err != nil { + c.Fatalf("could not read test key file: %s", err) + } + + testKey, _, _, _, err := ssh.ParseAuthorizedKey(keyBytes) + if err != nil { + c.Fatalf("could not parse test key file: %s", err) + } + + newSSHCon := newSSHConnMock("dillon=ldap") + _, err = sshPubKeyAuth(newSSHCon, testKey) + if err == nil || !errors.Is(err, errAuthentication) { + c.Fatalf("expected err(%s) but got (%s)", errAuthentication, err) + } + + newSSHCon = newSSHConnMock("dillon") + _, err = sshPubKeyAuth(newSSHCon, testKey) + if err == nil || !errors.Is(err, errNoSuchUser) { + c.Fatalf("expected err(%s) but got (%s)", errNoSuchUser, err) + } +} + +func (s *TestSuiteIAM) SFTPPublicKeyAuthentication(c *check) { + keyBytes, err := os.ReadFile("./testdata/dillon_test_key.pub") + if err != nil { + c.Fatalf("could not read test key file: %s", err) + } + + testKey, _, _, _, err := ssh.ParseAuthorizedKey(keyBytes) + if err != nil { + c.Fatalf("could not parse test key file: %s", err) + } + + newSSHCon := newSSHConnMock("dillon=ldap") + _, err = sshPubKeyAuth(newSSHCon, testKey) + if err != nil { + c.Fatalf("expected no error but got(%s)", err) + } + + newSSHCon = newSSHConnMock("dillon") + _, err = sshPubKeyAuth(newSSHCon, testKey) + if err != nil { + c.Fatalf("expected no error but got(%s)", err) + } +} + +// A user without an sshpubkey attribute in LDAP (here: fahim) should not be +// able to authenticate. 
+func (s *TestSuiteIAM) SFTPPublicKeyAuthNoPubKey(c *check) { + keyBytes, err := os.ReadFile("./testdata/dillon_test_key.pub") + if err != nil { + c.Fatalf("could not read test key file: %s", err) + } + + testKey, _, _, _, err := ssh.ParseAuthorizedKey(keyBytes) + if err != nil { + c.Fatalf("could not parse test key file: %s", err) + } + + newSSHCon := newSSHConnMock("fahim=ldap") + _, err = sshPubKeyAuth(newSSHCon, testKey) + if err == nil { + c.Fatalf("expected error but got none") + } + + newSSHCon = newSSHConnMock("fahim") + _, err = sshPubKeyAuth(newSSHCon, testKey) + if err == nil { + c.Fatalf("expected error but got none") + } +} + +func (s *TestSuiteIAM) SFTPFailedAuthDueToMissingPolicy(c *check) { + newSSHCon := newSSHConnMock("dillon=ldap") + _, err := sshPasswordAuth(newSSHCon, []byte("dillon")) + if err == nil || !errors.Is(err, errSFTPUserHasNoPolicies) { + c.Fatalf("expected err(%s) but got (%s)", errSFTPUserHasNoPolicies, err) + } + + newSSHCon = newSSHConnMock("dillon") + _, err = sshPasswordAuth(newSSHCon, []byte("dillon")) + if err == nil || !errors.Is(err, errNoSuchUser) { + c.Fatalf("expected err(%s) but got (%s)", errNoSuchUser, err) + } +} + +func (s *TestSuiteIAM) SFTPFailedAuthDueToInvalidUser(c *check) { + newSSHCon := newSSHConnMock("dillon_error") + _, err := sshPasswordAuth(newSSHCon, []byte("dillon_error")) + if err == nil || !errors.Is(err, errNoSuchUser) { + c.Fatalf("expected err(%s) but got (%s)", errNoSuchUser, err) + } +} + +func (s *TestSuiteIAM) SFTPFailedForcedServiceAccountAuthOnLDAPUser(c *check) { + newSSHCon := newSSHConnMock("dillon=svc") + _, err := sshPasswordAuth(newSSHCon, []byte("dillon")) + if err == nil || !errors.Is(err, errNoSuchUser) { + c.Fatalf("expected err(%s) but got (%s)", errNoSuchUser, err) + } +} + +func (s *TestSuiteIAM) SFTPFailedAuthDueToInvalidPassword(c *check) { + newSSHCon := newSSHConnMock("dillon") + _, err := sshPasswordAuth(newSSHCon, []byte("dillon_error")) + if err == nil || !errors.Is(err, errNoSuchUser) { + c.Fatalf("expected err(%s) but got (%s)", errNoSuchUser, err) + } +} + +func (s *TestSuiteIAM) SFTPInvalidServiceAccountPassword(c *check) { + ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout) + defer cancel() + + accessKey, secretKey := mustGenerateCredentials(c) + err := s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled) + if err != nil { + c.Fatalf("Unable to set user: %v", err) + } + + userReq := madmin.PolicyAssociationReq{ + Policies: []string{"readwrite"}, + User: accessKey, + } + if _, err := s.adm.AttachPolicy(ctx, userReq); err != nil { + c.Fatalf("Unable to attach policy: %v", err) + } + + newSSHCon := newSSHConnMock(accessKey + "=svc") + _, err = sshPasswordAuth(newSSHCon, []byte("invalid")) + if err == nil || !errors.Is(err, errAuthentication) { + c.Fatalf("expected err(%s) but got (%s)", errAuthentication, err) + } + + newSSHCon = newSSHConnMock(accessKey) + _, err = sshPasswordAuth(newSSHCon, []byte("invalid")) + if err == nil || !errors.Is(err, errAuthentication) { + c.Fatalf("expected err(%s) but got (%s)", errAuthentication, err) + } +} + +func (s *TestSuiteIAM) SFTPServiceAccountLogin(c *check) { + ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout) + defer cancel() + + accessKey, secretKey := mustGenerateCredentials(c) + err := s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled) + if err != nil { + c.Fatalf("Unable to set user: %v", err) + } + + userReq := madmin.PolicyAssociationReq{ + Policies: 
[]string{"readwrite"}, + User: accessKey, + } + if _, err := s.adm.AttachPolicy(ctx, userReq); err != nil { + c.Fatalf("Unable to attach policy: %v", err) + } + + newSSHCon := newSSHConnMock(accessKey + "=svc") + _, err = sshPasswordAuth(newSSHCon, []byte(secretKey)) + if err != nil { + c.Fatalf("expected no error but got (%s)", err) + } + + newSSHCon = newSSHConnMock(accessKey) + _, err = sshPasswordAuth(newSSHCon, []byte(secretKey)) + if err != nil { + c.Fatalf("expected no error but got (%s)", err) + } +} + +func (s *TestSuiteIAM) SFTPValidLDAPLoginWithPassword(c *check) { + ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout) + defer cancel() + + // we need to do this so that the user has a policy before authentication. + // ldap user accounts without policies are denied access in sftp. + policy := "mypolicy" + policyBytes := []byte(`{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:ListBucket" + ], + "Resource": [ + "arn:aws:s3:::BUCKET/*" + ] + } + ] +}`) + + err := s.adm.AddCannedPolicy(ctx, policy, policyBytes) + if err != nil { + c.Fatalf("policy add error: %v", err) + } + + { + userDN := "uid=dillon,ou=people,ou=swengg,dc=min,dc=io" + userReq := madmin.PolicyAssociationReq{ + Policies: []string{policy}, + User: userDN, + } + if _, err := s.adm.AttachPolicyLDAP(ctx, userReq); err != nil { + c.Fatalf("Unable to attach policy: %v", err) + } + + newSSHCon := newSSHConnMock("dillon=ldap") + _, err = sshPasswordAuth(newSSHCon, []byte("dillon")) + if err != nil { + c.Fatal("Password authentication failed for user (dillon):", err) + } + + newSSHCon = newSSHConnMock("dillon") + _, err = sshPasswordAuth(newSSHCon, []byte("dillon")) + if err != nil { + c.Fatal("Password authentication failed for user (dillon):", err) + } + } + { + userDN := "uid=fahim,ou=people,ou=swengg,dc=min,dc=io" + userReq := madmin.PolicyAssociationReq{ + Policies: []string{policy}, + User: userDN, + } + if _, err := s.adm.AttachPolicyLDAP(ctx, userReq); err != nil { + c.Fatalf("Unable to attach policy: %v", err) + } + + newSSHCon := newSSHConnMock("fahim=ldap") + _, err = sshPasswordAuth(newSSHCon, []byte("fahim")) + if err != nil { + c.Fatal("Password authentication failed for user (fahim):", err) + } + + newSSHCon = newSSHConnMock("fahim") + _, err = sshPasswordAuth(newSSHCon, []byte("fahim")) + if err != nil { + c.Fatal("Password authentication failed for user (fahim):", err) + } + } +} diff --git a/cmd/signals.go b/cmd/signals.go index f5159a983d8ec..1b2f3ffa2ff2e 100644 --- a/cmd/signals.go +++ b/cmd/signals.go @@ -23,14 +23,33 @@ import ( "net/http" "os" "strings" + "time" "github.com/coreos/go-systemd/v22/daemon" "github.com/minio/minio/internal/logger" ) +func shutdownHealMRFWithTimeout() { + const shutdownTimeout = time.Minute + + finished := make(chan struct{}) + go func() { + globalMRFState.shutdown() + close(finished) + }() + select { + case <-time.After(shutdownTimeout): + case <-finished: + } +} + func handleSignals() { // Custom exit function exit := func(success bool) { + if globalLoggerOutput != nil { + globalLoggerOutput.Close() + } + // If global profiler is set stop before we exit. globalProfilerMu.Lock() defer globalProfilerMu.Unlock() @@ -46,21 +65,26 @@ func handleSignals() { } stopProcess := func() bool { + shutdownHealMRFWithTimeout() // this can take time sometimes, it needs to be executed + // before stopping s3 operations + // send signal to various go-routines that they need to quit. 
cancelGlobalContext() if httpServer := newHTTPServerFn(); httpServer != nil { if err := httpServer.Shutdown(); err != nil && !errors.Is(err, http.ErrServerClosed) { - logger.LogIf(context.Background(), err) + shutdownLogIf(context.Background(), err) } } if objAPI := newObjectLayerFn(); objAPI != nil { - logger.LogIf(context.Background(), objAPI.Shutdown(context.Background())) + shutdownLogIf(context.Background(), objAPI.Shutdown(context.Background())) } - if srv := newConsoleServerFn(); srv != nil { - logger.LogIf(context.Background(), srv.Shutdown()) + if globalBrowserEnabled { + if srv := newConsoleServerFn(); srv != nil { + shutdownLogIf(context.Background(), srv.Shutdown()) + } } if globalEventNotifier != nil { @@ -73,7 +97,7 @@ func handleSignals() { for { select { case err := <-globalHTTPServerErrorCh: - logger.LogIf(context.Background(), err) + shutdownLogIf(context.Background(), err) exit(stopProcess()) case osSignal := <-globalOSSignalCh: logger.Info("Exiting on signal: %s", strings.ToUpper(osSignal.String())) @@ -89,7 +113,7 @@ func handleSignals() { if rerr == nil { daemon.SdNotify(false, daemon.SdNotifyReady) } - logger.LogIf(context.Background(), rerr) + shutdownLogIf(context.Background(), rerr) exit(stop && rerr == nil) case serviceStop: logger.Info("Stopping on service signal") diff --git a/cmd/signature-v2.go b/cmd/signature-v2.go index bc88bab2017b4..1fd42ba70233b 100644 --- a/cmd/signature-v2.go +++ b/cmd/signature-v2.go @@ -95,7 +95,7 @@ func doesPolicySignatureV2Match(formValues http.Header) (auth.Credentials, APIEr // Escape encodedQuery string into unescaped list of query params, returns error // if any while unescaping the values. func unescapeQueries(encodedQuery string) (unescapedQueries []string, err error) { - for _, query := range strings.Split(encodedQuery, "&") { + for query := range strings.SplitSeq(encodedQuery, "&") { var unescapedQuery string unescapedQuery, err = url.QueryUnescape(query) if err != nil { diff --git a/cmd/signature-v2_test.go b/cmd/signature-v2_test.go index 5125a952cef5a..b2c2b0127db61 100644 --- a/cmd/signature-v2_test.go +++ b/cmd/signature-v2_test.go @@ -32,7 +32,7 @@ func TestResourceListSorting(t *testing.T) { sortedResourceList := make([]string, len(resourceList)) copy(sortedResourceList, resourceList) sort.Strings(sortedResourceList) - for i := 0; i < len(resourceList); i++ { + for i := range resourceList { if resourceList[i] != sortedResourceList[i] { t.Errorf("Expected resourceList[%d] = \"%s\", resourceList is not correctly sorted.", i, sortedResourceList[i]) break @@ -42,7 +42,7 @@ func TestResourceListSorting(t *testing.T) { // Tests presigned v2 signature. func TestDoesPresignedV2SignatureMatch(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() obj, fsDir, err := prepareFS(ctx) @@ -159,13 +159,12 @@ func TestDoesPresignedV2SignatureMatch(t *testing.T) { t.Errorf("(%d) expected to get success, instead got %s", i, niceError(errCode)) } } - } } // TestValidateV2AuthHeader - Tests validate the logic of V2 Authorization header validator. 
func TestValidateV2AuthHeader(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() obj, fsDir, err := prepareFS(ctx) @@ -239,7 +238,7 @@ func TestValidateV2AuthHeader(t *testing.T) { } func TestDoesPolicySignatureV2Match(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() obj, fsDir, err := prepareFS(ctx) diff --git a/cmd/signature-v4-parser.go b/cmd/signature-v4-parser.go index 62ba6f7696501..f6866cb858870 100644 --- a/cmd/signature-v4-parser.go +++ b/cmd/signature-v4-parser.go @@ -75,7 +75,7 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) if creds[0] != "Credential" { return ch, ErrMissingCredTag } - credElements := strings.Split(strings.TrimSpace(creds[1]), SlashSeparator) + credElements := strings.Split(strings.TrimRight(strings.TrimSpace(creds[1]), SlashSeparator), SlashSeparator) if len(credElements) < 5 { return ch, ErrCredMalformed } diff --git a/cmd/signature-v4-parser_test.go b/cmd/signature-v4-parser_test.go index b3fb93394e7ec..3d9001334cf69 100644 --- a/cmd/signature-v4-parser_test.go +++ b/cmd/signature-v4-parser_test.go @@ -236,6 +236,25 @@ func TestParseCredentialHeader(t *testing.T) { "aws4_request"), expectedErrCode: ErrNone, }, + // Test Case - 12. + // Test case with right inputs but trailing `/`. Expected to return a valid CredentialHeader. + // "aws4_request" is the valid request version. + { + inputCredentialStr: generateCredentialStr( + "Z7IXGOO6BZ0REAN1Q26I", + sampleTimeStr, + "us-west-1", + "s3", + "aws4_request/"), + expectedCredentials: generateCredentials( + t, + "Z7IXGOO6BZ0REAN1Q26I", + sampleTimeStr, + "us-west-1", + "s3", + "aws4_request"), + expectedErrCode: ErrNone, + }, } for i, testCase := range testCases { @@ -298,7 +317,6 @@ func TestParseSignature(t *testing.T) { t.Errorf("Test %d: Expected the result to be \"%s\", but got \"%s\". ", i+1, testCase.expectedSignStr, actualSignStr) } } - } } @@ -343,7 +361,6 @@ func TestParseSignedHeaders(t *testing.T) { t.Errorf("Test %d: Expected the result to be \"%v\", but got \"%v\". ", i+1, testCase.expectedSignedHeaders, actualSignedHeaders) } } - } } @@ -514,7 +531,6 @@ func TestParseSignV4(t *testing.T) { t.Errorf("Test %d: Expected the result to be \"%v\", but got \"%v\". 
", i+1, testCase.expectedAuthField, parsedAuthField.SignedHeaders) } } - } } @@ -880,6 +896,5 @@ func TestParsePreSignV4(t *testing.T) { t.Errorf("Test %d: Expected date to be %v, but got %v", i+1, testCase.expectedPreSignValues.Date.UTC().Format(iso8601Format), parsedPreSign.Date.UTC().Format(iso8601Format)) } } - } } diff --git a/cmd/signature-v4-utils.go b/cmd/signature-v4-utils.go index 07374858b09af..1569dec1c772e 100644 --- a/cmd/signature-v4-utils.go +++ b/cmd/signature-v4-utils.go @@ -23,6 +23,7 @@ import ( "encoding/hex" "io" "net/http" + "slices" "strconv" "strings" @@ -30,8 +31,7 @@ import ( "github.com/minio/minio/internal/hash/sha256" xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/policy" - "golang.org/x/exp/slices" + "github.com/minio/pkg/v3/policy" ) // http Header "x-amz-content-sha256" == "UNSIGNED-PAYLOAD" indicates that the @@ -152,11 +152,14 @@ func checkKeyValid(r *http.Request, accessKey string) (auth.Credentials, bool, A // Check if server has initialized, then only proceed // to check for IAM users otherwise its okay for clients // to retry with 503 errors when server is coming up. - return auth.Credentials{}, false, ErrServerNotInitialized + return auth.Credentials{}, false, ErrIAMNotInitialized } // Check if the access key is part of users credentials. - u, ok := globalIAMSys.GetUser(r.Context(), accessKey) + u, ok, err := globalIAMSys.CheckKey(r.Context(), accessKey) + if err != nil { + return auth.Credentials{}, false, ErrIAMNotInitialized + } if !ok { // Credentials could be valid but disabled - return a different // error in such a scenario. diff --git a/cmd/signature-v4-utils_test.go b/cmd/signature-v4-utils_test.go index be724ec35cd22..74830fc9a3ef4 100644 --- a/cmd/signature-v4-utils_test.go +++ b/cmd/signature-v4-utils_test.go @@ -30,7 +30,7 @@ import ( ) func TestCheckValid(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() objLayer, fsDir, err := prepareFS(ctx) @@ -75,10 +75,13 @@ func TestCheckValid(t *testing.T) { t.Fatalf("unable create credential, %s", err) } - globalIAMSys.CreateUser(ctx, ucreds.AccessKey, madmin.AddOrUpdateUserReq{ + _, err = globalIAMSys.CreateUser(ctx, ucreds.AccessKey, madmin.AddOrUpdateUserReq{ SecretKey: ucreds.SecretKey, Status: madmin.AccountEnabled, }) + if err != nil { + t.Fatalf("unable create credential, %s", err) + } _, owner, s3Err = checkKeyValid(req, ucreds.AccessKey) if s3Err != ErrNone { @@ -88,6 +91,26 @@ func TestCheckValid(t *testing.T) { if owner { t.Fatalf("Expected owner to be 'false', found %t", owner) } + + _, err = globalIAMSys.PolicyDBSet(ctx, ucreds.AccessKey, "consoleAdmin", regUser, false) + if err != nil { + t.Fatalf("unable to attach policy to credential, %s", err) + } + + time.Sleep(4 * time.Second) + + policies, err := globalIAMSys.PolicyDBGet(ucreds.AccessKey) + if err != nil { + t.Fatalf("unable to get policy to credential, %s", err) + } + + if len(policies) == 0 { + t.Fatal("no policies found") + } + + if policies[0] != "consoleAdmin" { + t.Fatalf("expected 'consoleAdmin', %s", policies[0]) + } } // TestSkipContentSha256Cksum - Test validate the logic which decides whether diff --git a/cmd/signature-v4.go b/cmd/signature-v4.go index ad292ea70b675..ceb8b4b2f564f 100644 --- a/cmd/signature-v4.go +++ b/cmd/signature-v4.go @@ -60,7 +60,7 @@ const ( // getCanonicalHeaders generate a list of request headers with their values func 
getCanonicalHeaders(signedHeaders http.Header) string { var headers []string - vals := make(http.Header) + vals := make(http.Header, len(signedHeaders)) for k, vv := range signedHeaders { k = strings.ToLower(k) headers = append(headers, k) @@ -154,7 +154,7 @@ func getSignature(signingKey []byte, stringToSign string) string { // Check to see if Policy is signed correctly. func doesPolicySignatureMatch(formValues http.Header) (auth.Credentials, APIErrorCode) { // For SignV2 - Signature field will be valid - if _, ok := formValues["Signature"]; ok { + if _, ok := formValues[xhttp.AmzSignatureV2]; ok { return doesPolicySignatureV2Match(formValues) } return doesPolicySignatureV4Match(formValues) @@ -175,7 +175,7 @@ func compareSignatureV4(sig1, sig2 string) bool { // returns ErrNone if the signature matches. func doesPolicySignatureV4Match(formValues http.Header) (auth.Credentials, APIErrorCode) { // Server region. - region := globalSite.Region + region := globalSite.Region() // Parse credential tag. credHeader, s3Err := parseCredentialHeader("Credential="+formValues.Get(xhttp.AmzCredential), region, serviceS3) diff --git a/cmd/signature-v4_test.go b/cmd/signature-v4_test.go index 77e38f3b008a2..a0f5d8155f313 100644 --- a/cmd/signature-v4_test.go +++ b/cmd/signature-v4_test.go @@ -37,6 +37,12 @@ func niceError(code APIErrorCode) string { } func TestDoesPolicySignatureMatch(t *testing.T) { + _, fsDir, err := prepareFS(t.Context()) + if err != nil { + t.Fatal(err) + } + defer removeRoots([]string{fsDir}) + credentialTemplate := "%s/%s/%s/s3/aws4_request" now := UTCNow() accessKey := globalActiveCred.AccessKey @@ -94,7 +100,7 @@ func TestDoesPolicySignatureMatch(t *testing.T) { } func TestDoesPresignedSignatureMatch(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() obj, fsDir, err := prepareFS(ctx) @@ -111,7 +117,7 @@ func TestDoesPresignedSignatureMatch(t *testing.T) { now := UTCNow() credentialTemplate := "%s/%s/%s/s3/aws4_request" - region := globalSite.Region + region := globalSite.Region() accessKeyID := globalActiveCred.AccessKey testCases := []struct { queryParams map[string]string diff --git a/cmd/site-replication-metrics.go b/cmd/site-replication-metrics.go index 4dd3b8d3baa11..bb3715313b29d 100644 --- a/cmd/site-replication-metrics.go +++ b/cmd/site-replication-metrics.go @@ -19,6 +19,7 @@ package cmd import ( "fmt" + "maps" "sync" "sync/atomic" "time" @@ -54,9 +55,7 @@ func (rt *RTimedMetrics) toMetric() madmin.TimedErrStats { return madmin.TimedErrStats{} } errCounts := make(map[string]int) - for k, v := range rt.ErrCounts { - errCounts[k] = v - } + maps.Copy(errCounts, rt.ErrCounts) minuteTotals := rt.LastMinute.getTotal() hourTotals := rt.LastHour.getTotal() return madmin.TimedErrStats{ @@ -99,9 +98,7 @@ func (rt *RTimedMetrics) merge(o RTimedMetrics) (n RTimedMetrics) { n.LastHour = n.LastHour.merge(rt.LastHour) n.LastHour = n.LastHour.merge(o.LastHour) n.ErrCounts = make(map[string]int) - for k, v := range rt.ErrCounts { - n.ErrCounts[k] = v - } + maps.Copy(n.ErrCounts, rt.ErrCounts) for k, v := range o.ErrCounts { n.ErrCounts[k] += v } @@ -264,7 +261,7 @@ type SRMetric struct { ReplicatedCount int64 `json:"replicatedCount"` // Failed captures replication errors in various time windows - Failed madmin.TimedErrStats `json:"failed,omitempty"` + Failed madmin.TimedErrStats `json:"failed"` XferStats map[RMetricName]XferStats `json:"transferSummary"` } diff --git a/cmd/site-replication-metrics_gen.go 
b/cmd/site-replication-metrics_gen.go index 7cba78109cb15..2b6272e3b7b91 100644 --- a/cmd/site-replication-metrics_gen.go +++ b/cmd/site-replication-metrics_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) @@ -209,19 +209,17 @@ func (z *RTimedMetrics) DecodeMsg(dc *msgp.Reader) (err error) { if z.ErrCounts == nil { z.ErrCounts = make(map[string]int, zb0003) } else if len(z.ErrCounts) > 0 { - for key := range z.ErrCounts { - delete(z.ErrCounts, key) - } + clear(z.ErrCounts) } for zb0003 > 0 { zb0003-- var za0001 string - var za0002 int za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "ErrCounts") return } + var za0002 int za0002, err = dc.ReadInt() if err != nil { err = msgp.WrapError(err, "ErrCounts", za0001) @@ -426,14 +424,12 @@ func (z *RTimedMetrics) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.ErrCounts == nil { z.ErrCounts = make(map[string]int, zb0003) } else if len(z.ErrCounts) > 0 { - for key := range z.ErrCounts { - delete(z.ErrCounts, key) - } + clear(z.ErrCounts) } for zb0003 > 0 { - var za0001 string var za0002 int zb0003-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "ErrCounts") @@ -839,19 +835,17 @@ func (z *SRMetricsSummary) DecodeMsg(dc *msgp.Reader) (err error) { if z.Metrics == nil { z.Metrics = make(map[string]SRMetric, zb0002) } else if len(z.Metrics) > 0 { - for key := range z.Metrics { - delete(z.Metrics, key) - } + clear(z.Metrics) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 SRMetric za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Metrics") return } + var za0002 SRMetric err = za0002.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Metrics", za0001) @@ -1070,14 +1064,12 @@ func (z *SRMetricsSummary) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.Metrics == nil { z.Metrics = make(map[string]SRMetric, zb0002) } else if len(z.Metrics) > 0 { - for key := range z.Metrics { - delete(z.Metrics, key) - } + clear(z.Metrics) } for zb0002 > 0 { - var za0001 string var za0002 SRMetric zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Metrics") @@ -1161,19 +1153,17 @@ func (z *SRStats) DecodeMsg(dc *msgp.Reader) (err error) { if z.M == nil { z.M = make(map[string]*SRStatus, zb0002) } else if len(z.M) > 0 { - for key := range z.M { - delete(z.M, key) - } + clear(z.M) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 *SRStatus za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "M") return } + var za0002 *SRStatus if dc.IsNil() { err = dc.ReadNil() if err != nil { @@ -1327,14 +1317,12 @@ func (z *SRStats) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.M == nil { z.M = make(map[string]*SRStatus, zb0002) } else if len(z.M) > 0 { - for key := range z.M { - delete(z.M, key) - } + clear(z.M) } for zb0002 > 0 { - var za0001 string var za0002 *SRStatus zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "M") diff --git a/cmd/site-replication-metrics_gen_test.go b/cmd/site-replication-metrics_gen_test.go index 0aa1598d73299..9e47381c99cad 100644 --- a/cmd/site-replication-metrics_gen_test.go +++ b/cmd/site-replication-metrics_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package cmd + import ( "bytes" "testing" diff --git a/cmd/site-replication-utils.go b/cmd/site-replication-utils.go index b6b41ca5fc6e3..192275845d97b 100644 --- a/cmd/site-replication-utils.go +++ b/cmd/site-replication-utils.go @@ -19,6 +19,7 @@ package cmd import ( "context" + "maps" "math/rand" "sync" "time" @@ -45,9 +46,7 @@ func (s *SiteResyncStatus) clone() SiteResyncStatus { } o := *s o.BucketStatuses = make(map[string]ResyncStatusType, len(s.BucketStatuses)) - for b, st := range s.BucketStatuses { - o.BucketStatuses[b] = st - } + maps.Copy(o.BucketStatuses, s.BucketStatuses) return o } @@ -88,11 +87,9 @@ func (sm *siteResyncMetrics) init(ctx context.Context) { <-ctx.Done() return } - duration := time.Duration(r.Float64() * float64(time.Second*10)) - if duration < time.Second { + duration := max(time.Duration(r.Float64()*float64(time.Second*10)), // Make sure to sleep at least a second to avoid high CPU ticks. - duration = time.Second - } + time.Second) time.Sleep(duration) } } diff --git a/cmd/site-replication-utils_gen.go b/cmd/site-replication-utils_gen.go index 81f0ff4e95c93..f02eb5c7fd599 100644 --- a/cmd/site-replication-utils_gen.go +++ b/cmd/site-replication-utils_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) @@ -52,19 +52,17 @@ func (z *SiteResyncStatus) DecodeMsg(dc *msgp.Reader) (err error) { if z.BucketStatuses == nil { z.BucketStatuses = make(map[string]ResyncStatusType, zb0002) } else if len(z.BucketStatuses) > 0 { - for key := range z.BucketStatuses { - delete(z.BucketStatuses, key) - } + clear(z.BucketStatuses) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 ResyncStatusType za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "BucketStatuses") return } + var za0002 ResyncStatusType err = za0002.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "BucketStatuses", za0001) @@ -260,14 +258,12 @@ func (z *SiteResyncStatus) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.BucketStatuses == nil { z.BucketStatuses = make(map[string]ResyncStatusType, zb0002) } else if len(z.BucketStatuses) > 0 { - for key := range z.BucketStatuses { - delete(z.BucketStatuses, key) - } + clear(z.BucketStatuses) } for zb0002 > 0 { - var za0001 string var za0002 ResyncStatusType zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "BucketStatuses") diff --git a/cmd/site-replication-utils_gen_test.go b/cmd/site-replication-utils_gen_test.go index 77a68632c9e5f..a982c6c25b231 100644 --- a/cmd/site-replication-utils_gen_test.go +++ b/cmd/site-replication-utils_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package cmd + import ( "bytes" "testing" diff --git a/cmd/site-replication.go b/cmd/site-replication.go index 6b30c59eeddfc..0b2f26f76ca53 100644 --- a/cmd/site-replication.go +++ b/cmd/site-replication.go @@ -26,10 +26,12 @@ import ( "encoding/xml" "errors" "fmt" + "maps" "math/rand" "net/url" "reflect" "runtime" + "slices" "sort" "strings" "sync" @@ -37,7 +39,6 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7" - minioClient "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/replication" "github.com/minio/minio-go/v7/pkg/set" @@ -45,7 +46,9 @@ import ( "github.com/minio/minio/internal/bucket/lifecycle" sreplication "github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/policy" + xldap "github.com/minio/pkg/v3/ldap" + "github.com/minio/pkg/v3/policy" + "github.com/puzpuzpuz/xsync/v3" ) const ( @@ -237,13 +240,11 @@ func (c *SiteReplicationSys) Init(ctx context.Context, objAPI ObjectLayer) error if err == nil { break } - logger.LogOnceIf(context.Background(), fmt.Errorf("unable to initialize site replication subsystem: (%w)", err), "site-relication-init") + replLogOnceIf(context.Background(), fmt.Errorf("unable to initialize site replication subsystem: (%w)", err), "site-relication-init") - duration := time.Duration(r.Float64() * float64(time.Minute)) - if duration < time.Second { + duration := max(time.Duration(r.Float64()*float64(time.Minute)), // Make sure to sleep at least a second to avoid high CPU ticks. - duration = time.Second - } + time.Second) time.Sleep(duration) } c.RLock() @@ -312,7 +313,7 @@ func (c *SiteReplicationSys) saveToDisk(ctx context.Context, state srState) erro } for _, err := range globalNotificationSys.ReloadSiteReplicationConfig(ctx) { - logger.LogIf(ctx, err) + replLogIf(ctx, err) } c.Lock() @@ -333,7 +334,7 @@ func (c *SiteReplicationSys) removeFromDisk(ctx context.Context) error { } for _, err := range globalNotificationSys.ReloadSiteReplicationConfig(ctx) { - logger.LogIf(ctx, err) + replLogIf(ctx, err) } c.Lock() @@ -389,7 +390,7 @@ func (c *SiteReplicationSys) getSiteStatuses(ctx context.Context, sites ...madmi self: info.DeploymentID == globalDeploymentID(), }) } - return + return psi, err } // AddPeerClusters - add cluster sites for replication configuration. 
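[Reviewer note — not part of the patch] The site-replication hunks above repeatedly swap hand-written copy/delete loops and an explicit clamping branch for the Go 1.21 builtins maps.Copy, clear, and max. A minimal standalone sketch of the equivalence (all names illustrative):

package main

import (
	"fmt"
	"maps"
	"time"
)

func main() {
	src := map[string]int{"a": 1, "b": 2}
	dst := make(map[string]int, len(src))
	maps.Copy(dst, src) // same effect as: for k, v := range src { dst[k] = v }

	clear(dst) // same effect as: for k := range dst { delete(dst, k) }

	// max clamps the randomized sleep to at least one second, replacing the
	// removed "if duration < time.Second { duration = time.Second }" branch.
	duration := max(700*time.Millisecond, time.Second)

	fmt.Println(len(dst), duration) // 0 1s
}
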
@@ -476,8 +477,8 @@ func (c *SiteReplicationSys) AddPeerClusters(ctx context.Context, psites []madmi var secretKey string var svcCred auth.Credentials sa, _, err := globalIAMSys.getServiceAccount(ctx, siteReplicatorSvcAcc) - switch { - case err == errNoSuchServiceAccount: + switch err { + case errNoSuchServiceAccount: _, secretKey, err = auth.GenerateCredentials() if err != nil { return madmin.ReplicateAddStatus{}, errSRServiceAccount(fmt.Errorf("unable to create local service account: %w", err)) @@ -490,7 +491,7 @@ func (c *SiteReplicationSys) AddPeerClusters(ctx context.Context, psites []madmi if err != nil { return madmin.ReplicateAddStatus{}, errSRServiceAccount(fmt.Errorf("unable to create local service account: %w", err)) } - case err == nil: + case nil: svcCred = sa.Credentials secretKey = svcCred.SecretKey default: @@ -595,7 +596,7 @@ func (c *SiteReplicationSys) AddPeerClusters(ctx context.Context, psites []madmi } if !globalSiteReplicatorCred.IsValid() { - globalSiteReplicatorCred.Set(svcCred) + globalSiteReplicatorCred.Set(svcCred.SecretKey) } result := madmin.ReplicateAddStatus{ Success: true, @@ -657,7 +658,7 @@ func (c *SiteReplicationSys) PeerJoinReq(ctx context.Context, arg madmin.SRPeerJ return errSRBackendIssue(fmt.Errorf("unable to save cluster-replication state to drive on %s: %v", ourName, err)) } if !globalSiteReplicatorCred.IsValid() { - globalSiteReplicatorCred.Set(sa) + globalSiteReplicatorCred.Set(sa.SecretKey) } return nil @@ -676,7 +677,7 @@ func (c *SiteReplicationSys) GetIDPSettings(ctx context.Context) madmin.IDPSetti } s.OpenID = globalIAMSys.OpenIDConfig.GetSettings() if s.OpenID.Enabled { - s.OpenID.Region = globalSite.Region + s.OpenID.Region = globalSite.Region() } return s } @@ -719,7 +720,6 @@ func (c *SiteReplicationSys) Netperf(ctx context.Context, duration time.Duration var wg sync.WaitGroup var resultsMu sync.RWMutex for _, info := range infos.Sites { - info := info // will call siteNetperf, means call others's adminAPISiteReplicationDevNull if globalDeploymentID() == info.DeploymentID { wg.Add(1) @@ -736,7 +736,6 @@ func (c *SiteReplicationSys) Netperf(ctx context.Context, duration time.Duration resultsMu.Lock() results.NodeResults = append(results.NodeResults, result) resultsMu.Unlock() - return }() continue } @@ -754,11 +753,10 @@ func (c *SiteReplicationSys) Netperf(ctx context.Context, duration time.Duration resultsMu.Lock() results.NodeResults = append(results.NodeResults, result) resultsMu.Unlock() - return }() } wg.Wait() - return + return results, err } // GetClusterInfo - returns site replication information. 
@@ -855,12 +853,7 @@ func (c *SiteReplicationSys) MakeBucketHook(ctx context.Context, bucket string, if err := errors.Unwrap(makeBucketConcErr); err != nil { return err } - - if err := errors.Unwrap(makeRemotesConcErr); err != nil { - return err - } - - return nil + return errors.Unwrap(makeRemotesConcErr) } // DeleteBucketHook - called during a regular delete bucket call when cluster @@ -1040,7 +1033,6 @@ func (c *SiteReplicationSys) PeerBucketConfigureReplHandler(ctx context.Context, if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil { return wrapSRErr(err) } - } // no replication rule for this peer or target ARN missing in bucket targets if targetARN == "" { @@ -1132,7 +1124,7 @@ func (c *SiteReplicationSys) PeerBucketConfigureReplHandler(ctx context.Context, if err != nil { return err } - sameTarget, apiErr := validateReplicationDestination(ctx, bucket, newReplicationConfig, true) + sameTarget, apiErr := validateReplicationDestination(ctx, bucket, newReplicationConfig, &validateReplicationDestinationOptions{CheckRemoteBucket: true}) if apiErr != noError { return fmt.Errorf("bucket replication config validation error: %#v", apiErr) } @@ -1185,7 +1177,7 @@ func (c *SiteReplicationSys) PeerBucketDeleteHandler(ctx context.Context, bucket if err != nil { if globalDNSConfig != nil { if err2 := globalDNSConfig.Put(bucket); err2 != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to restore bucket DNS entry %w, please fix it manually", err2)) + replLogIf(ctx, fmt.Errorf("Unable to restore bucket DNS entry %w, please fix it manually", err2)) } } return err @@ -1281,7 +1273,13 @@ func (c *SiteReplicationSys) PeerIAMUserChangeHandler(ctx context.Context, chang // only changing the account status. _, err = globalIAMSys.SetUserStatus(ctx, change.AccessKey, userReq.Status) } else { - _, err = globalIAMSys.CreateUser(ctx, change.AccessKey, userReq) + // We don't allow internal user creation with LDAP enabled for now + // (both sites must have LDAP disabled). + if globalIAMSys.LDAPConfig.Enabled() { + err = errIAMActionNotAllowed + } else { + _, err = globalIAMSys.CreateUser(ctx, change.AccessKey, userReq) + } } } if err != nil { @@ -1311,8 +1309,14 @@ func (c *SiteReplicationSys) PeerGroupInfoChangeHandler(ctx context.Context, cha if updReq.Status != "" && len(updReq.Members) == 0 { _, err = globalIAMSys.SetGroupStatus(ctx, updReq.Group, updReq.Status == madmin.GroupEnabled) } else { - _, err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members) - if err == nil && updReq.Status != madmin.GroupEnabled { + if globalIAMSys.LDAPConfig.Enabled() { + // We don't allow internal group manipulation in this API when + // LDAP is enabled for now (both sites must have LDAP disabled). 
+ err = errIAMActionNotAllowed + } else { + _, err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members) + } + if err == nil && updReq.Status != "" { _, err = globalIAMSys.SetGroupStatus(ctx, updReq.Group, updReq.Status == madmin.GroupEnabled) } } @@ -1397,7 +1401,6 @@ func (c *SiteReplicationSys) PeerSvcAccChangeHandler(ctx context.Context, change if err := globalIAMSys.DeleteServiceAccount(ctx, change.Delete.AccessKey, true); err != nil { return wrapSRErr(err) } - } return nil @@ -1416,7 +1419,40 @@ func (c *SiteReplicationSys) PeerPolicyMappingHandler(ctx context.Context, mappi } } - _, err := globalIAMSys.PolicyDBSet(ctx, mapping.UserOrGroup, mapping.Policy, IAMUserType(mapping.UserType), mapping.IsGroup) + // When LDAP is enabled, we verify that the user or group exists in LDAP and + // use the normalized form of the entityName (which will be an LDAP DN). + userType := IAMUserType(mapping.UserType) + isGroup := mapping.IsGroup + entityName := mapping.UserOrGroup + + if globalIAMSys.GetUsersSysType() == LDAPUsersSysType && userType == stsUser { + // Validate that the user or group exists in LDAP and use the normalized + // form of the entityName (which will be an LDAP DN). + var err error + if isGroup { + var foundGroupDN *xldap.DNSearchResult + var underBaseDN bool + if foundGroupDN, underBaseDN, err = globalIAMSys.LDAPConfig.GetValidatedGroupDN(nil, entityName); err != nil { + iamLogIf(ctx, err) + } else if foundGroupDN == nil || !underBaseDN { + return wrapSRErr(errNoSuchGroup) + } + entityName = foundGroupDN.NormDN + } else { + var foundUserDN *xldap.DNSearchResult + if foundUserDN, err = globalIAMSys.LDAPConfig.GetValidatedDNForUsername(entityName); err != nil { + iamLogIf(ctx, err) + } else if foundUserDN == nil { + return wrapSRErr(errNoSuchUser) + } + entityName = foundUserDN.NormDN + } + if err != nil { + return wrapSRErr(err) + } + } + + _, err := globalIAMSys.PolicyDBSet(ctx, entityName, mapping.Policy, userType, isGroup) if err != nil { return wrapSRErr(err) } @@ -2044,26 +2080,28 @@ func (c *SiteReplicationSys) syncToAllPeers(ctx context.Context, addOpts madmin. // Followed by group policy mapping { // Replicate policy mappings on local to all peers. - groupPolicyMap := make(map[string]MappedPolicy) + groupPolicyMap := xsync.NewMapOf[string, MappedPolicy]() errG := globalIAMSys.store.loadMappedPolicies(ctx, unknownIAMUserType, true, groupPolicyMap) if errG != nil { return errSRBackendIssue(errG) } - for group, mp := range groupPolicyMap { - err := c.IAMChangeHook(ctx, madmin.SRIAMItem{ + var err error + groupPolicyMap.Range(func(k string, mp MappedPolicy) bool { + err = c.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemPolicyMapping, PolicyMapping: &madmin.SRPolicyMapping{ - UserOrGroup: group, + UserOrGroup: k, UserType: int(unknownIAMUserType), IsGroup: true, Policy: mp.Policies, }, UpdatedAt: mp.UpdatedAt, }) - if err != nil { - return errSRIAMError(err) - } + return err == nil + }) + if err != nil { + return errSRIAMError(err) } } @@ -2110,7 +2148,7 @@ func (c *SiteReplicationSys) syncToAllPeers(ctx context.Context, addOpts madmin. SecretKey: acc.Credentials.SecretKey, Groups: acc.Credentials.Groups, Claims: claims, - SessionPolicy: json.RawMessage(policyJSON), + SessionPolicy: policyJSON, Status: acc.Credentials.Status, Name: acc.Credentials.Name, Description: acc.Credentials.Description, @@ -2128,14 +2166,14 @@ func (c *SiteReplicationSys) syncToAllPeers(ctx context.Context, addOpts madmin. 
// Followed by policy mapping for the userAccounts we previously synced. { // Replicate policy mappings on local to all peers. - userPolicyMap := make(map[string]MappedPolicy) + userPolicyMap := xsync.NewMapOf[string, MappedPolicy]() errU := globalIAMSys.store.loadMappedPolicies(ctx, regUser, false, userPolicyMap) if errU != nil { return errSRBackendIssue(errU) } - - for user, mp := range userPolicyMap { - err := c.IAMChangeHook(ctx, madmin.SRIAMItem{ + var err error + userPolicyMap.Range(func(user string, mp MappedPolicy) bool { + err = c.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemPolicyMapping, PolicyMapping: &madmin.SRPolicyMapping{ UserOrGroup: user, @@ -2145,23 +2183,25 @@ func (c *SiteReplicationSys) syncToAllPeers(ctx context.Context, addOpts madmin. }, UpdatedAt: mp.UpdatedAt, }) - if err != nil { - return errSRIAMError(err) - } + return err == nil + }) + if err != nil { + return errSRIAMError(err) } } // and finally followed by policy mappings for for STS users. { // Replicate policy mappings on local to all peers. - stsPolicyMap := make(map[string]MappedPolicy) + stsPolicyMap := xsync.NewMapOf[string, MappedPolicy]() errU := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, false, stsPolicyMap) if errU != nil { return errSRBackendIssue(errU) } - for user, mp := range stsPolicyMap { - err := c.IAMChangeHook(ctx, madmin.SRIAMItem{ + var err error + stsPolicyMap.Range(func(user string, mp MappedPolicy) bool { + err = c.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemPolicyMapping, PolicyMapping: &madmin.SRPolicyMapping{ UserOrGroup: user, @@ -2171,9 +2211,10 @@ func (c *SiteReplicationSys) syncToAllPeers(ctx context.Context, addOpts madmin. }, UpdatedAt: mp.UpdatedAt, }) - if err != nil { - return errSRIAMError(err) - } + return err == nil + }) + if err != nil { + return errSRIAMError(err) } } @@ -2203,10 +2244,18 @@ func (c *SiteReplicationSys) toErrorFromErrMap(errMap map[string]error, actionNa return nil } + // Get ordered list of keys of errMap + keys := []string{} + for d := range errMap { + keys = append(keys, d) + } + sort.Strings(keys) + var success int msgs := []string{} - for d, err := range errMap { + for _, d := range keys { name := c.state.Peers[d].Name + err := errMap[d] if err == nil { msgs = append(msgs, fmt.Sprintf("'%s' on site %s (%s): succeeded", actionName, name, d)) success++ @@ -2214,7 +2263,7 @@ func (c *SiteReplicationSys) toErrorFromErrMap(errMap map[string]error, actionNa msgs = append(msgs, fmt.Sprintf("'%s' on site %s (%s): failed(%v)", actionName, name, d, err)) } } - if success == len(errMap) { + if success == len(keys) { return nil } return fmt.Errorf("Site replication error(s): \n%s", strings.Join(msgs, "\n")) @@ -2541,17 +2590,17 @@ func (c *SiteReplicationSys) RemoveRemoteTargetsForEndpoint(ctx context.Context, } targets, terr := globalBucketTargetSys.ListBucketTargets(ctx, t.SourceBucket) if terr != nil { - return err + return terr } tgtBytes, terr := json.Marshal(&targets) if terr != nil { - return err + return terr } if _, err = globalBucketMetadataSys.Update(ctx, t.SourceBucket, bucketTargetsFile, tgtBytes); err != nil { return err } } - return + return err } // Other helpers @@ -2564,15 +2613,14 @@ func getAdminClient(endpoint, accessKey, secretKey string) (*madmin.AdminClient, if globalBucketTargetSys.isOffline(epURL) { return nil, RemoteTargetConnectionErr{Endpoint: epURL.String(), Err: fmt.Errorf("remote target is offline for endpoint %s", epURL.String())} } - client, err := madmin.New(epURL.Host, accessKey, 
secretKey, epURL.Scheme == "https") - if err != nil { - return nil, err - } - client.SetCustomTransport(globalRemoteTargetTransport) - return client, nil + return madmin.NewWithOptions(epURL.Host, &madmin.Options{ + Creds: credentials.NewStaticV4(accessKey, secretKey, ""), + Secure: epURL.Scheme == "https", + Transport: globalRemoteTargetTransport, + }) } -func getS3Client(pc madmin.PeerSite) (*minioClient.Client, error) { +func getS3Client(pc madmin.PeerSite) (*minio.Client, error) { ep, err := url.Parse(pc.Endpoint) if err != nil { return nil, err @@ -2581,7 +2629,7 @@ func getS3Client(pc madmin.PeerSite) (*minioClient.Client, error) { return nil, RemoteTargetConnectionErr{Endpoint: ep.String(), Err: fmt.Errorf("remote target is offline for endpoint %s", ep.String())} } - return minioClient.New(ep.Host, &minioClient.Options{ + return minio.New(ep.Host, &minio.Options{ Creds: credentials.NewStaticV4(pc.AccessKey, pc.SecretKey, ""), Secure: ep.Scheme == "https", Transport: globalRemoteTargetTransport, @@ -2724,7 +2772,7 @@ func (c *SiteReplicationSys) SiteReplicationStatus(ctx context.Context, objAPI O } } - return + return info, err } const ( @@ -2782,9 +2830,7 @@ func (c *SiteReplicationSys) siteReplicationStatus(ctx context.Context, objAPI O info.Enabled = true info.Sites = make(map[string]madmin.PeerInfo, len(c.state.Peers)) - for d, peer := range c.state.Peers { - info.Sites[d] = peer - } + maps.Copy(info.Sites, c.state.Peers) info.UpdatedAt = c.state.UpdatedAt var maxBuckets int @@ -3007,7 +3053,6 @@ func (c *SiteReplicationSys) siteReplicationStatus(ctx context.Context, objAPI O sum.ReplicatedGroupPolicyMappings++ info.StatsSummary[ps.DeploymentID] = sum } - } } @@ -3054,7 +3099,7 @@ func (c *SiteReplicationSys) siteReplicationStatus(ctx context.Context, objAPI O var policies []*policy.Policy uPolicyCount := 0 for _, ps := range pslc { - plcy, err := policy.ParseConfig(bytes.NewReader([]byte(ps.SRIAMPolicy.Policy))) + plcy, err := policy.ParseConfig(bytes.NewReader([]byte(ps.Policy))) if err != nil { continue } @@ -3271,7 +3316,7 @@ func (c *SiteReplicationSys) siteReplicationStatus(ctx context.Context, objAPI O uRuleCount := 0 for _, rl := range ilmExpRules { var rule lifecycle.Rule - if err := xml.Unmarshal([]byte(rl.ILMExpiryRule.ILMRule), &rule); err != nil { + if err := xml.Unmarshal([]byte(rl.ILMRule), &rule); err != nil { continue } rules = append(rules, &rule) @@ -3327,7 +3372,7 @@ func (c *SiteReplicationSys) siteReplicationStatus(ctx context.Context, objAPI O info.MaxGroups = len(groupDescStats) info.MaxPolicies = len(policyStats) info.MaxILMExpiryRules = len(ilmExpiryRuleStats) - return + return info, err } // isReplicated returns true if count of replicated matches the number of @@ -3548,7 +3593,7 @@ func isILMExpRuleReplicated(cntReplicated, total int, rules []*lifecycle.Rule) b if err != nil { return false } - if !(string(prevRData) == string(rData)) { + if string(prevRData) != string(rData) { return false } } @@ -3768,9 +3813,7 @@ func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI info.ILMExpiryRules[opts.EntityValue] = rule } } else { - for id, rule := range allRules { - info.ILMExpiryRules[id] = rule - } + maps.Copy(info.ILMExpiryRules, allRules) } } if opts.PeerState { @@ -3783,12 +3826,12 @@ func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI if opts.Users || opts.Entity == madmin.SRUserEntity { // Replicate policy mappings on local to all peers. 
- userPolicyMap := make(map[string]MappedPolicy) - stsPolicyMap := make(map[string]MappedPolicy) - svcPolicyMap := make(map[string]MappedPolicy) + userPolicyMap := xsync.NewMapOf[string, MappedPolicy]() + stsPolicyMap := xsync.NewMapOf[string, MappedPolicy]() + svcPolicyMap := xsync.NewMapOf[string, MappedPolicy]() if opts.Entity == madmin.SRUserEntity { if mp, ok := globalIAMSys.store.GetMappedPolicy(opts.EntityValue, false); ok { - userPolicyMap[opts.EntityValue] = mp + userPolicyMap.Store(opts.EntityValue, mp) } } else { stsErr := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, false, stsPolicyMap) @@ -3804,34 +3847,23 @@ func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI return info, errSRBackendIssue(svcErr) } } - info.UserPolicies = make(map[string]madmin.SRPolicyMapping, len(userPolicyMap)) - for user, mp := range userPolicyMap { - info.UserPolicies[user] = madmin.SRPolicyMapping{ - IsGroup: false, - UserOrGroup: user, - UserType: int(regUser), - Policy: mp.Policies, - UpdatedAt: mp.UpdatedAt, - } - } - for stsU, mp := range stsPolicyMap { - info.UserPolicies[stsU] = madmin.SRPolicyMapping{ - IsGroup: false, - UserOrGroup: stsU, - UserType: int(stsUser), - Policy: mp.Policies, - UpdatedAt: mp.UpdatedAt, - } - } - for svcU, mp := range svcPolicyMap { - info.UserPolicies[svcU] = madmin.SRPolicyMapping{ - IsGroup: false, - UserOrGroup: svcU, - UserType: int(svcUser), - Policy: mp.Policies, - UpdatedAt: mp.UpdatedAt, - } + info.UserPolicies = make(map[string]madmin.SRPolicyMapping, userPolicyMap.Size()) + addPolicy := func(t IAMUserType, mp *xsync.MapOf[string, MappedPolicy]) { + mp.Range(func(k string, mp MappedPolicy) bool { + info.UserPolicies[k] = madmin.SRPolicyMapping{ + IsGroup: false, + UserOrGroup: k, + UserType: int(t), + Policy: mp.Policies, + UpdatedAt: mp.UpdatedAt, + } + return true + }) } + addPolicy(regUser, userPolicyMap) + addPolicy(stsUser, stsPolicyMap) + addPolicy(svcUser, svcPolicyMap) + info.UserInfoMap = make(map[string]madmin.UserInfo) if opts.Entity == madmin.SRUserEntity { if ui, err := globalIAMSys.GetUserInfo(ctx, opts.EntityValue); err == nil { @@ -3875,10 +3907,10 @@ func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI if opts.Groups || opts.Entity == madmin.SRGroupEntity { // Replicate policy mappings on local to all peers. 
- groupPolicyMap := make(map[string]MappedPolicy) + groupPolicyMap := xsync.NewMapOf[string, MappedPolicy]() if opts.Entity == madmin.SRGroupEntity { if mp, ok := globalIAMSys.store.GetMappedPolicy(opts.EntityValue, true); ok { - groupPolicyMap[opts.EntityValue] = mp + groupPolicyMap.Store(opts.EntityValue, mp) } } else { stsErr := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, true, groupPolicyMap) @@ -3891,15 +3923,16 @@ func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI } } - info.GroupPolicies = make(map[string]madmin.SRPolicyMapping, len(c.state.Peers)) - for group, mp := range groupPolicyMap { + info.GroupPolicies = make(map[string]madmin.SRPolicyMapping, groupPolicyMap.Size()) + groupPolicyMap.Range(func(group string, mp MappedPolicy) bool { info.GroupPolicies[group] = madmin.SRPolicyMapping{ IsGroup: true, UserOrGroup: group, Policy: mp.Policies, UpdatedAt: mp.UpdatedAt, } - } + return true + }) info.GroupDescMap = make(map[string]madmin.GroupDesc) if opts.Entity == madmin.SRGroupEntity { if gd, err := globalIAMSys.GetGroupDescription(opts.EntityValue); err == nil { @@ -3918,9 +3951,7 @@ func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI return info, errSRBackendIssue(errG) } } - for group, d := range groupDescMap { - info.GroupDescMap[group] = d - } + maps.Copy(info.GroupDescMap, groupDescMap) } } // cache SR metadata info for IAM @@ -4078,7 +4109,7 @@ func (c *SiteReplicationSys) EditPeerCluster(ctx context.Context, peer madmin.Pe wg.Wait() for dID, err := range errs { - logger.LogOnceIf(ctx, fmt.Errorf("unable to update peer %s: %w", state.Peers[dID].Name, err), "site-relication-edit") + replLogOnceIf(ctx, fmt.Errorf("unable to update peer %s: %w", state.Peers[dID].Name, err), "site-relication-edit") } // we can now save the cluster replication configuration state. 
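[Reviewer note — not part of the patch] The policy-mapping hunks above change loadMappedPolicies to fill an xsync.MapOf instead of a plain map, so entries are now read back with Range; returning false from the callback stops iteration, which is how syncToAllPeers aborts on the first replication error. A minimal sketch of that idiom with placeholder types and a hypothetical replicate helper, assuming github.com/puzpuzpuz/xsync/v3:

package main

import (
	"fmt"

	"github.com/puzpuzpuz/xsync/v3"
)

// replicate stands in for c.IAMChangeHook(...) in the patch; purely illustrative.
func replicate(name, policies string) error {
	fmt.Println("replicating mapping:", name, policies)
	return nil
}

func main() {
	m := xsync.NewMapOf[string, string]()
	m.Store("alice", "readwrite")
	m.Store("bob", "readonly")

	// Range visits entries until the callback returns false; capturing err and
	// returning err == nil reproduces the early-exit pattern used above.
	var err error
	m.Range(func(name, policies string) bool {
		err = replicate(name, policies)
		return err == nil
	})
	if err != nil {
		fmt.Println("stopped early:", err)
	}
	fmt.Println("entries:", m.Size())
}
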
@@ -4145,21 +4176,21 @@ func (c *SiteReplicationSys) updateTargetEndpoints(ctx context.Context, prevInfo } err := globalBucketTargetSys.SetTarget(ctx, bucket, &bucketTarget, true) if err != nil { - logger.LogIf(ctx, c.annotatePeerErr(peer.Name, "Bucket target creation error", err)) + replLogIf(ctx, c.annotatePeerErr(peer.Name, "Bucket target creation error", err)) continue } targets, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket) if err != nil { - logger.LogIf(ctx, err) + replLogIf(ctx, err) continue } tgtBytes, err := json.Marshal(&targets) if err != nil { - logger.LogIf(ctx, err) + bugLogIf(ctx, err) continue } if _, err = globalBucketMetadataSys.Update(ctx, bucket, bucketTargetsFile, tgtBytes); err != nil { - logger.LogIf(ctx, err) + replLogIf(ctx, err) continue } } @@ -4374,7 +4405,7 @@ func (c *SiteReplicationSys) healILMExpiryConfig(ctx context.Context, objAPI Obj // If latest peers ILM expiry flags are equal to current peer, no need to heal flagEqual := true for id, peer := range latestPeers { - if !(ps.Peers[id].ReplicateILMExpiry == peer.ReplicateILMExpiry) { + if ps.Peers[id].ReplicateILMExpiry != peer.ReplicateILMExpiry { flagEqual = false break } @@ -4394,7 +4425,7 @@ func (c *SiteReplicationSys) healILMExpiryConfig(ctx context.Context, objAPI Obj return wrapSRErr(err) } if err = admClient.SRStateEdit(ctx, madmin.SRStateEditReq{Peers: latestPeers, UpdatedAt: lastUpdate}); err != nil { - logger.LogIf(ctx, c.annotatePeerErr(ps.Name, siteReplicationEdit, + replLogIf(ctx, c.annotatePeerErr(ps.Name, siteReplicationEdit, fmt.Errorf("Unable to heal site replication state for peer %s from peer %s : %w", ps.Name, latestPeerName, err))) } @@ -4408,6 +4439,7 @@ func (c *SiteReplicationSys) healBuckets(ctx context.Context, objAPI ObjectLayer return err } ilmExpiryCfgHealed := false + opts := validateReplicationDestinationOptions{CheckReady: true} for _, bi := range buckets { bucket := bi.Name info, err := c.siteReplicationStatus(ctx, objAPI, madmin.SRStatusOptions{ @@ -4427,7 +4459,7 @@ func (c *SiteReplicationSys) healBuckets(ctx context.Context, objAPI ObjectLayer c.healVersioningMetadata(ctx, objAPI, bucket, info) c.healOLockConfigMetadata(ctx, objAPI, bucket, info) c.healSSEMetadata(ctx, objAPI, bucket, info) - c.healBucketReplicationConfig(ctx, objAPI, bucket, info) + c.healBucketReplicationConfig(ctx, objAPI, bucket, info, &opts) c.healBucketPolicies(ctx, objAPI, bucket, info) c.healTagMetadata(ctx, objAPI, bucket, info) c.healBucketQuotaConfig(ctx, objAPI, bucket, info) @@ -4497,7 +4529,7 @@ func (c *SiteReplicationSys) healBucketILMExpiry(ctx context.Context, objAPI Obj if dID == globalDeploymentID() { if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketLifecycleConfig, finalConfigData); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal bucket ILM expiry data from peer site %s : %w", latestPeerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal bucket ILM expiry data from peer site %s : %w", latestPeerName, err)) } continue } @@ -4513,7 +4545,7 @@ func (c *SiteReplicationSys) healBucketILMExpiry(ctx context.Context, objAPI Obj ExpiryLCConfig: latestExpLCConfig, UpdatedAt: lastUpdate, }); err != nil { - logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, + replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, fmt.Errorf("Unable to heal bucket ILM expiry data for peer %s from peer %s : %w", peerName, latestPeerName, err))) } @@ -4570,7 +4602,7 @@ func (c *SiteReplicationSys) healTagMetadata(ctx context.Context, 
objAPI ObjectL } if dID == globalDeploymentID() { if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, latestTaggingConfigBytes); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal tagging metadata from peer site %s : %w", latestPeerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal tagging metadata from peer site %s : %w", latestPeerName, err)) } continue } @@ -4586,7 +4618,7 @@ func (c *SiteReplicationSys) healTagMetadata(ctx context.Context, objAPI ObjectL Tags: latestTaggingConfig, }) if err != nil { - logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, + replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, fmt.Errorf("Unable to heal tagging metadata for peer %s from peer %s : %w", peerName, latestPeerName, err))) } } @@ -4634,7 +4666,7 @@ func (c *SiteReplicationSys) healBucketPolicies(ctx context.Context, objAPI Obje } if dID == globalDeploymentID() { if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, latestIAMPolicy); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal bucket policy metadata from peer site %s : %w", latestPeerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal bucket policy metadata from peer site %s : %w", latestPeerName, err)) } continue } @@ -4650,7 +4682,7 @@ func (c *SiteReplicationSys) healBucketPolicies(ctx context.Context, objAPI Obje Policy: latestIAMPolicy, UpdatedAt: lastUpdate, }); err != nil { - logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, + replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, fmt.Errorf("Unable to heal bucket policy metadata for peer %s from peer %s : %w", peerName, latestPeerName, err))) } @@ -4709,7 +4741,7 @@ func (c *SiteReplicationSys) healBucketQuotaConfig(ctx context.Context, objAPI O } if dID == globalDeploymentID() { if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, latestQuotaConfigBytes); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal quota metadata from peer site %s : %w", latestPeerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal quota metadata from peer site %s : %w", latestPeerName, err)) } continue } @@ -4726,7 +4758,7 @@ func (c *SiteReplicationSys) healBucketQuotaConfig(ctx context.Context, objAPI O Quota: latestQuotaConfigBytes, UpdatedAt: lastUpdate, }); err != nil { - logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, + replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, fmt.Errorf("Unable to heal quota config metadata for peer %s from peer %s : %w", peerName, latestPeerName, err))) } @@ -4784,7 +4816,7 @@ func (c *SiteReplicationSys) healVersioningMetadata(ctx context.Context, objAPI } if dID == globalDeploymentID() { if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketVersioningConfig, latestVersioningConfigBytes); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal versioning metadata from peer site %s : %w", latestPeerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal versioning metadata from peer site %s : %w", latestPeerName, err)) } continue } @@ -4801,7 +4833,7 @@ func (c *SiteReplicationSys) healVersioningMetadata(ctx context.Context, objAPI UpdatedAt: lastUpdate, }) if err != nil { - logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, + replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, fmt.Errorf("Unable to heal versioning config metadata for peer %s from peer %s : %w", peerName, latestPeerName, err))) } @@ 
-4859,7 +4891,7 @@ func (c *SiteReplicationSys) healSSEMetadata(ctx context.Context, objAPI ObjectL } if dID == globalDeploymentID() { if _, err := globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, latestSSEConfigBytes); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal sse metadata from peer site %s : %w", latestPeerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal sse metadata from peer site %s : %w", latestPeerName, err)) } continue } @@ -4876,7 +4908,7 @@ func (c *SiteReplicationSys) healSSEMetadata(ctx context.Context, objAPI ObjectL UpdatedAt: lastUpdate, }) if err != nil { - logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, + replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, fmt.Errorf("Unable to heal SSE config metadata for peer %s from peer %s : %w", peerName, latestPeerName, err))) } @@ -4934,7 +4966,7 @@ func (c *SiteReplicationSys) healOLockConfigMetadata(ctx context.Context, objAPI } if dID == globalDeploymentID() { if _, err := globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, latestObjLockConfigBytes); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal objectlock config metadata from peer site %s : %w", latestPeerName, err)) + replLogIf(ctx, fmt.Errorf("Unable to heal objectlock config metadata from peer site %s : %w", latestPeerName, err)) } continue } @@ -4951,7 +4983,7 @@ func (c *SiteReplicationSys) healOLockConfigMetadata(ctx context.Context, objAPI UpdatedAt: lastUpdate, }) if err != nil { - logger.LogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, + replLogIf(ctx, c.annotatePeerErr(peerName, replicateBucketMetadata, fmt.Errorf("Unable to heal object lock config metadata for peer %s from peer %s : %w", peerName, latestPeerName, err))) } @@ -5127,7 +5159,7 @@ func (c *SiteReplicationSys) healBucket(ctx context.Context, objAPI ObjectLayer, return nil } -func (c *SiteReplicationSys) healBucketReplicationConfig(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo) error { +func (c *SiteReplicationSys) healBucketReplicationConfig(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo, opts *validateReplicationDestinationOptions) error { bs := info.BucketStats[bucket] c.RLock() @@ -5181,14 +5213,14 @@ func (c *SiteReplicationSys) healBucketReplicationConfig(ctx context.Context, ob if rcfg != nil && !replMismatch { // validate remote targets on current cluster for this bucket - _, apiErr := validateReplicationDestination(ctx, bucket, rcfg, false) + _, apiErr := validateReplicationDestination(ctx, bucket, rcfg, opts) if apiErr != noError { replMismatch = true } } if replMismatch { - logger.LogIf(ctx, c.annotateErr(configureReplication, c.PeerBucketConfigureReplHandler(ctx, bucket))) + replLogOnceIf(ctx, c.annotateErr(configureReplication, c.PeerBucketConfigureReplHandler(ctx, bucket)), "heal-bucket-relication-config") } return nil } @@ -5281,7 +5313,10 @@ func (c *SiteReplicationSys) healPolicies(ctx context.Context, objAPI ObjectLaye UpdatedAt: lastUpdate, }) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal IAM policy %s from peer site %s -> site %s : %w", policy, latestPeerName, peerName, err)) + replLogOnceIf( + ctx, + fmt.Errorf("Unable to heal IAM policy %s from peer site %s -> site %s : %w", policy, latestPeerName, peerName, err), + fmt.Sprintf("heal-policy-%s", policy)) } } return nil @@ -5342,7 +5377,8 @@ func (c *SiteReplicationSys) healUserPolicies(ctx context.Context, objAPI Object UpdatedAt: lastUpdate, }) if err 
!= nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal IAM user policy mapping for %s from peer site %s -> site %s : %w", user, latestPeerName, peerName, err)) + replLogOnceIf(ctx, fmt.Errorf("Unable to heal IAM user policy mapping from peer site %s -> site %s : %w", latestPeerName, peerName, err), + fmt.Sprintf("heal-user-policy-%s", user)) } } return nil @@ -5405,7 +5441,9 @@ func (c *SiteReplicationSys) healGroupPolicies(ctx context.Context, objAPI Objec UpdatedAt: lastUpdate, }) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal IAM group policy mapping for %s from peer site %s -> site %s : %w", group, latestPeerName, peerName, err)) + replLogOnceIf(ctx, + fmt.Errorf("Unable to heal IAM group policy mapping for from peer site %s -> site %s : %w", latestPeerName, peerName, err), + fmt.Sprintf("heal-group-policy-%s", group)) } } return nil @@ -5429,12 +5467,12 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer, ) for dID, ss := range us { if lastUpdate.IsZero() { - lastUpdate = ss.userInfo.UserInfo.UpdatedAt + lastUpdate = ss.userInfo.UpdatedAt latestID = dID latestUserStat = ss } - if !ss.userInfo.UserInfo.UpdatedAt.IsZero() && ss.userInfo.UserInfo.UpdatedAt.After(lastUpdate) { - lastUpdate = ss.userInfo.UserInfo.UpdatedAt + if !ss.userInfo.UpdatedAt.IsZero() && ss.userInfo.UpdatedAt.After(lastUpdate) { + lastUpdate = ss.userInfo.UpdatedAt latestID = dID latestUserStat = ss } @@ -5466,13 +5504,17 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer, if creds.IsServiceAccount() { claims, err := globalIAMSys.GetClaimsForSvcAcc(ctx, creds.AccessKey) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) + replLogOnceIf(ctx, + fmt.Errorf("Unable to heal service account from peer site %s -> %s : %w", latestPeerName, peerName, err), + fmt.Sprintf("heal-user-%s", user)) continue } _, policy, err := globalIAMSys.GetServiceAccount(ctx, creds.AccessKey) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) + replLogOnceIf(ctx, + fmt.Errorf("Unable to heal service account from peer site %s -> %s : %w", latestPeerName, peerName, err), + fmt.Sprintf("heal-user-%s", user)) continue } @@ -5480,7 +5522,9 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer, if policy != nil { policyJSON, err = json.Marshal(policy) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) + replLogOnceIf(ctx, + fmt.Errorf("Unable to heal service account from peer site %s -> %s : %w", latestPeerName, peerName, err), + fmt.Sprintf("heal-user-%s", user)) continue } } @@ -5494,7 +5538,7 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer, SecretKey: creds.SecretKey, Groups: creds.Groups, Claims: claims, - SessionPolicy: json.RawMessage(policyJSON), + SessionPolicy: policyJSON, Status: creds.Status, Name: creds.Name, Description: creds.Description, @@ -5503,7 +5547,9 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer, }, UpdatedAt: lastUpdate, }); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) + replLogOnceIf(ctx, + fmt.Errorf("Unable to heal service account from peer site %s -> %s : 
%w", latestPeerName, peerName, err), + fmt.Sprintf("heal-user-%s", user)) } continue } @@ -5516,7 +5562,9 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer, // policy. The session token will contain info about policy to // be applied. if !errors.Is(err, errNoSuchUser) { - logger.LogIf(ctx, fmt.Errorf("Unable to heal temporary credentials %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) + replLogOnceIf(ctx, + fmt.Errorf("Unable to heal temporary credentials from peer site %s -> %s : %w", latestPeerName, peerName, err), + fmt.Sprintf("heal-user-%s", user)) continue } } else { @@ -5534,7 +5582,9 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer, }, UpdatedAt: lastUpdate, }); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal temporary credentials %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) + replLogOnceIf(ctx, + fmt.Errorf("Unable to heal temporary credentials from peer site %s -> %s : %w", latestPeerName, peerName, err), + fmt.Sprintf("heal-user-%s", user)) } continue } @@ -5550,7 +5600,9 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer, }, UpdatedAt: lastUpdate, }); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal user %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) + replLogOnceIf(ctx, + fmt.Errorf("Unable to heal user from peer site %s -> %s : %w", latestPeerName, peerName, err), + fmt.Sprintf("heal-user-%s", user)) } } return nil @@ -5614,7 +5666,9 @@ func (c *SiteReplicationSys) healGroups(ctx context.Context, objAPI ObjectLayer, }, UpdatedAt: lastUpdate, }); err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to heal group %s from peer site %s -> site %s : %w", group, latestPeerName, peerName, err)) + replLogOnceIf(ctx, + fmt.Errorf("Unable to heal group from peer site %s -> site %s : %w", latestPeerName, peerName, err), + fmt.Sprintf("heal-group-%s", group)) } } return nil @@ -5631,11 +5685,8 @@ func isGroupDescEqual(g1, g2 madmin.GroupDesc) bool { } for _, v1 := range g1.Members { var found bool - for _, v2 := range g2.Members { - if v1 == v2 { - found = true - break - } + if slices.Contains(g2.Members, v1) { + found = true } if !found { return false @@ -5655,11 +5706,8 @@ func isUserInfoEqual(u1, u2 madmin.UserInfo) bool { } for _, v1 := range u1.MemberOf { var found bool - for _, v2 := range u2.MemberOf { - if v1 == v2 { - found = true - break - } + if slices.Contains(u2.MemberOf, v1) { + found = true } if !found { return false @@ -5740,7 +5788,7 @@ func (c *SiteReplicationSys) startResync(ctx context.Context, objAPI ObjectLayer for _, bi := range buckets { bucket := bi.Name - if _, err := getReplicationConfig(ctx, bucket); err != nil { + if _, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket); err != nil { res.Buckets = append(res.Buckets, madmin.ResyncBucketStatus{ ErrDetail: err.Error(), Bucket: bucket, @@ -5790,7 +5838,7 @@ func (c *SiteReplicationSys) startResync(ctx context.Context, objAPI ObjectLayer }) continue } - if err := globalReplicationPool.resyncer.start(ctx, objAPI, resyncOpts{ + if err := globalReplicationPool.Get().resyncer.start(ctx, objAPI, resyncOpts{ bucket: bucket, arn: tgtArn, resyncID: rs.ResyncID, @@ -5885,8 +5933,8 @@ func (c *SiteReplicationSys) cancelResync(ctx context.Context, objAPI ObjectLaye continue } // update resync state for the bucket - globalReplicationPool.resyncer.Lock() - m, ok := globalReplicationPool.resyncer.statusMap[bucket] + 
globalReplicationPool.Get().resyncer.Lock() + m, ok := globalReplicationPool.Get().resyncer.statusMap[bucket] if !ok { m = newBucketResyncStatus(bucket) } @@ -5896,8 +5944,8 @@ func (c *SiteReplicationSys) cancelResync(ctx context.Context, objAPI ObjectLaye m.TargetsMap[t.Arn] = st m.LastUpdate = UTCNow() } - globalReplicationPool.resyncer.statusMap[bucket] = m - globalReplicationPool.resyncer.Unlock() + globalReplicationPool.Get().resyncer.statusMap[bucket] = m + globalReplicationPool.Get().resyncer.Unlock() } } @@ -5907,7 +5955,7 @@ func (c *SiteReplicationSys) cancelResync(ctx context.Context, objAPI ObjectLaye return res, err } select { - case globalReplicationPool.resyncer.resyncCancelCh <- struct{}{}: + case globalReplicationPool.Get().resyncer.resyncCancelCh <- struct{}{}: case <-ctx.Done(): } @@ -6175,7 +6223,13 @@ func mergeWithCurrentLCConfig(ctx context.Context, bucket string, expLCCfg *stri Rules: rules, ExpiryUpdatedAt: &updatedAt, } - if err := finalLcCfg.Validate(); err != nil { + + rcfg, err := globalBucketObjectLockSys.Get(bucket) + if err != nil { + return nil, err + } + + if err := finalLcCfg.Validate(rcfg); err != nil { return []byte{}, err } finalConfigData, err := xml.Marshal(finalLcCfg) @@ -6195,34 +6249,36 @@ func ilmExpiryReplicationEnabled(sites map[string]madmin.PeerInfo) bool { } type siteReplicatorCred struct { - Creds auth.Credentials + secretKey string sync.RWMutex } // Get or attempt to load site replicator credentials from disk. -func (s *siteReplicatorCred) Get(ctx context.Context) (auth.Credentials, error) { +func (s *siteReplicatorCred) Get(ctx context.Context) (string, error) { s.RLock() - if s.Creds.IsValid() { - s.RUnlock() - return s.Creds, nil - } + secretKey := s.secretKey s.RUnlock() - m := make(map[string]UserIdentity) - if err := globalIAMSys.store.loadUser(ctx, siteReplicatorSvcAcc, svcUser, m); err != nil { - return auth.Credentials{}, err + + if secretKey != "" { + return secretKey, nil + } + + secretKey, err := globalIAMSys.store.loadSecretKey(ctx, siteReplicatorSvcAcc, svcUser) + if err != nil { + return "", err } - s.Set(m[siteReplicatorSvcAcc].Credentials) - return m[siteReplicatorSvcAcc].Credentials, nil + s.Set(secretKey) + return secretKey, nil } -func (s *siteReplicatorCred) Set(c auth.Credentials) { +func (s *siteReplicatorCred) Set(secretKey string) { s.Lock() defer s.Unlock() - s.Creds = c + s.secretKey = secretKey } func (s *siteReplicatorCred) IsValid() bool { s.RLock() defer s.RUnlock() - return s.Creds.IsValid() + return s.secretKey != "" } diff --git a/cmd/speedtest.go b/cmd/speedtest.go index 7226686f304ad..f91db787d011e 100644 --- a/cmd/speedtest.go +++ b/cmd/speedtest.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2021 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. // // This file is part of MinIO Object Storage stack // @@ -27,6 +27,7 @@ import ( "github.com/minio/dperf/pkg/dperf" "github.com/minio/madmin-go/v3" + "github.com/minio/minio/internal/auth" xioutil "github.com/minio/minio/internal/ioutil" ) @@ -41,6 +42,8 @@ type speedTestOpts struct { storageClass string bucketName string enableSha256 bool + enableMultipart bool + creds auth.Credentials } // Get the max throughput and iops numbers. 
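[Editor's note] The siteReplicatorCred change above replaces a cached auth.Credentials with a lazily loaded secret key guarded by a sync.RWMutex: Get takes a read lock to check the cache, releases it, loads from the IAM store only on a miss, and publishes the value via Set. Below is a minimal, self-contained sketch of that read-through cache pattern under stated assumptions: cachedSecret and the load callback are hypothetical stand-ins (the real code calls globalIAMSys.store.loadSecretKey for the siteReplicatorSvcAcc), not the actual MinIO API.

package main

import (
	"context"
	"fmt"
	"sync"
)

// cachedSecret caches a single secret string behind a RWMutex,
// mirroring the shape of the siteReplicatorCred type in the diff above.
type cachedSecret struct {
	secretKey string
	sync.RWMutex
}

// Get returns the cached secret, loading it via the supplied callback on a miss.
// The load happens outside the lock; two goroutines may race to load, but Set
// is idempotent, so the last writer simply wins.
func (s *cachedSecret) Get(ctx context.Context, load func(context.Context) (string, error)) (string, error) {
	s.RLock()
	secretKey := s.secretKey
	s.RUnlock()

	if secretKey != "" {
		return secretKey, nil
	}

	secretKey, err := load(ctx)
	if err != nil {
		return "", err
	}
	s.Set(secretKey)
	return secretKey, nil
}

// Set stores the secret under the write lock.
func (s *cachedSecret) Set(secretKey string) {
	s.Lock()
	defer s.Unlock()
	s.secretKey = secretKey
}

// IsValid reports whether a secret has been cached.
func (s *cachedSecret) IsValid() bool {
	s.RLock()
	defer s.RUnlock()
	return s.secretKey != ""
}

func main() {
	var c cachedSecret
	// Hypothetical loader standing in for reading the service-account
	// secret key from the IAM store on disk.
	loader := func(ctx context.Context) (string, error) {
		return "example-secret", nil
	}
	if _, err := c.Get(context.Background(), loader); err != nil {
		fmt.Println("load failed:", err)
		return
	}
	fmt.Println("cached:", c.IsValid())
}

The design choice this illustrates: holding only the secret key (a string) instead of a full credentials struct keeps the cached state small and makes the "empty means not loaded" check trivial, at the cost of re-deriving anything else from the IAM store when needed.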
@@ -99,7 +102,7 @@ func objectSpeedTest(ctx context.Context, opts speedTestOpts) chan madmin.SpeedT var totalUploadTimes madmin.TimeDurations var totalDownloadTimes madmin.TimeDurations var totalDownloadTTFB madmin.TimeDurations - for i := 0; i < len(throughputHighestResults); i++ { + for i := range len(throughputHighestResults) { errStr := "" if throughputHighestResults[i].Error != "" { errStr = throughputHighestResults[i].Error @@ -107,12 +110,14 @@ func objectSpeedTest(ctx context.Context, opts speedTestOpts) chan madmin.SpeedT // if the default concurrency yields zero results, throw an error. if throughputHighestResults[i].Downloads == 0 && opts.concurrencyStart == concurrency { - errStr = fmt.Sprintf("no results for downloads upon first attempt, concurrency %d and duration %s", opts.concurrencyStart, opts.duration) + errStr = fmt.Sprintf("no results for downloads upon first attempt, concurrency %d and duration %s", + opts.concurrencyStart, opts.duration) } // if the default concurrency yields zero results, throw an error. if throughputHighestResults[i].Uploads == 0 && opts.concurrencyStart == concurrency { - errStr = fmt.Sprintf("no results for uploads upon first attempt, concurrency %d and duration %s", opts.concurrencyStart, opts.duration) + errStr = fmt.Sprintf("no results for uploads upon first attempt, concurrency %d and duration %s", + opts.concurrencyStart, opts.duration) } result.PUTStats.Servers = append(result.PUTStats.Servers, madmin.SpeedTestStatServer{ @@ -160,12 +165,14 @@ func objectSpeedTest(ctx context.Context, opts speedTestOpts) chan madmin.SpeedT } sopts := speedTestOpts{ - objectSize: opts.objectSize, - concurrency: concurrency, - duration: opts.duration, - storageClass: opts.storageClass, - bucketName: opts.bucketName, - enableSha256: opts.enableSha256, + objectSize: opts.objectSize, + concurrency: concurrency, + duration: opts.duration, + storageClass: opts.storageClass, + bucketName: opts.bucketName, + enableSha256: opts.enableSha256, + enableMultipart: opts.enableMultipart, + creds: opts.creds, } results := globalNotificationSys.SpeedTest(ctx, sopts) diff --git a/cmd/storage-datatypes.go b/cmd/storage-datatypes.go index 672cd8bedd3cc..be36fa3595425 100644 --- a/cmd/storage-datatypes.go +++ b/cmd/storage-datatypes.go @@ -21,8 +21,12 @@ import ( "time" "github.com/minio/minio/internal/crypto" + "github.com/minio/minio/internal/grid" + xioutil "github.com/minio/minio/internal/ioutil" ) +//msgp:clearomitted + //go:generate msgp -file=$GOFILE // DeleteOptions represents the disk level delete options available for the APIs @@ -31,6 +35,8 @@ type DeleteOptions struct { Recursive bool `msg:"r"` Immediate bool `msg:"i"` UndoWrite bool `msg:"u"` + // OldDataDir of the previous object + OldDataDir string `msg:"o,omitempty"` // old data dir used only when to revert a rename() } // BaseOptions represents common options for all Storage API calls @@ -95,8 +101,6 @@ type VolsInfo []VolInfo // VolInfo - represents volume stat information. // The above means that any added/deleted fields are incompatible. // -// The above means that any added/deleted fields are incompatible. -// //msgp:tuple VolInfo type VolInfo struct { // Name of the volume. @@ -104,6 +108,12 @@ type VolInfo struct { // Date and time when the volume was created. 
Created time.Time + + // total VolInfo counts + count int + + // Date and time when the volume was deleted, if Deleted + Deleted time.Time } // FilesInfo represent a list of files, additionally @@ -150,6 +160,15 @@ func (f *FileInfoVersions) findVersionIndex(v string) int { if f == nil || v == "" { return -1 } + if v == nullVersionID { + for i, ver := range f.Versions { + if ver.VersionID == "" { + return i + } + } + return -1 + } + for i, ver := range f.Versions { if ver.VersionID == v { return i @@ -251,6 +270,24 @@ type FileInfo struct { Versioned bool `msg:"vs"` } +func (fi FileInfo) shardSize() int64 { + return ceilFrac(fi.Erasure.BlockSize, int64(fi.Erasure.DataBlocks)) +} + +// ShardFileSize - returns final erasure size from original size. +func (fi FileInfo) ShardFileSize(totalLength int64) int64 { + if totalLength == 0 { + return 0 + } + if totalLength == -1 { + return -1 + } + numShards := totalLength / fi.Erasure.BlockSize + lastBlockSize := totalLength % fi.Erasure.BlockSize + lastShardSize := ceilFrac(lastBlockSize, int64(fi.Erasure.DataBlocks)) + return numShards*fi.shardSize() + lastShardSize +} + // ShallowCopy - copies minimal information for READ MRF checks. func (fi FileInfo) ShallowCopy() (n FileInfo) { n.Volume = fi.Volume @@ -258,7 +295,7 @@ func (fi FileInfo) ShallowCopy() (n FileInfo) { n.VersionID = fi.VersionID n.Deleted = fi.Deleted n.Erasure = fi.Erasure - return + return n } // WriteQuorum returns expected write quorum for this FileInfo @@ -361,24 +398,24 @@ func newFileInfo(object string, dataBlocks, parityBlocks int) (fi FileInfo) { // ReadMultipleReq contains information of multiple files to read from disk. type ReadMultipleReq struct { - Bucket string // Bucket. Can be empty if multiple buckets. - Prefix string // Shared prefix of all files. Can be empty. Will be joined to filename without modification. - Files []string // Individual files to read. - MaxSize int64 // Return error if size is exceed. - MetadataOnly bool // Read as XL meta and truncate data. - AbortOn404 bool // Stop reading after first file not found. - MaxResults int // Stop after this many successful results. <= 0 means all. + Bucket string `msg:"bk"` // Bucket. Can be empty if multiple buckets. + Prefix string `msg:"pr,omitempty"` // Shared prefix of all files. Can be empty. Will be joined to filename without modification. + Files []string `msg:"fl"` // Individual files to read. + MaxSize int64 `msg:"ms"` // Return error if size is exceed. + MetadataOnly bool `msg:"mo"` // Read as XL meta and truncate data. + AbortOn404 bool `msg:"ab"` // Stop reading after first file not found. + MaxResults int `msg:"mr"` // Stop after this many successful results. <= 0 means all. } // ReadMultipleResp contains a single response from a ReadMultipleReq. type ReadMultipleResp struct { - Bucket string // Bucket as given by request. - Prefix string // Prefix as given by request. - File string // File name as given in request. - Exists bool // Returns whether the file existed on disk. - Error string // Returns any error when reading. - Data []byte // Contains all data of file. - Modtime time.Time // Modtime of file on disk. + Bucket string `msg:"bk"` // Bucket as given by request. + Prefix string `msg:"pr,omitempty"` // Prefix as given by request. + File string `msg:"fl"` // File name as given in request. + Exists bool `msg:"ex"` // Returns whether the file existed on disk. + Error string `msg:"er,omitempty"` // Returns any error when reading. + Data []byte `msg:"d"` // Contains all data of file. 
+ Modtime time.Time `msg:"m"` // Modtime of file on disk. } // DeleteVersionHandlerParams are parameters for DeleteVersionHandler @@ -433,6 +470,27 @@ type RenameDataHandlerParams struct { Opts RenameOptions `msg:"ro"` } +// RenameDataInlineHandlerParams are parameters for RenameDataHandler with a buffer for inline data. +type RenameDataInlineHandlerParams struct { + RenameDataHandlerParams `msg:"p"` +} + +func newRenameDataInlineHandlerParams() *RenameDataInlineHandlerParams { + buf := grid.GetByteBufferCap(32 + 16<<10) + return &RenameDataInlineHandlerParams{RenameDataHandlerParams{FI: FileInfo{Data: buf[:0]}}} +} + +// Recycle will reuse the memory allocated for the FileInfo data. +func (r *RenameDataInlineHandlerParams) Recycle() { + if r == nil { + return + } + if cap(r.FI.Data) >= xioutil.SmallBlock { + grid.PutByteBuffer(r.FI.Data) + r.FI.Data = nil + } +} + // RenameFileHandlerParams are parameters for RenameFileHandler. type RenameFileHandlerParams struct { DiskID string `msg:"id"` @@ -442,6 +500,17 @@ type RenameFileHandlerParams struct { DstFilePath string `msg:"dp"` } +// RenamePartHandlerParams are parameters for RenamePartHandler. +type RenamePartHandlerParams struct { + DiskID string `msg:"id"` + SrcVolume string `msg:"sv"` + SrcFilePath string `msg:"sp"` + DstVolume string `msg:"dv"` + DstFilePath string `msg:"dp"` + Meta []byte `msg:"m"` + SkipParent string `msg:"kp"` +} + // ReadAllHandlerParams are parameters for ReadAllHandler. type ReadAllHandlerParams struct { DiskID string `msg:"id"` @@ -458,16 +527,60 @@ type WriteAllHandlerParams struct { } // RenameDataResp - RenameData()'s response. +// Provides information about the final state of Rename() +// - on xl.meta (array of versions) on disk to check for version disparity +// - on rewrite dataDir on disk that must be additionally purged +// only after as a 2-phase call, allowing the older dataDir to +// hang-around in-case we need some form of recovery. type RenameDataResp struct { - Signature uint64 `msg:"sig"` + Sign []byte `msg:"s"` + OldDataDir string `msg:"od"` // contains '', it is designed to be passed as value to Delete(bucket, pathJoin(object, dataDir)) +} + +const ( + checkPartUnknown int = iota + + // Changing the order can cause a data loss + // when running two nodes with incompatible versions + checkPartSuccess + checkPartDiskNotFound + checkPartVolumeNotFound + checkPartFileNotFound + checkPartFileCorrupt +) + +// CheckPartsResp is a response of the storage CheckParts and VerifyFile APIs +type CheckPartsResp struct { + Results []int `msg:"r"` } // LocalDiskIDs - GetLocalIDs response. type LocalDiskIDs struct { - IDs []string + IDs []string `msg:"i"` } // ListDirResult - ListDir()'s response. type ListDirResult struct { Entries []string `msg:"e"` } + +// ReadPartsReq - send multiple part paths to read from +type ReadPartsReq struct { + Paths []string `msg:"p"` +} + +// ReadPartsResp - is the response for ReadPartsReq +type ReadPartsResp struct { + Infos []*ObjectPartInfo `msg:"is"` +} + +// DeleteBulkReq - send multiple paths in same delete request. 
+type DeleteBulkReq struct { + Paths []string `msg:"p"` +} + +// DeleteVersionsErrsResp - collection of delete errors +// for bulk version deletes +type DeleteVersionsErrsResp struct { + Errs []string `msg:"e"` +} diff --git a/cmd/storage-datatypes_gen.go b/cmd/storage-datatypes_gen.go index d383a8cfefd12..3671bb2e9d850 100644 --- a/cmd/storage-datatypes_gen.go +++ b/cmd/storage-datatypes_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) @@ -38,6 +38,7 @@ func (z *BaseOptions) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z BaseOptions) EncodeMsg(en *msgp.Writer) (err error) { // map header, size 0 + _ = z err = en.Append(0x80) if err != nil { return @@ -49,6 +50,7 @@ func (z BaseOptions) EncodeMsg(en *msgp.Writer) (err error) { func (z BaseOptions) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) // map header, size 0 + _ = z o = append(o, 0x80) return } @@ -272,7 +274,7 @@ func (z *CheckPartsHandlerParams) Msgsize() (s int) { } // DecodeMsg implements msgp.Decodable -func (z *DeleteFileHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) { +func (z *CheckPartsResp) DecodeMsg(dc *msgp.Reader) (err error) { var field []byte _ = field var zb0001 uint32 @@ -289,29 +291,24 @@ func (z *DeleteFileHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) { return } switch msgp.UnsafeString(field) { - case "id": - z.DiskID, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "DiskID") - return - } - case "v": - z.Volume, err = dc.ReadString() + case "r": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() if err != nil { - err = msgp.WrapError(err, "Volume") + err = msgp.WrapError(err, "Results") return } - case "fp": - z.FilePath, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "FilePath") - return + if cap(z.Results) >= int(zb0002) { + z.Results = (z.Results)[:zb0002] + } else { + z.Results = make([]int, zb0002) } - case "do": - err = z.Opts.DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "Opts") - return + for za0001 := range z.Results { + z.Results[za0001], err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "Results", za0001) + return + } } default: err = dc.Skip() @@ -325,76 +322,43 @@ func (z *DeleteFileHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) { } // EncodeMsg implements msgp.Encodable -func (z *DeleteFileHandlerParams) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 4 - // write "id" - err = en.Append(0x84, 0xa2, 0x69, 0x64) - if err != nil { - return - } - err = en.WriteString(z.DiskID) - if err != nil { - err = msgp.WrapError(err, "DiskID") - return - } - // write "v" - err = en.Append(0xa1, 0x76) - if err != nil { - return - } - err = en.WriteString(z.Volume) - if err != nil { - err = msgp.WrapError(err, "Volume") - return - } - // write "fp" - err = en.Append(0xa2, 0x66, 0x70) - if err != nil { - return - } - err = en.WriteString(z.FilePath) +func (z *CheckPartsResp) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 1 + // write "r" + err = en.Append(0x81, 0xa1, 0x72) if err != nil { - err = msgp.WrapError(err, "FilePath") return } - // write "do" - err = en.Append(0xa2, 0x64, 0x6f) + err = en.WriteArrayHeader(uint32(len(z.Results))) if err != nil { + err = msgp.WrapError(err, "Results") return } - err = z.Opts.EncodeMsg(en) - if err != nil { - err = msgp.WrapError(err, "Opts") - return + for za0001 := range 
z.Results { + err = en.WriteInt(z.Results[za0001]) + if err != nil { + err = msgp.WrapError(err, "Results", za0001) + return + } } return } // MarshalMsg implements msgp.Marshaler -func (z *DeleteFileHandlerParams) MarshalMsg(b []byte) (o []byte, err error) { +func (z *CheckPartsResp) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 4 - // string "id" - o = append(o, 0x84, 0xa2, 0x69, 0x64) - o = msgp.AppendString(o, z.DiskID) - // string "v" - o = append(o, 0xa1, 0x76) - o = msgp.AppendString(o, z.Volume) - // string "fp" - o = append(o, 0xa2, 0x66, 0x70) - o = msgp.AppendString(o, z.FilePath) - // string "do" - o = append(o, 0xa2, 0x64, 0x6f) - o, err = z.Opts.MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Opts") - return + // map header, size 1 + // string "r" + o = append(o, 0x81, 0xa1, 0x72) + o = msgp.AppendArrayHeader(o, uint32(len(z.Results))) + for za0001 := range z.Results { + o = msgp.AppendInt(o, z.Results[za0001]) } return } // UnmarshalMsg implements msgp.Unmarshaler -func (z *DeleteFileHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) { +func (z *CheckPartsResp) UnmarshalMsg(bts []byte) (o []byte, err error) { var field []byte _ = field var zb0001 uint32 @@ -411,29 +375,24 @@ func (z *DeleteFileHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) return } switch msgp.UnsafeString(field) { - case "id": - z.DiskID, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "DiskID") - return - } - case "v": - z.Volume, bts, err = msgp.ReadStringBytes(bts) + case "r": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { - err = msgp.WrapError(err, "Volume") + err = msgp.WrapError(err, "Results") return } - case "fp": - z.FilePath, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "FilePath") - return + if cap(z.Results) >= int(zb0002) { + z.Results = (z.Results)[:zb0002] + } else { + z.Results = make([]int, zb0002) } - case "do": - bts, err = z.Opts.UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "Opts") - return + for za0001 := range z.Results { + z.Results[za0001], bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Results", za0001) + return + } } default: bts, err = msgp.Skip(bts) @@ -448,13 +407,13 @@ func (z *DeleteFileHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *DeleteFileHandlerParams) Msgsize() (s int) { - s = 1 + 3 + msgp.StringPrefixSize + len(z.DiskID) + 2 + msgp.StringPrefixSize + len(z.Volume) + 3 + msgp.StringPrefixSize + len(z.FilePath) + 3 + z.Opts.Msgsize() +func (z *CheckPartsResp) Msgsize() (s int) { + s = 1 + 2 + msgp.ArrayHeaderSize + (len(z.Results) * (msgp.IntSize)) return } // DecodeMsg implements msgp.Decodable -func (z *DeleteOptions) DecodeMsg(dc *msgp.Reader) (err error) { +func (z *DeleteBulkReq) DecodeMsg(dc *msgp.Reader) (err error) { var field []byte _ = field var zb0001 uint32 @@ -471,46 +430,24 @@ func (z *DeleteOptions) DecodeMsg(dc *msgp.Reader) (err error) { return } switch msgp.UnsafeString(field) { - case "BaseOptions": + case "p": var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() + zb0002, err = dc.ReadArrayHeader() if err != nil { - err = msgp.WrapError(err, "BaseOptions") + err = msgp.WrapError(err, "Paths") return } - for zb0002 > 0 { - zb0002-- - field, err = dc.ReadMapKeyPtr() + if 
cap(z.Paths) >= int(zb0002) { + z.Paths = (z.Paths)[:zb0002] + } else { + z.Paths = make([]string, zb0002) + } + for za0001 := range z.Paths { + z.Paths[za0001], err = dc.ReadString() if err != nil { - err = msgp.WrapError(err, "BaseOptions") + err = msgp.WrapError(err, "Paths", za0001) return } - switch msgp.UnsafeString(field) { - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "BaseOptions") - return - } - } - } - case "r": - z.Recursive, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "Recursive") - return - } - case "i": - z.Immediate, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "Immediate") - return - } - case "u": - z.UndoWrite, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "UndoWrite") - return } default: err = dc.Skip() @@ -524,73 +461,43 @@ func (z *DeleteOptions) DecodeMsg(dc *msgp.Reader) (err error) { } // EncodeMsg implements msgp.Encodable -func (z *DeleteOptions) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 4 - // write "BaseOptions" - err = en.Append(0x84, 0xab, 0x42, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73) - if err != nil { - return - } - // map header, size 0 - err = en.Append(0x80) - if err != nil { - return - } - // write "r" - err = en.Append(0xa1, 0x72) - if err != nil { - return - } - err = en.WriteBool(z.Recursive) - if err != nil { - err = msgp.WrapError(err, "Recursive") - return - } - // write "i" - err = en.Append(0xa1, 0x69) - if err != nil { - return - } - err = en.WriteBool(z.Immediate) +func (z *DeleteBulkReq) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 1 + // write "p" + err = en.Append(0x81, 0xa1, 0x70) if err != nil { - err = msgp.WrapError(err, "Immediate") return } - // write "u" - err = en.Append(0xa1, 0x75) + err = en.WriteArrayHeader(uint32(len(z.Paths))) if err != nil { + err = msgp.WrapError(err, "Paths") return } - err = en.WriteBool(z.UndoWrite) - if err != nil { - err = msgp.WrapError(err, "UndoWrite") - return + for za0001 := range z.Paths { + err = en.WriteString(z.Paths[za0001]) + if err != nil { + err = msgp.WrapError(err, "Paths", za0001) + return + } } return } // MarshalMsg implements msgp.Marshaler -func (z *DeleteOptions) MarshalMsg(b []byte) (o []byte, err error) { +func (z *DeleteBulkReq) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 4 - // string "BaseOptions" - o = append(o, 0x84, 0xab, 0x42, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73) - // map header, size 0 - o = append(o, 0x80) - // string "r" - o = append(o, 0xa1, 0x72) - o = msgp.AppendBool(o, z.Recursive) - // string "i" - o = append(o, 0xa1, 0x69) - o = msgp.AppendBool(o, z.Immediate) - // string "u" - o = append(o, 0xa1, 0x75) - o = msgp.AppendBool(o, z.UndoWrite) + // map header, size 1 + // string "p" + o = append(o, 0x81, 0xa1, 0x70) + o = msgp.AppendArrayHeader(o, uint32(len(z.Paths))) + for za0001 := range z.Paths { + o = msgp.AppendString(o, z.Paths[za0001]) + } return } // UnmarshalMsg implements msgp.Unmarshaler -func (z *DeleteOptions) UnmarshalMsg(bts []byte) (o []byte, err error) { +func (z *DeleteBulkReq) UnmarshalMsg(bts []byte) (o []byte, err error) { var field []byte _ = field var zb0001 uint32 @@ -607,46 +514,24 @@ func (z *DeleteOptions) UnmarshalMsg(bts []byte) (o []byte, err error) { return } switch msgp.UnsafeString(field) { - case "BaseOptions": + case "p": var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + zb0002, bts, err = 
msgp.ReadArrayHeaderBytes(bts) if err != nil { - err = msgp.WrapError(err, "BaseOptions") + err = msgp.WrapError(err, "Paths") return } - for zb0002 > 0 { - zb0002-- - field, bts, err = msgp.ReadMapKeyZC(bts) + if cap(z.Paths) >= int(zb0002) { + z.Paths = (z.Paths)[:zb0002] + } else { + z.Paths = make([]string, zb0002) + } + for za0001 := range z.Paths { + z.Paths[za0001], bts, err = msgp.ReadStringBytes(bts) if err != nil { - err = msgp.WrapError(err, "BaseOptions") + err = msgp.WrapError(err, "Paths", za0001) return } - switch msgp.UnsafeString(field) { - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "BaseOptions") - return - } - } - } - case "r": - z.Recursive, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Recursive") - return - } - case "i": - z.Immediate, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Immediate") - return - } - case "u": - z.UndoWrite, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - err = msgp.WrapError(err, "UndoWrite") - return } default: bts, err = msgp.Skip(bts) @@ -661,13 +546,16 @@ func (z *DeleteOptions) UnmarshalMsg(bts []byte) (o []byte, err error) { } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *DeleteOptions) Msgsize() (s int) { - s = 1 + 12 + 1 + 2 + msgp.BoolSize + 2 + msgp.BoolSize + 2 + msgp.BoolSize +func (z *DeleteBulkReq) Msgsize() (s int) { + s = 1 + 2 + msgp.ArrayHeaderSize + for za0001 := range z.Paths { + s += msgp.StringPrefixSize + len(z.Paths[za0001]) + } return } // DecodeMsg implements msgp.Decodable -func (z *DeleteVersionHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) { +func (z *DeleteFileHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) { var field []byte _ = field var zb0001 uint32 @@ -702,24 +590,12 @@ func (z *DeleteVersionHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "FilePath") return } - case "fdm": - z.ForceDelMarker, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "ForceDelMarker") - return - } case "do": err = z.Opts.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Opts") return } - case "fi": - err = z.FI.DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "FI") - return - } default: err = dc.Skip() if err != nil { @@ -732,10 +608,10 @@ func (z *DeleteVersionHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) { } // EncodeMsg implements msgp.Encodable -func (z *DeleteVersionHandlerParams) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 6 +func (z *DeleteFileHandlerParams) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 4 // write "id" - err = en.Append(0x86, 0xa2, 0x69, 0x64) + err = en.Append(0x84, 0xa2, 0x69, 0x64) if err != nil { return } @@ -764,16 +640,6 @@ func (z *DeleteVersionHandlerParams) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "FilePath") return } - // write "fdm" - err = en.Append(0xa3, 0x66, 0x64, 0x6d) - if err != nil { - return - } - err = en.WriteBool(z.ForceDelMarker) - if err != nil { - err = msgp.WrapError(err, "ForceDelMarker") - return - } // write "do" err = en.Append(0xa2, 0x64, 0x6f) if err != nil { @@ -784,25 +650,15 @@ func (z *DeleteVersionHandlerParams) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "Opts") return } - // write "fi" - err = en.Append(0xa2, 0x66, 0x69) - if err != nil { - return - } - err = z.FI.EncodeMsg(en) - if err != nil { - err = 
msgp.WrapError(err, "FI") - return - } return } // MarshalMsg implements msgp.Marshaler -func (z *DeleteVersionHandlerParams) MarshalMsg(b []byte) (o []byte, err error) { +func (z *DeleteFileHandlerParams) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 6 + // map header, size 4 // string "id" - o = append(o, 0x86, 0xa2, 0x69, 0x64) + o = append(o, 0x84, 0xa2, 0x69, 0x64) o = msgp.AppendString(o, z.DiskID) // string "v" o = append(o, 0xa1, 0x76) @@ -810,9 +666,6 @@ func (z *DeleteVersionHandlerParams) MarshalMsg(b []byte) (o []byte, err error) // string "fp" o = append(o, 0xa2, 0x66, 0x70) o = msgp.AppendString(o, z.FilePath) - // string "fdm" - o = append(o, 0xa3, 0x66, 0x64, 0x6d) - o = msgp.AppendBool(o, z.ForceDelMarker) // string "do" o = append(o, 0xa2, 0x64, 0x6f) o, err = z.Opts.MarshalMsg(o) @@ -820,18 +673,11 @@ func (z *DeleteVersionHandlerParams) MarshalMsg(b []byte) (o []byte, err error) err = msgp.WrapError(err, "Opts") return } - // string "fi" - o = append(o, 0xa2, 0x66, 0x69) - o, err = z.FI.MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "FI") - return - } return } // UnmarshalMsg implements msgp.Unmarshaler -func (z *DeleteVersionHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) { +func (z *DeleteFileHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) { var field []byte _ = field var zb0001 uint32 @@ -866,24 +712,12 @@ func (z *DeleteVersionHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err err err = msgp.WrapError(err, "FilePath") return } - case "fdm": - z.ForceDelMarker, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - err = msgp.WrapError(err, "ForceDelMarker") - return - } case "do": bts, err = z.Opts.UnmarshalMsg(bts) if err != nil { err = msgp.WrapError(err, "Opts") return } - case "fi": - bts, err = z.FI.UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "FI") - return - } default: bts, err = msgp.Skip(bts) if err != nil { @@ -897,136 +731,803 @@ func (z *DeleteVersionHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err err } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *DeleteVersionHandlerParams) Msgsize() (s int) { - s = 1 + 3 + msgp.StringPrefixSize + len(z.DiskID) + 2 + msgp.StringPrefixSize + len(z.Volume) + 3 + msgp.StringPrefixSize + len(z.FilePath) + 4 + msgp.BoolSize + 3 + z.Opts.Msgsize() + 3 + z.FI.Msgsize() +func (z *DeleteFileHandlerParams) Msgsize() (s int) { + s = 1 + 3 + msgp.StringPrefixSize + len(z.DiskID) + 2 + msgp.StringPrefixSize + len(z.Volume) + 3 + msgp.StringPrefixSize + len(z.FilePath) + 3 + z.Opts.Msgsize() return } // DecodeMsg implements msgp.Decodable -func (z *DiskInfo) DecodeMsg(dc *msgp.Reader) (err error) { +func (z *DeleteOptions) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field var zb0001 uint32 - zb0001, err = dc.ReadArrayHeader() + zb0001, err = dc.ReadMapHeader() if err != nil { err = msgp.WrapError(err) return } - if zb0001 != 18 { - err = msgp.ArrayError{Wanted: 18, Got: zb0001} - return - } - z.Total, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "Total") - return - } - z.Free, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "Free") - return - } - z.Used, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "Used") - return - } - z.UsedInodes, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "UsedInodes") - return - } - z.FreeInodes, err = 
dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "FreeInodes") - return - } - z.Major, err = dc.ReadUint32() - if err != nil { - err = msgp.WrapError(err, "Major") - return - } - z.Minor, err = dc.ReadUint32() - if err != nil { - err = msgp.WrapError(err, "Minor") - return - } - z.NRRequests, err = dc.ReadUint64() - if err != nil { - err = msgp.WrapError(err, "NRRequests") - return - } - z.FSType, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "FSType") - return - } - z.RootDisk, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "RootDisk") - return - } - z.Healing, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "Healing") - return - } - z.Scanning, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "Scanning") - return - } - z.Endpoint, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Endpoint") - return - } - z.MountPath, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "MountPath") - return - } - z.ID, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "ID") - return - } - z.Rotational, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "Rotational") - return - } - err = z.Metrics.DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "Metrics") - return + var zb0001Mask uint8 /* 1 bits */ + _ = zb0001Mask + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "BaseOptions": + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "BaseOptions") + return + } + for zb0002 > 0 { + zb0002-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err, "BaseOptions") + return + } + switch msgp.UnsafeString(field) { + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "BaseOptions") + return + } + } + } + case "r": + z.Recursive, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "Recursive") + return + } + case "i": + z.Immediate, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "Immediate") + return + } + case "u": + z.UndoWrite, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "UndoWrite") + return + } + case "o": + z.OldDataDir, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "OldDataDir") + return + } + zb0001Mask |= 0x1 + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } } - z.Error, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Error") - return + // Clear omitted fields. 
+ if (zb0001Mask & 0x1) == 0 { + z.OldDataDir = "" } + return } // EncodeMsg implements msgp.Encodable -func (z *DiskInfo) EncodeMsg(en *msgp.Writer) (err error) { - // array header, size 18 - err = en.Append(0xdc, 0x0, 0x12) - if err != nil { - return +func (z *DeleteOptions) EncodeMsg(en *msgp.Writer) (err error) { + // check for omitted fields + zb0001Len := uint32(5) + var zb0001Mask uint8 /* 5 bits */ + _ = zb0001Mask + if z.OldDataDir == "" { + zb0001Len-- + zb0001Mask |= 0x10 } - err = en.WriteUint64(z.Total) + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) if err != nil { - err = msgp.WrapError(err, "Total") return } - err = en.WriteUint64(z.Free) - if err != nil { - err = msgp.WrapError(err, "Free") - return + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // write "BaseOptions" + err = en.Append(0xab, 0x42, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73) + if err != nil { + return + } + // map header, size 0 + _ = z.BaseOptions + err = en.Append(0x80) + if err != nil { + return + } + // write "r" + err = en.Append(0xa1, 0x72) + if err != nil { + return + } + err = en.WriteBool(z.Recursive) + if err != nil { + err = msgp.WrapError(err, "Recursive") + return + } + // write "i" + err = en.Append(0xa1, 0x69) + if err != nil { + return + } + err = en.WriteBool(z.Immediate) + if err != nil { + err = msgp.WrapError(err, "Immediate") + return + } + // write "u" + err = en.Append(0xa1, 0x75) + if err != nil { + return + } + err = en.WriteBool(z.UndoWrite) + if err != nil { + err = msgp.WrapError(err, "UndoWrite") + return + } + if (zb0001Mask & 0x10) == 0 { // if not omitted + // write "o" + err = en.Append(0xa1, 0x6f) + if err != nil { + return + } + err = en.WriteString(z.OldDataDir) + if err != nil { + err = msgp.WrapError(err, "OldDataDir") + return + } + } } - err = en.WriteUint64(z.Used) - if err != nil { - err = msgp.WrapError(err, "Used") + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *DeleteOptions) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // check for omitted fields + zb0001Len := uint32(5) + var zb0001Mask uint8 /* 5 bits */ + _ = zb0001Mask + if z.OldDataDir == "" { + zb0001Len-- + zb0001Mask |= 0x10 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // string "BaseOptions" + o = append(o, 0xab, 0x42, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73) + // map header, size 0 + _ = z.BaseOptions + o = append(o, 0x80) + // string "r" + o = append(o, 0xa1, 0x72) + o = msgp.AppendBool(o, z.Recursive) + // string "i" + o = append(o, 0xa1, 0x69) + o = msgp.AppendBool(o, z.Immediate) + // string "u" + o = append(o, 0xa1, 0x75) + o = msgp.AppendBool(o, z.UndoWrite) + if (zb0001Mask & 0x10) == 0 { // if not omitted + // string "o" + o = append(o, 0xa1, 0x6f) + o = msgp.AppendString(o, z.OldDataDir) + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *DeleteOptions) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + var zb0001Mask uint8 /* 1 bits */ + _ = zb0001Mask + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "BaseOptions": + var zb0002 uint32 + 
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "BaseOptions") + return + } + for zb0002 > 0 { + zb0002-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err, "BaseOptions") + return + } + switch msgp.UnsafeString(field) { + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "BaseOptions") + return + } + } + } + case "r": + z.Recursive, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Recursive") + return + } + case "i": + z.Immediate, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Immediate") + return + } + case "u": + z.UndoWrite, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "UndoWrite") + return + } + case "o": + z.OldDataDir, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "OldDataDir") + return + } + zb0001Mask |= 0x1 + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + // Clear omitted fields. + if (zb0001Mask & 0x1) == 0 { + z.OldDataDir = "" + } + + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *DeleteOptions) Msgsize() (s int) { + s = 1 + 12 + 1 + 2 + msgp.BoolSize + 2 + msgp.BoolSize + 2 + msgp.BoolSize + 2 + msgp.StringPrefixSize + len(z.OldDataDir) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *DeleteVersionHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "id": + z.DiskID, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "DiskID") + return + } + case "v": + z.Volume, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Volume") + return + } + case "fp": + z.FilePath, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "FilePath") + return + } + case "fdm": + z.ForceDelMarker, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "ForceDelMarker") + return + } + case "do": + err = z.Opts.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Opts") + return + } + case "fi": + err = z.FI.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "FI") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *DeleteVersionHandlerParams) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 6 + // write "id" + err = en.Append(0x86, 0xa2, 0x69, 0x64) + if err != nil { + return + } + err = en.WriteString(z.DiskID) + if err != nil { + err = msgp.WrapError(err, "DiskID") + return + } + // write "v" + err = en.Append(0xa1, 0x76) + if err != nil { + return + } + err = en.WriteString(z.Volume) + if err != nil { + err = msgp.WrapError(err, "Volume") + return + } + // write "fp" + err = en.Append(0xa2, 0x66, 0x70) + if err != nil { + return + } + err = en.WriteString(z.FilePath) + if err != nil { + err = msgp.WrapError(err, "FilePath") + return + } + // write "fdm" + err = en.Append(0xa3, 0x66, 0x64, 0x6d) + if err != nil { + return + } + err = en.WriteBool(z.ForceDelMarker) + if err 
!= nil { + err = msgp.WrapError(err, "ForceDelMarker") + return + } + // write "do" + err = en.Append(0xa2, 0x64, 0x6f) + if err != nil { + return + } + err = z.Opts.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Opts") + return + } + // write "fi" + err = en.Append(0xa2, 0x66, 0x69) + if err != nil { + return + } + err = z.FI.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "FI") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *DeleteVersionHandlerParams) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 6 + // string "id" + o = append(o, 0x86, 0xa2, 0x69, 0x64) + o = msgp.AppendString(o, z.DiskID) + // string "v" + o = append(o, 0xa1, 0x76) + o = msgp.AppendString(o, z.Volume) + // string "fp" + o = append(o, 0xa2, 0x66, 0x70) + o = msgp.AppendString(o, z.FilePath) + // string "fdm" + o = append(o, 0xa3, 0x66, 0x64, 0x6d) + o = msgp.AppendBool(o, z.ForceDelMarker) + // string "do" + o = append(o, 0xa2, 0x64, 0x6f) + o, err = z.Opts.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Opts") + return + } + // string "fi" + o = append(o, 0xa2, 0x66, 0x69) + o, err = z.FI.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "FI") + return + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *DeleteVersionHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "id": + z.DiskID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DiskID") + return + } + case "v": + z.Volume, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Volume") + return + } + case "fp": + z.FilePath, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "FilePath") + return + } + case "fdm": + z.ForceDelMarker, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ForceDelMarker") + return + } + case "do": + bts, err = z.Opts.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Opts") + return + } + case "fi": + bts, err = z.FI.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "FI") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *DeleteVersionHandlerParams) Msgsize() (s int) { + s = 1 + 3 + msgp.StringPrefixSize + len(z.DiskID) + 2 + msgp.StringPrefixSize + len(z.Volume) + 3 + msgp.StringPrefixSize + len(z.FilePath) + 4 + msgp.BoolSize + 3 + z.Opts.Msgsize() + 3 + z.FI.Msgsize() + return +} + +// DecodeMsg implements msgp.Decodable +func (z *DeleteVersionsErrsResp) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "e": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = 
msgp.WrapError(err, "Errs") + return + } + if cap(z.Errs) >= int(zb0002) { + z.Errs = (z.Errs)[:zb0002] + } else { + z.Errs = make([]string, zb0002) + } + for za0001 := range z.Errs { + z.Errs[za0001], err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Errs", za0001) + return + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *DeleteVersionsErrsResp) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 1 + // write "e" + err = en.Append(0x81, 0xa1, 0x65) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Errs))) + if err != nil { + err = msgp.WrapError(err, "Errs") + return + } + for za0001 := range z.Errs { + err = en.WriteString(z.Errs[za0001]) + if err != nil { + err = msgp.WrapError(err, "Errs", za0001) + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *DeleteVersionsErrsResp) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 1 + // string "e" + o = append(o, 0x81, 0xa1, 0x65) + o = msgp.AppendArrayHeader(o, uint32(len(z.Errs))) + for za0001 := range z.Errs { + o = msgp.AppendString(o, z.Errs[za0001]) + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *DeleteVersionsErrsResp) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "e": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Errs") + return + } + if cap(z.Errs) >= int(zb0002) { + z.Errs = (z.Errs)[:zb0002] + } else { + z.Errs = make([]string, zb0002) + } + for za0001 := range z.Errs { + z.Errs[za0001], bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Errs", za0001) + return + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *DeleteVersionsErrsResp) Msgsize() (s int) { + s = 1 + 2 + msgp.ArrayHeaderSize + for za0001 := range z.Errs { + s += msgp.StringPrefixSize + len(z.Errs[za0001]) + } + return +} + +// DecodeMsg implements msgp.Decodable +func (z *DiskInfo) DecodeMsg(dc *msgp.Reader) (err error) { + var zb0001 uint32 + zb0001, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 != 18 { + err = msgp.ArrayError{Wanted: 18, Got: zb0001} + return + } + z.Total, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "Total") + return + } + z.Free, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "Free") + return + } + z.Used, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "Used") + return + } + z.UsedInodes, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "UsedInodes") + return + } + z.FreeInodes, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "FreeInodes") + return + } + z.Major, err = dc.ReadUint32() + if err != nil { + err = msgp.WrapError(err, "Major") + return + } + z.Minor, err = dc.ReadUint32() + if err 
!= nil { + err = msgp.WrapError(err, "Minor") + return + } + z.NRRequests, err = dc.ReadUint64() + if err != nil { + err = msgp.WrapError(err, "NRRequests") + return + } + z.FSType, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "FSType") + return + } + z.RootDisk, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "RootDisk") + return + } + z.Healing, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "Healing") + return + } + z.Scanning, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "Scanning") + return + } + z.Endpoint, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Endpoint") + return + } + z.MountPath, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "MountPath") + return + } + z.ID, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "ID") + return + } + z.Rotational, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "Rotational") + return + } + err = z.Metrics.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Metrics") + return + } + z.Error, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Error") + return + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *DiskInfo) EncodeMsg(en *msgp.Writer) (err error) { + // array header, size 18 + err = en.Append(0xdc, 0x0, 0x12) + if err != nil { + return + } + err = en.WriteUint64(z.Total) + if err != nil { + err = msgp.WrapError(err, "Total") + return + } + err = en.WriteUint64(z.Free) + if err != nil { + err = msgp.WrapError(err, "Free") + return + } + err = en.WriteUint64(z.Used) + if err != nil { + err = msgp.WrapError(err, "Used") return } err = en.WriteUint64(z.UsedInodes) @@ -1430,19 +1931,17 @@ func (z *DiskMetrics) DecodeMsg(dc *msgp.Reader) (err error) { if z.LastMinute == nil { z.LastMinute = make(map[string]AccElem, zb0002) } else if len(z.LastMinute) > 0 { - for key := range z.LastMinute { - delete(z.LastMinute, key) - } + clear(z.LastMinute) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 AccElem za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "LastMinute") return } + var za0002 AccElem err = za0002.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "LastMinute", za0001) @@ -1460,19 +1959,17 @@ func (z *DiskMetrics) DecodeMsg(dc *msgp.Reader) (err error) { if z.APICalls == nil { z.APICalls = make(map[string]uint64, zb0003) } else if len(z.APICalls) > 0 { - for key := range z.APICalls { - delete(z.APICalls, key) - } + clear(z.APICalls) } for zb0003 > 0 { zb0003-- var za0003 string - var za0004 uint64 za0003, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "APICalls") return } + var za0004 uint64 za0004, err = dc.ReadUint64() if err != nil { err = msgp.WrapError(err, "APICalls", za0003) @@ -1689,14 +2186,12 @@ func (z *DiskMetrics) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.LastMinute == nil { z.LastMinute = make(map[string]AccElem, zb0002) } else if len(z.LastMinute) > 0 { - for key := range z.LastMinute { - delete(z.LastMinute, key) - } + clear(z.LastMinute) } for zb0002 > 0 { - var za0001 string var za0002 AccElem zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "LastMinute") @@ -1719,14 +2214,12 @@ func (z *DiskMetrics) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.APICalls == nil { z.APICalls = make(map[string]uint64, zb0003) } else if len(z.APICalls) > 0 { - for key := range z.APICalls { - 
delete(z.APICalls, key) - } + clear(z.APICalls) } for zb0003 > 0 { - var za0003 string var za0004 uint64 zb0003-- + var za0003 string za0003, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "APICalls") @@ -1902,19 +2395,19 @@ func (z *FileInfo) DecodeMsg(dc *msgp.Reader) (err error) { if z.Metadata == nil { z.Metadata = make(map[string]string, zb0002) } else if len(z.Metadata) > 0 { - for key := range z.Metadata { - delete(z.Metadata, key) - } + clear(z.Metadata) } + var field []byte + _ = field for zb0002 > 0 { zb0002-- var za0001 string - var za0002 string za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Metadata") return } + var za0002 string za0002, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Metadata", za0001) @@ -1968,6 +2461,9 @@ func (z *FileInfo) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "Data") return } + if z.Data == nil { + z.Data = make([]byte, 0) + } } z.NumVersions, err = dc.ReadInt() if err != nil { @@ -2002,6 +2498,9 @@ func (z *FileInfo) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "Checksum") return } + if z.Checksum == nil { + z.Checksum = make([]byte, 0) + } } z.Versioned, err = dc.ReadBool() if err != nil { @@ -2358,14 +2857,14 @@ func (z *FileInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.Metadata == nil { z.Metadata = make(map[string]string, zb0002) } else if len(z.Metadata) > 0 { - for key := range z.Metadata { - delete(z.Metadata, key) - } + clear(z.Metadata) } + var field []byte + _ = field for zb0002 > 0 { - var za0001 string var za0002 string zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Metadata") @@ -2420,172 +2919,528 @@ func (z *FileInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "Data") return } + if z.Data == nil { + z.Data = make([]byte, 0) + } } z.NumVersions, bts, err = msgp.ReadIntBytes(bts) if err != nil { err = msgp.WrapError(err, "NumVersions") return } - z.SuccessorModTime, bts, err = msgp.ReadTimeBytes(bts) + z.SuccessorModTime, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SuccessorModTime") + return + } + z.Fresh, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Fresh") + return + } + z.Idx, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Idx") + return + } + if msgp.IsNil(bts) { + bts = bts[1:] + z.Checksum = nil + } else { + z.Checksum, bts, err = msgp.ReadBytesBytes(bts, z.Checksum) + if err != nil { + err = msgp.WrapError(err, "Checksum") + return + } + if z.Checksum == nil { + z.Checksum = make([]byte, 0) + } + } + z.Versioned, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Versioned") + return + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *FileInfo) Msgsize() (s int) { + s = 3 + msgp.StringPrefixSize + len(z.Volume) + msgp.StringPrefixSize + len(z.Name) + msgp.StringPrefixSize + len(z.VersionID) + msgp.BoolSize + msgp.BoolSize + msgp.StringPrefixSize + len(z.TransitionStatus) + msgp.StringPrefixSize + len(z.TransitionedObjName) + msgp.StringPrefixSize + len(z.TransitionTier) + msgp.StringPrefixSize + len(z.TransitionVersionID) + msgp.BoolSize + msgp.StringPrefixSize + len(z.DataDir) + msgp.BoolSize + msgp.TimeSize + msgp.Int64Size + msgp.Uint32Size + msgp.Uint64Size + 
msgp.MapHeaderSize + if z.Metadata != nil { + for za0001, za0002 := range z.Metadata { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) + } + } + s += msgp.ArrayHeaderSize + for za0003 := range z.Parts { + s += z.Parts[za0003].Msgsize() + } + s += z.Erasure.Msgsize() + msgp.BoolSize + z.ReplicationState.Msgsize() + msgp.BytesPrefixSize + len(z.Data) + msgp.IntSize + msgp.TimeSize + msgp.BoolSize + msgp.IntSize + msgp.BytesPrefixSize + len(z.Checksum) + msgp.BoolSize + return +} + +// DecodeMsg implements msgp.Decodable +func (z *FileInfoVersions) DecodeMsg(dc *msgp.Reader) (err error) { + var zb0001 uint32 + zb0001, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 != 5 { + err = msgp.ArrayError{Wanted: 5, Got: zb0001} + return + } + z.Volume, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Volume") + return + } + z.Name, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + z.LatestModTime, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "LatestModTime") + return + } + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Versions") + return + } + if cap(z.Versions) >= int(zb0002) { + z.Versions = (z.Versions)[:zb0002] + } else { + z.Versions = make([]FileInfo, zb0002) + } + for za0001 := range z.Versions { + err = z.Versions[za0001].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Versions", za0001) + return + } + } + var zb0003 uint32 + zb0003, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "FreeVersions") + return + } + if cap(z.FreeVersions) >= int(zb0003) { + z.FreeVersions = (z.FreeVersions)[:zb0003] + } else { + z.FreeVersions = make([]FileInfo, zb0003) + } + for za0002 := range z.FreeVersions { + err = z.FreeVersions[za0002].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "FreeVersions", za0002) + return + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *FileInfoVersions) EncodeMsg(en *msgp.Writer) (err error) { + // array header, size 5 + err = en.Append(0x95) + if err != nil { + return + } + err = en.WriteString(z.Volume) + if err != nil { + err = msgp.WrapError(err, "Volume") + return + } + err = en.WriteString(z.Name) + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + err = en.WriteTime(z.LatestModTime) + if err != nil { + err = msgp.WrapError(err, "LatestModTime") + return + } + err = en.WriteArrayHeader(uint32(len(z.Versions))) + if err != nil { + err = msgp.WrapError(err, "Versions") + return + } + for za0001 := range z.Versions { + err = z.Versions[za0001].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Versions", za0001) + return + } + } + err = en.WriteArrayHeader(uint32(len(z.FreeVersions))) + if err != nil { + err = msgp.WrapError(err, "FreeVersions") + return + } + for za0002 := range z.FreeVersions { + err = z.FreeVersions[za0002].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "FreeVersions", za0002) + return + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *FileInfoVersions) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // array header, size 5 + o = append(o, 0x95) + o = msgp.AppendString(o, z.Volume) + o = msgp.AppendString(o, z.Name) + o = msgp.AppendTime(o, z.LatestModTime) + o = msgp.AppendArrayHeader(o, uint32(len(z.Versions))) + for za0001 := range z.Versions { + o, err = 
z.Versions[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Versions", za0001) + return + } + } + o = msgp.AppendArrayHeader(o, uint32(len(z.FreeVersions))) + for za0002 := range z.FreeVersions { + o, err = z.FreeVersions[za0002].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "FreeVersions", za0002) + return + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *FileInfoVersions) UnmarshalMsg(bts []byte) (o []byte, err error) { + var zb0001 uint32 + zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + if zb0001 != 5 { + err = msgp.ArrayError{Wanted: 5, Got: zb0001} + return + } + z.Volume, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Volume") + return + } + z.Name, bts, err = msgp.ReadStringBytes(bts) if err != nil { - err = msgp.WrapError(err, "SuccessorModTime") + err = msgp.WrapError(err, "Name") return } - z.Fresh, bts, err = msgp.ReadBoolBytes(bts) + z.LatestModTime, bts, err = msgp.ReadTimeBytes(bts) if err != nil { - err = msgp.WrapError(err, "Fresh") + err = msgp.WrapError(err, "LatestModTime") return } - z.Idx, bts, err = msgp.ReadIntBytes(bts) + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { - err = msgp.WrapError(err, "Idx") + err = msgp.WrapError(err, "Versions") return } - if msgp.IsNil(bts) { - bts = bts[1:] - z.Checksum = nil + if cap(z.Versions) >= int(zb0002) { + z.Versions = (z.Versions)[:zb0002] } else { - z.Checksum, bts, err = msgp.ReadBytesBytes(bts, z.Checksum) + z.Versions = make([]FileInfo, zb0002) + } + for za0001 := range z.Versions { + bts, err = z.Versions[za0001].UnmarshalMsg(bts) if err != nil { - err = msgp.WrapError(err, "Checksum") + err = msgp.WrapError(err, "Versions", za0001) return } } - z.Versioned, bts, err = msgp.ReadBoolBytes(bts) + var zb0003 uint32 + zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { - err = msgp.WrapError(err, "Versioned") + err = msgp.WrapError(err, "FreeVersions") return } + if cap(z.FreeVersions) >= int(zb0003) { + z.FreeVersions = (z.FreeVersions)[:zb0003] + } else { + z.FreeVersions = make([]FileInfo, zb0003) + } + for za0002 := range z.FreeVersions { + bts, err = z.FreeVersions[za0002].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "FreeVersions", za0002) + return + } + } o = bts return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *FileInfo) Msgsize() (s int) { - s = 3 + msgp.StringPrefixSize + len(z.Volume) + msgp.StringPrefixSize + len(z.Name) + msgp.StringPrefixSize + len(z.VersionID) + msgp.BoolSize + msgp.BoolSize + msgp.StringPrefixSize + len(z.TransitionStatus) + msgp.StringPrefixSize + len(z.TransitionedObjName) + msgp.StringPrefixSize + len(z.TransitionTier) + msgp.StringPrefixSize + len(z.TransitionVersionID) + msgp.BoolSize + msgp.StringPrefixSize + len(z.DataDir) + msgp.BoolSize + msgp.TimeSize + msgp.Int64Size + msgp.Uint32Size + msgp.Uint64Size + msgp.MapHeaderSize - if z.Metadata != nil { - for za0001, za0002 := range z.Metadata { - _ = za0002 - s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) - } +func (z *FileInfoVersions) Msgsize() (s int) { + s = 1 + msgp.StringPrefixSize + len(z.Volume) + msgp.StringPrefixSize + len(z.Name) + msgp.TimeSize + msgp.ArrayHeaderSize + for za0001 := range z.Versions { + s += z.Versions[za0001].Msgsize() } s += msgp.ArrayHeaderSize - for za0003 := range z.Parts 
{ - s += z.Parts[za0003].Msgsize() + for za0002 := range z.FreeVersions { + s += z.FreeVersions[za0002].Msgsize() } - s += z.Erasure.Msgsize() + msgp.BoolSize + z.ReplicationState.Msgsize() + msgp.BytesPrefixSize + len(z.Data) + msgp.IntSize + msgp.TimeSize + msgp.BoolSize + msgp.IntSize + msgp.BytesPrefixSize + len(z.Checksum) + msgp.BoolSize return } // DecodeMsg implements msgp.Decodable -func (z *FileInfoVersions) DecodeMsg(dc *msgp.Reader) (err error) { +func (z *FilesInfo) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field var zb0001 uint32 - zb0001, err = dc.ReadArrayHeader() + zb0001, err = dc.ReadMapHeader() if err != nil { err = msgp.WrapError(err) return } - if zb0001 != 5 { - err = msgp.ArrayError{Wanted: 5, Got: zb0001} - return + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Files": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Files") + return + } + if cap(z.Files) >= int(zb0002) { + z.Files = (z.Files)[:zb0002] + } else { + z.Files = make([]FileInfo, zb0002) + } + for za0001 := range z.Files { + err = z.Files[za0001].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Files", za0001) + return + } + } + case "IsTruncated": + z.IsTruncated, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "IsTruncated") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } } - z.Volume, err = dc.ReadString() + return +} + +// EncodeMsg implements msgp.Encodable +func (z *FilesInfo) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 2 + // write "Files" + err = en.Append(0x82, 0xa5, 0x46, 0x69, 0x6c, 0x65, 0x73) if err != nil { - err = msgp.WrapError(err, "Volume") return } - z.Name, err = dc.ReadString() + err = en.WriteArrayHeader(uint32(len(z.Files))) if err != nil { - err = msgp.WrapError(err, "Name") + err = msgp.WrapError(err, "Files") return } - z.LatestModTime, err = dc.ReadTime() + for za0001 := range z.Files { + err = z.Files[za0001].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Files", za0001) + return + } + } + // write "IsTruncated" + err = en.Append(0xab, 0x49, 0x73, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64) if err != nil { - err = msgp.WrapError(err, "LatestModTime") return } - var zb0002 uint32 - zb0002, err = dc.ReadArrayHeader() + err = en.WriteBool(z.IsTruncated) if err != nil { - err = msgp.WrapError(err, "Versions") + err = msgp.WrapError(err, "IsTruncated") return } - if cap(z.Versions) >= int(zb0002) { - z.Versions = (z.Versions)[:zb0002] - } else { - z.Versions = make([]FileInfo, zb0002) - } - for za0001 := range z.Versions { - err = z.Versions[za0001].DecodeMsg(dc) + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *FilesInfo) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 2 + // string "Files" + o = append(o, 0x82, 0xa5, 0x46, 0x69, 0x6c, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Files))) + for za0001 := range z.Files { + o, err = z.Files[za0001].MarshalMsg(o) if err != nil { - err = msgp.WrapError(err, "Versions", za0001) + err = msgp.WrapError(err, "Files", za0001) return } } - var zb0003 uint32 - zb0003, err = dc.ReadArrayHeader() + // string "IsTruncated" + o = append(o, 0xab, 0x49, 0x73, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64) + o = msgp.AppendBool(o, 
z.IsTruncated) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *FilesInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) if err != nil { - err = msgp.WrapError(err, "FreeVersions") + err = msgp.WrapError(err) return } - if cap(z.FreeVersions) >= int(zb0003) { - z.FreeVersions = (z.FreeVersions)[:zb0003] - } else { - z.FreeVersions = make([]FileInfo, zb0003) + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Files": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Files") + return + } + if cap(z.Files) >= int(zb0002) { + z.Files = (z.Files)[:zb0002] + } else { + z.Files = make([]FileInfo, zb0002) + } + for za0001 := range z.Files { + bts, err = z.Files[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Files", za0001) + return + } + } + case "IsTruncated": + z.IsTruncated, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "IsTruncated") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } } - for za0002 := range z.FreeVersions { - err = z.FreeVersions[za0002].DecodeMsg(dc) + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *FilesInfo) Msgsize() (s int) { + s = 1 + 6 + msgp.ArrayHeaderSize + for za0001 := range z.Files { + s += z.Files[za0001].Msgsize() + } + s += 12 + msgp.BoolSize + return +} + +// DecodeMsg implements msgp.Decodable +func (z *ListDirResult) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() if err != nil { - err = msgp.WrapError(err, "FreeVersions", za0002) + err = msgp.WrapError(err) return } + switch msgp.UnsafeString(field) { + case "e": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Entries") + return + } + if cap(z.Entries) >= int(zb0002) { + z.Entries = (z.Entries)[:zb0002] + } else { + z.Entries = make([]string, zb0002) + } + for za0001 := range z.Entries { + z.Entries[za0001], err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Entries", za0001) + return + } + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } } return } // EncodeMsg implements msgp.Encodable -func (z *FileInfoVersions) EncodeMsg(en *msgp.Writer) (err error) { - // array header, size 5 - err = en.Append(0x95) - if err != nil { - return - } - err = en.WriteString(z.Volume) - if err != nil { - err = msgp.WrapError(err, "Volume") - return - } - err = en.WriteString(z.Name) - if err != nil { - err = msgp.WrapError(err, "Name") - return - } - err = en.WriteTime(z.LatestModTime) - if err != nil { - err = msgp.WrapError(err, "LatestModTime") - return - } - err = en.WriteArrayHeader(uint32(len(z.Versions))) +func (z *ListDirResult) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 1 + // write "e" + err = en.Append(0x81, 0xa1, 0x65) if err != nil { - err = msgp.WrapError(err, "Versions") return } - for za0001 := range z.Versions { - err = z.Versions[za0001].EncodeMsg(en) - 
if err != nil { - err = msgp.WrapError(err, "Versions", za0001) - return - } - } - err = en.WriteArrayHeader(uint32(len(z.FreeVersions))) + err = en.WriteArrayHeader(uint32(len(z.Entries))) if err != nil { - err = msgp.WrapError(err, "FreeVersions") + err = msgp.WrapError(err, "Entries") return } - for za0002 := range z.FreeVersions { - err = z.FreeVersions[za0002].EncodeMsg(en) + for za0001 := range z.Entries { + err = en.WriteString(z.Entries[za0001]) if err != nil { - err = msgp.WrapError(err, "FreeVersions", za0002) + err = msgp.WrapError(err, "Entries", za0001) return } } @@ -2593,93 +3448,61 @@ func (z *FileInfoVersions) EncodeMsg(en *msgp.Writer) (err error) { } // MarshalMsg implements msgp.Marshaler -func (z *FileInfoVersions) MarshalMsg(b []byte) (o []byte, err error) { +func (z *ListDirResult) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // array header, size 5 - o = append(o, 0x95) - o = msgp.AppendString(o, z.Volume) - o = msgp.AppendString(o, z.Name) - o = msgp.AppendTime(o, z.LatestModTime) - o = msgp.AppendArrayHeader(o, uint32(len(z.Versions))) - for za0001 := range z.Versions { - o, err = z.Versions[za0001].MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Versions", za0001) - return - } - } - o = msgp.AppendArrayHeader(o, uint32(len(z.FreeVersions))) - for za0002 := range z.FreeVersions { - o, err = z.FreeVersions[za0002].MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "FreeVersions", za0002) - return - } + // map header, size 1 + // string "e" + o = append(o, 0x81, 0xa1, 0x65) + o = msgp.AppendArrayHeader(o, uint32(len(z.Entries))) + for za0001 := range z.Entries { + o = msgp.AppendString(o, z.Entries[za0001]) } return } // UnmarshalMsg implements msgp.Unmarshaler -func (z *FileInfoVersions) UnmarshalMsg(bts []byte) (o []byte, err error) { +func (z *ListDirResult) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field var zb0001 uint32 - zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts) + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) if err != nil { err = msgp.WrapError(err) return } - if zb0001 != 5 { - err = msgp.ArrayError{Wanted: 5, Got: zb0001} - return - } - z.Volume, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Volume") - return - } - z.Name, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Name") - return - } - z.LatestModTime, bts, err = msgp.ReadTimeBytes(bts) - if err != nil { - err = msgp.WrapError(err, "LatestModTime") - return - } - var zb0002 uint32 - zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Versions") - return - } - if cap(z.Versions) >= int(zb0002) { - z.Versions = (z.Versions)[:zb0002] - } else { - z.Versions = make([]FileInfo, zb0002) - } - for za0001 := range z.Versions { - bts, err = z.Versions[za0001].UnmarshalMsg(bts) + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) if err != nil { - err = msgp.WrapError(err, "Versions", za0001) + err = msgp.WrapError(err) return } - } - var zb0003 uint32 - zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "FreeVersions") - return - } - if cap(z.FreeVersions) >= int(zb0003) { - z.FreeVersions = (z.FreeVersions)[:zb0003] - } else { - z.FreeVersions = make([]FileInfo, zb0003) - } - for za0002 := range z.FreeVersions { - bts, err = z.FreeVersions[za0002].UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "FreeVersions", 
za0002) - return + switch msgp.UnsafeString(field) { + case "e": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Entries") + return + } + if cap(z.Entries) >= int(zb0002) { + z.Entries = (z.Entries)[:zb0002] + } else { + z.Entries = make([]string, zb0002) + } + for za0001 := range z.Entries { + z.Entries[za0001], bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Entries", za0001) + return + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } } } o = bts @@ -2687,20 +3510,16 @@ func (z *FileInfoVersions) UnmarshalMsg(bts []byte) (o []byte, err error) { } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *FileInfoVersions) Msgsize() (s int) { - s = 1 + msgp.StringPrefixSize + len(z.Volume) + msgp.StringPrefixSize + len(z.Name) + msgp.TimeSize + msgp.ArrayHeaderSize - for za0001 := range z.Versions { - s += z.Versions[za0001].Msgsize() - } - s += msgp.ArrayHeaderSize - for za0002 := range z.FreeVersions { - s += z.FreeVersions[za0002].Msgsize() +func (z *ListDirResult) Msgsize() (s int) { + s = 1 + 2 + msgp.ArrayHeaderSize + for za0001 := range z.Entries { + s += msgp.StringPrefixSize + len(z.Entries[za0001]) } return } // DecodeMsg implements msgp.Decodable -func (z *FilesInfo) DecodeMsg(dc *msgp.Reader) (err error) { +func (z *LocalDiskIDs) DecodeMsg(dc *msgp.Reader) (err error) { var field []byte _ = field var zb0001 uint32 @@ -2717,31 +3536,25 @@ func (z *FilesInfo) DecodeMsg(dc *msgp.Reader) (err error) { return } switch msgp.UnsafeString(field) { - case "Files": + case "i": var zb0002 uint32 zb0002, err = dc.ReadArrayHeader() if err != nil { - err = msgp.WrapError(err, "Files") + err = msgp.WrapError(err, "IDs") return } - if cap(z.Files) >= int(zb0002) { - z.Files = (z.Files)[:zb0002] + if cap(z.IDs) >= int(zb0002) { + z.IDs = (z.IDs)[:zb0002] } else { - z.Files = make([]FileInfo, zb0002) + z.IDs = make([]string, zb0002) } - for za0001 := range z.Files { - err = z.Files[za0001].DecodeMsg(dc) + for za0001 := range z.IDs { + z.IDs[za0001], err = dc.ReadString() if err != nil { - err = msgp.WrapError(err, "Files", za0001) + err = msgp.WrapError(err, "IDs", za0001) return } } - case "IsTruncated": - z.IsTruncated, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "IsTruncated") - return - } default: err = dc.Skip() if err != nil { @@ -2754,60 +3567,43 @@ func (z *FilesInfo) DecodeMsg(dc *msgp.Reader) (err error) { } // EncodeMsg implements msgp.Encodable -func (z *FilesInfo) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 - // write "Files" - err = en.Append(0x82, 0xa5, 0x46, 0x69, 0x6c, 0x65, 0x73) +func (z *LocalDiskIDs) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 1 + // write "i" + err = en.Append(0x81, 0xa1, 0x69) if err != nil { return } - err = en.WriteArrayHeader(uint32(len(z.Files))) + err = en.WriteArrayHeader(uint32(len(z.IDs))) if err != nil { - err = msgp.WrapError(err, "Files") + err = msgp.WrapError(err, "IDs") return } - for za0001 := range z.Files { - err = z.Files[za0001].EncodeMsg(en) + for za0001 := range z.IDs { + err = en.WriteString(z.IDs[za0001]) if err != nil { - err = msgp.WrapError(err, "Files", za0001) + err = msgp.WrapError(err, "IDs", za0001) return } } - // write "IsTruncated" - err = en.Append(0xab, 0x49, 0x73, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64) - if err != nil { - return - } 
- err = en.WriteBool(z.IsTruncated) - if err != nil { - err = msgp.WrapError(err, "IsTruncated") - return - } return } // MarshalMsg implements msgp.Marshaler -func (z *FilesInfo) MarshalMsg(b []byte) (o []byte, err error) { +func (z *LocalDiskIDs) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "Files" - o = append(o, 0x82, 0xa5, 0x46, 0x69, 0x6c, 0x65, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.Files))) - for za0001 := range z.Files { - o, err = z.Files[za0001].MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "Files", za0001) - return - } + // map header, size 1 + // string "i" + o = append(o, 0x81, 0xa1, 0x69) + o = msgp.AppendArrayHeader(o, uint32(len(z.IDs))) + for za0001 := range z.IDs { + o = msgp.AppendString(o, z.IDs[za0001]) } - // string "IsTruncated" - o = append(o, 0xab, 0x49, 0x73, 0x54, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x65, 0x64) - o = msgp.AppendBool(o, z.IsTruncated) return } // UnmarshalMsg implements msgp.Unmarshaler -func (z *FilesInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { +func (z *LocalDiskIDs) UnmarshalMsg(bts []byte) (o []byte, err error) { var field []byte _ = field var zb0001 uint32 @@ -2824,31 +3620,25 @@ func (z *FilesInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { return } switch msgp.UnsafeString(field) { - case "Files": + case "i": var zb0002 uint32 zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { - err = msgp.WrapError(err, "Files") + err = msgp.WrapError(err, "IDs") return } - if cap(z.Files) >= int(zb0002) { - z.Files = (z.Files)[:zb0002] + if cap(z.IDs) >= int(zb0002) { + z.IDs = (z.IDs)[:zb0002] } else { - z.Files = make([]FileInfo, zb0002) + z.IDs = make([]string, zb0002) } - for za0001 := range z.Files { - bts, err = z.Files[za0001].UnmarshalMsg(bts) + for za0001 := range z.IDs { + z.IDs[za0001], bts, err = msgp.ReadStringBytes(bts) if err != nil { - err = msgp.WrapError(err, "Files", za0001) + err = msgp.WrapError(err, "IDs", za0001) return } } - case "IsTruncated": - z.IsTruncated, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - err = msgp.WrapError(err, "IsTruncated") - return - } default: bts, err = msgp.Skip(bts) if err != nil { @@ -2862,17 +3652,16 @@ func (z *FilesInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *FilesInfo) Msgsize() (s int) { - s = 1 + 6 + msgp.ArrayHeaderSize - for za0001 := range z.Files { - s += z.Files[za0001].Msgsize() +func (z *LocalDiskIDs) Msgsize() (s int) { + s = 1 + 2 + msgp.ArrayHeaderSize + for za0001 := range z.IDs { + s += msgp.StringPrefixSize + len(z.IDs[za0001]) } - s += 12 + msgp.BoolSize return } // DecodeMsg implements msgp.Decodable -func (z *ListDirResult) DecodeMsg(dc *msgp.Reader) (err error) { +func (z *MetadataHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) { var field []byte _ = field var zb0001 uint32 @@ -2889,24 +3678,64 @@ func (z *ListDirResult) DecodeMsg(dc *msgp.Reader) (err error) { return } switch msgp.UnsafeString(field) { - case "e": - var zb0002 uint32 - zb0002, err = dc.ReadArrayHeader() + case "id": + z.DiskID, err = dc.ReadString() if err != nil { - err = msgp.WrapError(err, "Entries") + err = msgp.WrapError(err, "DiskID") return } - if cap(z.Entries) >= int(zb0002) { - z.Entries = (z.Entries)[:zb0002] - } else { - z.Entries = make([]string, zb0002) + case "v": + z.Volume, err = dc.ReadString() + if err != nil { + err = 
msgp.WrapError(err, "Volume") + return } - for za0001 := range z.Entries { - z.Entries[za0001], err = dc.ReadString() + case "ov": + z.OrigVolume, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "OrigVolume") + return + } + case "fp": + z.FilePath, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "FilePath") + return + } + case "uo": + var zb0002 uint32 + zb0002, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err, "UpdateOpts") + return + } + for zb0002 > 0 { + zb0002-- + field, err = dc.ReadMapKeyPtr() if err != nil { - err = msgp.WrapError(err, "Entries", za0001) + err = msgp.WrapError(err, "UpdateOpts") return } + switch msgp.UnsafeString(field) { + case "np": + z.UpdateOpts.NoPersistence, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "UpdateOpts", "NoPersistence") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err, "UpdateOpts") + return + } + } + } + case "fi": + err = z.FI.DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "FI") + return } default: err = dc.Skip() @@ -2920,43 +3749,111 @@ func (z *ListDirResult) DecodeMsg(dc *msgp.Reader) (err error) { } // EncodeMsg implements msgp.Encodable -func (z *ListDirResult) EncodeMsg(en *msgp.Writer) (err error) { +func (z *MetadataHandlerParams) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 6 + // write "id" + err = en.Append(0x86, 0xa2, 0x69, 0x64) + if err != nil { + return + } + err = en.WriteString(z.DiskID) + if err != nil { + err = msgp.WrapError(err, "DiskID") + return + } + // write "v" + err = en.Append(0xa1, 0x76) + if err != nil { + return + } + err = en.WriteString(z.Volume) + if err != nil { + err = msgp.WrapError(err, "Volume") + return + } + // write "ov" + err = en.Append(0xa2, 0x6f, 0x76) + if err != nil { + return + } + err = en.WriteString(z.OrigVolume) + if err != nil { + err = msgp.WrapError(err, "OrigVolume") + return + } + // write "fp" + err = en.Append(0xa2, 0x66, 0x70) + if err != nil { + return + } + err = en.WriteString(z.FilePath) + if err != nil { + err = msgp.WrapError(err, "FilePath") + return + } + // write "uo" + err = en.Append(0xa2, 0x75, 0x6f) + if err != nil { + return + } // map header, size 1 - // write "e" - err = en.Append(0x81, 0xa1, 0x65) + // write "np" + err = en.Append(0x81, 0xa2, 0x6e, 0x70) if err != nil { return } - err = en.WriteArrayHeader(uint32(len(z.Entries))) + err = en.WriteBool(z.UpdateOpts.NoPersistence) if err != nil { - err = msgp.WrapError(err, "Entries") + err = msgp.WrapError(err, "UpdateOpts", "NoPersistence") return } - for za0001 := range z.Entries { - err = en.WriteString(z.Entries[za0001]) - if err != nil { - err = msgp.WrapError(err, "Entries", za0001) - return - } + // write "fi" + err = en.Append(0xa2, 0x66, 0x69) + if err != nil { + return + } + err = z.FI.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "FI") + return } return } // MarshalMsg implements msgp.Marshaler -func (z *ListDirResult) MarshalMsg(b []byte) (o []byte, err error) { +func (z *MetadataHandlerParams) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) + // map header, size 6 + // string "id" + o = append(o, 0x86, 0xa2, 0x69, 0x64) + o = msgp.AppendString(o, z.DiskID) + // string "v" + o = append(o, 0xa1, 0x76) + o = msgp.AppendString(o, z.Volume) + // string "ov" + o = append(o, 0xa2, 0x6f, 0x76) + o = msgp.AppendString(o, z.OrigVolume) + // string "fp" + o = append(o, 0xa2, 0x66, 0x70) + o = msgp.AppendString(o, z.FilePath) 
+ // string "uo" + o = append(o, 0xa2, 0x75, 0x6f) // map header, size 1 - // string "e" - o = append(o, 0x81, 0xa1, 0x65) - o = msgp.AppendArrayHeader(o, uint32(len(z.Entries))) - for za0001 := range z.Entries { - o = msgp.AppendString(o, z.Entries[za0001]) + // string "np" + o = append(o, 0x81, 0xa2, 0x6e, 0x70) + o = msgp.AppendBool(o, z.UpdateOpts.NoPersistence) + // string "fi" + o = append(o, 0xa2, 0x66, 0x69) + o, err = z.FI.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "FI") + return } return } // UnmarshalMsg implements msgp.Unmarshaler -func (z *ListDirResult) UnmarshalMsg(bts []byte) (o []byte, err error) { +func (z *MetadataHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) { var field []byte _ = field var zb0001 uint32 @@ -2973,24 +3870,64 @@ func (z *ListDirResult) UnmarshalMsg(bts []byte) (o []byte, err error) { return } switch msgp.UnsafeString(field) { - case "e": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + case "id": + z.DiskID, bts, err = msgp.ReadStringBytes(bts) if err != nil { - err = msgp.WrapError(err, "Entries") + err = msgp.WrapError(err, "DiskID") return } - if cap(z.Entries) >= int(zb0002) { - z.Entries = (z.Entries)[:zb0002] - } else { - z.Entries = make([]string, zb0002) + case "v": + z.Volume, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Volume") + return } - for za0001 := range z.Entries { - z.Entries[za0001], bts, err = msgp.ReadStringBytes(bts) + case "ov": + z.OrigVolume, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "OrigVolume") + return + } + case "fp": + z.FilePath, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "FilePath") + return + } + case "uo": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "UpdateOpts") + return + } + for zb0002 > 0 { + zb0002-- + field, bts, err = msgp.ReadMapKeyZC(bts) if err != nil { - err = msgp.WrapError(err, "Entries", za0001) + err = msgp.WrapError(err, "UpdateOpts") return } + switch msgp.UnsafeString(field) { + case "np": + z.UpdateOpts.NoPersistence, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "UpdateOpts", "NoPersistence") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "UpdateOpts") + return + } + } + } + case "fi": + bts, err = z.FI.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "FI") + return } default: bts, err = msgp.Skip(bts) @@ -3005,16 +3942,13 @@ func (z *ListDirResult) UnmarshalMsg(bts []byte) (o []byte, err error) { } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *ListDirResult) Msgsize() (s int) { - s = 1 + 2 + msgp.ArrayHeaderSize - for za0001 := range z.Entries { - s += msgp.StringPrefixSize + len(z.Entries[za0001]) - } +func (z *MetadataHandlerParams) Msgsize() (s int) { + s = 1 + 3 + msgp.StringPrefixSize + len(z.DiskID) + 2 + msgp.StringPrefixSize + len(z.Volume) + 3 + msgp.StringPrefixSize + len(z.OrigVolume) + 3 + msgp.StringPrefixSize + len(z.FilePath) + 3 + 1 + 3 + msgp.BoolSize + 3 + z.FI.Msgsize() return } // DecodeMsg implements msgp.Decodable -func (z *LocalDiskIDs) DecodeMsg(dc *msgp.Reader) (err error) { +func (z *RawFileInfo) DecodeMsg(dc *msgp.Reader) (err error) { var field []byte _ = field var zb0001 uint32 @@ -3031,24 +3965,23 @@ func (z *LocalDiskIDs) DecodeMsg(dc *msgp.Reader) (err error) 
{ return } switch msgp.UnsafeString(field) { - case "IDs": - var zb0002 uint32 - zb0002, err = dc.ReadArrayHeader() - if err != nil { - err = msgp.WrapError(err, "IDs") - return - } - if cap(z.IDs) >= int(zb0002) { - z.IDs = (z.IDs)[:zb0002] + case "b": + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "Buf") + return + } + z.Buf = nil } else { - z.IDs = make([]string, zb0002) - } - for za0001 := range z.IDs { - z.IDs[za0001], err = dc.ReadString() + z.Buf, err = dc.ReadBytes(z.Buf) if err != nil { - err = msgp.WrapError(err, "IDs", za0001) + err = msgp.WrapError(err, "Buf") return } + if z.Buf == nil { + z.Buf = make([]byte, 0) + } } default: err = dc.Skip() @@ -3062,22 +3995,22 @@ func (z *LocalDiskIDs) DecodeMsg(dc *msgp.Reader) (err error) { } // EncodeMsg implements msgp.Encodable -func (z *LocalDiskIDs) EncodeMsg(en *msgp.Writer) (err error) { +func (z *RawFileInfo) EncodeMsg(en *msgp.Writer) (err error) { // map header, size 1 - // write "IDs" - err = en.Append(0x81, 0xa3, 0x49, 0x44, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.IDs))) + // write "b" + err = en.Append(0x81, 0xa1, 0x62) if err != nil { - err = msgp.WrapError(err, "IDs") return } - for za0001 := range z.IDs { - err = en.WriteString(z.IDs[za0001]) + if z.Buf == nil { // allownil: if nil + err = en.WriteNil() if err != nil { - err = msgp.WrapError(err, "IDs", za0001) + return + } + } else { + err = en.WriteBytes(z.Buf) + if err != nil { + err = msgp.WrapError(err, "Buf") return } } @@ -3085,20 +4018,21 @@ func (z *LocalDiskIDs) EncodeMsg(en *msgp.Writer) (err error) { } // MarshalMsg implements msgp.Marshaler -func (z *LocalDiskIDs) MarshalMsg(b []byte) (o []byte, err error) { +func (z *RawFileInfo) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) // map header, size 1 - // string "IDs" - o = append(o, 0x81, 0xa3, 0x49, 0x44, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.IDs))) - for za0001 := range z.IDs { - o = msgp.AppendString(o, z.IDs[za0001]) + // string "b" + o = append(o, 0x81, 0xa1, 0x62) + if z.Buf == nil { // allownil: if nil + o = msgp.AppendNil(o) + } else { + o = msgp.AppendBytes(o, z.Buf) } return } // UnmarshalMsg implements msgp.Unmarshaler -func (z *LocalDiskIDs) UnmarshalMsg(bts []byte) (o []byte, err error) { +func (z *RawFileInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { var field []byte _ = field var zb0001 uint32 @@ -3115,24 +4049,19 @@ func (z *LocalDiskIDs) UnmarshalMsg(bts []byte) (o []byte, err error) { return } switch msgp.UnsafeString(field) { - case "IDs": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "IDs") - return - } - if cap(z.IDs) >= int(zb0002) { - z.IDs = (z.IDs)[:zb0002] + case "b": + if msgp.IsNil(bts) { + bts = bts[1:] + z.Buf = nil } else { - z.IDs = make([]string, zb0002) - } - for za0001 := range z.IDs { - z.IDs[za0001], bts, err = msgp.ReadStringBytes(bts) + z.Buf, bts, err = msgp.ReadBytesBytes(bts, z.Buf) if err != nil { - err = msgp.WrapError(err, "IDs", za0001) + err = msgp.WrapError(err, "Buf") return } + if z.Buf == nil { + z.Buf = make([]byte, 0) + } } default: bts, err = msgp.Skip(bts) @@ -3147,16 +4076,13 @@ func (z *LocalDiskIDs) UnmarshalMsg(bts []byte) (o []byte, err error) { } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *LocalDiskIDs) Msgsize() (s int) { - s = 1 + 4 + msgp.ArrayHeaderSize - for za0001 := range z.IDs { - 
s += msgp.StringPrefixSize + len(z.IDs[za0001]) - } +func (z *RawFileInfo) Msgsize() (s int) { + s = 1 + 2 + msgp.BytesPrefixSize + len(z.Buf) return } // DecodeMsg implements msgp.Decodable -func (z *MetadataHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) { +func (z *ReadAllHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) { var field []byte _ = field var zb0001 uint32 @@ -3185,53 +4111,12 @@ func (z *MetadataHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "Volume") return } - case "ov": - z.OrigVolume, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "OrigVolume") - return - } case "fp": z.FilePath, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "FilePath") return } - case "uo": - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "UpdateOpts") - return - } - for zb0002 > 0 { - zb0002-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err, "UpdateOpts") - return - } - switch msgp.UnsafeString(field) { - case "np": - z.UpdateOpts.NoPersistence, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "UpdateOpts", "NoPersistence") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err, "UpdateOpts") - return - } - } - } - case "fi": - err = z.FI.DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "FI") - return - } default: err = dc.Skip() if err != nil { @@ -3244,10 +4129,10 @@ func (z *MetadataHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) { } // EncodeMsg implements msgp.Encodable -func (z *MetadataHandlerParams) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 6 +func (z ReadAllHandlerParams) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 3 // write "id" - err = en.Append(0x86, 0xa2, 0x69, 0x64) + err = en.Append(0x83, 0xa2, 0x69, 0x64) if err != nil { return } @@ -3266,16 +4151,6 @@ func (z *MetadataHandlerParams) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "Volume") return } - // write "ov" - err = en.Append(0xa2, 0x6f, 0x76) - if err != nil { - return - } - err = en.WriteString(z.OrigVolume) - if err != nil { - err = msgp.WrapError(err, "OrigVolume") - return - } // write "fp" err = en.Append(0xa2, 0x66, 0x70) if err != nil { @@ -3286,69 +4161,27 @@ func (z *MetadataHandlerParams) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "FilePath") return } - // write "uo" - err = en.Append(0xa2, 0x75, 0x6f) - if err != nil { - return - } - // map header, size 1 - // write "np" - err = en.Append(0x81, 0xa2, 0x6e, 0x70) - if err != nil { - return - } - err = en.WriteBool(z.UpdateOpts.NoPersistence) - if err != nil { - err = msgp.WrapError(err, "UpdateOpts", "NoPersistence") - return - } - // write "fi" - err = en.Append(0xa2, 0x66, 0x69) - if err != nil { - return - } - err = z.FI.EncodeMsg(en) - if err != nil { - err = msgp.WrapError(err, "FI") - return - } return } // MarshalMsg implements msgp.Marshaler -func (z *MetadataHandlerParams) MarshalMsg(b []byte) (o []byte, err error) { +func (z ReadAllHandlerParams) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 6 + // map header, size 3 // string "id" - o = append(o, 0x86, 0xa2, 0x69, 0x64) + o = append(o, 0x83, 0xa2, 0x69, 0x64) o = msgp.AppendString(o, z.DiskID) // string "v" o = append(o, 0xa1, 0x76) o = msgp.AppendString(o, z.Volume) - // string "ov" - o = append(o, 0xa2, 0x6f, 0x76) - o = msgp.AppendString(o, 
z.OrigVolume) // string "fp" o = append(o, 0xa2, 0x66, 0x70) o = msgp.AppendString(o, z.FilePath) - // string "uo" - o = append(o, 0xa2, 0x75, 0x6f) - // map header, size 1 - // string "np" - o = append(o, 0x81, 0xa2, 0x6e, 0x70) - o = msgp.AppendBool(o, z.UpdateOpts.NoPersistence) - // string "fi" - o = append(o, 0xa2, 0x66, 0x69) - o, err = z.FI.MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "FI") - return - } return } // UnmarshalMsg implements msgp.Unmarshaler -func (z *MetadataHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) { +func (z *ReadAllHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) { var field []byte _ = field var zb0001 uint32 @@ -3377,51 +4210,10 @@ func (z *MetadataHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "Volume") return } - case "ov": - z.OrigVolume, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "OrigVolume") - return - } - case "fp": - z.FilePath, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "FilePath") - return - } - case "uo": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "UpdateOpts") - return - } - for zb0002 > 0 { - zb0002-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "UpdateOpts") - return - } - switch msgp.UnsafeString(field) { - case "np": - z.UpdateOpts.NoPersistence, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - err = msgp.WrapError(err, "UpdateOpts", "NoPersistence") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "UpdateOpts") - return - } - } - } - case "fi": - bts, err = z.FI.UnmarshalMsg(bts) + case "fp": + z.FilePath, bts, err = msgp.ReadStringBytes(bts) if err != nil { - err = msgp.WrapError(err, "FI") + err = msgp.WrapError(err, "FilePath") return } default: @@ -3437,13 +4229,13 @@ func (z *MetadataHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) { } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *MetadataHandlerParams) Msgsize() (s int) { - s = 1 + 3 + msgp.StringPrefixSize + len(z.DiskID) + 2 + msgp.StringPrefixSize + len(z.Volume) + 3 + msgp.StringPrefixSize + len(z.OrigVolume) + 3 + msgp.StringPrefixSize + len(z.FilePath) + 3 + 1 + 3 + msgp.BoolSize + 3 + z.FI.Msgsize() +func (z ReadAllHandlerParams) Msgsize() (s int) { + s = 1 + 3 + msgp.StringPrefixSize + len(z.DiskID) + 2 + msgp.StringPrefixSize + len(z.Volume) + 3 + msgp.StringPrefixSize + len(z.FilePath) return } // DecodeMsg implements msgp.Decodable -func (z *RawFileInfo) DecodeMsg(dc *msgp.Reader) (err error) { +func (z *ReadMultipleReq) DecodeMsg(dc *msgp.Reader) (err error) { var field []byte _ = field var zb0001 uint32 @@ -3452,6 +4244,8 @@ func (z *RawFileInfo) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err) return } + var zb0001Mask uint8 /* 1 bits */ + _ = zb0001Mask for zb0001 > 0 { zb0001-- field, err = dc.ReadMapKeyPtr() @@ -3460,21 +4254,62 @@ func (z *RawFileInfo) DecodeMsg(dc *msgp.Reader) (err error) { return } switch msgp.UnsafeString(field) { - case "b": - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - err = msgp.WrapError(err, "Buf") - return - } - z.Buf = nil + case "bk": + z.Bucket, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Bucket") + return + } + case "pr": + z.Prefix, err = dc.ReadString() + if err != nil { + 
err = msgp.WrapError(err, "Prefix") + return + } + zb0001Mask |= 0x1 + case "fl": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err, "Files") + return + } + if cap(z.Files) >= int(zb0002) { + z.Files = (z.Files)[:zb0002] } else { - z.Buf, err = dc.ReadBytes(z.Buf) + z.Files = make([]string, zb0002) + } + for za0001 := range z.Files { + z.Files[za0001], err = dc.ReadString() if err != nil { - err = msgp.WrapError(err, "Buf") + err = msgp.WrapError(err, "Files", za0001) return } } + case "ms": + z.MaxSize, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "MaxSize") + return + } + case "mo": + z.MetadataOnly, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "MetadataOnly") + return + } + case "ab": + z.AbortOn404, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "AbortOn404") + return + } + case "mr": + z.MaxResults, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "MaxResults") + return + } default: err = dc.Skip() if err != nil { @@ -3483,26 +4318,109 @@ func (z *RawFileInfo) DecodeMsg(dc *msgp.Reader) (err error) { } } } + // Clear omitted fields. + if (zb0001Mask & 0x1) == 0 { + z.Prefix = "" + } + return } // EncodeMsg implements msgp.Encodable -func (z *RawFileInfo) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 1 - // write "b" - err = en.Append(0x81, 0xa1, 0x62) +func (z *ReadMultipleReq) EncodeMsg(en *msgp.Writer) (err error) { + // check for omitted fields + zb0001Len := uint32(7) + var zb0001Mask uint8 /* 7 bits */ + _ = zb0001Mask + if z.Prefix == "" { + zb0001Len-- + zb0001Mask |= 0x2 + } + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) if err != nil { return } - if z.Buf == nil { // allownil: if nil - err = en.WriteNil() + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // write "bk" + err = en.Append(0xa2, 0x62, 0x6b) if err != nil { return } - } else { - err = en.WriteBytes(z.Buf) + err = en.WriteString(z.Bucket) + if err != nil { + err = msgp.WrapError(err, "Bucket") + return + } + if (zb0001Mask & 0x2) == 0 { // if not omitted + // write "pr" + err = en.Append(0xa2, 0x70, 0x72) + if err != nil { + return + } + err = en.WriteString(z.Prefix) + if err != nil { + err = msgp.WrapError(err, "Prefix") + return + } + } + // write "fl" + err = en.Append(0xa2, 0x66, 0x6c) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Files))) + if err != nil { + err = msgp.WrapError(err, "Files") + return + } + for za0001 := range z.Files { + err = en.WriteString(z.Files[za0001]) + if err != nil { + err = msgp.WrapError(err, "Files", za0001) + return + } + } + // write "ms" + err = en.Append(0xa2, 0x6d, 0x73) + if err != nil { + return + } + err = en.WriteInt64(z.MaxSize) + if err != nil { + err = msgp.WrapError(err, "MaxSize") + return + } + // write "mo" + err = en.Append(0xa2, 0x6d, 0x6f) + if err != nil { + return + } + err = en.WriteBool(z.MetadataOnly) + if err != nil { + err = msgp.WrapError(err, "MetadataOnly") + return + } + // write "ab" + err = en.Append(0xa2, 0x61, 0x62) + if err != nil { + return + } + err = en.WriteBool(z.AbortOn404) + if err != nil { + err = msgp.WrapError(err, "AbortOn404") + return + } + // write "mr" + err = en.Append(0xa2, 0x6d, 0x72) if err != nil { - err = msgp.WrapError(err, "Buf") + return + } + err = en.WriteInt(z.MaxResults) + if err != nil { + err = msgp.WrapError(err, "MaxResults") return } } @@ -3510,21 +4428,53 @@ func (z *RawFileInfo) 
EncodeMsg(en *msgp.Writer) (err error) { } // MarshalMsg implements msgp.Marshaler -func (z *RawFileInfo) MarshalMsg(b []byte) (o []byte, err error) { +func (z *ReadMultipleReq) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "b" - o = append(o, 0x81, 0xa1, 0x62) - if z.Buf == nil { // allownil: if nil - o = msgp.AppendNil(o) - } else { - o = msgp.AppendBytes(o, z.Buf) + // check for omitted fields + zb0001Len := uint32(7) + var zb0001Mask uint8 /* 7 bits */ + _ = zb0001Mask + if z.Prefix == "" { + zb0001Len-- + zb0001Mask |= 0x2 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // string "bk" + o = append(o, 0xa2, 0x62, 0x6b) + o = msgp.AppendString(o, z.Bucket) + if (zb0001Mask & 0x2) == 0 { // if not omitted + // string "pr" + o = append(o, 0xa2, 0x70, 0x72) + o = msgp.AppendString(o, z.Prefix) + } + // string "fl" + o = append(o, 0xa2, 0x66, 0x6c) + o = msgp.AppendArrayHeader(o, uint32(len(z.Files))) + for za0001 := range z.Files { + o = msgp.AppendString(o, z.Files[za0001]) + } + // string "ms" + o = append(o, 0xa2, 0x6d, 0x73) + o = msgp.AppendInt64(o, z.MaxSize) + // string "mo" + o = append(o, 0xa2, 0x6d, 0x6f) + o = msgp.AppendBool(o, z.MetadataOnly) + // string "ab" + o = append(o, 0xa2, 0x61, 0x62) + o = msgp.AppendBool(o, z.AbortOn404) + // string "mr" + o = append(o, 0xa2, 0x6d, 0x72) + o = msgp.AppendInt(o, z.MaxResults) } return } // UnmarshalMsg implements msgp.Unmarshaler -func (z *RawFileInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { +func (z *ReadMultipleReq) UnmarshalMsg(bts []byte) (o []byte, err error) { var field []byte _ = field var zb0001 uint32 @@ -3533,6 +4483,8 @@ func (z *RawFileInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err) return } + var zb0001Mask uint8 /* 1 bits */ + _ = zb0001Mask for zb0001 > 0 { zb0001-- field, bts, err = msgp.ReadMapKeyZC(bts) @@ -3541,17 +4493,62 @@ func (z *RawFileInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { return } switch msgp.UnsafeString(field) { - case "b": - if msgp.IsNil(bts) { - bts = bts[1:] - z.Buf = nil + case "bk": + z.Bucket, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Bucket") + return + } + case "pr": + z.Prefix, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Prefix") + return + } + zb0001Mask |= 0x1 + case "fl": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Files") + return + } + if cap(z.Files) >= int(zb0002) { + z.Files = (z.Files)[:zb0002] } else { - z.Buf, bts, err = msgp.ReadBytesBytes(bts, z.Buf) + z.Files = make([]string, zb0002) + } + for za0001 := range z.Files { + z.Files[za0001], bts, err = msgp.ReadStringBytes(bts) if err != nil { - err = msgp.WrapError(err, "Buf") + err = msgp.WrapError(err, "Files", za0001) return } } + case "ms": + z.MaxSize, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "MaxSize") + return + } + case "mo": + z.MetadataOnly, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MetadataOnly") + return + } + case "ab": + z.AbortOn404, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "AbortOn404") + return + } + case "mr": + z.MaxResults, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "MaxResults") + 
return + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -3560,18 +4557,27 @@ func (z *RawFileInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { } } } + // Clear omitted fields. + if (zb0001Mask & 0x1) == 0 { + z.Prefix = "" + } + o = bts return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *RawFileInfo) Msgsize() (s int) { - s = 1 + 2 + msgp.BytesPrefixSize + len(z.Buf) +func (z *ReadMultipleReq) Msgsize() (s int) { + s = 1 + 3 + msgp.StringPrefixSize + len(z.Bucket) + 3 + msgp.StringPrefixSize + len(z.Prefix) + 3 + msgp.ArrayHeaderSize + for za0001 := range z.Files { + s += msgp.StringPrefixSize + len(z.Files[za0001]) + } + s += 3 + msgp.Int64Size + 3 + msgp.BoolSize + 3 + msgp.BoolSize + 3 + msgp.IntSize return } // DecodeMsg implements msgp.Decodable -func (z *ReadAllHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) { +func (z *ReadMultipleResp) DecodeMsg(dc *msgp.Reader) (err error) { var field []byte _ = field var zb0001 uint32 @@ -3580,6 +4586,8 @@ func (z *ReadAllHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err) return } + var zb0001Mask uint8 /* 2 bits */ + _ = zb0001Mask for zb0001 > 0 { zb0001-- field, err = dc.ReadMapKeyPtr() @@ -3588,89 +4596,221 @@ func (z *ReadAllHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) { return } switch msgp.UnsafeString(field) { - case "id": - z.DiskID, err = dc.ReadString() + case "bk": + z.Bucket, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Bucket") + return + } + case "pr": + z.Prefix, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Prefix") + return + } + zb0001Mask |= 0x1 + case "fl": + z.File, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "File") + return + } + case "ex": + z.Exists, err = dc.ReadBool() + if err != nil { + err = msgp.WrapError(err, "Exists") + return + } + case "er": + z.Error, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Error") + return + } + zb0001Mask |= 0x2 + case "d": + z.Data, err = dc.ReadBytes(z.Data) + if err != nil { + err = msgp.WrapError(err, "Data") + return + } + case "m": + z.Modtime, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "Modtime") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + // Clear omitted fields. 
+ if zb0001Mask != 0x3 { + if (zb0001Mask & 0x1) == 0 { + z.Prefix = "" + } + if (zb0001Mask & 0x2) == 0 { + z.Error = "" + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *ReadMultipleResp) EncodeMsg(en *msgp.Writer) (err error) { + // check for omitted fields + zb0001Len := uint32(7) + var zb0001Mask uint8 /* 7 bits */ + _ = zb0001Mask + if z.Prefix == "" { + zb0001Len-- + zb0001Mask |= 0x2 + } + if z.Error == "" { + zb0001Len-- + zb0001Mask |= 0x10 + } + // variable map header, size zb0001Len + err = en.Append(0x80 | uint8(zb0001Len)) + if err != nil { + return + } + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // write "bk" + err = en.Append(0xa2, 0x62, 0x6b) + if err != nil { + return + } + err = en.WriteString(z.Bucket) + if err != nil { + err = msgp.WrapError(err, "Bucket") + return + } + if (zb0001Mask & 0x2) == 0 { // if not omitted + // write "pr" + err = en.Append(0xa2, 0x70, 0x72) if err != nil { - err = msgp.WrapError(err, "DiskID") return } - case "v": - z.Volume, err = dc.ReadString() + err = en.WriteString(z.Prefix) if err != nil { - err = msgp.WrapError(err, "Volume") + err = msgp.WrapError(err, "Prefix") return } - case "fp": - z.FilePath, err = dc.ReadString() + } + // write "fl" + err = en.Append(0xa2, 0x66, 0x6c) + if err != nil { + return + } + err = en.WriteString(z.File) + if err != nil { + err = msgp.WrapError(err, "File") + return + } + // write "ex" + err = en.Append(0xa2, 0x65, 0x78) + if err != nil { + return + } + err = en.WriteBool(z.Exists) + if err != nil { + err = msgp.WrapError(err, "Exists") + return + } + if (zb0001Mask & 0x10) == 0 { // if not omitted + // write "er" + err = en.Append(0xa2, 0x65, 0x72) if err != nil { - err = msgp.WrapError(err, "FilePath") return } - default: - err = dc.Skip() + err = en.WriteString(z.Error) if err != nil { - err = msgp.WrapError(err) + err = msgp.WrapError(err, "Error") return } } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z ReadAllHandlerParams) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 3 - // write "id" - err = en.Append(0x83, 0xa2, 0x69, 0x64) - if err != nil { - return - } - err = en.WriteString(z.DiskID) - if err != nil { - err = msgp.WrapError(err, "DiskID") - return - } - // write "v" - err = en.Append(0xa1, 0x76) - if err != nil { - return - } - err = en.WriteString(z.Volume) - if err != nil { - err = msgp.WrapError(err, "Volume") - return - } - // write "fp" - err = en.Append(0xa2, 0x66, 0x70) - if err != nil { - return - } - err = en.WriteString(z.FilePath) - if err != nil { - err = msgp.WrapError(err, "FilePath") - return + // write "d" + err = en.Append(0xa1, 0x64) + if err != nil { + return + } + err = en.WriteBytes(z.Data) + if err != nil { + err = msgp.WrapError(err, "Data") + return + } + // write "m" + err = en.Append(0xa1, 0x6d) + if err != nil { + return + } + err = en.WriteTime(z.Modtime) + if err != nil { + err = msgp.WrapError(err, "Modtime") + return + } } return } // MarshalMsg implements msgp.Marshaler -func (z ReadAllHandlerParams) MarshalMsg(b []byte) (o []byte, err error) { +func (z *ReadMultipleResp) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 3 - // string "id" - o = append(o, 0x83, 0xa2, 0x69, 0x64) - o = msgp.AppendString(o, z.DiskID) - // string "v" - o = append(o, 0xa1, 0x76) - o = msgp.AppendString(o, z.Volume) - // string "fp" - o = append(o, 0xa2, 0x66, 0x70) - o = msgp.AppendString(o, z.FilePath) + // check for omitted fields + zb0001Len := 
uint32(7) + var zb0001Mask uint8 /* 7 bits */ + _ = zb0001Mask + if z.Prefix == "" { + zb0001Len-- + zb0001Mask |= 0x2 + } + if z.Error == "" { + zb0001Len-- + zb0001Mask |= 0x10 + } + // variable map header, size zb0001Len + o = append(o, 0x80|uint8(zb0001Len)) + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // string "bk" + o = append(o, 0xa2, 0x62, 0x6b) + o = msgp.AppendString(o, z.Bucket) + if (zb0001Mask & 0x2) == 0 { // if not omitted + // string "pr" + o = append(o, 0xa2, 0x70, 0x72) + o = msgp.AppendString(o, z.Prefix) + } + // string "fl" + o = append(o, 0xa2, 0x66, 0x6c) + o = msgp.AppendString(o, z.File) + // string "ex" + o = append(o, 0xa2, 0x65, 0x78) + o = msgp.AppendBool(o, z.Exists) + if (zb0001Mask & 0x10) == 0 { // if not omitted + // string "er" + o = append(o, 0xa2, 0x65, 0x72) + o = msgp.AppendString(o, z.Error) + } + // string "d" + o = append(o, 0xa1, 0x64) + o = msgp.AppendBytes(o, z.Data) + // string "m" + o = append(o, 0xa1, 0x6d) + o = msgp.AppendTime(o, z.Modtime) + } return } // UnmarshalMsg implements msgp.Unmarshaler -func (z *ReadAllHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) { +func (z *ReadMultipleResp) UnmarshalMsg(bts []byte) (o []byte, err error) { var field []byte _ = field var zb0001 uint32 @@ -3679,6 +4819,8 @@ func (z *ReadAllHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err) return } + var zb0001Mask uint8 /* 2 bits */ + _ = zb0001Mask for zb0001 > 0 { zb0001-- field, bts, err = msgp.ReadMapKeyZC(bts) @@ -3687,22 +4829,48 @@ func (z *ReadAllHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) { return } switch msgp.UnsafeString(field) { - case "id": - z.DiskID, bts, err = msgp.ReadStringBytes(bts) + case "bk": + z.Bucket, bts, err = msgp.ReadStringBytes(bts) if err != nil { - err = msgp.WrapError(err, "DiskID") + err = msgp.WrapError(err, "Bucket") return } - case "v": - z.Volume, bts, err = msgp.ReadStringBytes(bts) + case "pr": + z.Prefix, bts, err = msgp.ReadStringBytes(bts) if err != nil { - err = msgp.WrapError(err, "Volume") + err = msgp.WrapError(err, "Prefix") return } - case "fp": - z.FilePath, bts, err = msgp.ReadStringBytes(bts) + zb0001Mask |= 0x1 + case "fl": + z.File, bts, err = msgp.ReadStringBytes(bts) if err != nil { - err = msgp.WrapError(err, "FilePath") + err = msgp.WrapError(err, "File") + return + } + case "ex": + z.Exists, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Exists") + return + } + case "er": + z.Error, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Error") + return + } + zb0001Mask |= 0x2 + case "d": + z.Data, bts, err = msgp.ReadBytesBytes(bts, z.Data) + if err != nil { + err = msgp.WrapError(err, "Data") + return + } + case "m": + z.Modtime, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Modtime") return } default: @@ -3713,18 +4881,27 @@ func (z *ReadAllHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) { } } } + // Clear omitted fields. 
+ if zb0001Mask != 0x3 { + if (zb0001Mask & 0x1) == 0 { + z.Prefix = "" + } + if (zb0001Mask & 0x2) == 0 { + z.Error = "" + } + } o = bts return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z ReadAllHandlerParams) Msgsize() (s int) { - s = 1 + 3 + msgp.StringPrefixSize + len(z.DiskID) + 2 + msgp.StringPrefixSize + len(z.Volume) + 3 + msgp.StringPrefixSize + len(z.FilePath) +func (z *ReadMultipleResp) Msgsize() (s int) { + s = 1 + 3 + msgp.StringPrefixSize + len(z.Bucket) + 3 + msgp.StringPrefixSize + len(z.Prefix) + 3 + msgp.StringPrefixSize + len(z.File) + 3 + msgp.BoolSize + 3 + msgp.StringPrefixSize + len(z.Error) + 2 + msgp.BytesPrefixSize + len(z.Data) + 2 + msgp.TimeSize return } // DecodeMsg implements msgp.Decodable -func (z *ReadMultipleReq) DecodeMsg(dc *msgp.Reader) (err error) { +func (z *ReadPartsReq) DecodeMsg(dc *msgp.Reader) (err error) { var field []byte _ = field var zb0001 uint32 @@ -3741,61 +4918,25 @@ func (z *ReadMultipleReq) DecodeMsg(dc *msgp.Reader) (err error) { return } switch msgp.UnsafeString(field) { - case "Bucket": - z.Bucket, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Bucket") - return - } - case "Prefix": - z.Prefix, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Prefix") - return - } - case "Files": + case "p": var zb0002 uint32 zb0002, err = dc.ReadArrayHeader() if err != nil { - err = msgp.WrapError(err, "Files") + err = msgp.WrapError(err, "Paths") return } - if cap(z.Files) >= int(zb0002) { - z.Files = (z.Files)[:zb0002] + if cap(z.Paths) >= int(zb0002) { + z.Paths = (z.Paths)[:zb0002] } else { - z.Files = make([]string, zb0002) + z.Paths = make([]string, zb0002) } - for za0001 := range z.Files { - z.Files[za0001], err = dc.ReadString() + for za0001 := range z.Paths { + z.Paths[za0001], err = dc.ReadString() if err != nil { - err = msgp.WrapError(err, "Files", za0001) + err = msgp.WrapError(err, "Paths", za0001) return } } - case "MaxSize": - z.MaxSize, err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "MaxSize") - return - } - case "MetadataOnly": - z.MetadataOnly, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "MetadataOnly") - return - } - case "AbortOn404": - z.AbortOn404, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "AbortOn404") - return - } - case "MaxResults": - z.MaxResults, err = dc.ReadInt() - if err != nil { - err = msgp.WrapError(err, "MaxResults") - return - } default: err = dc.Skip() if err != nil { @@ -3808,121 +4949,43 @@ func (z *ReadMultipleReq) DecodeMsg(dc *msgp.Reader) (err error) { } // EncodeMsg implements msgp.Encodable -func (z *ReadMultipleReq) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 7 - // write "Bucket" - err = en.Append(0x87, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74) - if err != nil { - return - } - err = en.WriteString(z.Bucket) - if err != nil { - err = msgp.WrapError(err, "Bucket") - return - } - // write "Prefix" - err = en.Append(0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78) - if err != nil { - return - } - err = en.WriteString(z.Prefix) - if err != nil { - err = msgp.WrapError(err, "Prefix") - return - } - // write "Files" - err = en.Append(0xa5, 0x46, 0x69, 0x6c, 0x65, 0x73) +func (z *ReadPartsReq) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 1 + // write "p" + err = en.Append(0x81, 0xa1, 0x70) if err != nil { return } - err = en.WriteArrayHeader(uint32(len(z.Files))) + err = 
en.WriteArrayHeader(uint32(len(z.Paths))) if err != nil { - err = msgp.WrapError(err, "Files") + err = msgp.WrapError(err, "Paths") return } - for za0001 := range z.Files { - err = en.WriteString(z.Files[za0001]) + for za0001 := range z.Paths { + err = en.WriteString(z.Paths[za0001]) if err != nil { - err = msgp.WrapError(err, "Files", za0001) + err = msgp.WrapError(err, "Paths", za0001) return } } - // write "MaxSize" - err = en.Append(0xa7, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65) - if err != nil { - return - } - err = en.WriteInt64(z.MaxSize) - if err != nil { - err = msgp.WrapError(err, "MaxSize") - return - } - // write "MetadataOnly" - err = en.Append(0xac, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4f, 0x6e, 0x6c, 0x79) - if err != nil { - return - } - err = en.WriteBool(z.MetadataOnly) - if err != nil { - err = msgp.WrapError(err, "MetadataOnly") - return - } - // write "AbortOn404" - err = en.Append(0xaa, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x4f, 0x6e, 0x34, 0x30, 0x34) - if err != nil { - return - } - err = en.WriteBool(z.AbortOn404) - if err != nil { - err = msgp.WrapError(err, "AbortOn404") - return - } - // write "MaxResults" - err = en.Append(0xaa, 0x4d, 0x61, 0x78, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73) - if err != nil { - return - } - err = en.WriteInt(z.MaxResults) - if err != nil { - err = msgp.WrapError(err, "MaxResults") - return - } return } // MarshalMsg implements msgp.Marshaler -func (z *ReadMultipleReq) MarshalMsg(b []byte) (o []byte, err error) { +func (z *ReadPartsReq) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 7 - // string "Bucket" - o = append(o, 0x87, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74) - o = msgp.AppendString(o, z.Bucket) - // string "Prefix" - o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78) - o = msgp.AppendString(o, z.Prefix) - // string "Files" - o = append(o, 0xa5, 0x46, 0x69, 0x6c, 0x65, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.Files))) - for za0001 := range z.Files { - o = msgp.AppendString(o, z.Files[za0001]) + // map header, size 1 + // string "p" + o = append(o, 0x81, 0xa1, 0x70) + o = msgp.AppendArrayHeader(o, uint32(len(z.Paths))) + for za0001 := range z.Paths { + o = msgp.AppendString(o, z.Paths[za0001]) } - // string "MaxSize" - o = append(o, 0xa7, 0x4d, 0x61, 0x78, 0x53, 0x69, 0x7a, 0x65) - o = msgp.AppendInt64(o, z.MaxSize) - // string "MetadataOnly" - o = append(o, 0xac, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4f, 0x6e, 0x6c, 0x79) - o = msgp.AppendBool(o, z.MetadataOnly) - // string "AbortOn404" - o = append(o, 0xaa, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x4f, 0x6e, 0x34, 0x30, 0x34) - o = msgp.AppendBool(o, z.AbortOn404) - // string "MaxResults" - o = append(o, 0xaa, 0x4d, 0x61, 0x78, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73) - o = msgp.AppendInt(o, z.MaxResults) return } // UnmarshalMsg implements msgp.Unmarshaler -func (z *ReadMultipleReq) UnmarshalMsg(bts []byte) (o []byte, err error) { +func (z *ReadPartsReq) UnmarshalMsg(bts []byte) (o []byte, err error) { var field []byte _ = field var zb0001 uint32 @@ -3939,60 +5002,24 @@ func (z *ReadMultipleReq) UnmarshalMsg(bts []byte) (o []byte, err error) { return } switch msgp.UnsafeString(field) { - case "Bucket": - z.Bucket, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Bucket") - return - } - case "Prefix": - z.Prefix, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Prefix") - return - } - case "Files": + case "p": var zb0002 uint32 
- zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Files") - return - } - if cap(z.Files) >= int(zb0002) { - z.Files = (z.Files)[:zb0002] - } else { - z.Files = make([]string, zb0002) - } - for za0001 := range z.Files { - z.Files[za0001], bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Files", za0001) - return - } - } - case "MaxSize": - z.MaxSize, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "MaxSize") - return - } - case "MetadataOnly": - z.MetadataOnly, bts, err = msgp.ReadBoolBytes(bts) + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { - err = msgp.WrapError(err, "MetadataOnly") + err = msgp.WrapError(err, "Paths") return } - case "AbortOn404": - z.AbortOn404, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - err = msgp.WrapError(err, "AbortOn404") - return + if cap(z.Paths) >= int(zb0002) { + z.Paths = (z.Paths)[:zb0002] + } else { + z.Paths = make([]string, zb0002) } - case "MaxResults": - z.MaxResults, bts, err = msgp.ReadIntBytes(bts) - if err != nil { - err = msgp.WrapError(err, "MaxResults") - return + for za0001 := range z.Paths { + z.Paths[za0001], bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Paths", za0001) + return + } } default: bts, err = msgp.Skip(bts) @@ -4007,17 +5034,16 @@ func (z *ReadMultipleReq) UnmarshalMsg(bts []byte) (o []byte, err error) { } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *ReadMultipleReq) Msgsize() (s int) { - s = 1 + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 6 + msgp.ArrayHeaderSize - for za0001 := range z.Files { - s += msgp.StringPrefixSize + len(z.Files[za0001]) +func (z *ReadPartsReq) Msgsize() (s int) { + s = 1 + 2 + msgp.ArrayHeaderSize + for za0001 := range z.Paths { + s += msgp.StringPrefixSize + len(z.Paths[za0001]) } - s += 8 + msgp.Int64Size + 13 + msgp.BoolSize + 11 + msgp.BoolSize + 11 + msgp.IntSize return } // DecodeMsg implements msgp.Decodable -func (z *ReadMultipleResp) DecodeMsg(dc *msgp.Reader) (err error) { +func (z *ReadPartsResp) DecodeMsg(dc *msgp.Reader) (err error) { var field []byte _ = field var zb0001 uint32 @@ -4034,47 +5060,36 @@ func (z *ReadMultipleResp) DecodeMsg(dc *msgp.Reader) (err error) { return } switch msgp.UnsafeString(field) { - case "Bucket": - z.Bucket, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Bucket") - return - } - case "Prefix": - z.Prefix, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Prefix") - return - } - case "File": - z.File, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "File") - return - } - case "Exists": - z.Exists, err = dc.ReadBool() - if err != nil { - err = msgp.WrapError(err, "Exists") - return - } - case "Error": - z.Error, err = dc.ReadString() + case "is": + var zb0002 uint32 + zb0002, err = dc.ReadArrayHeader() if err != nil { - err = msgp.WrapError(err, "Error") + err = msgp.WrapError(err, "Infos") return } - case "Data": - z.Data, err = dc.ReadBytes(z.Data) - if err != nil { - err = msgp.WrapError(err, "Data") - return + if cap(z.Infos) >= int(zb0002) { + z.Infos = (z.Infos)[:zb0002] + } else { + z.Infos = make([]*ObjectPartInfo, zb0002) } - case "Modtime": - z.Modtime, err = dc.ReadTime() - if err != nil { - err = msgp.WrapError(err, "Modtime") - return + for za0001 := range z.Infos { + if dc.IsNil() { + err = 
dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "Infos", za0001) + return + } + z.Infos[za0001] = nil + } else { + if z.Infos[za0001] == nil { + z.Infos[za0001] = new(ObjectPartInfo) + } + err = z.Infos[za0001].DecodeMsg(dc) + if err != nil { + err = msgp.WrapError(err, "Infos", za0001) + return + } + } } default: err = dc.Skip() @@ -4088,111 +5103,58 @@ func (z *ReadMultipleResp) DecodeMsg(dc *msgp.Reader) (err error) { } // EncodeMsg implements msgp.Encodable -func (z *ReadMultipleResp) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 7 - // write "Bucket" - err = en.Append(0x87, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74) - if err != nil { - return - } - err = en.WriteString(z.Bucket) - if err != nil { - err = msgp.WrapError(err, "Bucket") - return - } - // write "Prefix" - err = en.Append(0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78) - if err != nil { - return - } - err = en.WriteString(z.Prefix) - if err != nil { - err = msgp.WrapError(err, "Prefix") - return - } - // write "File" - err = en.Append(0xa4, 0x46, 0x69, 0x6c, 0x65) - if err != nil { - return - } - err = en.WriteString(z.File) - if err != nil { - err = msgp.WrapError(err, "File") - return - } - // write "Exists" - err = en.Append(0xa6, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73) - if err != nil { - return - } - err = en.WriteBool(z.Exists) - if err != nil { - err = msgp.WrapError(err, "Exists") - return - } - // write "Error" - err = en.Append(0xa5, 0x45, 0x72, 0x72, 0x6f, 0x72) - if err != nil { - return - } - err = en.WriteString(z.Error) - if err != nil { - err = msgp.WrapError(err, "Error") - return - } - // write "Data" - err = en.Append(0xa4, 0x44, 0x61, 0x74, 0x61) - if err != nil { - return - } - err = en.WriteBytes(z.Data) +func (z *ReadPartsResp) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 1 + // write "is" + err = en.Append(0x81, 0xa2, 0x69, 0x73) if err != nil { - err = msgp.WrapError(err, "Data") return } - // write "Modtime" - err = en.Append(0xa7, 0x4d, 0x6f, 0x64, 0x74, 0x69, 0x6d, 0x65) + err = en.WriteArrayHeader(uint32(len(z.Infos))) if err != nil { + err = msgp.WrapError(err, "Infos") return } - err = en.WriteTime(z.Modtime) - if err != nil { - err = msgp.WrapError(err, "Modtime") - return + for za0001 := range z.Infos { + if z.Infos[za0001] == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.Infos[za0001].EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "Infos", za0001) + return + } + } } return } // MarshalMsg implements msgp.Marshaler -func (z *ReadMultipleResp) MarshalMsg(b []byte) (o []byte, err error) { +func (z *ReadPartsResp) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 7 - // string "Bucket" - o = append(o, 0x87, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74) - o = msgp.AppendString(o, z.Bucket) - // string "Prefix" - o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78) - o = msgp.AppendString(o, z.Prefix) - // string "File" - o = append(o, 0xa4, 0x46, 0x69, 0x6c, 0x65) - o = msgp.AppendString(o, z.File) - // string "Exists" - o = append(o, 0xa6, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73) - o = msgp.AppendBool(o, z.Exists) - // string "Error" - o = append(o, 0xa5, 0x45, 0x72, 0x72, 0x6f, 0x72) - o = msgp.AppendString(o, z.Error) - // string "Data" - o = append(o, 0xa4, 0x44, 0x61, 0x74, 0x61) - o = msgp.AppendBytes(o, z.Data) - // string "Modtime" - o = append(o, 0xa7, 0x4d, 0x6f, 0x64, 0x74, 0x69, 0x6d, 0x65) - o = msgp.AppendTime(o, z.Modtime) + // map header, size 1 + // 
string "is" + o = append(o, 0x81, 0xa2, 0x69, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Infos))) + for za0001 := range z.Infos { + if z.Infos[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Infos[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Infos", za0001) + return + } + } + } return } // UnmarshalMsg implements msgp.Unmarshaler -func (z *ReadMultipleResp) UnmarshalMsg(bts []byte) (o []byte, err error) { +func (z *ReadPartsResp) UnmarshalMsg(bts []byte) (o []byte, err error) { var field []byte _ = field var zb0001 uint32 @@ -4209,47 +5171,35 @@ func (z *ReadMultipleResp) UnmarshalMsg(bts []byte) (o []byte, err error) { return } switch msgp.UnsafeString(field) { - case "Bucket": - z.Bucket, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Bucket") - return - } - case "Prefix": - z.Prefix, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Prefix") - return - } - case "File": - z.File, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "File") - return - } - case "Exists": - z.Exists, bts, err = msgp.ReadBoolBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Exists") - return - } - case "Error": - z.Error, bts, err = msgp.ReadStringBytes(bts) + case "is": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { - err = msgp.WrapError(err, "Error") + err = msgp.WrapError(err, "Infos") return } - case "Data": - z.Data, bts, err = msgp.ReadBytesBytes(bts, z.Data) - if err != nil { - err = msgp.WrapError(err, "Data") - return + if cap(z.Infos) >= int(zb0002) { + z.Infos = (z.Infos)[:zb0002] + } else { + z.Infos = make([]*ObjectPartInfo, zb0002) } - case "Modtime": - z.Modtime, bts, err = msgp.ReadTimeBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Modtime") - return + for za0001 := range z.Infos { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Infos[za0001] = nil + } else { + if z.Infos[za0001] == nil { + z.Infos[za0001] = new(ObjectPartInfo) + } + bts, err = z.Infos[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Infos", za0001) + return + } + } } default: bts, err = msgp.Skip(bts) @@ -4264,8 +5214,15 @@ func (z *ReadMultipleResp) UnmarshalMsg(bts []byte) (o []byte, err error) { } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *ReadMultipleResp) Msgsize() (s int) { - s = 1 + 7 + msgp.StringPrefixSize + len(z.Bucket) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 5 + msgp.StringPrefixSize + len(z.File) + 7 + msgp.BoolSize + 6 + msgp.StringPrefixSize + len(z.Error) + 5 + msgp.BytesPrefixSize + len(z.Data) + 8 + msgp.TimeSize +func (z *ReadPartsResp) Msgsize() (s int) { + s = 1 + 3 + msgp.ArrayHeaderSize + for za0001 := range z.Infos { + if z.Infos[za0001] == nil { + s += msgp.NilSize + } else { + s += z.Infos[za0001].Msgsize() + } + } return } @@ -4455,6 +5412,7 @@ func (z *RenameDataHandlerParams) EncodeMsg(en *msgp.Writer) (err error) { return } // map header, size 0 + _ = z.Opts.BaseOptions err = en.Append(0x80) if err != nil { return @@ -4494,6 +5452,7 @@ func (z *RenameDataHandlerParams) MarshalMsg(b []byte) (o []byte, err error) { // string "BaseOptions" o = append(o, 0x81, 0xab, 0x42, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73) // map header, size 0 + _ = z.Opts.BaseOptions o = append(o, 0x80) return } @@ -4617,7 +5576,7 @@ func (z 
*RenameDataHandlerParams) Msgsize() (s int) { } // DecodeMsg implements msgp.Decodable -func (z *RenameDataResp) DecodeMsg(dc *msgp.Reader) (err error) { +func (z *RenameDataInlineHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) { var field []byte _ = field var zb0001 uint32 @@ -4634,10 +5593,10 @@ func (z *RenameDataResp) DecodeMsg(dc *msgp.Reader) (err error) { return } switch msgp.UnsafeString(field) { - case "sig": - z.Signature, err = dc.ReadUint64() + case "p": + err = z.RenameDataHandlerParams.DecodeMsg(dc) if err != nil { - err = msgp.WrapError(err, "Signature") + err = msgp.WrapError(err, "RenameDataHandlerParams") return } default: @@ -4652,28 +5611,154 @@ func (z *RenameDataResp) DecodeMsg(dc *msgp.Reader) (err error) { } // EncodeMsg implements msgp.Encodable -func (z RenameDataResp) EncodeMsg(en *msgp.Writer) (err error) { +func (z *RenameDataInlineHandlerParams) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 1 + // write "p" + err = en.Append(0x81, 0xa1, 0x70) + if err != nil { + return + } + err = z.RenameDataHandlerParams.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "RenameDataHandlerParams") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *RenameDataInlineHandlerParams) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) // map header, size 1 - // write "sig" - err = en.Append(0x81, 0xa3, 0x73, 0x69, 0x67) + // string "p" + o = append(o, 0x81, 0xa1, 0x70) + o, err = z.RenameDataHandlerParams.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "RenameDataHandlerParams") + return + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *RenameDataInlineHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "p": + bts, err = z.RenameDataHandlerParams.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "RenameDataHandlerParams") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *RenameDataInlineHandlerParams) Msgsize() (s int) { + s = 1 + 2 + z.RenameDataHandlerParams.Msgsize() + return +} + +// DecodeMsg implements msgp.Decodable +func (z *RenameDataResp) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "s": + z.Sign, err = dc.ReadBytes(z.Sign) + if err != nil { + err = msgp.WrapError(err, "Sign") + return + } + case "od": + z.OldDataDir, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "OldDataDir") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *RenameDataResp) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 2 + // write "s" + err = en.Append(0x82, 0xa1, 
0x73) + if err != nil { + return + } + err = en.WriteBytes(z.Sign) if err != nil { + err = msgp.WrapError(err, "Sign") return } - err = en.WriteUint64(z.Signature) + // write "od" + err = en.Append(0xa2, 0x6f, 0x64) if err != nil { - err = msgp.WrapError(err, "Signature") + return + } + err = en.WriteString(z.OldDataDir) + if err != nil { + err = msgp.WrapError(err, "OldDataDir") return } return } // MarshalMsg implements msgp.Marshaler -func (z RenameDataResp) MarshalMsg(b []byte) (o []byte, err error) { +func (z *RenameDataResp) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 1 - // string "sig" - o = append(o, 0x81, 0xa3, 0x73, 0x69, 0x67) - o = msgp.AppendUint64(o, z.Signature) + // map header, size 2 + // string "s" + o = append(o, 0x82, 0xa1, 0x73) + o = msgp.AppendBytes(o, z.Sign) + // string "od" + o = append(o, 0xa2, 0x6f, 0x64) + o = msgp.AppendString(o, z.OldDataDir) return } @@ -4695,10 +5780,16 @@ func (z *RenameDataResp) UnmarshalMsg(bts []byte) (o []byte, err error) { return } switch msgp.UnsafeString(field) { - case "sig": - z.Signature, bts, err = msgp.ReadUint64Bytes(bts) + case "s": + z.Sign, bts, err = msgp.ReadBytesBytes(bts, z.Sign) + if err != nil { + err = msgp.WrapError(err, "Sign") + return + } + case "od": + z.OldDataDir, bts, err = msgp.ReadStringBytes(bts) if err != nil { - err = msgp.WrapError(err, "Signature") + err = msgp.WrapError(err, "OldDataDir") return } default: @@ -4714,8 +5805,8 @@ func (z *RenameDataResp) UnmarshalMsg(bts []byte) (o []byte, err error) { } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z RenameDataResp) Msgsize() (s int) { - s = 1 + 4 + msgp.Uint64Size +func (z *RenameDataResp) Msgsize() (s int) { + s = 1 + 2 + msgp.BytesPrefixSize + len(z.Sign) + 3 + msgp.StringPrefixSize + len(z.OldDataDir) return } @@ -4983,6 +6074,7 @@ func (z *RenameOptions) EncodeMsg(en *msgp.Writer) (err error) { return } // map header, size 0 + _ = z.BaseOptions err = en.Append(0x80) if err != nil { return @@ -4997,6 +6089,7 @@ func (z *RenameOptions) MarshalMsg(b []byte) (o []byte, err error) { // string "BaseOptions" o = append(o, 0x81, 0xab, 0x42, 0x61, 0x73, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73) // map header, size 0 + _ = z.BaseOptions o = append(o, 0x80) return } @@ -5060,6 +6153,259 @@ func (z *RenameOptions) Msgsize() (s int) { return } +// DecodeMsg implements msgp.Decodable +func (z *RenamePartHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "id": + z.DiskID, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "DiskID") + return + } + case "sv": + z.SrcVolume, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "SrcVolume") + return + } + case "sp": + z.SrcFilePath, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "SrcFilePath") + return + } + case "dv": + z.DstVolume, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "DstVolume") + return + } + case "dp": + z.DstFilePath, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "DstFilePath") + return + } + case "m": + z.Meta, err = dc.ReadBytes(z.Meta) + if err != nil { + err = 
msgp.WrapError(err, "Meta") + return + } + case "kp": + z.SkipParent, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "SkipParent") + return + } + default: + err = dc.Skip() + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *RenamePartHandlerParams) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 7 + // write "id" + err = en.Append(0x87, 0xa2, 0x69, 0x64) + if err != nil { + return + } + err = en.WriteString(z.DiskID) + if err != nil { + err = msgp.WrapError(err, "DiskID") + return + } + // write "sv" + err = en.Append(0xa2, 0x73, 0x76) + if err != nil { + return + } + err = en.WriteString(z.SrcVolume) + if err != nil { + err = msgp.WrapError(err, "SrcVolume") + return + } + // write "sp" + err = en.Append(0xa2, 0x73, 0x70) + if err != nil { + return + } + err = en.WriteString(z.SrcFilePath) + if err != nil { + err = msgp.WrapError(err, "SrcFilePath") + return + } + // write "dv" + err = en.Append(0xa2, 0x64, 0x76) + if err != nil { + return + } + err = en.WriteString(z.DstVolume) + if err != nil { + err = msgp.WrapError(err, "DstVolume") + return + } + // write "dp" + err = en.Append(0xa2, 0x64, 0x70) + if err != nil { + return + } + err = en.WriteString(z.DstFilePath) + if err != nil { + err = msgp.WrapError(err, "DstFilePath") + return + } + // write "m" + err = en.Append(0xa1, 0x6d) + if err != nil { + return + } + err = en.WriteBytes(z.Meta) + if err != nil { + err = msgp.WrapError(err, "Meta") + return + } + // write "kp" + err = en.Append(0xa2, 0x6b, 0x70) + if err != nil { + return + } + err = en.WriteString(z.SkipParent) + if err != nil { + err = msgp.WrapError(err, "SkipParent") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *RenamePartHandlerParams) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 7 + // string "id" + o = append(o, 0x87, 0xa2, 0x69, 0x64) + o = msgp.AppendString(o, z.DiskID) + // string "sv" + o = append(o, 0xa2, 0x73, 0x76) + o = msgp.AppendString(o, z.SrcVolume) + // string "sp" + o = append(o, 0xa2, 0x73, 0x70) + o = msgp.AppendString(o, z.SrcFilePath) + // string "dv" + o = append(o, 0xa2, 0x64, 0x76) + o = msgp.AppendString(o, z.DstVolume) + // string "dp" + o = append(o, 0xa2, 0x64, 0x70) + o = msgp.AppendString(o, z.DstFilePath) + // string "m" + o = append(o, 0xa1, 0x6d) + o = msgp.AppendBytes(o, z.Meta) + // string "kp" + o = append(o, 0xa2, 0x6b, 0x70) + o = msgp.AppendString(o, z.SkipParent) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *RenamePartHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "id": + z.DiskID, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DiskID") + return + } + case "sv": + z.SrcVolume, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SrcVolume") + return + } + case "sp": + z.SrcFilePath, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SrcFilePath") + return + } + case "dv": + z.DstVolume, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = 
msgp.WrapError(err, "DstVolume") + return + } + case "dp": + z.DstFilePath, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "DstFilePath") + return + } + case "m": + z.Meta, bts, err = msgp.ReadBytesBytes(bts, z.Meta) + if err != nil { + err = msgp.WrapError(err, "Meta") + return + } + case "kp": + z.SkipParent, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SkipParent") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *RenamePartHandlerParams) Msgsize() (s int) { + s = 1 + 3 + msgp.StringPrefixSize + len(z.DiskID) + 3 + msgp.StringPrefixSize + len(z.SrcVolume) + 3 + msgp.StringPrefixSize + len(z.SrcFilePath) + 3 + msgp.StringPrefixSize + len(z.DstVolume) + 3 + msgp.StringPrefixSize + len(z.DstFilePath) + 2 + msgp.BytesPrefixSize + len(z.Meta) + 3 + msgp.StringPrefixSize + len(z.SkipParent) + return +} + // DecodeMsg implements msgp.Decodable func (z *UpdateMetadataOpts) DecodeMsg(dc *msgp.Reader) (err error) { var field []byte @@ -5171,8 +6517,8 @@ func (z *VolInfo) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err) return } - if zb0001 != 2 { - err = msgp.ArrayError{Wanted: 2, Got: zb0001} + if zb0001 != 3 { + err = msgp.ArrayError{Wanted: 3, Got: zb0001} return } z.Name, err = dc.ReadString() @@ -5185,13 +6531,18 @@ func (z *VolInfo) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "Created") return } + z.Deleted, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "Deleted") + return + } return } // EncodeMsg implements msgp.Encodable func (z VolInfo) EncodeMsg(en *msgp.Writer) (err error) { - // array header, size 2 - err = en.Append(0x92) + // array header, size 3 + err = en.Append(0x93) if err != nil { return } @@ -5205,16 +6556,22 @@ func (z VolInfo) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "Created") return } + err = en.WriteTime(z.Deleted) + if err != nil { + err = msgp.WrapError(err, "Deleted") + return + } return } // MarshalMsg implements msgp.Marshaler func (z VolInfo) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // array header, size 2 - o = append(o, 0x92) + // array header, size 3 + o = append(o, 0x93) o = msgp.AppendString(o, z.Name) o = msgp.AppendTime(o, z.Created) + o = msgp.AppendTime(o, z.Deleted) return } @@ -5226,8 +6583,8 @@ func (z *VolInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err) return } - if zb0001 != 2 { - err = msgp.ArrayError{Wanted: 2, Got: zb0001} + if zb0001 != 3 { + err = msgp.ArrayError{Wanted: 3, Got: zb0001} return } z.Name, bts, err = msgp.ReadStringBytes(bts) @@ -5240,13 +6597,18 @@ func (z *VolInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "Created") return } + z.Deleted, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Deleted") + return + } o = bts return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z VolInfo) Msgsize() (s int) { - s = 1 + msgp.StringPrefixSize + len(z.Name) + msgp.TimeSize + s = 1 + msgp.StringPrefixSize + len(z.Name) + msgp.TimeSize + msgp.TimeSize return } @@ -5270,8 +6632,8 @@ func (z *VolsInfo) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, zb0001) return } - if 
zb0003 != 2 { - err = msgp.ArrayError{Wanted: 2, Got: zb0003} + if zb0003 != 3 { + err = msgp.ArrayError{Wanted: 3, Got: zb0003} return } (*z)[zb0001].Name, err = dc.ReadString() @@ -5284,6 +6646,11 @@ func (z *VolsInfo) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, zb0001, "Created") return } + (*z)[zb0001].Deleted, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, zb0001, "Deleted") + return + } } return } @@ -5296,8 +6663,8 @@ func (z VolsInfo) EncodeMsg(en *msgp.Writer) (err error) { return } for zb0004 := range z { - // array header, size 2 - err = en.Append(0x92) + // array header, size 3 + err = en.Append(0x93) if err != nil { return } @@ -5311,6 +6678,11 @@ func (z VolsInfo) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, zb0004, "Created") return } + err = en.WriteTime(z[zb0004].Deleted) + if err != nil { + err = msgp.WrapError(err, zb0004, "Deleted") + return + } } return } @@ -5320,10 +6692,11 @@ func (z VolsInfo) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) o = msgp.AppendArrayHeader(o, uint32(len(z))) for zb0004 := range z { - // array header, size 2 - o = append(o, 0x92) + // array header, size 3 + o = append(o, 0x93) o = msgp.AppendString(o, z[zb0004].Name) o = msgp.AppendTime(o, z[zb0004].Created) + o = msgp.AppendTime(o, z[zb0004].Deleted) } return } @@ -5348,8 +6721,8 @@ func (z *VolsInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, zb0001) return } - if zb0003 != 2 { - err = msgp.ArrayError{Wanted: 2, Got: zb0003} + if zb0003 != 3 { + err = msgp.ArrayError{Wanted: 3, Got: zb0003} return } (*z)[zb0001].Name, bts, err = msgp.ReadStringBytes(bts) @@ -5362,6 +6735,11 @@ func (z *VolsInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, zb0001, "Created") return } + (*z)[zb0001].Deleted, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, zb0001, "Deleted") + return + } } o = bts return @@ -5371,7 +6749,7 @@ func (z *VolsInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { func (z VolsInfo) Msgsize() (s int) { s = msgp.ArrayHeaderSize for zb0004 := range z { - s += 1 + msgp.StringPrefixSize + len(z[zb0004].Name) + msgp.TimeSize + s += 1 + msgp.StringPrefixSize + len(z[zb0004].Name) + msgp.TimeSize + msgp.TimeSize } return } diff --git a/cmd/storage-datatypes_gen_test.go b/cmd/storage-datatypes_gen_test.go index cee67107f9ebe..78f53f36f6d83 100644 --- a/cmd/storage-datatypes_gen_test.go +++ b/cmd/storage-datatypes_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package cmd + import ( "bytes" "testing" @@ -235,8 +235,8 @@ func BenchmarkDecodeCheckPartsHandlerParams(b *testing.B) { } } -func TestMarshalUnmarshalDeleteFileHandlerParams(t *testing.T) { - v := DeleteFileHandlerParams{} +func TestMarshalUnmarshalCheckPartsResp(t *testing.T) { + v := CheckPartsResp{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -258,8 +258,8 @@ func TestMarshalUnmarshalDeleteFileHandlerParams(t *testing.T) { } } -func BenchmarkMarshalMsgDeleteFileHandlerParams(b *testing.B) { - v := DeleteFileHandlerParams{} +func BenchmarkMarshalMsgCheckPartsResp(b *testing.B) { + v := CheckPartsResp{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -267,8 +267,8 @@ func BenchmarkMarshalMsgDeleteFileHandlerParams(b *testing.B) { } } -func BenchmarkAppendMsgDeleteFileHandlerParams(b *testing.B) { - v := DeleteFileHandlerParams{} +func BenchmarkAppendMsgCheckPartsResp(b *testing.B) { + v := CheckPartsResp{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -279,8 +279,8 @@ func BenchmarkAppendMsgDeleteFileHandlerParams(b *testing.B) { } } -func BenchmarkUnmarshalDeleteFileHandlerParams(b *testing.B) { - v := DeleteFileHandlerParams{} +func BenchmarkUnmarshalCheckPartsResp(b *testing.B) { + v := CheckPartsResp{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -293,17 +293,17 @@ func BenchmarkUnmarshalDeleteFileHandlerParams(b *testing.B) { } } -func TestEncodeDecodeDeleteFileHandlerParams(t *testing.T) { - v := DeleteFileHandlerParams{} +func TestEncodeDecodeCheckPartsResp(t *testing.T) { + v := CheckPartsResp{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeDeleteFileHandlerParams Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeCheckPartsResp Msgsize() is inaccurate") } - vn := DeleteFileHandlerParams{} + vn := CheckPartsResp{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -317,8 +317,8 @@ func TestEncodeDecodeDeleteFileHandlerParams(t *testing.T) { } } -func BenchmarkEncodeDeleteFileHandlerParams(b *testing.B) { - v := DeleteFileHandlerParams{} +func BenchmarkEncodeCheckPartsResp(b *testing.B) { + v := CheckPartsResp{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -331,8 +331,8 @@ func BenchmarkEncodeDeleteFileHandlerParams(b *testing.B) { en.Flush() } -func BenchmarkDecodeDeleteFileHandlerParams(b *testing.B) { - v := DeleteFileHandlerParams{} +func BenchmarkDecodeCheckPartsResp(b *testing.B) { + v := CheckPartsResp{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -348,8 +348,8 @@ func BenchmarkDecodeDeleteFileHandlerParams(b *testing.B) { } } -func TestMarshalUnmarshalDeleteOptions(t *testing.T) { - v := DeleteOptions{} +func TestMarshalUnmarshalDeleteBulkReq(t *testing.T) { + v := DeleteBulkReq{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -371,8 +371,8 @@ func TestMarshalUnmarshalDeleteOptions(t *testing.T) { } } -func BenchmarkMarshalMsgDeleteOptions(b *testing.B) { - v := DeleteOptions{} +func BenchmarkMarshalMsgDeleteBulkReq(b *testing.B) { + v := DeleteBulkReq{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -380,8 +380,8 @@ func BenchmarkMarshalMsgDeleteOptions(b *testing.B) { } } -func BenchmarkAppendMsgDeleteOptions(b *testing.B) { - v := DeleteOptions{} +func BenchmarkAppendMsgDeleteBulkReq(b *testing.B) { + v := DeleteBulkReq{} bts := make([]byte, 0, v.Msgsize()) bts, _ = 
v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -392,8 +392,8 @@ func BenchmarkAppendMsgDeleteOptions(b *testing.B) { } } -func BenchmarkUnmarshalDeleteOptions(b *testing.B) { - v := DeleteOptions{} +func BenchmarkUnmarshalDeleteBulkReq(b *testing.B) { + v := DeleteBulkReq{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -406,17 +406,17 @@ func BenchmarkUnmarshalDeleteOptions(b *testing.B) { } } -func TestEncodeDecodeDeleteOptions(t *testing.T) { - v := DeleteOptions{} +func TestEncodeDecodeDeleteBulkReq(t *testing.T) { + v := DeleteBulkReq{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeDeleteOptions Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeDeleteBulkReq Msgsize() is inaccurate") } - vn := DeleteOptions{} + vn := DeleteBulkReq{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -430,8 +430,8 @@ func TestEncodeDecodeDeleteOptions(t *testing.T) { } } -func BenchmarkEncodeDeleteOptions(b *testing.B) { - v := DeleteOptions{} +func BenchmarkEncodeDeleteBulkReq(b *testing.B) { + v := DeleteBulkReq{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -444,8 +444,8 @@ func BenchmarkEncodeDeleteOptions(b *testing.B) { en.Flush() } -func BenchmarkDecodeDeleteOptions(b *testing.B) { - v := DeleteOptions{} +func BenchmarkDecodeDeleteBulkReq(b *testing.B) { + v := DeleteBulkReq{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -461,8 +461,8 @@ func BenchmarkDecodeDeleteOptions(b *testing.B) { } } -func TestMarshalUnmarshalDeleteVersionHandlerParams(t *testing.T) { - v := DeleteVersionHandlerParams{} +func TestMarshalUnmarshalDeleteFileHandlerParams(t *testing.T) { + v := DeleteFileHandlerParams{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -484,8 +484,8 @@ func TestMarshalUnmarshalDeleteVersionHandlerParams(t *testing.T) { } } -func BenchmarkMarshalMsgDeleteVersionHandlerParams(b *testing.B) { - v := DeleteVersionHandlerParams{} +func BenchmarkMarshalMsgDeleteFileHandlerParams(b *testing.B) { + v := DeleteFileHandlerParams{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -493,8 +493,8 @@ func BenchmarkMarshalMsgDeleteVersionHandlerParams(b *testing.B) { } } -func BenchmarkAppendMsgDeleteVersionHandlerParams(b *testing.B) { - v := DeleteVersionHandlerParams{} +func BenchmarkAppendMsgDeleteFileHandlerParams(b *testing.B) { + v := DeleteFileHandlerParams{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -505,8 +505,8 @@ func BenchmarkAppendMsgDeleteVersionHandlerParams(b *testing.B) { } } -func BenchmarkUnmarshalDeleteVersionHandlerParams(b *testing.B) { - v := DeleteVersionHandlerParams{} +func BenchmarkUnmarshalDeleteFileHandlerParams(b *testing.B) { + v := DeleteFileHandlerParams{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -519,17 +519,17 @@ func BenchmarkUnmarshalDeleteVersionHandlerParams(b *testing.B) { } } -func TestEncodeDecodeDeleteVersionHandlerParams(t *testing.T) { - v := DeleteVersionHandlerParams{} +func TestEncodeDecodeDeleteFileHandlerParams(t *testing.T) { + v := DeleteFileHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeDeleteVersionHandlerParams Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeDeleteFileHandlerParams Msgsize() is inaccurate") } - vn := DeleteVersionHandlerParams{} + vn := 
DeleteFileHandlerParams{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -543,8 +543,8 @@ func TestEncodeDecodeDeleteVersionHandlerParams(t *testing.T) { } } -func BenchmarkEncodeDeleteVersionHandlerParams(b *testing.B) { - v := DeleteVersionHandlerParams{} +func BenchmarkEncodeDeleteFileHandlerParams(b *testing.B) { + v := DeleteFileHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -557,8 +557,8 @@ func BenchmarkEncodeDeleteVersionHandlerParams(b *testing.B) { en.Flush() } -func BenchmarkDecodeDeleteVersionHandlerParams(b *testing.B) { - v := DeleteVersionHandlerParams{} +func BenchmarkDecodeDeleteFileHandlerParams(b *testing.B) { + v := DeleteFileHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -574,8 +574,8 @@ func BenchmarkDecodeDeleteVersionHandlerParams(b *testing.B) { } } -func TestMarshalUnmarshalDiskInfo(t *testing.T) { - v := DiskInfo{} +func TestMarshalUnmarshalDeleteOptions(t *testing.T) { + v := DeleteOptions{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -597,8 +597,8 @@ func TestMarshalUnmarshalDiskInfo(t *testing.T) { } } -func BenchmarkMarshalMsgDiskInfo(b *testing.B) { - v := DiskInfo{} +func BenchmarkMarshalMsgDeleteOptions(b *testing.B) { + v := DeleteOptions{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -606,8 +606,8 @@ func BenchmarkMarshalMsgDiskInfo(b *testing.B) { } } -func BenchmarkAppendMsgDiskInfo(b *testing.B) { - v := DiskInfo{} +func BenchmarkAppendMsgDeleteOptions(b *testing.B) { + v := DeleteOptions{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -618,8 +618,8 @@ func BenchmarkAppendMsgDiskInfo(b *testing.B) { } } -func BenchmarkUnmarshalDiskInfo(b *testing.B) { - v := DiskInfo{} +func BenchmarkUnmarshalDeleteOptions(b *testing.B) { + v := DeleteOptions{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -632,17 +632,17 @@ func BenchmarkUnmarshalDiskInfo(b *testing.B) { } } -func TestEncodeDecodeDiskInfo(t *testing.T) { - v := DiskInfo{} +func TestEncodeDecodeDeleteOptions(t *testing.T) { + v := DeleteOptions{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeDiskInfo Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeDeleteOptions Msgsize() is inaccurate") } - vn := DiskInfo{} + vn := DeleteOptions{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -656,8 +656,8 @@ func TestEncodeDecodeDiskInfo(t *testing.T) { } } -func BenchmarkEncodeDiskInfo(b *testing.B) { - v := DiskInfo{} +func BenchmarkEncodeDeleteOptions(b *testing.B) { + v := DeleteOptions{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -670,8 +670,8 @@ func BenchmarkEncodeDiskInfo(b *testing.B) { en.Flush() } -func BenchmarkDecodeDiskInfo(b *testing.B) { - v := DiskInfo{} +func BenchmarkDecodeDeleteOptions(b *testing.B) { + v := DeleteOptions{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -687,8 +687,8 @@ func BenchmarkDecodeDiskInfo(b *testing.B) { } } -func TestMarshalUnmarshalDiskInfoOptions(t *testing.T) { - v := DiskInfoOptions{} +func TestMarshalUnmarshalDeleteVersionHandlerParams(t *testing.T) { + v := DeleteVersionHandlerParams{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -710,8 +710,8 @@ func TestMarshalUnmarshalDiskInfoOptions(t *testing.T) { } } -func BenchmarkMarshalMsgDiskInfoOptions(b *testing.B) { - v := 
DiskInfoOptions{} +func BenchmarkMarshalMsgDeleteVersionHandlerParams(b *testing.B) { + v := DeleteVersionHandlerParams{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -719,8 +719,8 @@ func BenchmarkMarshalMsgDiskInfoOptions(b *testing.B) { } } -func BenchmarkAppendMsgDiskInfoOptions(b *testing.B) { - v := DiskInfoOptions{} +func BenchmarkAppendMsgDeleteVersionHandlerParams(b *testing.B) { + v := DeleteVersionHandlerParams{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -731,8 +731,8 @@ func BenchmarkAppendMsgDiskInfoOptions(b *testing.B) { } } -func BenchmarkUnmarshalDiskInfoOptions(b *testing.B) { - v := DiskInfoOptions{} +func BenchmarkUnmarshalDeleteVersionHandlerParams(b *testing.B) { + v := DeleteVersionHandlerParams{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -745,17 +745,17 @@ func BenchmarkUnmarshalDiskInfoOptions(b *testing.B) { } } -func TestEncodeDecodeDiskInfoOptions(t *testing.T) { - v := DiskInfoOptions{} +func TestEncodeDecodeDeleteVersionHandlerParams(t *testing.T) { + v := DeleteVersionHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeDiskInfoOptions Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeDeleteVersionHandlerParams Msgsize() is inaccurate") } - vn := DiskInfoOptions{} + vn := DeleteVersionHandlerParams{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -769,8 +769,8 @@ func TestEncodeDecodeDiskInfoOptions(t *testing.T) { } } -func BenchmarkEncodeDiskInfoOptions(b *testing.B) { - v := DiskInfoOptions{} +func BenchmarkEncodeDeleteVersionHandlerParams(b *testing.B) { + v := DeleteVersionHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -783,8 +783,8 @@ func BenchmarkEncodeDiskInfoOptions(b *testing.B) { en.Flush() } -func BenchmarkDecodeDiskInfoOptions(b *testing.B) { - v := DiskInfoOptions{} +func BenchmarkDecodeDeleteVersionHandlerParams(b *testing.B) { + v := DeleteVersionHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -800,8 +800,8 @@ func BenchmarkDecodeDiskInfoOptions(b *testing.B) { } } -func TestMarshalUnmarshalDiskMetrics(t *testing.T) { - v := DiskMetrics{} +func TestMarshalUnmarshalDeleteVersionsErrsResp(t *testing.T) { + v := DeleteVersionsErrsResp{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -823,8 +823,8 @@ func TestMarshalUnmarshalDiskMetrics(t *testing.T) { } } -func BenchmarkMarshalMsgDiskMetrics(b *testing.B) { - v := DiskMetrics{} +func BenchmarkMarshalMsgDeleteVersionsErrsResp(b *testing.B) { + v := DeleteVersionsErrsResp{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -832,8 +832,8 @@ func BenchmarkMarshalMsgDiskMetrics(b *testing.B) { } } -func BenchmarkAppendMsgDiskMetrics(b *testing.B) { - v := DiskMetrics{} +func BenchmarkAppendMsgDeleteVersionsErrsResp(b *testing.B) { + v := DeleteVersionsErrsResp{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -844,8 +844,8 @@ func BenchmarkAppendMsgDiskMetrics(b *testing.B) { } } -func BenchmarkUnmarshalDiskMetrics(b *testing.B) { - v := DiskMetrics{} +func BenchmarkUnmarshalDeleteVersionsErrsResp(b *testing.B) { + v := DeleteVersionsErrsResp{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -858,17 +858,17 @@ func BenchmarkUnmarshalDiskMetrics(b *testing.B) { } } -func TestEncodeDecodeDiskMetrics(t 
*testing.T) { - v := DiskMetrics{} +func TestEncodeDecodeDeleteVersionsErrsResp(t *testing.T) { + v := DeleteVersionsErrsResp{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeDiskMetrics Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeDeleteVersionsErrsResp Msgsize() is inaccurate") } - vn := DiskMetrics{} + vn := DeleteVersionsErrsResp{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -882,8 +882,8 @@ func TestEncodeDecodeDiskMetrics(t *testing.T) { } } -func BenchmarkEncodeDiskMetrics(b *testing.B) { - v := DiskMetrics{} +func BenchmarkEncodeDeleteVersionsErrsResp(b *testing.B) { + v := DeleteVersionsErrsResp{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -896,8 +896,8 @@ func BenchmarkEncodeDiskMetrics(b *testing.B) { en.Flush() } -func BenchmarkDecodeDiskMetrics(b *testing.B) { - v := DiskMetrics{} +func BenchmarkDecodeDeleteVersionsErrsResp(b *testing.B) { + v := DeleteVersionsErrsResp{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -913,8 +913,8 @@ func BenchmarkDecodeDiskMetrics(b *testing.B) { } } -func TestMarshalUnmarshalFileInfo(t *testing.T) { - v := FileInfo{} +func TestMarshalUnmarshalDiskInfo(t *testing.T) { + v := DiskInfo{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -936,8 +936,8 @@ func TestMarshalUnmarshalFileInfo(t *testing.T) { } } -func BenchmarkMarshalMsgFileInfo(b *testing.B) { - v := FileInfo{} +func BenchmarkMarshalMsgDiskInfo(b *testing.B) { + v := DiskInfo{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -945,8 +945,8 @@ func BenchmarkMarshalMsgFileInfo(b *testing.B) { } } -func BenchmarkAppendMsgFileInfo(b *testing.B) { - v := FileInfo{} +func BenchmarkAppendMsgDiskInfo(b *testing.B) { + v := DiskInfo{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -957,8 +957,8 @@ func BenchmarkAppendMsgFileInfo(b *testing.B) { } } -func BenchmarkUnmarshalFileInfo(b *testing.B) { - v := FileInfo{} +func BenchmarkUnmarshalDiskInfo(b *testing.B) { + v := DiskInfo{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -971,17 +971,17 @@ func BenchmarkUnmarshalFileInfo(b *testing.B) { } } -func TestEncodeDecodeFileInfo(t *testing.T) { - v := FileInfo{} +func TestEncodeDecodeDiskInfo(t *testing.T) { + v := DiskInfo{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeFileInfo Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeDiskInfo Msgsize() is inaccurate") } - vn := FileInfo{} + vn := DiskInfo{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -995,8 +995,8 @@ func TestEncodeDecodeFileInfo(t *testing.T) { } } -func BenchmarkEncodeFileInfo(b *testing.B) { - v := FileInfo{} +func BenchmarkEncodeDiskInfo(b *testing.B) { + v := DiskInfo{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -1009,8 +1009,8 @@ func BenchmarkEncodeFileInfo(b *testing.B) { en.Flush() } -func BenchmarkDecodeFileInfo(b *testing.B) { - v := FileInfo{} +func BenchmarkDecodeDiskInfo(b *testing.B) { + v := DiskInfo{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -1026,8 +1026,8 @@ func BenchmarkDecodeFileInfo(b *testing.B) { } } -func TestMarshalUnmarshalFileInfoVersions(t *testing.T) { - v := FileInfoVersions{} +func TestMarshalUnmarshalDiskInfoOptions(t *testing.T) { + v := DiskInfoOptions{} bts, err := 
v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -1049,8 +1049,8 @@ func TestMarshalUnmarshalFileInfoVersions(t *testing.T) { } } -func BenchmarkMarshalMsgFileInfoVersions(b *testing.B) { - v := FileInfoVersions{} +func BenchmarkMarshalMsgDiskInfoOptions(b *testing.B) { + v := DiskInfoOptions{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -1058,8 +1058,8 @@ func BenchmarkMarshalMsgFileInfoVersions(b *testing.B) { } } -func BenchmarkAppendMsgFileInfoVersions(b *testing.B) { - v := FileInfoVersions{} +func BenchmarkAppendMsgDiskInfoOptions(b *testing.B) { + v := DiskInfoOptions{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -1070,8 +1070,8 @@ func BenchmarkAppendMsgFileInfoVersions(b *testing.B) { } } -func BenchmarkUnmarshalFileInfoVersions(b *testing.B) { - v := FileInfoVersions{} +func BenchmarkUnmarshalDiskInfoOptions(b *testing.B) { + v := DiskInfoOptions{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -1084,17 +1084,17 @@ func BenchmarkUnmarshalFileInfoVersions(b *testing.B) { } } -func TestEncodeDecodeFileInfoVersions(t *testing.T) { - v := FileInfoVersions{} +func TestEncodeDecodeDiskInfoOptions(t *testing.T) { + v := DiskInfoOptions{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeFileInfoVersions Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeDiskInfoOptions Msgsize() is inaccurate") } - vn := FileInfoVersions{} + vn := DiskInfoOptions{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -1108,8 +1108,8 @@ func TestEncodeDecodeFileInfoVersions(t *testing.T) { } } -func BenchmarkEncodeFileInfoVersions(b *testing.B) { - v := FileInfoVersions{} +func BenchmarkEncodeDiskInfoOptions(b *testing.B) { + v := DiskInfoOptions{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -1122,8 +1122,8 @@ func BenchmarkEncodeFileInfoVersions(b *testing.B) { en.Flush() } -func BenchmarkDecodeFileInfoVersions(b *testing.B) { - v := FileInfoVersions{} +func BenchmarkDecodeDiskInfoOptions(b *testing.B) { + v := DiskInfoOptions{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -1139,8 +1139,8 @@ func BenchmarkDecodeFileInfoVersions(b *testing.B) { } } -func TestMarshalUnmarshalFilesInfo(t *testing.T) { - v := FilesInfo{} +func TestMarshalUnmarshalDiskMetrics(t *testing.T) { + v := DiskMetrics{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -1162,8 +1162,8 @@ func TestMarshalUnmarshalFilesInfo(t *testing.T) { } } -func BenchmarkMarshalMsgFilesInfo(b *testing.B) { - v := FilesInfo{} +func BenchmarkMarshalMsgDiskMetrics(b *testing.B) { + v := DiskMetrics{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -1171,8 +1171,8 @@ func BenchmarkMarshalMsgFilesInfo(b *testing.B) { } } -func BenchmarkAppendMsgFilesInfo(b *testing.B) { - v := FilesInfo{} +func BenchmarkAppendMsgDiskMetrics(b *testing.B) { + v := DiskMetrics{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -1183,8 +1183,8 @@ func BenchmarkAppendMsgFilesInfo(b *testing.B) { } } -func BenchmarkUnmarshalFilesInfo(b *testing.B) { - v := FilesInfo{} +func BenchmarkUnmarshalDiskMetrics(b *testing.B) { + v := DiskMetrics{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -1197,17 +1197,17 @@ func BenchmarkUnmarshalFilesInfo(b *testing.B) { } } -func TestEncodeDecodeFilesInfo(t *testing.T) { - v := FilesInfo{} +func 
TestEncodeDecodeDiskMetrics(t *testing.T) { + v := DiskMetrics{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeFilesInfo Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeDiskMetrics Msgsize() is inaccurate") } - vn := FilesInfo{} + vn := DiskMetrics{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -1221,8 +1221,8 @@ func TestEncodeDecodeFilesInfo(t *testing.T) { } } -func BenchmarkEncodeFilesInfo(b *testing.B) { - v := FilesInfo{} +func BenchmarkEncodeDiskMetrics(b *testing.B) { + v := DiskMetrics{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -1235,8 +1235,8 @@ func BenchmarkEncodeFilesInfo(b *testing.B) { en.Flush() } -func BenchmarkDecodeFilesInfo(b *testing.B) { - v := FilesInfo{} +func BenchmarkDecodeDiskMetrics(b *testing.B) { + v := DiskMetrics{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -1252,8 +1252,8 @@ func BenchmarkDecodeFilesInfo(b *testing.B) { } } -func TestMarshalUnmarshalListDirResult(t *testing.T) { - v := ListDirResult{} +func TestMarshalUnmarshalFileInfo(t *testing.T) { + v := FileInfo{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -1275,8 +1275,8 @@ func TestMarshalUnmarshalListDirResult(t *testing.T) { } } -func BenchmarkMarshalMsgListDirResult(b *testing.B) { - v := ListDirResult{} +func BenchmarkMarshalMsgFileInfo(b *testing.B) { + v := FileInfo{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -1284,8 +1284,347 @@ func BenchmarkMarshalMsgListDirResult(b *testing.B) { } } -func BenchmarkAppendMsgListDirResult(b *testing.B) { - v := ListDirResult{} +func BenchmarkAppendMsgFileInfo(b *testing.B) { + v := FileInfo{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalFileInfo(b *testing.B) { + v := FileInfo{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeFileInfo(t *testing.T) { + v := FileInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeFileInfo Msgsize() is inaccurate") + } + + vn := FileInfo{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeFileInfo(b *testing.B) { + v := FileInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeFileInfo(b *testing.B) { + v := FileInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalFileInfoVersions(t *testing.T) { + v := FileInfoVersions{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + 
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgFileInfoVersions(b *testing.B) { + v := FileInfoVersions{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgFileInfoVersions(b *testing.B) { + v := FileInfoVersions{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalFileInfoVersions(b *testing.B) { + v := FileInfoVersions{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeFileInfoVersions(t *testing.T) { + v := FileInfoVersions{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeFileInfoVersions Msgsize() is inaccurate") + } + + vn := FileInfoVersions{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeFileInfoVersions(b *testing.B) { + v := FileInfoVersions{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeFileInfoVersions(b *testing.B) { + v := FileInfoVersions{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalFilesInfo(t *testing.T) { + v := FilesInfo{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgFilesInfo(b *testing.B) { + v := FilesInfo{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgFilesInfo(b *testing.B) { + v := FilesInfo{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalFilesInfo(b *testing.B) { + v := FilesInfo{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeFilesInfo(t *testing.T) { + v := FilesInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeFilesInfo Msgsize() is inaccurate") + } 
+ + vn := FilesInfo{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeFilesInfo(b *testing.B) { + v := FilesInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeFilesInfo(b *testing.B) { + v := FilesInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalListDirResult(t *testing.T) { + v := ListDirResult{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgListDirResult(b *testing.B) { + v := ListDirResult{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgListDirResult(b *testing.B) { + v := ListDirResult{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -1310,17 +1649,243 @@ func BenchmarkUnmarshalListDirResult(b *testing.B) { } } -func TestEncodeDecodeListDirResult(t *testing.T) { - v := ListDirResult{} +func TestEncodeDecodeListDirResult(t *testing.T) { + v := ListDirResult{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeListDirResult Msgsize() is inaccurate") + } + + vn := ListDirResult{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeListDirResult(b *testing.B) { + v := ListDirResult{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeListDirResult(b *testing.B) { + v := ListDirResult{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalLocalDiskIDs(t *testing.T) { + v := LocalDiskIDs{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgLocalDiskIDs(b *testing.B) { + v := LocalDiskIDs{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; 
i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgLocalDiskIDs(b *testing.B) { + v := LocalDiskIDs{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalLocalDiskIDs(b *testing.B) { + v := LocalDiskIDs{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeLocalDiskIDs(t *testing.T) { + v := LocalDiskIDs{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeLocalDiskIDs Msgsize() is inaccurate") + } + + vn := LocalDiskIDs{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeLocalDiskIDs(b *testing.B) { + v := LocalDiskIDs{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeLocalDiskIDs(b *testing.B) { + v := LocalDiskIDs{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalMetadataHandlerParams(t *testing.T) { + v := MetadataHandlerParams{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgMetadataHandlerParams(b *testing.B) { + v := MetadataHandlerParams{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgMetadataHandlerParams(b *testing.B) { + v := MetadataHandlerParams{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalMetadataHandlerParams(b *testing.B) { + v := MetadataHandlerParams{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeMetadataHandlerParams(t *testing.T) { + v := MetadataHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeListDirResult Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeMetadataHandlerParams Msgsize() is inaccurate") } - vn := ListDirResult{} + vn := MetadataHandlerParams{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -1334,8 +1899,8 @@ func TestEncodeDecodeListDirResult(t *testing.T) { } } -func BenchmarkEncodeListDirResult(b 
*testing.B) { - v := ListDirResult{} +func BenchmarkEncodeMetadataHandlerParams(b *testing.B) { + v := MetadataHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -1348,8 +1913,234 @@ func BenchmarkEncodeListDirResult(b *testing.B) { en.Flush() } -func BenchmarkDecodeListDirResult(b *testing.B) { - v := ListDirResult{} +func BenchmarkDecodeMetadataHandlerParams(b *testing.B) { + v := MetadataHandlerParams{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalRawFileInfo(t *testing.T) { + v := RawFileInfo{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgRawFileInfo(b *testing.B) { + v := RawFileInfo{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgRawFileInfo(b *testing.B) { + v := RawFileInfo{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalRawFileInfo(b *testing.B) { + v := RawFileInfo{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeRawFileInfo(t *testing.T) { + v := RawFileInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeRawFileInfo Msgsize() is inaccurate") + } + + vn := RawFileInfo{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeRawFileInfo(b *testing.B) { + v := RawFileInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeRawFileInfo(b *testing.B) { + v := RawFileInfo{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalReadAllHandlerParams(t *testing.T) { + v := ReadAllHandlerParams{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func 
BenchmarkMarshalMsgReadAllHandlerParams(b *testing.B) { + v := ReadAllHandlerParams{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgReadAllHandlerParams(b *testing.B) { + v := ReadAllHandlerParams{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalReadAllHandlerParams(b *testing.B) { + v := ReadAllHandlerParams{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeReadAllHandlerParams(t *testing.T) { + v := ReadAllHandlerParams{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodeReadAllHandlerParams Msgsize() is inaccurate") + } + + vn := ReadAllHandlerParams{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeReadAllHandlerParams(b *testing.B) { + v := ReadAllHandlerParams{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeReadAllHandlerParams(b *testing.B) { + v := ReadAllHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -1365,8 +2156,8 @@ func BenchmarkDecodeListDirResult(b *testing.B) { } } -func TestMarshalUnmarshalLocalDiskIDs(t *testing.T) { - v := LocalDiskIDs{} +func TestMarshalUnmarshalReadMultipleReq(t *testing.T) { + v := ReadMultipleReq{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -1388,8 +2179,8 @@ func TestMarshalUnmarshalLocalDiskIDs(t *testing.T) { } } -func BenchmarkMarshalMsgLocalDiskIDs(b *testing.B) { - v := LocalDiskIDs{} +func BenchmarkMarshalMsgReadMultipleReq(b *testing.B) { + v := ReadMultipleReq{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -1397,8 +2188,8 @@ func BenchmarkMarshalMsgLocalDiskIDs(b *testing.B) { } } -func BenchmarkAppendMsgLocalDiskIDs(b *testing.B) { - v := LocalDiskIDs{} +func BenchmarkAppendMsgReadMultipleReq(b *testing.B) { + v := ReadMultipleReq{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -1409,8 +2200,8 @@ func BenchmarkAppendMsgLocalDiskIDs(b *testing.B) { } } -func BenchmarkUnmarshalLocalDiskIDs(b *testing.B) { - v := LocalDiskIDs{} +func BenchmarkUnmarshalReadMultipleReq(b *testing.B) { + v := ReadMultipleReq{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -1423,17 +2214,17 @@ func BenchmarkUnmarshalLocalDiskIDs(b *testing.B) { } } -func TestEncodeDecodeLocalDiskIDs(t *testing.T) { - v := LocalDiskIDs{} +func TestEncodeDecodeReadMultipleReq(t *testing.T) { + v := ReadMultipleReq{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeLocalDiskIDs Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeReadMultipleReq Msgsize() is inaccurate") } - vn := LocalDiskIDs{} + vn := ReadMultipleReq{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -1447,8 +2238,8 @@ func 
TestEncodeDecodeLocalDiskIDs(t *testing.T) { } } -func BenchmarkEncodeLocalDiskIDs(b *testing.B) { - v := LocalDiskIDs{} +func BenchmarkEncodeReadMultipleReq(b *testing.B) { + v := ReadMultipleReq{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -1461,8 +2252,8 @@ func BenchmarkEncodeLocalDiskIDs(b *testing.B) { en.Flush() } -func BenchmarkDecodeLocalDiskIDs(b *testing.B) { - v := LocalDiskIDs{} +func BenchmarkDecodeReadMultipleReq(b *testing.B) { + v := ReadMultipleReq{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -1478,8 +2269,8 @@ func BenchmarkDecodeLocalDiskIDs(b *testing.B) { } } -func TestMarshalUnmarshalMetadataHandlerParams(t *testing.T) { - v := MetadataHandlerParams{} +func TestMarshalUnmarshalReadMultipleResp(t *testing.T) { + v := ReadMultipleResp{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -1501,8 +2292,8 @@ func TestMarshalUnmarshalMetadataHandlerParams(t *testing.T) { } } -func BenchmarkMarshalMsgMetadataHandlerParams(b *testing.B) { - v := MetadataHandlerParams{} +func BenchmarkMarshalMsgReadMultipleResp(b *testing.B) { + v := ReadMultipleResp{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -1510,8 +2301,8 @@ func BenchmarkMarshalMsgMetadataHandlerParams(b *testing.B) { } } -func BenchmarkAppendMsgMetadataHandlerParams(b *testing.B) { - v := MetadataHandlerParams{} +func BenchmarkAppendMsgReadMultipleResp(b *testing.B) { + v := ReadMultipleResp{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -1522,8 +2313,8 @@ func BenchmarkAppendMsgMetadataHandlerParams(b *testing.B) { } } -func BenchmarkUnmarshalMetadataHandlerParams(b *testing.B) { - v := MetadataHandlerParams{} +func BenchmarkUnmarshalReadMultipleResp(b *testing.B) { + v := ReadMultipleResp{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -1536,17 +2327,17 @@ func BenchmarkUnmarshalMetadataHandlerParams(b *testing.B) { } } -func TestEncodeDecodeMetadataHandlerParams(t *testing.T) { - v := MetadataHandlerParams{} +func TestEncodeDecodeReadMultipleResp(t *testing.T) { + v := ReadMultipleResp{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeMetadataHandlerParams Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeReadMultipleResp Msgsize() is inaccurate") } - vn := MetadataHandlerParams{} + vn := ReadMultipleResp{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -1560,8 +2351,8 @@ func TestEncodeDecodeMetadataHandlerParams(t *testing.T) { } } -func BenchmarkEncodeMetadataHandlerParams(b *testing.B) { - v := MetadataHandlerParams{} +func BenchmarkEncodeReadMultipleResp(b *testing.B) { + v := ReadMultipleResp{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -1574,8 +2365,8 @@ func BenchmarkEncodeMetadataHandlerParams(b *testing.B) { en.Flush() } -func BenchmarkDecodeMetadataHandlerParams(b *testing.B) { - v := MetadataHandlerParams{} +func BenchmarkDecodeReadMultipleResp(b *testing.B) { + v := ReadMultipleResp{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -1591,8 +2382,8 @@ func BenchmarkDecodeMetadataHandlerParams(b *testing.B) { } } -func TestMarshalUnmarshalRawFileInfo(t *testing.T) { - v := RawFileInfo{} +func TestMarshalUnmarshalReadPartsReq(t *testing.T) { + v := ReadPartsReq{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -1614,8 +2405,8 @@ func TestMarshalUnmarshalRawFileInfo(t 
*testing.T) { } } -func BenchmarkMarshalMsgRawFileInfo(b *testing.B) { - v := RawFileInfo{} +func BenchmarkMarshalMsgReadPartsReq(b *testing.B) { + v := ReadPartsReq{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -1623,8 +2414,8 @@ func BenchmarkMarshalMsgRawFileInfo(b *testing.B) { } } -func BenchmarkAppendMsgRawFileInfo(b *testing.B) { - v := RawFileInfo{} +func BenchmarkAppendMsgReadPartsReq(b *testing.B) { + v := ReadPartsReq{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -1635,8 +2426,8 @@ func BenchmarkAppendMsgRawFileInfo(b *testing.B) { } } -func BenchmarkUnmarshalRawFileInfo(b *testing.B) { - v := RawFileInfo{} +func BenchmarkUnmarshalReadPartsReq(b *testing.B) { + v := ReadPartsReq{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -1649,17 +2440,17 @@ func BenchmarkUnmarshalRawFileInfo(b *testing.B) { } } -func TestEncodeDecodeRawFileInfo(t *testing.T) { - v := RawFileInfo{} +func TestEncodeDecodeReadPartsReq(t *testing.T) { + v := ReadPartsReq{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeRawFileInfo Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeReadPartsReq Msgsize() is inaccurate") } - vn := RawFileInfo{} + vn := ReadPartsReq{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -1673,8 +2464,8 @@ func TestEncodeDecodeRawFileInfo(t *testing.T) { } } -func BenchmarkEncodeRawFileInfo(b *testing.B) { - v := RawFileInfo{} +func BenchmarkEncodeReadPartsReq(b *testing.B) { + v := ReadPartsReq{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -1687,8 +2478,8 @@ func BenchmarkEncodeRawFileInfo(b *testing.B) { en.Flush() } -func BenchmarkDecodeRawFileInfo(b *testing.B) { - v := RawFileInfo{} +func BenchmarkDecodeReadPartsReq(b *testing.B) { + v := ReadPartsReq{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -1704,8 +2495,8 @@ func BenchmarkDecodeRawFileInfo(b *testing.B) { } } -func TestMarshalUnmarshalReadAllHandlerParams(t *testing.T) { - v := ReadAllHandlerParams{} +func TestMarshalUnmarshalReadPartsResp(t *testing.T) { + v := ReadPartsResp{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -1727,8 +2518,8 @@ func TestMarshalUnmarshalReadAllHandlerParams(t *testing.T) { } } -func BenchmarkMarshalMsgReadAllHandlerParams(b *testing.B) { - v := ReadAllHandlerParams{} +func BenchmarkMarshalMsgReadPartsResp(b *testing.B) { + v := ReadPartsResp{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -1736,8 +2527,8 @@ func BenchmarkMarshalMsgReadAllHandlerParams(b *testing.B) { } } -func BenchmarkAppendMsgReadAllHandlerParams(b *testing.B) { - v := ReadAllHandlerParams{} +func BenchmarkAppendMsgReadPartsResp(b *testing.B) { + v := ReadPartsResp{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -1748,8 +2539,8 @@ func BenchmarkAppendMsgReadAllHandlerParams(b *testing.B) { } } -func BenchmarkUnmarshalReadAllHandlerParams(b *testing.B) { - v := ReadAllHandlerParams{} +func BenchmarkUnmarshalReadPartsResp(b *testing.B) { + v := ReadPartsResp{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -1762,17 +2553,17 @@ func BenchmarkUnmarshalReadAllHandlerParams(b *testing.B) { } } -func TestEncodeDecodeReadAllHandlerParams(t *testing.T) { - v := ReadAllHandlerParams{} +func TestEncodeDecodeReadPartsResp(t *testing.T) { + v := ReadPartsResp{} var buf 
bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeReadAllHandlerParams Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeReadPartsResp Msgsize() is inaccurate") } - vn := ReadAllHandlerParams{} + vn := ReadPartsResp{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -1786,8 +2577,8 @@ func TestEncodeDecodeReadAllHandlerParams(t *testing.T) { } } -func BenchmarkEncodeReadAllHandlerParams(b *testing.B) { - v := ReadAllHandlerParams{} +func BenchmarkEncodeReadPartsResp(b *testing.B) { + v := ReadPartsResp{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -1800,8 +2591,8 @@ func BenchmarkEncodeReadAllHandlerParams(b *testing.B) { en.Flush() } -func BenchmarkDecodeReadAllHandlerParams(b *testing.B) { - v := ReadAllHandlerParams{} +func BenchmarkDecodeReadPartsResp(b *testing.B) { + v := ReadPartsResp{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -1817,8 +2608,8 @@ func BenchmarkDecodeReadAllHandlerParams(b *testing.B) { } } -func TestMarshalUnmarshalReadMultipleReq(t *testing.T) { - v := ReadMultipleReq{} +func TestMarshalUnmarshalRenameDataHandlerParams(t *testing.T) { + v := RenameDataHandlerParams{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -1840,8 +2631,8 @@ func TestMarshalUnmarshalReadMultipleReq(t *testing.T) { } } -func BenchmarkMarshalMsgReadMultipleReq(b *testing.B) { - v := ReadMultipleReq{} +func BenchmarkMarshalMsgRenameDataHandlerParams(b *testing.B) { + v := RenameDataHandlerParams{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -1849,8 +2640,8 @@ func BenchmarkMarshalMsgReadMultipleReq(b *testing.B) { } } -func BenchmarkAppendMsgReadMultipleReq(b *testing.B) { - v := ReadMultipleReq{} +func BenchmarkAppendMsgRenameDataHandlerParams(b *testing.B) { + v := RenameDataHandlerParams{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -1861,8 +2652,8 @@ func BenchmarkAppendMsgReadMultipleReq(b *testing.B) { } } -func BenchmarkUnmarshalReadMultipleReq(b *testing.B) { - v := ReadMultipleReq{} +func BenchmarkUnmarshalRenameDataHandlerParams(b *testing.B) { + v := RenameDataHandlerParams{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -1875,17 +2666,17 @@ func BenchmarkUnmarshalReadMultipleReq(b *testing.B) { } } -func TestEncodeDecodeReadMultipleReq(t *testing.T) { - v := ReadMultipleReq{} +func TestEncodeDecodeRenameDataHandlerParams(t *testing.T) { + v := RenameDataHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeReadMultipleReq Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeRenameDataHandlerParams Msgsize() is inaccurate") } - vn := ReadMultipleReq{} + vn := RenameDataHandlerParams{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -1899,8 +2690,8 @@ func TestEncodeDecodeReadMultipleReq(t *testing.T) { } } -func BenchmarkEncodeReadMultipleReq(b *testing.B) { - v := ReadMultipleReq{} +func BenchmarkEncodeRenameDataHandlerParams(b *testing.B) { + v := RenameDataHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -1913,8 +2704,8 @@ func BenchmarkEncodeReadMultipleReq(b *testing.B) { en.Flush() } -func BenchmarkDecodeReadMultipleReq(b *testing.B) { - v := ReadMultipleReq{} +func BenchmarkDecodeRenameDataHandlerParams(b *testing.B) { + v := RenameDataHandlerParams{} var buf bytes.Buffer 
msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -1930,8 +2721,8 @@ func BenchmarkDecodeReadMultipleReq(b *testing.B) { } } -func TestMarshalUnmarshalReadMultipleResp(t *testing.T) { - v := ReadMultipleResp{} +func TestMarshalUnmarshalRenameDataInlineHandlerParams(t *testing.T) { + v := RenameDataInlineHandlerParams{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -1953,8 +2744,8 @@ func TestMarshalUnmarshalReadMultipleResp(t *testing.T) { } } -func BenchmarkMarshalMsgReadMultipleResp(b *testing.B) { - v := ReadMultipleResp{} +func BenchmarkMarshalMsgRenameDataInlineHandlerParams(b *testing.B) { + v := RenameDataInlineHandlerParams{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -1962,8 +2753,8 @@ func BenchmarkMarshalMsgReadMultipleResp(b *testing.B) { } } -func BenchmarkAppendMsgReadMultipleResp(b *testing.B) { - v := ReadMultipleResp{} +func BenchmarkAppendMsgRenameDataInlineHandlerParams(b *testing.B) { + v := RenameDataInlineHandlerParams{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -1974,8 +2765,8 @@ func BenchmarkAppendMsgReadMultipleResp(b *testing.B) { } } -func BenchmarkUnmarshalReadMultipleResp(b *testing.B) { - v := ReadMultipleResp{} +func BenchmarkUnmarshalRenameDataInlineHandlerParams(b *testing.B) { + v := RenameDataInlineHandlerParams{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -1988,17 +2779,17 @@ func BenchmarkUnmarshalReadMultipleResp(b *testing.B) { } } -func TestEncodeDecodeReadMultipleResp(t *testing.T) { - v := ReadMultipleResp{} +func TestEncodeDecodeRenameDataInlineHandlerParams(t *testing.T) { + v := RenameDataInlineHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeReadMultipleResp Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeRenameDataInlineHandlerParams Msgsize() is inaccurate") } - vn := ReadMultipleResp{} + vn := RenameDataInlineHandlerParams{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -2012,8 +2803,8 @@ func TestEncodeDecodeReadMultipleResp(t *testing.T) { } } -func BenchmarkEncodeReadMultipleResp(b *testing.B) { - v := ReadMultipleResp{} +func BenchmarkEncodeRenameDataInlineHandlerParams(b *testing.B) { + v := RenameDataInlineHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -2026,8 +2817,8 @@ func BenchmarkEncodeReadMultipleResp(b *testing.B) { en.Flush() } -func BenchmarkDecodeReadMultipleResp(b *testing.B) { - v := ReadMultipleResp{} +func BenchmarkDecodeRenameDataInlineHandlerParams(b *testing.B) { + v := RenameDataInlineHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -2043,8 +2834,8 @@ func BenchmarkDecodeReadMultipleResp(b *testing.B) { } } -func TestMarshalUnmarshalRenameDataHandlerParams(t *testing.T) { - v := RenameDataHandlerParams{} +func TestMarshalUnmarshalRenameDataResp(t *testing.T) { + v := RenameDataResp{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -2066,8 +2857,8 @@ func TestMarshalUnmarshalRenameDataHandlerParams(t *testing.T) { } } -func BenchmarkMarshalMsgRenameDataHandlerParams(b *testing.B) { - v := RenameDataHandlerParams{} +func BenchmarkMarshalMsgRenameDataResp(b *testing.B) { + v := RenameDataResp{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -2075,8 +2866,8 @@ func BenchmarkMarshalMsgRenameDataHandlerParams(b *testing.B) { } } -func BenchmarkAppendMsgRenameDataHandlerParams(b 
*testing.B) { - v := RenameDataHandlerParams{} +func BenchmarkAppendMsgRenameDataResp(b *testing.B) { + v := RenameDataResp{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -2087,8 +2878,8 @@ func BenchmarkAppendMsgRenameDataHandlerParams(b *testing.B) { } } -func BenchmarkUnmarshalRenameDataHandlerParams(b *testing.B) { - v := RenameDataHandlerParams{} +func BenchmarkUnmarshalRenameDataResp(b *testing.B) { + v := RenameDataResp{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -2101,17 +2892,17 @@ func BenchmarkUnmarshalRenameDataHandlerParams(b *testing.B) { } } -func TestEncodeDecodeRenameDataHandlerParams(t *testing.T) { - v := RenameDataHandlerParams{} +func TestEncodeDecodeRenameDataResp(t *testing.T) { + v := RenameDataResp{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeRenameDataHandlerParams Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeRenameDataResp Msgsize() is inaccurate") } - vn := RenameDataHandlerParams{} + vn := RenameDataResp{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -2125,8 +2916,8 @@ func TestEncodeDecodeRenameDataHandlerParams(t *testing.T) { } } -func BenchmarkEncodeRenameDataHandlerParams(b *testing.B) { - v := RenameDataHandlerParams{} +func BenchmarkEncodeRenameDataResp(b *testing.B) { + v := RenameDataResp{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -2139,8 +2930,8 @@ func BenchmarkEncodeRenameDataHandlerParams(b *testing.B) { en.Flush() } -func BenchmarkDecodeRenameDataHandlerParams(b *testing.B) { - v := RenameDataHandlerParams{} +func BenchmarkDecodeRenameDataResp(b *testing.B) { + v := RenameDataResp{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -2156,8 +2947,8 @@ func BenchmarkDecodeRenameDataHandlerParams(b *testing.B) { } } -func TestMarshalUnmarshalRenameDataResp(t *testing.T) { - v := RenameDataResp{} +func TestMarshalUnmarshalRenameFileHandlerParams(t *testing.T) { + v := RenameFileHandlerParams{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -2179,8 +2970,8 @@ func TestMarshalUnmarshalRenameDataResp(t *testing.T) { } } -func BenchmarkMarshalMsgRenameDataResp(b *testing.B) { - v := RenameDataResp{} +func BenchmarkMarshalMsgRenameFileHandlerParams(b *testing.B) { + v := RenameFileHandlerParams{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -2188,8 +2979,8 @@ func BenchmarkMarshalMsgRenameDataResp(b *testing.B) { } } -func BenchmarkAppendMsgRenameDataResp(b *testing.B) { - v := RenameDataResp{} +func BenchmarkAppendMsgRenameFileHandlerParams(b *testing.B) { + v := RenameFileHandlerParams{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -2200,8 +2991,8 @@ func BenchmarkAppendMsgRenameDataResp(b *testing.B) { } } -func BenchmarkUnmarshalRenameDataResp(b *testing.B) { - v := RenameDataResp{} +func BenchmarkUnmarshalRenameFileHandlerParams(b *testing.B) { + v := RenameFileHandlerParams{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -2214,17 +3005,17 @@ func BenchmarkUnmarshalRenameDataResp(b *testing.B) { } } -func TestEncodeDecodeRenameDataResp(t *testing.T) { - v := RenameDataResp{} +func TestEncodeDecodeRenameFileHandlerParams(t *testing.T) { + v := RenameFileHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeRenameDataResp 
Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeRenameFileHandlerParams Msgsize() is inaccurate") } - vn := RenameDataResp{} + vn := RenameFileHandlerParams{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -2238,8 +3029,8 @@ func TestEncodeDecodeRenameDataResp(t *testing.T) { } } -func BenchmarkEncodeRenameDataResp(b *testing.B) { - v := RenameDataResp{} +func BenchmarkEncodeRenameFileHandlerParams(b *testing.B) { + v := RenameFileHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -2252,8 +3043,8 @@ func BenchmarkEncodeRenameDataResp(b *testing.B) { en.Flush() } -func BenchmarkDecodeRenameDataResp(b *testing.B) { - v := RenameDataResp{} +func BenchmarkDecodeRenameFileHandlerParams(b *testing.B) { + v := RenameFileHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -2269,8 +3060,8 @@ func BenchmarkDecodeRenameDataResp(b *testing.B) { } } -func TestMarshalUnmarshalRenameFileHandlerParams(t *testing.T) { - v := RenameFileHandlerParams{} +func TestMarshalUnmarshalRenameOptions(t *testing.T) { + v := RenameOptions{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -2292,8 +3083,8 @@ func TestMarshalUnmarshalRenameFileHandlerParams(t *testing.T) { } } -func BenchmarkMarshalMsgRenameFileHandlerParams(b *testing.B) { - v := RenameFileHandlerParams{} +func BenchmarkMarshalMsgRenameOptions(b *testing.B) { + v := RenameOptions{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -2301,8 +3092,8 @@ func BenchmarkMarshalMsgRenameFileHandlerParams(b *testing.B) { } } -func BenchmarkAppendMsgRenameFileHandlerParams(b *testing.B) { - v := RenameFileHandlerParams{} +func BenchmarkAppendMsgRenameOptions(b *testing.B) { + v := RenameOptions{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -2313,8 +3104,8 @@ func BenchmarkAppendMsgRenameFileHandlerParams(b *testing.B) { } } -func BenchmarkUnmarshalRenameFileHandlerParams(b *testing.B) { - v := RenameFileHandlerParams{} +func BenchmarkUnmarshalRenameOptions(b *testing.B) { + v := RenameOptions{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -2327,17 +3118,17 @@ func BenchmarkUnmarshalRenameFileHandlerParams(b *testing.B) { } } -func TestEncodeDecodeRenameFileHandlerParams(t *testing.T) { - v := RenameFileHandlerParams{} +func TestEncodeDecodeRenameOptions(t *testing.T) { + v := RenameOptions{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeRenameFileHandlerParams Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeRenameOptions Msgsize() is inaccurate") } - vn := RenameFileHandlerParams{} + vn := RenameOptions{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -2351,8 +3142,8 @@ func TestEncodeDecodeRenameFileHandlerParams(t *testing.T) { } } -func BenchmarkEncodeRenameFileHandlerParams(b *testing.B) { - v := RenameFileHandlerParams{} +func BenchmarkEncodeRenameOptions(b *testing.B) { + v := RenameOptions{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -2365,8 +3156,8 @@ func BenchmarkEncodeRenameFileHandlerParams(b *testing.B) { en.Flush() } -func BenchmarkDecodeRenameFileHandlerParams(b *testing.B) { - v := RenameFileHandlerParams{} +func BenchmarkDecodeRenameOptions(b *testing.B) { + v := RenameOptions{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -2382,8 +3173,8 @@ func 
BenchmarkDecodeRenameFileHandlerParams(b *testing.B) { } } -func TestMarshalUnmarshalRenameOptions(t *testing.T) { - v := RenameOptions{} +func TestMarshalUnmarshalRenamePartHandlerParams(t *testing.T) { + v := RenamePartHandlerParams{} bts, err := v.MarshalMsg(nil) if err != nil { t.Fatal(err) @@ -2405,8 +3196,8 @@ func TestMarshalUnmarshalRenameOptions(t *testing.T) { } } -func BenchmarkMarshalMsgRenameOptions(b *testing.B) { - v := RenameOptions{} +func BenchmarkMarshalMsgRenamePartHandlerParams(b *testing.B) { + v := RenamePartHandlerParams{} b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -2414,8 +3205,8 @@ func BenchmarkMarshalMsgRenameOptions(b *testing.B) { } } -func BenchmarkAppendMsgRenameOptions(b *testing.B) { - v := RenameOptions{} +func BenchmarkAppendMsgRenamePartHandlerParams(b *testing.B) { + v := RenamePartHandlerParams{} bts := make([]byte, 0, v.Msgsize()) bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) @@ -2426,8 +3217,8 @@ func BenchmarkAppendMsgRenameOptions(b *testing.B) { } } -func BenchmarkUnmarshalRenameOptions(b *testing.B) { - v := RenameOptions{} +func BenchmarkUnmarshalRenamePartHandlerParams(b *testing.B) { + v := RenamePartHandlerParams{} bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) @@ -2440,17 +3231,17 @@ func BenchmarkUnmarshalRenameOptions(b *testing.B) { } } -func TestEncodeDecodeRenameOptions(t *testing.T) { - v := RenameOptions{} +func TestEncodeDecodeRenamePartHandlerParams(t *testing.T) { + v := RenamePartHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) m := v.Msgsize() if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeRenameOptions Msgsize() is inaccurate") + t.Log("WARNING: TestEncodeDecodeRenamePartHandlerParams Msgsize() is inaccurate") } - vn := RenameOptions{} + vn := RenamePartHandlerParams{} err := msgp.Decode(&buf, &vn) if err != nil { t.Error(err) @@ -2464,8 +3255,8 @@ func TestEncodeDecodeRenameOptions(t *testing.T) { } } -func BenchmarkEncodeRenameOptions(b *testing.B) { - v := RenameOptions{} +func BenchmarkEncodeRenamePartHandlerParams(b *testing.B) { + v := RenamePartHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) @@ -2478,8 +3269,8 @@ func BenchmarkEncodeRenameOptions(b *testing.B) { en.Flush() } -func BenchmarkDecodeRenameOptions(b *testing.B) { - v := RenameOptions{} +func BenchmarkDecodeRenamePartHandlerParams(b *testing.B) { + v := RenamePartHandlerParams{} var buf bytes.Buffer msgp.Encode(&buf, &v) b.SetBytes(int64(buf.Len())) diff --git a/cmd/storage-datatypes_test.go b/cmd/storage-datatypes_test.go index 91c3547c0c744..60470a221227e 100644 --- a/cmd/storage-datatypes_test.go +++ b/cmd/storage-datatypes_test.go @@ -39,8 +39,8 @@ func BenchmarkDecodeVolInfoMsgp(b *testing.B) { b.Log("Size:", buf.Len(), "bytes") b.SetBytes(1) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { err := v.DecodeMsg(dc) if err != nil { b.Fatal(err) @@ -68,8 +68,8 @@ func BenchmarkDecodeDiskInfoMsgp(b *testing.B) { b.Log("Size:", buf.Len(), "bytes") b.SetBytes(1) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { err := v.DecodeMsg(dc) if err != nil { b.Fatal(err) @@ -97,8 +97,8 @@ func BenchmarkDecodeDiskInfoGOB(b *testing.B) { b.Log("Size:", buf.Len(), "bytes") b.SetBytes(1) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { dec := gob.NewDecoder(bytes.NewBuffer(encoded)) err := dec.Decode(&v) if err != nil { @@ -123,8 +123,8 @@ func 
BenchmarkEncodeDiskInfoMsgp(b *testing.B) { b.SetBytes(1) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { err := msgp.Encode(io.Discard, &v) if err != nil { b.Fatal(err) @@ -149,8 +149,8 @@ func BenchmarkEncodeDiskInfoGOB(b *testing.B) { enc := gob.NewEncoder(io.Discard) b.SetBytes(1) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { err := enc.Encode(&v) if err != nil { b.Fatal(err) @@ -167,8 +167,8 @@ func BenchmarkDecodeFileInfoMsgp(b *testing.B) { b.Log("Size:", buf.Len(), "bytes") b.SetBytes(1) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { err := v.DecodeMsg(dc) if err != nil { b.Fatal(err) @@ -184,8 +184,8 @@ func BenchmarkDecodeFileInfoGOB(b *testing.B) { b.Log("Size:", buf.Len(), "bytes") b.SetBytes(1) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { dec := gob.NewDecoder(bytes.NewBuffer(encoded)) err := dec.Decode(&v) if err != nil { @@ -198,8 +198,8 @@ func BenchmarkEncodeFileInfoMsgp(b *testing.B) { v := FileInfo{Volume: "testbucket", Name: "src/compress/zlib/reader_test.go", VersionID: "", IsLatest: true, Deleted: false, DataDir: "5e0153cc-621a-4267-8cb6-4919140d53b3", XLV1: false, ModTime: UTCNow(), Size: 3430, Mode: 0x0, Metadata: map[string]string{"X-Minio-Internal-Server-Side-Encryption-Iv": "jIJPsrkkVYYMvc7edBrNl+7zcM7+ZwXqMb/YAjBO/ck=", "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id": "my-minio-key", "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key": "IAAfAP2p7ZLv3UpLwBnsKkF2mtWba0qoY42tymK0szRgGvAxBNcXyHXYooe9dQpeeEJWgKUa/8R61oCy1mFwIg==", "X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key": "IAAfAPFYRDkHVirJBJxBixNj3PLWt78dFuUTyTLIdLG820J7XqLPBO4gpEEEWw/DoTsJIb+apnaem+rKtQ1h3Q==", "X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256", "content-type": "application/octet-stream", "etag": "20000f00e2c3709dc94905c6ce31e1cadbd1c064e14acdcd44cf0ac2db777eeedd88d639fcd64de16851ade8b21a9a1a"}, Parts: []ObjectPartInfo{{ETag: "", Number: 1, Size: 3430, ActualSize: 3398}}, Erasure: ErasureInfo{Algorithm: "reedsolomon", DataBlocks: 2, ParityBlocks: 2, BlockSize: 10485760, Index: 3, Distribution: []int{3, 4, 1, 2}, Checksums: []ChecksumInfo{{PartNumber: 1, Algorithm: 0x3, Hash: []uint8{}}}}} b.SetBytes(1) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { err := msgp.Encode(io.Discard, &v) if err != nil { b.Fatal(err) @@ -212,8 +212,8 @@ func BenchmarkEncodeFileInfoGOB(b *testing.B) { enc := gob.NewEncoder(io.Discard) b.SetBytes(1) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { err := enc.Encode(&v) if err != nil { b.Fatal(err) diff --git a/cmd/storage-errors.go b/cmd/storage-errors.go index 5aff111f0b322..b39d7c8ae0567 100644 --- a/cmd/storage-errors.go +++ b/cmd/storage-errors.go @@ -20,8 +20,6 @@ package cmd import ( "context" "errors" - - "github.com/minio/minio/internal/logger" ) // errMaxVersionsExceeded return error beyond 10000 (default) versions per object @@ -176,7 +174,7 @@ func osErrToFileErr(err error) error { return errFaultyDisk } if isSysErrInvalidArg(err) { - logger.LogIf(context.Background(), err) + storageLogIf(context.Background(), err) // For some odd calls with O_DIRECT reads // filesystems can return EINVAL, handle // these as FileNotFound instead. 
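// A minimal, self-contained sketch (not taken from this patch) of the benchmark-loop
// change visible in the cmd/storage-datatypes_test.go hunks above: the manual
// b.ResetTimer() plus "for i := 0; i < b.N; i++" pattern is replaced by testing.B.Loop,
// which is available from Go 1.24 onward. b.Loop() times only the loop body (setup
// before the loop is excluded automatically) and keeps the body from being optimized
// away, so the explicit timer reset is no longer needed.

package bench

import (
	"bytes"
	"encoding/gob"
	"testing"
)

type payload struct{ Name string }

// Classic style: iterate b.N times and reset the timer after setup.
func BenchmarkEncodeClassic(b *testing.B) {
	v := payload{Name: "testbucket"}
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		var buf bytes.Buffer
		if err := gob.NewEncoder(&buf).Encode(&v); err != nil {
			b.Fatal(err)
		}
	}
}

// Go 1.24 style: b.Loop() starts timing at the first iteration and stops after the
// last, so setup above the loop is excluded without an explicit ResetTimer call.
func BenchmarkEncodeLoop(b *testing.B) {
	v := payload{Name: "testbucket"}
	b.ReportAllocs()
	for b.Loop() {
		var buf bytes.Buffer
		if err := gob.NewEncoder(&buf).Encode(&v); err != nil {
			b.Fatal(err)
		}
	}
}

// Both run under "go test -bench=." and report comparable numbers; only the loop
// bookkeeping differs.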
diff --git a/cmd/storage-interface.go b/cmd/storage-interface.go index 504ef862d0f71..3c26504875e9d 100644 --- a/cmd/storage-interface.go +++ b/cmd/storage-interface.go @@ -23,7 +23,6 @@ import ( "time" "github.com/minio/madmin-go/v3" - xioutil "github.com/minio/minio/internal/ioutil" ) // StorageAPI interface. @@ -82,11 +81,12 @@ type StorageAPI interface { // Metadata operations DeleteVersion(ctx context.Context, volume, path string, fi FileInfo, forceDelMarker bool, opts DeleteOptions) error DeleteVersions(ctx context.Context, volume string, versions []FileInfoVersions, opts DeleteOptions) []error + DeleteBulk(ctx context.Context, volume string, paths ...string) error WriteMetadata(ctx context.Context, origvolume, volume, path string, fi FileInfo) error UpdateMetadata(ctx context.Context, volume, path string, fi FileInfo, opts UpdateMetadataOpts) error ReadVersion(ctx context.Context, origvolume, volume, path, versionID string, opts ReadOptions) (FileInfo, error) ReadXL(ctx context.Context, volume, path string, readData bool) (RawFileInfo, error) - RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string, opts RenameOptions) (uint64, error) + RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string, opts RenameOptions) (RenameDataResp, error) // File operations. ListDir(ctx context.Context, origvolume, volume, dirPath string, count int) ([]string, error) @@ -95,10 +95,12 @@ type StorageAPI interface { CreateFile(ctx context.Context, origvolume, olume, path string, size int64, reader io.Reader) error ReadFileStream(ctx context.Context, volume, path string, offset, length int64) (io.ReadCloser, error) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) error - CheckParts(ctx context.Context, volume string, path string, fi FileInfo) error + RenamePart(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string, meta []byte, skipParent string) error + CheckParts(ctx context.Context, volume string, path string, fi FileInfo) (*CheckPartsResp, error) Delete(ctx context.Context, volume string, path string, opts DeleteOptions) (err error) - VerifyFile(ctx context.Context, volume, path string, fi FileInfo) error + VerifyFile(ctx context.Context, volume, path string, fi FileInfo) (*CheckPartsResp, error) StatInfoFile(ctx context.Context, volume, path string, glob bool) (stat []StatInfo, err error) + ReadParts(ctx context.Context, bucket string, partMetaPaths ...string) ([]*ObjectPartInfo, error) ReadMultiple(ctx context.Context, req ReadMultipleReq, resp chan<- ReadMultipleResp) error CleanAbandonedData(ctx context.Context, volume string, path string) error @@ -109,182 +111,4 @@ type StorageAPI interface { // Read all. ReadAll(ctx context.Context, volume string, path string) (buf []byte, err error) GetDiskLoc() (poolIdx, setIdx, diskIdx int) // Retrieve location indexes. - SetDiskLoc(poolIdx, setIdx, diskIdx int) // Set location indexes. 
- SetFormatData(b []byte) // Set formatData cached value -} - -type unrecognizedDisk struct { - storage StorageAPI -} - -func (p *unrecognizedDisk) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writer) (err error) { - return errDiskNotFound -} - -func (p *unrecognizedDisk) String() string { - return p.storage.String() -} - -func (p *unrecognizedDisk) IsOnline() bool { - return false -} - -func (p *unrecognizedDisk) LastConn() time.Time { - return p.storage.LastConn() -} - -func (p *unrecognizedDisk) IsLocal() bool { - return p.storage.IsLocal() -} - -func (p *unrecognizedDisk) Endpoint() Endpoint { - return p.storage.Endpoint() -} - -func (p *unrecognizedDisk) Hostname() string { - return p.storage.Hostname() -} - -func (p *unrecognizedDisk) Healing() *healingTracker { - return nil -} - -func (p *unrecognizedDisk) NSScanner(ctx context.Context, cache dataUsageCache, updates chan<- dataUsageEntry, scanMode madmin.HealScanMode, shouldSleep func() bool) (dataUsageCache, error) { - return dataUsageCache{}, errDiskNotFound -} - -func (p *unrecognizedDisk) SetFormatData(b []byte) { -} - -func (p *unrecognizedDisk) GetDiskLoc() (poolIdx, setIdx, diskIdx int) { - return -1, -1, -1 -} - -func (p *unrecognizedDisk) SetDiskLoc(poolIdx, setIdx, diskIdx int) { -} - -func (p *unrecognizedDisk) Close() error { - return p.storage.Close() -} - -func (p *unrecognizedDisk) GetDiskID() (string, error) { - return "", errDiskNotFound -} - -func (p *unrecognizedDisk) SetDiskID(id string) { -} - -func (p *unrecognizedDisk) DiskInfo(ctx context.Context, _ DiskInfoOptions) (info DiskInfo, err error) { - return info, errDiskNotFound -} - -func (p *unrecognizedDisk) MakeVolBulk(ctx context.Context, volumes ...string) (err error) { - return errDiskNotFound -} - -func (p *unrecognizedDisk) MakeVol(ctx context.Context, volume string) (err error) { - return errDiskNotFound -} - -func (p *unrecognizedDisk) ListVols(ctx context.Context) ([]VolInfo, error) { - return nil, errDiskNotFound -} - -func (p *unrecognizedDisk) StatVol(ctx context.Context, volume string) (vol VolInfo, err error) { - return vol, errDiskNotFound -} - -func (p *unrecognizedDisk) DeleteVol(ctx context.Context, volume string, forceDelete bool) (err error) { - return errDiskNotFound -} - -func (p *unrecognizedDisk) ListDir(ctx context.Context, origvolume, volume, dirPath string, count int) ([]string, error) { - return nil, errDiskNotFound -} - -func (p *unrecognizedDisk) ReadFile(ctx context.Context, volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) { - return 0, errDiskNotFound -} - -func (p *unrecognizedDisk) AppendFile(ctx context.Context, volume string, path string, buf []byte) (err error) { - return errDiskNotFound -} - -func (p *unrecognizedDisk) CreateFile(ctx context.Context, origvolume, volume, path string, size int64, reader io.Reader) error { - return errDiskNotFound -} - -func (p *unrecognizedDisk) ReadFileStream(ctx context.Context, volume, path string, offset, length int64) (io.ReadCloser, error) { - return nil, errDiskNotFound -} - -func (p *unrecognizedDisk) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) error { - return errDiskNotFound -} - -func (p *unrecognizedDisk) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string, opts RenameOptions) (uint64, error) { - return 0, errDiskNotFound -} - -func (p *unrecognizedDisk) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) (err error) 
{ - return errDiskNotFound -} - -func (p *unrecognizedDisk) Delete(ctx context.Context, volume string, path string, opts DeleteOptions) (err error) { - return errDiskNotFound -} - -// DeleteVersions deletes slice of versions, it can be same object or multiple objects. -func (p *unrecognizedDisk) DeleteVersions(ctx context.Context, volume string, versions []FileInfoVersions, opts DeleteOptions) (errs []error) { - errs = make([]error, len(versions)) - - for i := range errs { - errs[i] = errDiskNotFound - } - return errs -} - -func (p *unrecognizedDisk) VerifyFile(ctx context.Context, volume, path string, fi FileInfo) error { - return errDiskNotFound -} - -func (p *unrecognizedDisk) WriteAll(ctx context.Context, volume string, path string, b []byte) (err error) { - return errDiskNotFound -} - -func (p *unrecognizedDisk) DeleteVersion(ctx context.Context, volume, path string, fi FileInfo, forceDelMarker bool, opts DeleteOptions) (err error) { - return errDiskNotFound -} - -func (p *unrecognizedDisk) UpdateMetadata(ctx context.Context, volume, path string, fi FileInfo, opts UpdateMetadataOpts) (err error) { - return errDiskNotFound -} - -func (p *unrecognizedDisk) WriteMetadata(ctx context.Context, origvolume, volume, path string, fi FileInfo) (err error) { - return errDiskNotFound -} - -func (p *unrecognizedDisk) ReadVersion(ctx context.Context, origvolume, volume, path, versionID string, opts ReadOptions) (fi FileInfo, err error) { - return fi, errDiskNotFound -} - -func (p *unrecognizedDisk) ReadXL(ctx context.Context, volume, path string, readData bool) (rf RawFileInfo, err error) { - return rf, errDiskNotFound -} - -func (p *unrecognizedDisk) ReadAll(ctx context.Context, volume string, path string) (buf []byte, err error) { - return nil, errDiskNotFound -} - -func (p *unrecognizedDisk) StatInfoFile(ctx context.Context, volume, path string, glob bool) (stat []StatInfo, err error) { - return nil, errDiskNotFound -} - -func (p *unrecognizedDisk) ReadMultiple(ctx context.Context, req ReadMultipleReq, resp chan<- ReadMultipleResp) error { - xioutil.SafeClose(resp) - return errDiskNotFound -} - -func (p *unrecognizedDisk) CleanAbandonedData(ctx context.Context, volume string, path string) error { - return errDiskNotFound } diff --git a/cmd/storage-rest-client.go b/cmd/storage-rest-client.go index 44c8b599375df..d3c17a17fde06 100644 --- a/cmd/storage-rest-client.go +++ b/cmd/storage-rest-client.go @@ -20,7 +20,6 @@ package cmd import ( "bytes" "context" - "encoding/gob" "encoding/hex" "errors" "fmt" @@ -30,17 +29,17 @@ import ( "path" "strconv" "strings" - "sync" + "sync/atomic" "time" "github.com/minio/madmin-go/v3" + "github.com/minio/minio/internal/bpool" "github.com/minio/minio/internal/cachevalue" "github.com/minio/minio/internal/grid" xhttp "github.com/minio/minio/internal/http" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/rest" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" xbufio "github.com/philhofer/fwd" "github.com/tinylib/msgp/msgp" ) @@ -54,6 +53,9 @@ func isNetworkError(err error) bool { if down := xnet.IsNetworkOrHostDown(nerr.Err, false); down { return true } + if errors.Is(nerr.Err, rest.ErrClientClosed) { + return true + } } if errors.Is(err, grid.ErrDisconnected) { return true @@ -61,7 +63,7 @@ func isNetworkError(err error) bool { // More corner cases suitable for storage REST API switch { // A peer node can be in shut down phase and proactively - // return 503 server 
closed error,consider it as an offline node + // return 503 server closed error, consider it as an offline node case strings.Contains(err.Error(), http.ErrServerClosed.Error()): return true // Corner case, the server closed the connection with a keep-alive timeout @@ -86,6 +88,8 @@ func toStorageErr(err error) error { } switch err.Error() { + case errUploadIDNotFound.Error(): + return errUploadIDNotFound case errFaultyDisk.Error(): return errFaultyDisk case errFaultyRemoteDisk.Error(): @@ -156,46 +160,45 @@ func toStorageErr(err error) error { // Abstracts a remote disk. type storageRESTClient struct { - endpoint Endpoint - restClient *rest.Client - gridConn *grid.Subroute - diskID string - formatData []byte - formatMutex sync.RWMutex + endpoint Endpoint + restClient *rest.Client + gridConn *grid.Subroute + diskID atomic.Pointer[string] diskInfoCache *cachevalue.Cache[DiskInfo] - - // Indexes, will be -1 until assigned a set. - poolIndex, setIndex, diskIndex int } // Retrieve location indexes. func (client *storageRESTClient) GetDiskLoc() (poolIdx, setIdx, diskIdx int) { - return client.poolIndex, client.setIndex, client.diskIndex + return client.endpoint.PoolIdx, client.endpoint.SetIdx, client.endpoint.DiskIdx } -// Set location indexes. -func (client *storageRESTClient) SetDiskLoc(poolIdx, setIdx, diskIdx int) { - client.poolIndex = poolIdx - client.setIndex = setIdx - client.diskIndex = diskIdx +// Wrapper to restClient.CallWithMethod to handle network errors, in case of network error the connection is disconnected +// and a healthcheck routine gets invoked that would reconnect. +func (client *storageRESTClient) callGet(ctx context.Context, rpcMethod string, values url.Values, body io.Reader, length int64) (io.ReadCloser, error) { + if values == nil { + values = make(url.Values) + } + values.Set(storageRESTDiskID, *client.diskID.Load()) + respBody, err := client.restClient.CallWithHTTPMethod(ctx, http.MethodGet, rpcMethod, values, body, length) + if err != nil { + return nil, toStorageErr(err) + } + return respBody, nil } -// Wrapper to restClient.Call to handle network errors, in case of network error the connection is makred disconnected -// permanently. The only way to restore the storage connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints() -// after verifying format.json -func (client *storageRESTClient) call(ctx context.Context, method string, values url.Values, body io.Reader, length int64) (io.ReadCloser, error) { +// Wrapper to restClient.Call to handle network errors, in case of network error the connection is disconnected +// and a healthcheck routine gets invoked that would reconnect. +func (client *storageRESTClient) call(ctx context.Context, rpcMethod string, values url.Values, body io.Reader, length int64) (io.ReadCloser, error) { if values == nil { values = make(url.Values) } - values.Set(storageRESTDiskID, client.diskID) - respBody, err := client.restClient.Call(ctx, method, values, body, length) - if err == nil { - return respBody, nil + values.Set(storageRESTDiskID, *client.diskID.Load()) + respBody, err := client.restClient.CallWithHTTPMethod(ctx, http.MethodPost, rpcMethod, values, body, length) + if err != nil { + return nil, toStorageErr(err) } - - err = toStorageErr(err) - return nil, err + return respBody, nil } // Stringer provides a canonicalized representation of network device. 
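// A minimal sketch (not taken from this patch) of the diskID pattern the
// cmd/storage-rest-client.go hunks above switch to: the plain "diskID string" field
// is replaced by sync/atomic's generic atomic.Pointer[string] (available since
// Go 1.19), read with Load() on every request wrapper and published with Store()
// from SetDiskID, so concurrent callers see the current ID without extra locking.
// The ID value below is hypothetical, purely for illustration.

package main

import (
	"fmt"
	"sync/atomic"
)

type diskClient struct {
	diskID atomic.Pointer[string]
}

// SetDiskID publishes a new disk ID; readers in flight are never blocked.
func (c *diskClient) SetDiskID(id string) {
	c.diskID.Store(&id)
}

// DiskID returns the currently published ID, or "" if none has been set yet.
func (c *diskClient) DiskID() string {
	if p := c.diskID.Load(); p != nil {
		return *p
	}
	return ""
}

func main() {
	var c diskClient
	c.SetDiskID("0f7c9c6e-hypothetical-disk-id")
	fmt.Println(c.DiskID())
}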
@@ -203,8 +206,13 @@ func (client *storageRESTClient) String() string { return client.endpoint.String() } -// IsOnline - returns whether RPC client failed to connect or not. +// IsOnline - returns whether client failed to connect or not. func (client *storageRESTClient) IsOnline() bool { + return client.restClient.IsOnline() || client.IsOnlineWS() +} + +// IsOnlineWS - returns whether websocket client failed to connect or not. +func (client *storageRESTClient) IsOnlineWS() bool { return client.gridConn.State() == grid.StateConnected } @@ -235,7 +243,7 @@ func (client *storageRESTClient) NSScanner(ctx context.Context, cache dataUsageC defer xioutil.SafeClose(updates) st, err := storageNSScannerRPC.Call(ctx, client.gridConn, &nsScannerOptions{ - DiskID: client.diskID, + DiskID: *client.diskID.Load(), ScanMode: int(scanMode), Cache: &cache, }) @@ -265,16 +273,8 @@ func (client *storageRESTClient) NSScanner(ctx context.Context, cache dataUsageC return *final, nil } -func (client *storageRESTClient) SetFormatData(b []byte) { - if client.IsOnline() { - client.formatMutex.Lock() - client.formatData = b - client.formatMutex.Unlock() - } -} - func (client *storageRESTClient) GetDiskID() (string, error) { - if !client.IsOnline() { + if !client.IsOnlineWS() { // make sure to check if the disk is offline, since the underlying // value is cached we should attempt to invalidate it if such calls // were attempted. This can lead to false success under certain conditions @@ -287,15 +287,15 @@ func (client *storageRESTClient) GetDiskID() (string, error) { // a cached value - caller should make sure to use this // function on a fresh disk or make sure to look at the error // from a different networked call to validate the GetDiskID() - return client.diskID, nil + return *client.diskID.Load(), nil } func (client *storageRESTClient) SetDiskID(id string) { - client.diskID = id + client.diskID.Store(&id) } func (client *storageRESTClient) DiskInfo(ctx context.Context, opts DiskInfoOptions) (info DiskInfo, err error) { - if !client.IsOnline() { + if !client.IsOnlineWS() { // make sure to check if the disk is offline, since the underlying // value is cached we should attempt to invalidate it if such calls // were attempted. This can lead to false success under certain conditions @@ -309,7 +309,7 @@ func (client *storageRESTClient) DiskInfo(ctx context.Context, opts DiskInfoOpti ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() - opts.DiskID = client.diskID + opts.DiskID = *client.diskID.Load() infop, err := storageDiskInfoRPC.Call(ctx, client.gridConn, &opts) if err != nil { @@ -322,13 +322,12 @@ func (client *storageRESTClient) DiskInfo(ctx context.Context, opts DiskInfoOpti return info, nil } // In all other cases cache the value upto 1sec. 
- client.diskInfoCache.InitOnce(time.Second, - cachevalue.Opts{CacheError: true}, - func() (info DiskInfo, err error) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + client.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{}, + func(ctx context.Context) (info DiskInfo, err error) { + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() - nopts := DiskInfoOptions{DiskID: client.diskID, Metrics: true} + nopts := DiskInfoOptions{DiskID: *client.diskID.Load(), Metrics: true} infop, err := storageDiskInfoRPC.Call(ctx, client.gridConn, &nopts) if err != nil { return info, toStorageErr(err) @@ -341,7 +340,7 @@ func (client *storageRESTClient) DiskInfo(ctx context.Context, opts DiskInfoOpti }, ) - return client.diskInfoCache.Get() + return client.diskInfoCache.GetWithCtx(ctx) } // MakeVolBulk - create multiple volumes in a bulk operation. @@ -362,7 +361,7 @@ func (client *storageRESTClient) ListVols(ctx context.Context) (vols []VolInfo, // StatVol - get volume info over the network. func (client *storageRESTClient) StatVol(ctx context.Context, volume string) (vol VolInfo, err error) { v, err := storageStatVolRPC.Call(ctx, client.gridConn, grid.NewMSSWith(map[string]string{ - storageRESTDiskID: client.diskID, + storageRESTDiskID: *client.diskID.Load(), storageRESTVolume: volume, })) if err != nil { @@ -403,12 +402,15 @@ func (client *storageRESTClient) CreateFile(ctx context.Context, origvolume, vol return err } _, err = waitForHTTPResponse(respBody) - return err + return toStorageErr(err) } func (client *storageRESTClient) WriteMetadata(ctx context.Context, origvolume, volume, path string, fi FileInfo) error { + ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout()) + defer cancel() + _, err := storageWriteMetadataRPC.Call(ctx, client.gridConn, &MetadataHandlerParams{ - DiskID: client.diskID, + DiskID: *client.diskID.Load(), OrigVolume: origvolume, Volume: volume, FilePath: path, @@ -418,8 +420,11 @@ func (client *storageRESTClient) WriteMetadata(ctx context.Context, origvolume, } func (client *storageRESTClient) UpdateMetadata(ctx context.Context, volume, path string, fi FileInfo, opts UpdateMetadataOpts) error { + ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout()) + defer cancel() + _, err := storageUpdateMetadataRPC.Call(ctx, client.gridConn, &MetadataHandlerParams{ - DiskID: client.diskID, + DiskID: *client.diskID.Load(), Volume: volume, FilePath: path, UpdateOpts: opts, @@ -429,8 +434,11 @@ func (client *storageRESTClient) UpdateMetadata(ctx context.Context, volume, pat } func (client *storageRESTClient) DeleteVersion(ctx context.Context, volume, path string, fi FileInfo, forceDelMarker bool, opts DeleteOptions) (err error) { + ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout()) + defer cancel() + _, err = storageDeleteVersionRPC.Call(ctx, client.gridConn, &DeleteVersionHandlerParams{ - DiskID: client.diskID, + DiskID: *client.diskID.Load(), Volume: volume, FilePath: path, ForceDelMarker: forceDelMarker, @@ -442,14 +450,11 @@ func (client *storageRESTClient) DeleteVersion(ctx context.Context, volume, path // WriteAll - write all data to a file. func (client *storageRESTClient) WriteAll(ctx context.Context, volume string, path string, b []byte) error { - // Specific optimization to avoid re-read from the drives for `format.json` - // in-case the caller is a network operation. 
- if volume == minioMetaBucket && path == formatConfigFile { - client.SetFormatData(b) - } + ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout()) + defer cancel() _, err := storageWriteAllRPC.Call(ctx, client.gridConn, &WriteAllHandlerParams{ - DiskID: client.diskID, + DiskID: *client.diskID.Load(), Volume: volume, FilePath: path, Buf: b, @@ -458,44 +463,61 @@ func (client *storageRESTClient) WriteAll(ctx context.Context, volume string, pa } // CheckParts - stat all file parts. -func (client *storageRESTClient) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) error { - _, err := storageCheckPartsRPC.Call(ctx, client.gridConn, &CheckPartsHandlerParams{ - DiskID: client.diskID, +func (client *storageRESTClient) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) (*CheckPartsResp, error) { + var resp *CheckPartsResp + st, err := storageCheckPartsRPC.Call(ctx, client.gridConn, &CheckPartsHandlerParams{ + DiskID: *client.diskID.Load(), Volume: volume, FilePath: path, FI: fi, }) - return toStorageErr(err) + if err != nil { + return nil, toStorageErr(err) + } + err = st.Results(func(r *CheckPartsResp) error { + resp = r + return nil + }) + return resp, toStorageErr(err) } // RenameData - rename source path to destination path atomically, metadata and data file. -func (client *storageRESTClient) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string, opts RenameOptions) (sign uint64, err error) { - resp, err := storageRenameDataRPC.Call(ctx, client.gridConn, &RenameDataHandlerParams{ - DiskID: client.diskID, +func (client *storageRESTClient) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, + dstVolume, dstPath string, opts RenameOptions, +) (res RenameDataResp, err error) { + params := RenameDataHandlerParams{ + DiskID: *client.diskID.Load(), SrcVolume: srcVolume, SrcPath: srcPath, DstPath: dstPath, DstVolume: dstVolume, FI: fi, Opts: opts, - }) + } + var resp *RenameDataResp + if fi.Data == nil { + resp, err = storageRenameDataRPC.Call(ctx, client.gridConn, ¶ms) + } else { + resp, err = storageRenameDataInlineRPC.Call(ctx, client.gridConn, &RenameDataInlineHandlerParams{params}) + } if err != nil { - return 0, toStorageErr(err) + return res, toStorageErr(err) } + defer storageRenameDataRPC.PutResponse(resp) - return resp.Signature, nil + return *resp, nil } // where we keep old *Readers -var readMsgpReaderPool = sync.Pool{New: func() interface{} { return &msgp.Reader{} }} +var readMsgpReaderPool = bpool.Pool[*msgp.Reader]{New: func() *msgp.Reader { return &msgp.Reader{} }} // mspNewReader returns a *Reader that reads from the provided reader. // The reader will be buffered. // Return with readMsgpReaderPoolPut when done. func msgpNewReader(r io.Reader) *msgp.Reader { - p := readMsgpReaderPool.Get().(*msgp.Reader) + p := readMsgpReaderPool.Get() if p.R == nil { - p.R = xbufio.NewReaderSize(r, 4<<10) + p.R = xbufio.NewReaderSize(r, 32<<10) } else { p.R.Reset(r) } @@ -510,16 +532,19 @@ func readMsgpReaderPoolPut(r *msgp.Reader) { } func (client *storageRESTClient) ReadVersion(ctx context.Context, origvolume, volume, path, versionID string, opts ReadOptions) (fi FileInfo, err error) { + ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout()) + defer cancel() + // Use websocket when not reading data. 
if !opts.ReadData { resp, err := storageReadVersionRPC.Call(ctx, client.gridConn, grid.NewMSSWith(map[string]string{ - storageRESTDiskID: client.diskID, - storageRESTOrigVolume: origvolume, - storageRESTVolume: volume, - storageRESTFilePath: path, - storageRESTVersionID: versionID, - storageRESTReadData: strconv.FormatBool(opts.ReadData), - storageRESTHealing: strconv.FormatBool(opts.Healing), + storageRESTDiskID: *client.diskID.Load(), + storageRESTOrigVolume: origvolume, + storageRESTVolume: volume, + storageRESTFilePath: path, + storageRESTVersionID: versionID, + storageRESTInclFreeVersions: strconv.FormatBool(opts.InclFreeVersions), + storageRESTHealing: strconv.FormatBool(opts.Healing), })) if err != nil { return fi, toStorageErr(err) @@ -532,10 +557,10 @@ func (client *storageRESTClient) ReadVersion(ctx context.Context, origvolume, vo values.Set(storageRESTVolume, volume) values.Set(storageRESTFilePath, path) values.Set(storageRESTVersionID, versionID) - values.Set(storageRESTReadData, strconv.FormatBool(opts.ReadData)) + values.Set(storageRESTInclFreeVersions, strconv.FormatBool(opts.InclFreeVersions)) values.Set(storageRESTHealing, strconv.FormatBool(opts.Healing)) - respBody, err := client.call(ctx, storageRESTMethodReadVersion, values, nil, -1) + respBody, err := client.callGet(ctx, storageRESTMethodReadVersion, values, nil, -1) if err != nil { return fi, err } @@ -550,13 +575,15 @@ func (client *storageRESTClient) ReadVersion(ctx context.Context, origvolume, vo // ReadXL - reads all contents of xl.meta of a file. func (client *storageRESTClient) ReadXL(ctx context.Context, volume string, path string, readData bool) (rf RawFileInfo, err error) { + ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout()) + defer cancel() + // Use websocket when not reading data. if !readData { resp, err := storageReadXLRPC.Call(ctx, client.gridConn, grid.NewMSSWith(map[string]string{ - storageRESTDiskID: client.diskID, + storageRESTDiskID: *client.diskID.Load(), storageRESTVolume: volume, storageRESTFilePath: path, - storageRESTReadData: "false", })) if err != nil { return rf, toStorageErr(err) @@ -567,8 +594,8 @@ func (client *storageRESTClient) ReadXL(ctx context.Context, volume string, path values := make(url.Values) values.Set(storageRESTVolume, volume) values.Set(storageRESTFilePath, path) - values.Set(storageRESTReadData, strconv.FormatBool(readData)) - respBody, err := client.call(ctx, storageRESTMethodReadXL, values, nil, -1) + + respBody, err := client.callGet(ctx, storageRESTMethodReadXL, values, nil, -1) if err != nil { return rf, toStorageErr(err) } @@ -583,20 +610,11 @@ func (client *storageRESTClient) ReadXL(ctx context.Context, volume string, path // ReadAll - reads all contents of a file. func (client *storageRESTClient) ReadAll(ctx context.Context, volume string, path string) ([]byte, error) { - // Specific optimization to avoid re-read from the drives for `format.json` - // in-case the caller is a network operation. 
- if volume == minioMetaBucket && path == formatConfigFile { - client.formatMutex.RLock() - formatData := make([]byte, len(client.formatData)) - copy(formatData, client.formatData) - client.formatMutex.RUnlock() - if len(formatData) > 0 { - return formatData, nil - } - } + ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout()) + defer cancel() gridBytes, err := storageReadAllRPC.Call(ctx, client.gridConn, &ReadAllHandlerParams{ - DiskID: client.diskID, + DiskID: *client.diskID.Load(), Volume: volume, FilePath: path, }) @@ -614,9 +632,10 @@ func (client *storageRESTClient) ReadFileStream(ctx context.Context, volume, pat values.Set(storageRESTFilePath, path) values.Set(storageRESTOffset, strconv.Itoa(int(offset))) values.Set(storageRESTLength, strconv.Itoa(int(length))) - respBody, err := client.call(ctx, storageRESTMethodReadFileStream, values, nil, -1) + + respBody, err := client.callGet(ctx, storageRESTMethodReadFileStream, values, nil, -1) if err != nil { - return nil, err + return nil, toStorageErr(err) } return respBody, nil } @@ -635,13 +654,13 @@ func (client *storageRESTClient) ReadFile(ctx context.Context, volume string, pa values.Set(storageRESTBitrotAlgo, "") values.Set(storageRESTBitrotHash, "") } - respBody, err := client.call(ctx, storageRESTMethodReadFile, values, nil, -1) + respBody, err := client.callGet(ctx, storageRESTMethodReadFile, values, nil, -1) if err != nil { return 0, err } defer xhttp.DrainBody(respBody) n, err := io.ReadFull(respBody, buf) - return int64(n), err + return int64(n), toStorageErr(err) } // ListDir - lists a directory. @@ -651,7 +670,7 @@ func (client *storageRESTClient) ListDir(ctx context.Context, origvolume, volume values.Set(storageRESTDirPath, dirPath) values.Set(storageRESTCount, strconv.Itoa(count)) values.Set(storageRESTOrigVolume, origvolume) - values.Set(storageRESTDiskID, client.diskID) + values.Set(storageRESTDiskID, *client.diskID.Load()) st, err := storageListDirRPC.Call(ctx, client.gridConn, values) if err != nil { @@ -666,8 +685,15 @@ func (client *storageRESTClient) ListDir(ctx context.Context, origvolume, volume // DeleteFile - deletes a file. 
func (client *storageRESTClient) Delete(ctx context.Context, volume string, path string, deleteOpts DeleteOptions) error { + if !deleteOpts.Immediate { + // add deadlines for all non-immediate purges + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout()) + defer cancel() + } + _, err := storageDeleteFileRPC.Call(ctx, client.gridConn, &DeleteFileHandlerParams{ - DiskID: client.diskID, + DiskID: *client.diskID.Load(), Volume: volume, FilePath: path, Opts: deleteOpts, @@ -690,7 +716,7 @@ func (client *storageRESTClient) DeleteVersions(ctx context.Context, volume stri for _, version := range versions { version.EncodeMsg(encoder) } - logger.LogIf(ctx, encoder.Flush()) + storageLogIf(ctx, encoder.Flush()) errs = make([]error, len(versions)) @@ -709,30 +735,89 @@ func (client *storageRESTClient) DeleteVersions(ctx context.Context, volume stri reader, err := waitForHTTPResponse(respBody) if err != nil { for i := range errs { - errs[i] = err + errs[i] = toStorageErr(err) } return errs } dErrResp := &DeleteVersionsErrsResp{} - if err = gob.NewDecoder(reader).Decode(dErrResp); err != nil { + decoder := msgpNewReader(reader) + defer readMsgpReaderPoolPut(decoder) + if err = dErrResp.DecodeMsg(decoder); err != nil { for i := range errs { - errs[i] = err + errs[i] = toStorageErr(err) } return errs } for i, dErr := range dErrResp.Errs { - errs[i] = toStorageErr(dErr) + if dErr != "" { + errs[i] = toStorageErr(errors.New(dErr)) + } else { + errs[i] = nil + } } return errs } +// RenamePart - renames multipart part file +func (client *storageRESTClient) RenamePart(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string, meta []byte, skipParent string) (err error) { + ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout()) + defer cancel() + + _, err = storageRenamePartRPC.Call(ctx, client.gridConn, &RenamePartHandlerParams{ + DiskID: *client.diskID.Load(), + SrcVolume: srcVolume, + SrcFilePath: srcPath, + DstVolume: dstVolume, + DstFilePath: dstPath, + Meta: meta, + SkipParent: skipParent, + }) + return toStorageErr(err) +} + +// ReadParts - reads various part.N.meta paths from a drive remotely and returns object part info for each of those part.N.meta if found +func (client *storageRESTClient) ReadParts(ctx context.Context, volume string, partMetaPaths ...string) ([]*ObjectPartInfo, error) { + values := make(url.Values) + values.Set(storageRESTVolume, volume) + + rp := &ReadPartsReq{Paths: partMetaPaths} + buf, err := rp.MarshalMsg(nil) + if err != nil { + return nil, err + } + + respBody, err := client.call(ctx, storageRESTMethodReadParts, values, bytes.NewReader(buf), -1) + defer xhttp.DrainBody(respBody) + if err != nil { + return nil, err + } + + respReader, err := waitForHTTPResponse(respBody) + if err != nil { + return nil, toStorageErr(err) + } + + rd := msgpNewReader(respReader) + defer readMsgpReaderPoolPut(rd) + + readPartsResp := &ReadPartsResp{} + if err = readPartsResp.DecodeMsg(rd); err != nil { + return nil, toStorageErr(err) + } + + return readPartsResp.Infos, nil +} + // RenameFile - renames a file. 
func (client *storageRESTClient) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) (err error) { + ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout()) + defer cancel() + _, err = storageRenameFileRPC.Call(ctx, client.gridConn, &RenameFileHandlerParams{ - DiskID: client.diskID, + DiskID: *client.diskID.Load(), SrcVolume: srcVolume, SrcFilePath: srcPath, DstVolume: dstVolume, @@ -741,33 +826,57 @@ func (client *storageRESTClient) RenameFile(ctx context.Context, srcVolume, srcP return toStorageErr(err) } -func (client *storageRESTClient) VerifyFile(ctx context.Context, volume, path string, fi FileInfo) error { +func (client *storageRESTClient) VerifyFile(ctx context.Context, volume, path string, fi FileInfo) (*CheckPartsResp, error) { values := make(url.Values) values.Set(storageRESTVolume, volume) values.Set(storageRESTFilePath, path) var reader bytes.Buffer if err := msgp.Encode(&reader, &fi); err != nil { - return err + return nil, err } respBody, err := client.call(ctx, storageRESTMethodVerifyFile, values, &reader, -1) defer xhttp.DrainBody(respBody) if err != nil { - return err + return nil, err } respReader, err := waitForHTTPResponse(respBody) + if err != nil { + return nil, toStorageErr(err) + } + + dec := msgpNewReader(respReader) + defer readMsgpReaderPoolPut(dec) + + verifyResp := CheckPartsResp{} + err = verifyResp.DecodeMsg(dec) + if err != nil { + return nil, toStorageErr(err) + } + + return &verifyResp, nil +} + +func (client *storageRESTClient) DeleteBulk(ctx context.Context, volume string, paths ...string) (err error) { + values := make(url.Values) + values.Set(storageRESTVolume, volume) + + req := &DeleteBulkReq{Paths: paths} + body, err := req.MarshalMsg(nil) if err != nil { return err } - verifyResp := &VerifyFileResp{} - if err = gob.NewDecoder(respReader).Decode(verifyResp); err != nil { + respBody, err := client.call(ctx, storageRESTMethodDeleteBulk, values, bytes.NewReader(body), int64(len(body))) + if err != nil { return err } + defer xhttp.DrainBody(respBody) - return toStorageErr(verifyResp.Err) + _, err = waitForHTTPResponse(respBody) + return toStorageErr(err) } func (client *storageRESTClient) StatInfoFile(ctx context.Context, volume, path string, glob bool) (stat []StatInfo, err error) { @@ -782,10 +891,11 @@ func (client *storageRESTClient) StatInfoFile(ctx context.Context, volume, path defer xhttp.DrainBody(respBody) respReader, err := waitForHTTPResponse(respBody) if err != nil { - return stat, err + return stat, toStorageErr(err) } rd := msgpNewReader(respReader) defer readMsgpReaderPoolPut(rd) + for { var st StatInfo err = st.DecodeMsg(rd) @@ -795,10 +905,11 @@ func (client *storageRESTClient) StatInfoFile(ctx context.Context, volume, path } break } + stat = append(stat, st) } - return stat, err + return stat, toStorageErr(err) } // ReadMultiple will read multiple files and send each back as response. 
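A recurring change in the client methods above (WriteMetadata, UpdateMetadata, DeleteVersion, WriteAll, ReadVersion, ReadXL, ReadAll, Delete, RenamePart, RenameFile) is wrapping each RPC in a per-call deadline derived from the drive configuration. A minimal sketch of that pattern; maxTimeout and doRPC stand in for globalDriveConfig.GetMaxTimeout() and the generated grid calls:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// callWithDriveDeadline bounds a single storage RPC by the configured
// per-drive timeout, matching the ctx/cancel wrapping added in this diff.
func callWithDriveDeadline(ctx context.Context, maxTimeout time.Duration,
	doRPC func(context.Context) error,
) error {
	ctx, cancel := context.WithTimeout(ctx, maxTimeout)
	defer cancel()
	return doRPC(ctx)
}

func main() {
	err := callWithDriveDeadline(context.Background(), 50*time.Millisecond,
		func(ctx context.Context) error {
			select {
			case <-time.After(time.Second): // simulate a hung drive
				return nil
			case <-ctx.Done():
				return ctx.Err()
			}
		})
	fmt.Println(errors.Is(err, context.DeadlineExceeded)) // true
}

The effect is that a hung or very slow drive fails the call with a deadline error instead of blocking the caller indefinitely; only immediate deletes skip the deadline, as the Delete hunk shows.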
@@ -816,12 +927,10 @@ func (client *storageRESTClient) ReadMultiple(ctx context.Context, req ReadMulti return err } defer xhttp.DrainBody(respBody) - if err != nil { - return err - } + pr, pw := io.Pipe() go func() { - pw.CloseWithError(waitForHTTPStream(respBody, pw)) + pw.CloseWithError(waitForHTTPStream(respBody, xioutil.NewDeadlineWriter(pw, globalDriveConfig.GetMaxTimeout()))) }() mr := msgp.NewReader(pr) defer readMsgpReaderPoolPut(mr) @@ -832,7 +941,7 @@ func (client *storageRESTClient) ReadMultiple(ctx context.Context, req ReadMulti err = nil } pr.CloseWithError(err) - return err + return toStorageErr(err) } select { case <-ctx.Done(): @@ -854,7 +963,7 @@ func (client *storageRESTClient) CleanAbandonedData(ctx context.Context, volume } defer xhttp.DrainBody(respBody) _, err = waitForHTTPResponse(respBody) - return err + return toStorageErr(err) } // Close - marks the client as closed. @@ -863,6 +972,8 @@ func (client *storageRESTClient) Close() error { return nil } +var emptyDiskID = "" + // Returns a storage rest client. func newStorageRESTClient(endpoint Endpoint, healthCheck bool, gm *grid.Manager) (*storageRESTClient, error) { serverURL := &url.URL{ @@ -872,7 +983,6 @@ func newStorageRESTClient(endpoint Endpoint, healthCheck bool, gm *grid.Manager) } restClient := rest.NewClient(serverURL, globalInternodeTransport, newCachedAuthToken()) - if healthCheck { // Use a separate client to avoid recursive calls. healthClient := rest.NewClient(serverURL, globalInternodeTransport, newCachedAuthToken()) @@ -889,10 +999,12 @@ func newStorageRESTClient(endpoint Endpoint, healthCheck bool, gm *grid.Manager) if conn == nil { return nil, fmt.Errorf("unable to find connection for %s in targets: %v", endpoint.GridHost(), gm.Targets()) } - return &storageRESTClient{ + client := &storageRESTClient{ endpoint: endpoint, restClient: restClient, gridConn: conn, diskInfoCache: cachevalue.New[DiskInfo](), - }, nil + } + client.SetDiskID(emptyDiskID) + return client, nil } diff --git a/cmd/storage-rest-common.go b/cmd/storage-rest-common.go index 32fe56c1aaa31..361045de2d768 100644 --- a/cmd/storage-rest-common.go +++ b/cmd/storage-rest-common.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2022 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. 
// // This file is part of MinIO Object Storage stack // @@ -20,7 +20,7 @@ package cmd //go:generate msgp -file $GOFILE -unexported const ( - storageRESTVersion = "v57" // Remove TotalTokens from DiskMetrics + storageRESTVersion = "v63" // Introduce RenamePart and ReadParts API storageRESTVersionPrefix = SlashSeparator + storageRESTVersion storageRESTPrefix = minioReservedBucketPath + "/storage" ) @@ -28,47 +28,50 @@ const ( const ( storageRESTMethodHealth = "/health" - storageRESTMethodAppendFile = "/appendfile" - storageRESTMethodCreateFile = "/createfile" - storageRESTMethodWriteAll = "/writeall" - storageRESTMethodReadVersion = "/readversion" - storageRESTMethodReadXL = "/readxl" - storageRESTMethodReadAll = "/readall" - storageRESTMethodReadFile = "/readfile" - storageRESTMethodReadFileStream = "/readfilestream" - storageRESTMethodListDir = "/listdir" - storageRESTMethodDeleteVersions = "/deleteverions" - storageRESTMethodRenameFile = "/renamefile" - storageRESTMethodVerifyFile = "/verifyfile" - storageRESTMethodStatInfoFile = "/statfile" - storageRESTMethodReadMultiple = "/readmultiple" - storageRESTMethodCleanAbandoned = "/cleanabandoned" + storageRESTMethodAppendFile = "/afile" + storageRESTMethodCreateFile = "/cfile" + storageRESTMethodWriteAll = "/wall" + storageRESTMethodReadVersion = "/rver" + storageRESTMethodReadXL = "/rxl" + storageRESTMethodReadAll = "/rall" + storageRESTMethodReadFile = "/rfile" + storageRESTMethodReadFileStream = "/rfilest" + storageRESTMethodListDir = "/ls" + storageRESTMethodDeleteVersions = "/dvers" + storageRESTMethodRenameFile = "/rfile" + storageRESTMethodVerifyFile = "/vfile" + storageRESTMethodStatInfoFile = "/sfile" + storageRESTMethodReadMultiple = "/rmpl" + storageRESTMethodCleanAbandoned = "/cln" + storageRESTMethodDeleteBulk = "/dblk" + storageRESTMethodReadParts = "/rps" ) const ( - storageRESTVolume = "volume" - storageRESTVolumes = "volumes" - storageRESTDirPath = "dir-path" - storageRESTFilePath = "file-path" - storageRESTVersionID = "version-id" - storageRESTReadData = "read-data" - storageRESTHealing = "healing" - storageRESTTotalVersions = "total-versions" - storageRESTSrcVolume = "source-volume" - storageRESTSrcPath = "source-path" - storageRESTDstVolume = "destination-volume" - storageRESTDstPath = "destination-path" - storageRESTOffset = "offset" - storageRESTLength = "length" - storageRESTCount = "count" - storageRESTBitrotAlgo = "bitrot-algo" - storageRESTBitrotHash = "bitrot-hash" - storageRESTDiskID = "disk-id" - storageRESTForceDelete = "force-delete" - storageRESTGlob = "glob" - storageRESTMetrics = "metrics" - storageRESTDriveQuorum = "drive-quorum" - storageRESTOrigVolume = "orig-volume" + storageRESTVolume = "vol" + storageRESTVolumes = "vols" + storageRESTDirPath = "dpath" + storageRESTFilePath = "fp" + storageRESTVersionID = "vid" + storageRESTHealing = "heal" + storageRESTTotalVersions = "tvers" + storageRESTSrcVolume = "svol" + storageRESTSrcPath = "spath" + storageRESTDstVolume = "dvol" + storageRESTDstPath = "dpath" + storageRESTOffset = "offset" + storageRESTLength = "length" + storageRESTCount = "count" + storageRESTBitrotAlgo = "balg" + storageRESTBitrotHash = "bhash" + storageRESTDiskID = "did" + storageRESTForceDelete = "fdel" + storageRESTGlob = "glob" + storageRESTMetrics = "metrics" + storageRESTDriveQuorum = "dquorum" + storageRESTOrigVolume = "ovol" + storageRESTInclFreeVersions = "incl-fv" + storageRESTRange = "rng" ) type nsScannerOptions struct { diff --git a/cmd/storage-rest-common_gen.go 
b/cmd/storage-rest-common_gen.go index f81f8c97e0263..c29615c77a4c4 100644 --- a/cmd/storage-rest-common_gen.go +++ b/cmd/storage-rest-common_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) diff --git a/cmd/storage-rest-common_gen_test.go b/cmd/storage-rest-common_gen_test.go index 8085a115c0550..4164020a963d2 100644 --- a/cmd/storage-rest-common_gen_test.go +++ b/cmd/storage-rest-common_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "bytes" "testing" diff --git a/cmd/storage-rest-server.go b/cmd/storage-rest-server.go index 994b7b0ebf742..927571b3dd2b7 100644 --- a/cmd/storage-rest-server.go +++ b/cmd/storage-rest-server.go @@ -21,7 +21,6 @@ import ( "bufio" "context" "encoding/binary" - "encoding/gob" "encoding/hex" "errors" "fmt" @@ -29,13 +28,13 @@ import ( "net/http" "os/user" "path" - "runtime" "runtime/debug" "strconv" "strings" "sync" "time" + "github.com/minio/minio/internal/bpool" "github.com/minio/minio/internal/grid" "github.com/tinylib/msgp/msgp" @@ -47,7 +46,7 @@ import ( xjwt "github.com/minio/minio/internal/jwt" "github.com/minio/minio/internal/logger" "github.com/minio/mux" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" ) var errDiskStale = errors.New("drive stale") @@ -58,32 +57,30 @@ type storageRESTServer struct { } var ( - storageCheckPartsRPC = grid.NewSingleHandler[*CheckPartsHandlerParams, grid.NoPayload](grid.HandlerCheckParts, func() *CheckPartsHandlerParams { return &CheckPartsHandlerParams{} }, grid.NewNoPayload) - storageDeleteFileRPC = grid.NewSingleHandler[*DeleteFileHandlerParams, grid.NoPayload](grid.HandlerDeleteFile, func() *DeleteFileHandlerParams { return &DeleteFileHandlerParams{} }, grid.NewNoPayload).AllowCallRequestPool(true) - storageDeleteVersionRPC = grid.NewSingleHandler[*DeleteVersionHandlerParams, grid.NoPayload](grid.HandlerDeleteVersion, func() *DeleteVersionHandlerParams { return &DeleteVersionHandlerParams{} }, grid.NewNoPayload) - storageDiskInfoRPC = grid.NewSingleHandler[*DiskInfoOptions, *DiskInfo](grid.HandlerDiskInfo, func() *DiskInfoOptions { return &DiskInfoOptions{} }, func() *DiskInfo { return &DiskInfo{} }).WithSharedResponse().AllowCallRequestPool(true) - storageNSScannerRPC = grid.NewStream[*nsScannerOptions, grid.NoPayload, *nsScannerResp](grid.HandlerNSScanner, func() *nsScannerOptions { return &nsScannerOptions{} }, nil, func() *nsScannerResp { return &nsScannerResp{} }) - storageReadAllRPC = grid.NewSingleHandler[*ReadAllHandlerParams, *grid.Bytes](grid.HandlerReadAll, func() *ReadAllHandlerParams { return &ReadAllHandlerParams{} }, grid.NewBytes).AllowCallRequestPool(true) - storageWriteAllRPC = grid.NewSingleHandler[*WriteAllHandlerParams, grid.NoPayload](grid.HandlerWriteAll, func() *WriteAllHandlerParams { return &WriteAllHandlerParams{} }, grid.NewNoPayload) - storageReadVersionRPC = grid.NewSingleHandler[*grid.MSS, *FileInfo](grid.HandlerReadVersion, grid.NewMSS, func() *FileInfo { return &FileInfo{} }) - storageReadXLRPC = grid.NewSingleHandler[*grid.MSS, *RawFileInfo](grid.HandlerReadXL, grid.NewMSS, func() *RawFileInfo { return &RawFileInfo{} }) - storageRenameDataRPC = grid.NewSingleHandler[*RenameDataHandlerParams, *RenameDataResp](grid.HandlerRenameData, func() *RenameDataHandlerParams { return &RenameDataHandlerParams{} }, func() *RenameDataResp { return &RenameDataResp{} }) - storageRenameFileRPC = 
grid.NewSingleHandler[*RenameFileHandlerParams, grid.NoPayload](grid.HandlerRenameFile, func() *RenameFileHandlerParams { return &RenameFileHandlerParams{} }, grid.NewNoPayload).AllowCallRequestPool(true) - storageStatVolRPC = grid.NewSingleHandler[*grid.MSS, *VolInfo](grid.HandlerStatVol, grid.NewMSS, func() *VolInfo { return &VolInfo{} }) - storageUpdateMetadataRPC = grid.NewSingleHandler[*MetadataHandlerParams, grid.NoPayload](grid.HandlerUpdateMetadata, func() *MetadataHandlerParams { return &MetadataHandlerParams{} }, grid.NewNoPayload) - storageWriteMetadataRPC = grid.NewSingleHandler[*MetadataHandlerParams, grid.NoPayload](grid.HandlerWriteMetadata, func() *MetadataHandlerParams { return &MetadataHandlerParams{} }, grid.NewNoPayload) - storageListDirRPC = grid.NewStream[*grid.MSS, grid.NoPayload, *ListDirResult](grid.HandlerListDir, grid.NewMSS, nil, func() *ListDirResult { return &ListDirResult{} }).WithOutCapacity(1) + storageCheckPartsRPC = grid.NewStream[*CheckPartsHandlerParams, grid.NoPayload, *CheckPartsResp](grid.HandlerCheckParts3, func() *CheckPartsHandlerParams { return &CheckPartsHandlerParams{} }, nil, func() *CheckPartsResp { return &CheckPartsResp{} }) + storageDeleteFileRPC = grid.NewSingleHandler[*DeleteFileHandlerParams, grid.NoPayload](grid.HandlerDeleteFile, func() *DeleteFileHandlerParams { return &DeleteFileHandlerParams{} }, grid.NewNoPayload).AllowCallRequestPool(true) + storageDeleteVersionRPC = grid.NewSingleHandler[*DeleteVersionHandlerParams, grid.NoPayload](grid.HandlerDeleteVersion, func() *DeleteVersionHandlerParams { return &DeleteVersionHandlerParams{} }, grid.NewNoPayload) + storageDiskInfoRPC = grid.NewSingleHandler[*DiskInfoOptions, *DiskInfo](grid.HandlerDiskInfo, func() *DiskInfoOptions { return &DiskInfoOptions{} }, func() *DiskInfo { return &DiskInfo{} }).WithSharedResponse().AllowCallRequestPool(true) + storageNSScannerRPC = grid.NewStream[*nsScannerOptions, grid.NoPayload, *nsScannerResp](grid.HandlerNSScanner, func() *nsScannerOptions { return &nsScannerOptions{} }, nil, func() *nsScannerResp { return &nsScannerResp{} }) + storageReadAllRPC = grid.NewSingleHandler[*ReadAllHandlerParams, *grid.Bytes](grid.HandlerReadAll, func() *ReadAllHandlerParams { return &ReadAllHandlerParams{} }, grid.NewBytes).AllowCallRequestPool(true) + storageWriteAllRPC = grid.NewSingleHandler[*WriteAllHandlerParams, grid.NoPayload](grid.HandlerWriteAll, func() *WriteAllHandlerParams { return &WriteAllHandlerParams{} }, grid.NewNoPayload) + storageReadVersionRPC = grid.NewSingleHandler[*grid.MSS, *FileInfo](grid.HandlerReadVersion, grid.NewMSS, func() *FileInfo { return &FileInfo{} }) + storageReadXLRPC = grid.NewSingleHandler[*grid.MSS, *RawFileInfo](grid.HandlerReadXL, grid.NewMSS, func() *RawFileInfo { return &RawFileInfo{} }) + storageRenameDataRPC = grid.NewSingleHandler[*RenameDataHandlerParams, *RenameDataResp](grid.HandlerRenameData2, func() *RenameDataHandlerParams { return &RenameDataHandlerParams{} }, func() *RenameDataResp { return &RenameDataResp{} }) + storageRenameDataInlineRPC = grid.NewSingleHandler[*RenameDataInlineHandlerParams, *RenameDataResp](grid.HandlerRenameDataInline, newRenameDataInlineHandlerParams, func() *RenameDataResp { return &RenameDataResp{} }).AllowCallRequestPool(false) + storageRenameFileRPC = grid.NewSingleHandler[*RenameFileHandlerParams, grid.NoPayload](grid.HandlerRenameFile, func() *RenameFileHandlerParams { return &RenameFileHandlerParams{} }, grid.NewNoPayload).AllowCallRequestPool(true) + storageRenamePartRPC = 
grid.NewSingleHandler[*RenamePartHandlerParams, grid.NoPayload](grid.HandlerRenamePart, func() *RenamePartHandlerParams { return &RenamePartHandlerParams{} }, grid.NewNoPayload) + storageStatVolRPC = grid.NewSingleHandler[*grid.MSS, *VolInfo](grid.HandlerStatVol, grid.NewMSS, func() *VolInfo { return &VolInfo{} }) + storageUpdateMetadataRPC = grid.NewSingleHandler[*MetadataHandlerParams, grid.NoPayload](grid.HandlerUpdateMetadata, func() *MetadataHandlerParams { return &MetadataHandlerParams{} }, grid.NewNoPayload) + storageWriteMetadataRPC = grid.NewSingleHandler[*MetadataHandlerParams, grid.NoPayload](grid.HandlerWriteMetadata, func() *MetadataHandlerParams { return &MetadataHandlerParams{} }, grid.NewNoPayload) + storageListDirRPC = grid.NewStream[*grid.MSS, grid.NoPayload, *ListDirResult](grid.HandlerListDir, grid.NewMSS, nil, func() *ListDirResult { return &ListDirResult{} }).WithOutCapacity(1) ) func getStorageViaEndpoint(endpoint Endpoint) StorageAPI { globalLocalDrivesMu.RLock() defer globalLocalDrivesMu.RUnlock() if len(globalLocalSetDrives) == 0 { - for _, drive := range globalLocalDrives { - if drive != nil && drive.Endpoint().Equal(endpoint) { - return drive - } - } + return globalLocalDrivesMap[endpoint.String()] } return globalLocalSetDrives[endpoint.PoolIdx][endpoint.SetIdx][endpoint.DiskIdx] } @@ -112,6 +109,21 @@ func (s *storageRESTServer) writeErrorResponse(w http.ResponseWriter, err error) // DefaultSkewTime - skew time is 15 minutes between minio peers. const DefaultSkewTime = 15 * time.Minute +// validateStorageRequestToken will validate the token against the provided audience. +func validateStorageRequestToken(token string) error { + claims := xjwt.NewStandardClaims() + if err := xjwt.ParseWithStandardClaims(token, claims, []byte(globalActiveCred.SecretKey)); err != nil { + return errAuthentication + } + + owner := claims.AccessKey == globalActiveCred.AccessKey || claims.Subject == globalActiveCred.AccessKey + if !owner { + return errAuthentication + } + + return nil +} + // Authenticates storage client's requests and validates for skewed time. 
func storageServerRequestValidate(r *http.Request) error { token, err := jwtreq.AuthorizationHeaderExtractor.ExtractToken(r) @@ -122,30 +134,23 @@ func storageServerRequestValidate(r *http.Request) error { return errMalformedAuth } - claims := xjwt.NewStandardClaims() - if err = xjwt.ParseWithStandardClaims(token, claims, []byte(globalActiveCred.SecretKey)); err != nil { - return errAuthentication - } - - owner := claims.AccessKey == globalActiveCred.AccessKey || claims.Subject == globalActiveCred.AccessKey - if !owner { - return errAuthentication - } - - if claims.Audience != r.URL.RawQuery { - return errAuthentication + if err = validateStorageRequestToken(token); err != nil { + return err } - requestTimeStr := r.Header.Get("X-Minio-Time") - requestTime, err := time.Parse(time.RFC3339, requestTimeStr) + nanoTime, err := strconv.ParseInt(r.Header.Get("X-Minio-Time"), 10, 64) if err != nil { return errMalformedAuth } - utcNow := UTCNow() - delta := requestTime.Sub(utcNow) + + localTime := UTCNow() + remoteTime := time.Unix(0, nanoTime) + + delta := remoteTime.Sub(localTime) if delta < 0 { delta *= -1 } + if delta > DefaultSkewTime { return errSkewedAuthTime } @@ -371,17 +376,22 @@ func (s *storageRESTServer) ReadVersionHandlerWS(params *grid.MSS) (*FileInfo, * volume := params.Get(storageRESTVolume) filePath := params.Get(storageRESTFilePath) versionID := params.Get(storageRESTVersionID) - readData, err := strconv.ParseBool(params.Get(storageRESTReadData)) + + healing, err := strconv.ParseBool(params.Get(storageRESTHealing)) if err != nil { return nil, grid.NewRemoteErr(err) } - healing, err := strconv.ParseBool(params.Get(storageRESTHealing)) + inclFreeVersions, err := strconv.ParseBool(params.Get(storageRESTInclFreeVersions)) if err != nil { return nil, grid.NewRemoteErr(err) } - fi, err := s.getStorage().ReadVersion(context.Background(), origvolume, volume, filePath, versionID, ReadOptions{ReadData: readData, Healing: healing}) + fi, err := s.getStorage().ReadVersion(context.Background(), origvolume, volume, filePath, versionID, ReadOptions{ + InclFreeVersions: inclFreeVersions, + ReadData: false, + Healing: healing, + }) if err != nil { return nil, grid.NewRemoteErr(err) } @@ -397,23 +407,29 @@ func (s *storageRESTServer) ReadVersionHandler(w http.ResponseWriter, r *http.Re volume := r.Form.Get(storageRESTVolume) filePath := r.Form.Get(storageRESTFilePath) versionID := r.Form.Get(storageRESTVersionID) - readData, err := strconv.ParseBool(r.Form.Get(storageRESTReadData)) + healing, err := strconv.ParseBool(r.Form.Get(storageRESTHealing)) if err != nil { s.writeErrorResponse(w, err) return } - healing, err := strconv.ParseBool(r.Form.Get(storageRESTHealing)) + + inclFreeVersions, err := strconv.ParseBool(r.Form.Get(storageRESTInclFreeVersions)) if err != nil { s.writeErrorResponse(w, err) return } - fi, err := s.getStorage().ReadVersion(r.Context(), origvolume, volume, filePath, versionID, ReadOptions{ReadData: readData, Healing: healing}) + + fi, err := s.getStorage().ReadVersion(r.Context(), origvolume, volume, filePath, versionID, ReadOptions{ + InclFreeVersions: inclFreeVersions, + ReadData: true, + Healing: healing, + }) if err != nil { s.writeErrorResponse(w, err) return } - logger.LogIf(r.Context(), msgp.Encode(w, &fi)) + storageLogIf(r.Context(), msgp.Encode(w, &fi)) } // WriteMetadataHandler rpc handler to write new updated metadata. 
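storageServerRequestValidate above now reads X-Minio-Time as a Unix-nanosecond integer instead of an RFC 3339 string and compares it against local time with the 15-minute DefaultSkewTime allowance. A self-contained sketch of that check, assuming only the header value; errSkewedAuthTime and the constant mirror the names in the diff, the rest is illustrative:

package main

import (
	"errors"
	"fmt"
	"strconv"
	"time"
)

var errSkewedAuthTime = errors.New("client time skewed beyond allowed limit")

// defaultSkewTime mirrors DefaultSkewTime (15 minutes between MinIO peers).
const defaultSkewTime = 15 * time.Minute

// checkSkew parses a Unix-nanosecond timestamp from the request header and
// rejects it when the absolute clock difference exceeds the skew limit.
func checkSkew(header string) error {
	nanoTime, err := strconv.ParseInt(header, 10, 64)
	if err != nil {
		return errors.New("malformed auth time")
	}
	delta := time.Since(time.Unix(0, nanoTime))
	if delta < 0 {
		delta = -delta
	}
	if delta > defaultSkewTime {
		return errSkewedAuthTime
	}
	return nil
}

func main() {
	now := strconv.FormatInt(time.Now().UnixNano(), 10)
	old := strconv.FormatInt(time.Now().Add(-time.Hour).UnixNano(), 10)
	fmt.Println(checkSkew(now)) // <nil>
	fmt.Println(checkSkew(old)) // client time skewed beyond allowed limit
}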
@@ -441,14 +457,20 @@ func (s *storageRESTServer) UpdateMetadataHandler(p *MetadataHandlerParams) (gri return grid.NewNPErr(s.getStorage().UpdateMetadata(context.Background(), volume, filePath, p.FI, p.UpdateOpts)) } -// CheckPartsHandler - check if a file metadata exists. -func (s *storageRESTServer) CheckPartsHandler(p *CheckPartsHandlerParams) (grid.NoPayload, *grid.RemoteErr) { +// CheckPartsHandler - check if a file parts exists. +func (s *storageRESTServer) CheckPartsHandler(ctx context.Context, p *CheckPartsHandlerParams, out chan<- *CheckPartsResp) *grid.RemoteErr { if !s.checkID(p.DiskID) { - return grid.NewNPErr(errDiskNotFound) + return grid.NewRemoteErr(errDiskNotFound) } volume := p.Volume filePath := p.FilePath - return grid.NewNPErr(s.getStorage().CheckParts(context.Background(), volume, filePath, p.FI)) + + resp, err := s.getStorage().CheckParts(ctx, volume, filePath, p.FI) + if err != nil { + return grid.NewRemoteErr(err) + } + out <- resp + return grid.NewRemoteErr(err) } func (s *storageRESTServer) WriteAllHandler(p *WriteAllHandlerParams) (grid.NoPayload, *grid.RemoteErr) { @@ -480,21 +502,17 @@ func (s *storageRESTServer) ReadXLHandler(w http.ResponseWriter, r *http.Request if !s.IsValid(w, r) { return } + volume := r.Form.Get(storageRESTVolume) filePath := r.Form.Get(storageRESTFilePath) - readData, err := strconv.ParseBool(r.Form.Get(storageRESTReadData)) - if err != nil { - s.writeErrorResponse(w, err) - return - } - rf, err := s.getStorage().ReadXL(r.Context(), volume, filePath, readData) + rf, err := s.getStorage().ReadXL(r.Context(), volume, filePath, true) if err != nil { s.writeErrorResponse(w, err) return } - logger.LogIf(r.Context(), msgp.Encode(w, &rf)) + storageLogIf(r.Context(), msgp.Encode(w, &rf)) } // ReadXLHandlerWS - read xl.meta for an object at path. @@ -502,19 +520,40 @@ func (s *storageRESTServer) ReadXLHandlerWS(params *grid.MSS) (*RawFileInfo, *gr if !s.checkID(params.Get(storageRESTDiskID)) { return nil, grid.NewRemoteErr(errDiskNotFound) } + volume := params.Get(storageRESTVolume) filePath := params.Get(storageRESTFilePath) - readData, err := strconv.ParseBool(params.Get(storageRESTReadData)) + rf, err := s.getStorage().ReadXL(context.Background(), volume, filePath, false) if err != nil { return nil, grid.NewRemoteErr(err) } - rf, err := s.getStorage().ReadXL(context.Background(), volume, filePath, readData) + return &rf, nil +} + +// ReadPartsHandler - read section of a file. +func (s *storageRESTServer) ReadPartsHandler(w http.ResponseWriter, r *http.Request) { + if !s.IsValid(w, r) { + return + } + volume := r.Form.Get(storageRESTVolume) + + var preq ReadPartsReq + if err := msgp.Decode(r.Body, &preq); err != nil { + s.writeErrorResponse(w, err) + return + } + + done := keepHTTPResponseAlive(w) + infos, err := s.getStorage().ReadParts(r.Context(), volume, preq.Paths...) + done(nil) if err != nil { - return nil, grid.NewRemoteErr(err) + s.writeErrorResponse(w, err) + return } - return &rf, nil + presp := &ReadPartsResp{Infos: infos} + storageLogIf(r.Context(), msgp.Encode(w, presp)) } // ReadFileHandler - read section of a file. 
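CheckParts above moves from a single empty reply to a streaming handler: the server pushes one CheckPartsResp onto an out channel and the client drains the stream via Results(). A simplified sketch of that control flow with the grid machinery replaced by a plain channel; the types and function names here are illustrative, not the generated ones:

package main

import (
	"context"
	"fmt"
)

// checkPartsResp is a stand-in for CheckPartsResp; the real type carries
// a per-part result for the object being checked.
type checkPartsResp struct {
	Results []int
}

// checkPartsHandler plays the server role: it streams exactly one
// response message and reports no remote error.
func checkPartsHandler(ctx context.Context, out chan<- *checkPartsResp) error {
	out <- &checkPartsResp{Results: []int{0, 0, 1}}
	return nil
}

// callCheckParts plays the client role: invoke the handler, then drain the
// stream and keep the single response, much like st.Results(...) does.
func callCheckParts(ctx context.Context) (*checkPartsResp, error) {
	out := make(chan *checkPartsResp, 1)
	if err := checkPartsHandler(ctx, out); err != nil {
		return nil, err
	}
	close(out)
	var resp *checkPartsResp
	for r := range out {
		resp = r
	}
	return resp, nil
}

func main() {
	resp, err := callCheckParts(context.Background())
	fmt.Println(resp.Results, err) // [0 0 1] <nil>
}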
@@ -567,46 +606,27 @@ func (s *storageRESTServer) ReadFileStreamHandler(w http.ResponseWriter, r *http } volume := r.Form.Get(storageRESTVolume) filePath := r.Form.Get(storageRESTFilePath) - offset, err := strconv.Atoi(r.Form.Get(storageRESTOffset)) + offset, err := strconv.ParseInt(r.Form.Get(storageRESTOffset), 10, 64) if err != nil { s.writeErrorResponse(w, err) return } - length, err := strconv.Atoi(r.Form.Get(storageRESTLength)) + length, err := strconv.ParseInt(r.Form.Get(storageRESTLength), 10, 64) if err != nil { s.writeErrorResponse(w, err) return } - w.Header().Set(xhttp.ContentLength, strconv.Itoa(length)) - - rc, err := s.getStorage().ReadFileStream(r.Context(), volume, filePath, int64(offset), int64(length)) + rc, err := s.getStorage().ReadFileStream(r.Context(), volume, filePath, offset, length) if err != nil { s.writeErrorResponse(w, err) return } defer rc.Close() - rf, ok := w.(io.ReaderFrom) - if ok && runtime.GOOS != "windows" { - // Attempt to use splice/sendfile() optimization, A very specific behavior mentioned below is necessary. - // See https://github.com/golang/go/blob/f7c5cbb82087c55aa82081e931e0142783700ce8/src/net/sendfile_linux.go#L20 - // Windows can lock up with this optimization, so we fall back to regular copy. - sr, ok := rc.(*sendFileReader) - if ok { - _, err = rf.ReadFrom(sr.Reader) - if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients - logger.LogIf(r.Context(), err) - } - if err == nil || !errors.Is(err, xhttp.ErrNotImplemented) { - return - } - } - } // Fallback to regular copy - _, err = xioutil.Copy(w, rc) if !xnet.IsNetworkOrHostDown(err, true) { // do not need to log disconnected clients - logger.LogIf(r.Context(), err) + storageLogIf(r.Context(), err) } } @@ -639,12 +659,6 @@ func (s *storageRESTServer) DeleteFileHandler(p *DeleteFileHandlerParams) (grid. return grid.NewNPErr(s.getStorage().Delete(context.Background(), p.Volume, p.FilePath, p.Opts)) } -// DeleteVersionsErrsResp - collection of delete errors -// for bulk version deletes -type DeleteVersionsErrsResp struct { - Errs []error -} - // DeleteVersionsHandler - delete a set of a versions. func (s *storageRESTServer) DeleteVersionsHandler(w http.ResponseWriter, r *http.Request) { if !s.IsValid(w, r) { @@ -661,7 +675,7 @@ func (s *storageRESTServer) DeleteVersionsHandler(w http.ResponseWriter, r *http versions := make([]FileInfoVersions, totalVersions) decoder := msgpNewReader(r.Body) defer readMsgpReaderPoolPut(decoder) - for i := 0; i < totalVersions; i++ { + for i := range totalVersions { dst := &versions[i] if err := dst.DecodeMsg(decoder); err != nil { s.writeErrorResponse(w, err) @@ -669,21 +683,20 @@ func (s *storageRESTServer) DeleteVersionsHandler(w http.ResponseWriter, r *http } } - dErrsResp := &DeleteVersionsErrsResp{Errs: make([]error, totalVersions)} - - setEventStreamHeaders(w) - encoder := gob.NewEncoder(w) done := keepHTTPResponseAlive(w) - opts := DeleteOptions{} errs := s.getStorage().DeleteVersions(r.Context(), volume, versions, opts) done(nil) + + dErrsResp := &DeleteVersionsErrsResp{Errs: make([]string, totalVersions)} for idx := range versions { if errs[idx] != nil { - dErrsResp.Errs[idx] = StorageErr(errs[idx].Error()) + dErrsResp.Errs[idx] = errs[idx].Error() } } - encoder.Encode(dErrsResp) + + buf, _ := dErrsResp.MarshalMsg(nil) + w.Write(buf) } // RenameDataHandler - renames a meta object and data dir to destination. 
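Several handlers in this file (DeleteVersionsHandler, VerifyFileHandler, DeleteBulkHandler) rely on keepHTTPResponseAlive, which holds the HTTP response open by writing a filler byte (32) while the storage call runs and then a status byte: 0 for success, or 1 followed by the error text, which the client-side wait helpers skip and decode. A rough sketch of both ends of that framing, assuming the behaviour described in this diff; buffer handling and helper names are illustrative:

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
)

// encodeKeepAlive simulates the server side for a call that produced err,
// after having written `fillers` keep-alive bytes while it was running.
func encodeKeepAlive(w io.Writer, fillers int, err error) {
	for i := 0; i < fillers; i++ {
		w.Write([]byte{32}) // operation still running
	}
	if err != nil {
		w.Write([]byte{1})
		w.Write([]byte(err.Error()))
		return
	}
	w.Write([]byte{0})
}

// decodeKeepAlive is the client side: skip fillers, then map the status byte.
func decodeKeepAlive(r io.Reader) error {
	var b [1]byte
	for {
		if _, err := io.ReadFull(r, b[:]); err != nil {
			return err
		}
		switch b[0] {
		case 32:
			continue // filler, keep waiting
		case 0:
			return nil // success, any payload follows in the body
		case 1:
			msg, _ := io.ReadAll(r)
			return errors.New(string(msg))
		default:
			return fmt.Errorf("unexpected response byte %d", b[0])
		}
	}
}

func main() {
	var buf bytes.Buffer
	encodeKeepAlive(&buf, 3, errors.New("drive not found"))
	fmt.Println(decodeKeepAlive(&buf)) // drive not found
}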
@@ -692,11 +705,14 @@ func (s *storageRESTServer) RenameDataHandler(p *RenameDataHandlerParams) (*Rena return nil, grid.NewRemoteErr(errDiskNotFound) } - sign, err := s.getStorage().RenameData(context.Background(), p.SrcVolume, p.SrcPath, p.FI, p.DstVolume, p.DstPath, p.Opts) - resp := &RenameDataResp{ - Signature: sign, - } - return resp, grid.NewRemoteErr(err) + resp, err := s.getStorage().RenameData(context.Background(), p.SrcVolume, p.SrcPath, p.FI, p.DstVolume, p.DstPath, p.Opts) + return &resp, grid.NewRemoteErr(err) +} + +// RenameDataInlineHandler - renames a meta object and data dir to destination. +func (s *storageRESTServer) RenameDataInlineHandler(p *RenameDataInlineHandlerParams) (*RenameDataResp, *grid.RemoteErr) { + defer p.Recycle() + return s.RenameDataHandler(&p.RenameDataHandlerParams) } // RenameFileHandler - rename a file from source to destination @@ -707,6 +723,14 @@ func (s *storageRESTServer) RenameFileHandler(p *RenameFileHandlerParams) (grid. return grid.NewNPErr(s.getStorage().RenameFile(context.Background(), p.SrcVolume, p.SrcFilePath, p.DstVolume, p.DstFilePath)) } +// RenamePartHandler - rename a multipart part from source to destination +func (s *storageRESTServer) RenamePartHandler(p *RenamePartHandlerParams) (grid.NoPayload, *grid.RemoteErr) { + if !s.checkID(p.DiskID) { + return grid.NewNPErr(errDiskNotFound) + } + return grid.NewNPErr(s.getStorage().RenamePart(context.Background(), p.SrcVolume, p.SrcFilePath, p.DstVolume, p.DstFilePath, p.Meta, p.SkipParent)) +} + // CleanAbandonedDataHandler - Clean unused data directories. func (s *storageRESTServer) CleanAbandonedDataHandler(w http.ResponseWriter, r *http.Request) { if !s.IsValid(w, r) { @@ -785,13 +809,30 @@ func keepHTTPReqResponseAlive(w http.ResponseWriter, r *http.Request) (resp func defer xioutil.SafeClose(doneCh) // Initiate ticker after body has been read. ticker := time.NewTicker(time.Second * 10) + defer ticker.Stop() + for { select { case <-ticker.C: + // The done() might have been called + // concurrently, check for it before we + // write the filler byte. + select { + case err := <-doneCh: + if err != nil { + write([]byte{1}) + write([]byte(err.Error())) + } else { + write([]byte{0}) + } + return + default: + } + // Response not ready, write a filler byte. write([]byte{32}) if canWrite { - w.(http.Flusher).Flush() + xhttp.Flush(w) } case err := <-doneCh: if err != nil { @@ -800,7 +841,6 @@ func keepHTTPReqResponseAlive(w http.ResponseWriter, r *http.Request) (resp func } else { write([]byte{0}) } - ticker.Stop() return } } @@ -848,10 +888,25 @@ func keepHTTPResponseAlive(w http.ResponseWriter) func(error) { for { select { case <-ticker.C: + // The done() might have been called + // concurrently, check for it before we + // write the filler byte. + select { + case err := <-doneCh: + if err != nil { + write([]byte{1}) + write([]byte(err.Error())) + } else { + write([]byte{0}) + } + return + default: + } + // Response not ready, write a filler byte. write([]byte{32}) if canWrite { - w.(http.Flusher).Flush() + xhttp.Flush(w) } case err := <-doneCh: if err != nil { @@ -971,7 +1026,7 @@ func streamHTTPResponse(w http.ResponseWriter) *httpStreamResponse { // Response not ready, write a filler byte. 
write([]byte{32}) if canWrite { - w.(http.Flusher).Flush() + xhttp.Flush(w) } case err := <-doneCh: if err != nil { @@ -989,7 +1044,7 @@ func streamHTTPResponse(w http.ResponseWriter) *httpStreamResponse { write(tmp[:]) write(block) if canWrite { - w.(http.Flusher).Flush() + xhttp.Flush(w) } } } @@ -997,29 +1052,23 @@ func streamHTTPResponse(w http.ResponseWriter) *httpStreamResponse { return &h } -var poolBuf8k = sync.Pool{ - New: func() interface{} { +var poolBuf8k = bpool.Pool[*[]byte]{ + New: func() *[]byte { b := make([]byte, 8192) return &b }, } -var poolBuf128k = sync.Pool{ - New: func() interface{} { - b := make([]byte, 128<<10) - return b - }, -} - // waitForHTTPStream will wait for responses where // streamHTTPResponse has been used. // The returned reader contains the payload and must be closed if no error is returned. func waitForHTTPStream(respBody io.ReadCloser, w io.Writer) error { var tmp [1]byte // 8K copy buffer, reused for less allocs... - bufp := poolBuf8k.Get().(*[]byte) + bufp := poolBuf8k.Get() buf := *bufp defer poolBuf8k.Put(bufp) + for { _, err := io.ReadFull(respBody, tmp[:]) if err != nil { @@ -1064,11 +1113,6 @@ func waitForHTTPStream(respBody io.ReadCloser, w io.Writer) error { } } -// VerifyFileResp - VerifyFile()'s response. -type VerifyFileResp struct { - Err error -} - // VerifyFileHandler - Verify all part of file for bitrot errors. func (s *storageRESTServer) VerifyFileHandler(w http.ResponseWriter, r *http.Request) { if !s.IsValid(w, r) { @@ -1088,16 +1132,15 @@ func (s *storageRESTServer) VerifyFileHandler(w http.ResponseWriter, r *http.Req return } - setEventStreamHeaders(w) - encoder := gob.NewEncoder(w) done := keepHTTPResponseAlive(w) - err := s.getStorage().VerifyFile(r.Context(), volume, filePath, fi) - done(nil) - vresp := &VerifyFileResp{} + resp, err := s.getStorage().VerifyFile(r.Context(), volume, filePath, fi) + done(err) if err != nil { - vresp.Err = StorageErr(err.Error()) + return } - encoder.Encode(vresp) + + buf, _ := resp.MarshalMsg(nil) + w.Write(buf) } func checkDiskFatalErrs(errs []error) error { @@ -1113,7 +1156,7 @@ func checkDiskFatalErrs(errs []error) error { } if countErrs(errs, errFileAccessDenied) == len(errs) { - return errDiskAccessDenied + return errFileAccessDenied } if countErrs(errs, errDiskNotDir) == len(errs) { @@ -1149,7 +1192,7 @@ func logFatalErrs(err error, endpoint Endpoint, exit bool) { } else { hint = "Drives do not support O_DIRECT flags, MinIO erasure coding requires filesystems with O_DIRECT support" } - logger.Fatal(config.ErrUnsupportedBackend(err).Hint(hint), "Unable to initialize backend") + logger.Fatal(config.ErrUnsupportedBackend(err).Hint("%s", hint), "Unable to initialize backend") case errors.Is(err, errDiskNotDir): var hint string if endpoint.URL != nil { @@ -1157,7 +1200,7 @@ func logFatalErrs(err error, endpoint Endpoint, exit bool) { } else { hint = "Drives are not directories, MinIO erasure coding needs directories" } - logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint(hint), "Unable to initialize backend") + logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint("%s", hint), "Unable to initialize backend") case errors.Is(err, errDiskAccessDenied): // Show a descriptive error with a hint about how to fix it. var username string @@ -1174,25 +1217,29 @@ func logFatalErrs(err error, endpoint Endpoint, exit bool) { hint = fmt.Sprintf("Run the following command to add write permissions: `sudo chown -R %s. 
&& sudo chmod u+rxw `", username) } if !exit { - logger.LogOnceIf(GlobalContext, fmt.Errorf("Drive is not writable %s, %s", endpoint, hint), "log-fatal-errs") + storageLogOnceIf(GlobalContext, fmt.Errorf("Drive is not writable %s, %s", endpoint, hint), "log-fatal-errs") } else { - logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint(hint), "Unable to initialize backend") + logger.Fatal(config.ErrUnableToWriteInBackend(err).Hint("%s", hint), "Unable to initialize backend") } case errors.Is(err, errFaultyDisk): if !exit { - logger.LogOnceIf(GlobalContext, fmt.Errorf("Drive is faulty at %s, please replace the drive - drive will be offline", endpoint), "log-fatal-errs") + storageLogOnceIf(GlobalContext, fmt.Errorf("Drive is faulty at %s, please replace the drive - drive will be offline", endpoint), "log-fatal-errs") } else { logger.Fatal(err, "Unable to initialize backend") } case errors.Is(err, errDiskFull): if !exit { - logger.LogOnceIf(GlobalContext, fmt.Errorf("Drive is already full at %s, incoming I/O will fail - drive will be offline", endpoint), "log-fatal-errs") + storageLogOnceIf(GlobalContext, fmt.Errorf("Drive is already full at %s, incoming I/O will fail - drive will be offline", endpoint), "log-fatal-errs") } else { logger.Fatal(err, "Unable to initialize backend") } + case errors.Is(err, errInconsistentDisk): + if exit { + logger.Fatal(err, "Unable to initialize backend") + } default: if !exit { - logger.LogOnceIf(GlobalContext, fmt.Errorf("Drive %s returned an unexpected error: %w, please investigate - drive will be offline", endpoint, err), "log-fatal-errs") + storageLogOnceIf(GlobalContext, fmt.Errorf("Drive %s returned an unexpected error: %w, please investigate - drive will be offline", endpoint, err), "log-fatal-errs") } else { logger.Fatal(err, "Unable to initialize backend") } @@ -1218,6 +1265,24 @@ func (s *storageRESTServer) StatInfoFile(w http.ResponseWriter, r *http.Request) } } +func (s *storageRESTServer) DeleteBulkHandler(w http.ResponseWriter, r *http.Request) { + if !s.IsValid(w, r) { + return + } + + var req DeleteBulkReq + mr := msgpNewReader(r.Body) + defer readMsgpReaderPoolPut(mr) + + if err := req.DecodeMsg(mr); err != nil { + s.writeErrorResponse(w, err) + return + } + + volume := r.Form.Get(storageRESTVolume) + keepHTTPResponseAlive(w)(s.getStorage().DeleteBulk(r.Context(), volume, req.Paths...)) +} + // ReadMultiple returns multiple files func (s *storageRESTServer) ReadMultiple(w http.ResponseWriter, r *http.Request) { if !s.IsValid(w, r) { @@ -1272,6 +1337,7 @@ func registerStorageRESTHandlers(router *mux.Router, endpointServerPools Endpoin return collectInternodeStats(httpTraceHdrs(f)) } + globalLocalDrivesMap = make(map[string]StorageAPI) globalLocalSetDrives = make([][][]StorageAPI, len(endpointServerPools)) for pool := range globalLocalSetDrives { globalLocalSetDrives[pool] = make([][]StorageAPI, endpointServerPools[pool].SetCount) @@ -1293,24 +1359,29 @@ func registerStorageRESTHandlers(router *mux.Router, endpointServerPools Endpoin subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodHealth).HandlerFunc(h(server.HealthHandler)) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodAppendFile).HandlerFunc(h(server.AppendFileHandler)) - subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadVersion).HandlerFunc(h(server.ReadVersionHandler)) - subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + 
storageRESTMethodReadXL).HandlerFunc(h(server.ReadXLHandler)) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCreateFile).HandlerFunc(h(server.CreateFileHandler)) - subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadFile).HandlerFunc(h(server.ReadFileHandler)) - subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadFileStream).HandlerFunc(h(server.ReadFileStreamHandler)) - subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeleteVersions).HandlerFunc(h(server.DeleteVersionsHandler)) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodVerifyFile).HandlerFunc(h(server.VerifyFileHandler)) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodStatInfoFile).HandlerFunc(h(server.StatInfoFile)) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadMultiple).HandlerFunc(h(server.ReadMultiple)) subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCleanAbandoned).HandlerFunc(h(server.CleanAbandonedDataHandler)) + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeleteBulk).HandlerFunc(h(server.DeleteBulkHandler)) + subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadParts).HandlerFunc(h(server.ReadPartsHandler)) + + subrouter.Methods(http.MethodGet).Path(storageRESTVersionPrefix + storageRESTMethodReadFileStream).HandlerFunc(h(server.ReadFileStreamHandler)) + subrouter.Methods(http.MethodGet).Path(storageRESTVersionPrefix + storageRESTMethodReadVersion).HandlerFunc(h(server.ReadVersionHandler)) + subrouter.Methods(http.MethodGet).Path(storageRESTVersionPrefix + storageRESTMethodReadXL).HandlerFunc(h(server.ReadXLHandler)) + subrouter.Methods(http.MethodGet).Path(storageRESTVersionPrefix + storageRESTMethodReadFile).HandlerFunc(h(server.ReadFileHandler)) + logger.FatalIf(storageListDirRPC.RegisterNoInput(gm, server.ListDirHandler, endpoint.Path), "unable to register handler") logger.FatalIf(storageReadAllRPC.Register(gm, server.ReadAllHandler, endpoint.Path), "unable to register handler") logger.FatalIf(storageWriteAllRPC.Register(gm, server.WriteAllHandler, endpoint.Path), "unable to register handler") logger.FatalIf(storageRenameFileRPC.Register(gm, server.RenameFileHandler, endpoint.Path), "unable to register handler") + logger.FatalIf(storageRenamePartRPC.Register(gm, server.RenamePartHandler, endpoint.Path), "unable to register handler") logger.FatalIf(storageRenameDataRPC.Register(gm, server.RenameDataHandler, endpoint.Path), "unable to register handler") + logger.FatalIf(storageRenameDataInlineRPC.Register(gm, server.RenameDataInlineHandler, endpoint.Path), "unable to register handler") logger.FatalIf(storageDeleteFileRPC.Register(gm, server.DeleteFileHandler, endpoint.Path), "unable to register handler") - logger.FatalIf(storageCheckPartsRPC.Register(gm, server.CheckPartsHandler, endpoint.Path), "unable to register handler") + logger.FatalIf(storageCheckPartsRPC.RegisterNoInput(gm, server.CheckPartsHandler, endpoint.Path), "unable to register handler") logger.FatalIf(storageReadVersionRPC.Register(gm, server.ReadVersionHandlerWS, endpoint.Path), "unable to register handler") logger.FatalIf(storageWriteMetadataRPC.Register(gm, server.WriteMetadataHandler, endpoint.Path), "unable to register handler") logger.FatalIf(storageUpdateMetadataRPC.Register(gm, 
server.UpdateMetadataHandler, endpoint.Path), "unable to register handler") @@ -1325,7 +1396,7 @@ func registerStorageRESTHandlers(router *mux.Router, endpointServerPools Endpoin OutCapacity: 1, }), "unable to register handler") - createStorage := func(server *storageRESTServer) bool { + createStorage := func(endpoint Endpoint) bool { xl, err := newXLStorage(endpoint, false) if err != nil { // if supported errors don't fail, we proceed to @@ -1344,25 +1415,24 @@ func registerStorageRESTHandlers(router *mux.Router, endpointServerPools Endpoin globalLocalDrivesMu.Lock() defer globalLocalDrivesMu.Unlock() - globalLocalDrives = append(globalLocalDrives, storage) + globalLocalDrivesMap[endpoint.String()] = storage globalLocalSetDrives[endpoint.PoolIdx][endpoint.SetIdx][endpoint.DiskIdx] = storage return true } - if createStorage(server) { + if createStorage(endpoint) { continue } // Start async goroutine to create storage. - go func(server *storageRESTServer) { + go func(endpoint Endpoint) { for { time.Sleep(3 * time.Second) - if createStorage(server) { + if createStorage(endpoint) { return } } - }(server) - + }(endpoint) } } } diff --git a/cmd/storage-rest_test.go b/cmd/storage-rest_test.go index e1f49dc0b3484..a601d7996a1d0 100644 --- a/cmd/storage-rest_test.go +++ b/cmd/storage-rest_test.go @@ -19,7 +19,6 @@ package cmd import ( "bytes" - "context" "errors" "math/rand" "reflect" @@ -28,7 +27,7 @@ import ( "time" "github.com/minio/minio/internal/grid" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" ) // Storage REST server, storageRESTReceiver and StorageRESTClient are @@ -41,7 +40,7 @@ func testStorageAPIDiskInfo(t *testing.T, storage StorageAPI) { } for i, testCase := range testCases { - _, err := storage.DiskInfo(context.Background(), DiskInfoOptions{Metrics: true}) + _, err := storage.DiskInfo(t.Context(), DiskInfoOptions{Metrics: true}) expectErr := (err != nil) if expectErr != testCase.expectErr { @@ -54,7 +53,7 @@ func testStorageAPIDiskInfo(t *testing.T, storage StorageAPI) { } func testStorageAPIStatInfoFile(t *testing.T, storage StorageAPI) { - err := storage.AppendFile(context.Background(), "foo", pathJoin("myobject", xlStorageFormatFile), []byte("foo")) + err := storage.AppendFile(t.Context(), "foo", pathJoin("myobject", xlStorageFormatFile), []byte("foo")) if err != nil { t.Fatalf("unexpected error %v", err) } @@ -70,7 +69,7 @@ func testStorageAPIStatInfoFile(t *testing.T, storage StorageAPI) { } for i, testCase := range testCases { - _, err := storage.StatInfoFile(context.Background(), testCase.volumeName, testCase.objectName+"/"+xlStorageFormatFile, false) + _, err := storage.StatInfoFile(t.Context(), testCase.volumeName, testCase.objectName+"/"+xlStorageFormatFile, false) expectErr := (err != nil) if expectErr != testCase.expectErr { @@ -80,7 +79,7 @@ func testStorageAPIStatInfoFile(t *testing.T, storage StorageAPI) { } func testStorageAPIListDir(t *testing.T, storage StorageAPI) { - err := storage.AppendFile(context.Background(), "foo", "path/to/myobject", []byte("foo")) + err := storage.AppendFile(t.Context(), "foo", "path/to/myobject", []byte("foo")) if err != nil { t.Fatalf("unexpected error %v", err) } @@ -97,7 +96,7 @@ func testStorageAPIListDir(t *testing.T, storage StorageAPI) { } for i, testCase := range testCases { - result, err := storage.ListDir(context.Background(), "", testCase.volumeName, testCase.prefix, -1) + result, err := storage.ListDir(t.Context(), "", testCase.volumeName, testCase.prefix, -1) expectErr := (err != nil) if 
expectErr != testCase.expectErr { @@ -113,7 +112,7 @@ func testStorageAPIListDir(t *testing.T, storage StorageAPI) { } func testStorageAPIReadAll(t *testing.T, storage StorageAPI) { - err := storage.AppendFile(context.Background(), "foo", "myobject", []byte("foo")) + err := storage.AppendFile(t.Context(), "foo", "myobject", []byte("foo")) if err != nil { t.Fatalf("unexpected error %v", err) } @@ -130,7 +129,7 @@ func testStorageAPIReadAll(t *testing.T, storage StorageAPI) { } for i, testCase := range testCases { - result, err := storage.ReadAll(context.Background(), testCase.volumeName, testCase.objectName) + result, err := storage.ReadAll(t.Context(), testCase.volumeName, testCase.objectName) expectErr := (err != nil) if expectErr != testCase.expectErr { @@ -146,7 +145,7 @@ func testStorageAPIReadAll(t *testing.T, storage StorageAPI) { } func testStorageAPIReadFile(t *testing.T, storage StorageAPI) { - err := storage.AppendFile(context.Background(), "foo", "myobject", []byte("foo")) + err := storage.AppendFile(t.Context(), "foo", "myobject", []byte("foo")) if err != nil { t.Fatalf("unexpected error %v", err) } @@ -167,7 +166,7 @@ func testStorageAPIReadFile(t *testing.T, storage StorageAPI) { result := make([]byte, 100) for i, testCase := range testCases { result = result[testCase.offset:3] - _, err := storage.ReadFile(context.Background(), testCase.volumeName, testCase.objectName, testCase.offset, result, nil) + _, err := storage.ReadFile(t.Context(), testCase.volumeName, testCase.objectName, testCase.offset, result, nil) expectErr := (err != nil) if expectErr != testCase.expectErr { @@ -209,7 +208,7 @@ func testStorageAPIAppendFile(t *testing.T, storage StorageAPI) { if testCase.ignoreIfWindows && runtime.GOOS == "windows" { continue } - err := storage.AppendFile(context.Background(), testCase.volumeName, testCase.objectName, testCase.data) + err := storage.AppendFile(t.Context(), testCase.volumeName, testCase.objectName, testCase.data) expectErr := (err != nil) if expectErr != testCase.expectErr { @@ -217,7 +216,7 @@ func testStorageAPIAppendFile(t *testing.T, storage StorageAPI) { } if !testCase.expectErr { - data, err := storage.ReadAll(context.Background(), testCase.volumeName, testCase.objectName) + data, err := storage.ReadAll(t.Context(), testCase.volumeName, testCase.objectName) if err != nil { t.Fatal(err) } @@ -230,7 +229,7 @@ func testStorageAPIAppendFile(t *testing.T, storage StorageAPI) { } func testStorageAPIDeleteFile(t *testing.T, storage StorageAPI) { - err := storage.AppendFile(context.Background(), "foo", "myobject", []byte("foo")) + err := storage.AppendFile(t.Context(), "foo", "myobject", []byte("foo")) if err != nil { t.Fatalf("unexpected error %v", err) } @@ -248,7 +247,7 @@ func testStorageAPIDeleteFile(t *testing.T, storage StorageAPI) { } for i, testCase := range testCases { - err := storage.Delete(context.Background(), testCase.volumeName, testCase.objectName, DeleteOptions{ + err := storage.Delete(t.Context(), testCase.volumeName, testCase.objectName, DeleteOptions{ Recursive: false, Immediate: false, }) @@ -261,12 +260,12 @@ func testStorageAPIDeleteFile(t *testing.T, storage StorageAPI) { } func testStorageAPIRenameFile(t *testing.T, storage StorageAPI) { - err := storage.AppendFile(context.Background(), "foo", "myobject", []byte("foo")) + err := storage.AppendFile(t.Context(), "foo", "myobject", []byte("foo")) if err != nil { t.Fatalf("unexpected error %v", err) } - err = storage.AppendFile(context.Background(), "foo", "otherobject", []byte("foo")) + err = 
storage.AppendFile(t.Context(), "foo", "otherobject", []byte("foo")) if err != nil { t.Fatalf("unexpected error %v", err) } @@ -285,7 +284,7 @@ func testStorageAPIRenameFile(t *testing.T, storage StorageAPI) { } for i, testCase := range testCases { - err := storage.RenameFile(context.Background(), testCase.volumeName, testCase.objectName, testCase.destVolumeName, testCase.destObjectName) + err := storage.RenameFile(t.Context(), testCase.volumeName, testCase.objectName, testCase.destVolumeName, testCase.destObjectName) expectErr := (err != nil) if expectErr != testCase.expectErr { @@ -315,6 +314,7 @@ func newStorageRESTHTTPServerClient(t testing.TB) *storageRESTClient { url.Path = t.TempDir() globalMinioHost, globalMinioPort = mustSplitHostPort(url.Host) + globalNodeAuthToken, _ = authenticateNode(globalActiveCred.AccessKey, globalActiveCred.SecretKey) endpoint, err := NewEndpoint(url.String()) if err != nil { @@ -340,11 +340,11 @@ func newStorageRESTHTTPServerClient(t testing.TB) *storageRESTClient { registerStorageRESTHandlers(tg.Mux[1], poolEps, tg.Managers[1]) storage := globalLocalSetDrives[0][0][0] - if err = storage.MakeVol(context.Background(), "foo"); err != nil { + if err = storage.MakeVol(t.Context(), "foo"); err != nil { t.Fatalf("unexpected error %v", err) } - if err = storage.MakeVol(context.Background(), "bar"); err != nil { + if err = storage.MakeVol(t.Context(), "bar"); err != nil { t.Fatalf("unexpected error %v", err) } @@ -354,7 +354,7 @@ func newStorageRESTHTTPServerClient(t testing.TB) *storageRESTClient { } for { - _, err := restClient.DiskInfo(context.Background(), DiskInfoOptions{}) + _, err := restClient.DiskInfo(t.Context(), DiskInfoOptions{}) if err == nil || errors.Is(err, errUnformattedDisk) { break } diff --git a/cmd/storagemetric_string.go b/cmd/storagemetric_string.go index 8cb65838d83a9..794781329dbc3 100644 --- a/cmd/storagemetric_string.go +++ b/cmd/storagemetric_string.go @@ -36,12 +36,15 @@ func _() { _ = x[storageMetricReadMultiple-25] _ = x[storageMetricDeleteAbandonedParts-26] _ = x[storageMetricDiskInfo-27] - _ = x[storageMetricLast-28] + _ = x[storageMetricDeleteBulk-28] + _ = x[storageMetricRenamePart-29] + _ = x[storageMetricReadParts-30] + _ = x[storageMetricLast-31] } -const _storageMetric_name = "MakeVolBulkMakeVolListVolsStatVolDeleteVolWalkDirListDirReadFileAppendFileCreateFileReadFileStreamRenameFileRenameDataCheckPartsDeleteDeleteVersionsVerifyFileWriteAllDeleteVersionWriteMetadataUpdateMetadataReadVersionReadXLReadAllStatInfoFileReadMultipleDeleteAbandonedPartsDiskInfoLast" +const _storageMetric_name = "MakeVolBulkMakeVolListVolsStatVolDeleteVolWalkDirListDirReadFileAppendFileCreateFileReadFileStreamRenameFileRenameDataCheckPartsDeleteDeleteVersionsVerifyFileWriteAllDeleteVersionWriteMetadataUpdateMetadataReadVersionReadXLReadAllStatInfoFileReadMultipleDeleteAbandonedPartsDiskInfoDeleteBulkRenamePartReadPartsLast" -var _storageMetric_index = [...]uint16{0, 11, 18, 26, 33, 42, 49, 56, 64, 74, 84, 98, 108, 118, 128, 134, 148, 158, 166, 179, 192, 206, 217, 223, 230, 242, 254, 274, 282, 286} +var _storageMetric_index = [...]uint16{0, 11, 18, 26, 33, 42, 49, 56, 64, 74, 84, 98, 108, 118, 128, 134, 148, 158, 166, 179, 192, 206, 217, 223, 230, 242, 254, 274, 282, 292, 302, 311, 315} func (i storageMetric) String() string { if i >= storageMetric(len(_storageMetric_index)-1) { diff --git a/cmd/streaming-signature-v4.go b/cmd/streaming-signature-v4.go index dc8679989c813..702714be0ac29 100644 --- a/cmd/streaming-signature-v4.go +++ 
b/cmd/streaming-signature-v4.go @@ -106,7 +106,7 @@ func calculateSeedSignature(r *http.Request, trailers bool) (cred auth.Credentia v4Auth := req.Header.Get(xhttp.Authorization) // Parse signature version '4' header. - signV4Values, errCode := parseSignV4(v4Auth, globalSite.Region, serviceS3) + signV4Values, errCode := parseSignV4(v4Auth, globalSite.Region(), serviceS3) if errCode != ErrNone { return cred, "", "", time.Time{}, errCode } @@ -592,9 +592,10 @@ func readChunkLine(b *bufio.Reader) ([]byte, []byte, error) { if err != nil { // We always know when EOF is coming. // If the caller asked for a line, there should be a line. - if err == io.EOF { + switch err { + case io.EOF: err = io.ErrUnexpectedEOF - } else if err == bufio.ErrBufferFull { + case bufio.ErrBufferFull: err = errLineTooLong } return nil, nil, err @@ -662,5 +663,5 @@ func parseHexUint(v []byte) (n uint64, err error) { n <<= 4 n |= uint64(b) } - return + return n, err } diff --git a/cmd/streaming-signature-v4_test.go b/cmd/streaming-signature-v4_test.go index 89a729f550060..8ea1301ccc8b4 100644 --- a/cmd/streaming-signature-v4_test.go +++ b/cmd/streaming-signature-v4_test.go @@ -41,7 +41,7 @@ func TestReadChunkLine(t *testing.T) { // Test - 2 bytes.NewReader([]byte("1000;")), // Test - 3 - bytes.NewReader([]byte(fmt.Sprintf("%4097d", 1))), + bytes.NewReader(fmt.Appendf(nil, "%4097d", 1)), // Test - 4 bytes.NewReader([]byte("1000;chunk-signature=111123333333333333334444211\r\n")), } diff --git a/cmd/streaming-v4-unsigned.go b/cmd/streaming-v4-unsigned.go index e0acb95e45695..a316686788b4d 100644 --- a/cmd/streaming-v4-unsigned.go +++ b/cmd/streaming-v4-unsigned.go @@ -29,7 +29,12 @@ import ( // newUnsignedV4ChunkedReader returns a new s3UnsignedChunkedReader that translates the data read from r // out of HTTP "chunked" format before returning it. // The s3ChunkedReader returns io.EOF when the final 0-length chunk is read. -func newUnsignedV4ChunkedReader(req *http.Request, trailer bool) (io.ReadCloser, APIErrorCode) { +func newUnsignedV4ChunkedReader(req *http.Request, trailer bool, signature bool) (io.ReadCloser, APIErrorCode) { + if signature { + if errCode := doesSignatureMatch(unsignedPayloadTrailer, req, globalSite.Region(), serviceS3); errCode != ErrNone { + return nil, errCode + } + } if trailer { // Discard anything unsigned. req.Trailer = make(http.Header) diff --git a/cmd/sts-errors.go b/cmd/sts-errors.go index 085ce9c698e6c..c68b68f1db357 100644 --- a/cmd/sts-errors.go +++ b/cmd/sts-errors.go @@ -40,7 +40,7 @@ func writeSTSErrorResponse(ctx context.Context, w http.ResponseWriter, errCode S } switch errCode { case ErrSTSInternalError, ErrSTSUpstreamError: - logger.LogIf(ctx, err, logger.ErrorKind) + stsLogIf(ctx, err, logger.ErrorKind) } encodedErrorResponse := encodeResponse(stsErrorResponse) writeResponse(w, stsErr.HTTPStatusCode, encodedErrorResponse, mimeXML) @@ -81,6 +81,7 @@ const ( ErrSTSMalformedPolicyDocument ErrSTSInsecureConnection ErrSTSInvalidClientCertificate + ErrSTSTooManyIntermediateCAs ErrSTSNotInitialized ErrSTSIAMNotInitialized ErrSTSUpstreamError @@ -145,6 +146,11 @@ var stsErrCodes = stsErrorCodeMap{ Description: "The provided client certificate is invalid. 
Retry with a different certificate.", HTTPStatusCode: http.StatusBadRequest, }, + ErrSTSTooManyIntermediateCAs: { + Code: "TooManyIntermediateCAs", + Description: "The provided client certificate contains too many intermediate CA certificates", + HTTPStatusCode: http.StatusBadRequest, + }, ErrSTSNotInitialized: { Code: "STSNotInitialized", Description: "STS API not initialized, please try again.", diff --git a/cmd/sts-handlers.go b/cmd/sts-handlers.go index 4b755f8ea8643..c4092bca3bcd2 100644 --- a/cmd/sts-handlers.go +++ b/cmd/sts-handlers.go @@ -22,9 +22,11 @@ import ( "context" "crypto/x509" "encoding/base64" + "encoding/json" "errors" "fmt" "net/http" + "net/url" "strconv" "strings" "time" @@ -36,8 +38,8 @@ import ( xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" - "github.com/minio/pkg/v2/wildcard" + "github.com/minio/pkg/v3/policy" + "github.com/minio/pkg/v3/wildcard" ) const ( @@ -53,6 +55,7 @@ const ( stsDurationSeconds = "DurationSeconds" stsLDAPUsername = "LDAPUsername" stsLDAPPassword = "LDAPPassword" + stsRevokeTokenType = "TokenRevokeType" // STS API action constants clientGrants = "AssumeRoleWithClientGrants" @@ -74,13 +77,61 @@ const ( parentClaim = "parent" // LDAP claim keys - ldapUser = "ldapUser" - ldapUserN = "ldapUsername" + ldapUser = "ldapUser" // this is a key name for a normalized DN value + ldapActualUser = "ldapActualUser" // this is a key name for the actual DN value + ldapUserN = "ldapUsername" // this is a key name for the short/login username + // Claim key-prefix for LDAP attributes + ldapAttribPrefix = "ldapAttrib_" // Role Claim key roleArnClaim = "roleArn" + + // STS revoke type claim key + tokenRevokeTypeClaim = "tokenRevokeType" + + // maximum supported STS session policy size + maxSTSSessionPolicySize = 2048 ) +type stsClaims map[string]any + +func (c stsClaims) populateSessionPolicy(form url.Values) error { + if len(form) == 0 { + return nil + } + + sessionPolicyStr := form.Get(stsPolicy) + if len(sessionPolicyStr) == 0 { + return nil + } + + sessionPolicy, err := policy.ParseConfig(bytes.NewReader([]byte(sessionPolicyStr))) + if err != nil { + return err + } + + // Version in policy must not be empty + if sessionPolicy.Version == "" { + return errors.New("Version cannot be empty expecting '2012-10-17'") + } + + policyBuf, err := json.Marshal(sessionPolicy) + if err != nil { + return err + } + + // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html + // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html + // The plain text that you use for both inline and managed session + // policies shouldn't exceed maxSTSSessionPolicySize characters. + if len(policyBuf) > maxSTSSessionPolicySize { + return errSessionPolicyTooLarge + } + + c[policy.SessionPolicyName] = base64.StdEncoding.EncodeToString(policyBuf) + return nil +} + // stsAPIHandlers implements and provides http handlers for AWS STS API. 
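The new `stsClaims.populateSessionPolicy` helper above centralizes the inline session-policy handling that the AssumeRole variants previously duplicated: parse the policy, reject an empty `Version`, re-marshal the parsed form, enforce the size cap, and store the result base64-encoded in the claims. A minimal standalone sketch of that flow, using only the standard library (`maxSize` stands in for `maxSTSSessionPolicySize`, and the small struct below is a stand-in for the full `policy.ParseConfig` validation):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
)

// sessionPolicy captures only the fields this sketch validates; the real code
// relies on policy.ParseConfig for full structural validation.
type sessionPolicy struct {
	Version   string            `json:"Version"`
	Statement []json.RawMessage `json:"Statement"`
}

// encodeSessionPolicy mirrors populateSessionPolicy: parse, require a Version,
// re-marshal to a canonical form, enforce the size cap, then base64-encode the
// result for embedding in the STS token claims.
func encodeSessionPolicy(raw string, maxSize int) (string, error) {
	if raw == "" {
		return "", nil // no inline policy supplied
	}
	var p sessionPolicy
	if err := json.Unmarshal([]byte(raw), &p); err != nil {
		return "", err
	}
	if p.Version == "" {
		return "", errors.New("Version cannot be empty expecting '2012-10-17'")
	}
	buf, err := json.Marshal(p)
	if err != nil {
		return "", err
	}
	if len(buf) > maxSize {
		return "", errors.New("session policy is too large")
	}
	return base64.StdEncoding.EncodeToString(buf), nil
}

func main() {
	enc, err := encodeSessionPolicy(`{"Version":"2012-10-17","Statement":[]}`, 2048)
	fmt.Println(enc, err)
}
```

Note that the limit is applied to the re-marshaled policy rather than the raw form field, which matches the patched behaviour.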
type stsAPIHandlers struct{} @@ -155,12 +206,12 @@ func checkAssumeRoleAuth(ctx context.Context, r *http.Request) (auth.Credentials return auth.Credentials{}, ErrAccessDenied } - s3Err := isReqAuthenticated(ctx, r, globalSite.Region, serviceSTS) + s3Err := isReqAuthenticated(ctx, r, globalSite.Region(), serviceSTS) if s3Err != ErrNone { return auth.Credentials{}, s3Err } - user, _, s3Err := getReqAccessKeyV4(r, globalSite.Region, serviceSTS) + user, _, s3Err := getReqAccessKeyV4(r, globalSite.Region(), serviceSTS) if s3Err != ErrNone { return auth.Credentials{}, s3Err } @@ -194,11 +245,11 @@ func parseForm(r *http.Request) error { func getTokenSigningKey() (string, error) { secret := globalActiveCred.SecretKey if globalSiteReplicationSys.isEnabled() { - c, err := globalSiteReplicatorCred.Get(GlobalContext) + secretKey, err := globalSiteReplicatorCred.Get(GlobalContext) if err != nil { return "", err } - return c.SecretKey, nil + return secretKey, nil } return secret, nil } @@ -209,7 +260,7 @@ func getTokenSigningKey() (string, error) { func (sts *stsAPIHandlers) AssumeRole(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, w, "AssumeRole") - claims := make(map[string]interface{}) + claims := stsClaims{} defer logger.AuditLog(ctx, w, r, claims) // Check auth here (otherwise r.Form will have unexpected values from @@ -242,33 +293,15 @@ func (sts *stsAPIHandlers) AssumeRole(w http.ResponseWriter, r *http.Request) { if apiErrCode != ErrNone { stsErr := apiToSTSError(apiErrCode) // Borrow the description error from the API error code - writeSTSErrorResponse(ctx, w, stsErr, fmt.Errorf(errorCodes[apiErrCode].Description)) + writeSTSErrorResponse(ctx, w, stsErr, errors.New(errorCodes[apiErrCode].Description)) return } - sessionPolicyStr := r.Form.Get(stsPolicy) - // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html - // The plain text that you use for both inline and managed session - // policies shouldn't exceed 2048 characters. - if len(sessionPolicyStr) > 2048 { - writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, errSessionPolicyTooLarge) + if err := claims.populateSessionPolicy(r.Form); err != nil { + writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, err) return } - if len(sessionPolicyStr) > 0 { - sessionPolicy, err := policy.ParseConfig(bytes.NewReader([]byte(sessionPolicyStr))) - if err != nil { - writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, err) - return - } - - // Version in policy must not be empty - if sessionPolicy.Version == "" { - writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, fmt.Errorf("Version cannot be empty expecting '2012-10-17'")) - return - } - } - duration, err := openid.GetDefaultExpiration(r.Form.Get(stsDurationSeconds)) if err != nil { writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, err) @@ -278,6 +311,11 @@ func (sts *stsAPIHandlers) AssumeRole(w http.ResponseWriter, r *http.Request) { claims[expClaim] = UTCNow().Add(duration).Unix() claims[parentClaim] = user.AccessKey + tokenRevokeType := r.Form.Get(stsRevokeTokenType) + if tokenRevokeType != "" { + claims[tokenRevokeTypeClaim] = tokenRevokeType + } + // Validate that user.AccessKey's policies can be retrieved - it may not // be in case the user is disabled. 
if _, err = globalIAMSys.PolicyDBGet(user.AccessKey, user.Groups...); err != nil { @@ -285,10 +323,6 @@ func (sts *stsAPIHandlers) AssumeRole(w http.ResponseWriter, r *http.Request) { return } - if len(sessionPolicyStr) > 0 { - claims[policy.SessionPolicyName] = base64.StdEncoding.EncodeToString([]byte(sessionPolicyStr)) - } - secret, err := getTokenSigningKey() if err != nil { writeSTSErrorResponse(ctx, w, ErrSTSInternalError, err) @@ -314,7 +348,7 @@ func (sts *stsAPIHandlers) AssumeRole(w http.ResponseWriter, r *http.Request) { // Call hook for site replication. if cred.ParentUser != globalActiveCred.AccessKey { - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemSTSAcc, STSCredential: &madmin.SRSTSCredential{ AccessKey: cred.AccessKey, @@ -339,7 +373,7 @@ func (sts *stsAPIHandlers) AssumeRole(w http.ResponseWriter, r *http.Request) { func (sts *stsAPIHandlers) AssumeRoleWithSSO(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, w, "AssumeRoleSSOCommon") - claims := make(map[string]interface{}) + claims := stsClaims{} defer logger.AuditLog(ctx, w, r, claims) // Parse the incoming form data. @@ -382,13 +416,26 @@ func (sts *stsAPIHandlers) AssumeRoleWithSSO(w http.ResponseWriter, r *http.Requ // defined parameter to disambiguate the intended IDP in this STS request. roleArn := openid.DummyRoleARN roleArnStr := r.Form.Get(stsRoleArn) - if roleArnStr != "" { + isRolePolicyProvider := roleArnStr != "" + if isRolePolicyProvider { var err error roleArn, _, err = globalIAMSys.GetRolePolicy(roleArnStr) if err != nil { - writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, - fmt.Errorf("Error processing %s parameter: %v", stsRoleArn, err)) - return + // If there is no claim-based provider configured, then an + // unrecognized roleArn is an error + if strings.TrimSpace(iamPolicyClaimNameOpenID()) == "" { + writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, + fmt.Errorf("Error processing %s parameter: %v", stsRoleArn, err)) + return + } + // If there *is* a claim-based provider configured, then + // treat an unrecognized roleArn the same as no roleArn + // at all. This is to support clients like the AWS SDKs + // or CLI that will not allow an AssumeRoleWithWebIdentity + // call without a RoleARN parameter - for these cases the + // user can supply a dummy ARN, which Minio will ignore. + roleArn = openid.DummyRoleARN + isRolePolicyProvider = false } } @@ -417,7 +464,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithSSO(w http.ResponseWriter, r *http.Requ } var policyName string - if roleArnStr != "" && globalIAMSys.HasRolePolicy() { + if isRolePolicyProvider { // If roleArn is used, we set it as a claim, and use the // associated policy when credentials are used. claims[roleArnClaim] = roleArn.String() @@ -446,29 +493,14 @@ func (sts *stsAPIHandlers) AssumeRoleWithSSO(w http.ResponseWriter, r *http.Requ claims[iamPolicyClaimNameOpenID()] = policyName } - sessionPolicyStr := r.Form.Get(stsPolicy) - // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html - // The plain text that you use for both inline and managed session - // policies shouldn't exceed 2048 characters. 
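The `isRolePolicyProvider` change above makes an unrecognized `RoleArn` fatal only when no claim-based OpenID policy mapping is configured; otherwise the value is ignored and the dummy ARN is used, so AWS SDK/CLI clients that insist on sending a RoleARN can pass a placeholder. A hedged sketch of that decision, with `lookupRole` and `claimBasedPolicyClaim` standing in for `globalIAMSys.GetRolePolicy` and `iamPolicyClaimNameOpenID()`:

```go
package sts

import (
	"fmt"
	"strings"
)

// resolveRoleArn returns the effective role ARN, whether role-policy mapping
// applies, and an error only when the RoleArn is unknown and no claim-based
// provider could take over. All helpers here are illustrative placeholders.
func resolveRoleArn(requested, dummyARN string,
	lookupRole func(string) (string, error),
	claimBasedPolicyClaim string,
) (arn string, roleProvider bool, err error) {
	if requested == "" {
		return dummyARN, false, nil
	}
	arn, err = lookupRole(requested)
	if err == nil {
		return arn, true, nil
	}
	if strings.TrimSpace(claimBasedPolicyClaim) == "" {
		// No claim-based provider configured either: the RoleArn is a hard error.
		return "", false, fmt.Errorf("error processing RoleArn parameter: %v", err)
	}
	// A claim-based provider exists: treat the unknown RoleArn as absent.
	return dummyARN, false, nil
}
```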
- if len(sessionPolicyStr) > 2048 { - writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, fmt.Errorf("Session policy should not exceed 2048 characters")) - return + tokenRevokeType := r.Form.Get(stsRevokeTokenType) + if tokenRevokeType != "" { + claims[tokenRevokeTypeClaim] = tokenRevokeType } - if len(sessionPolicyStr) > 0 { - sessionPolicy, err := policy.ParseConfig(bytes.NewReader([]byte(sessionPolicyStr))) - if err != nil { - writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, err) - return - } - - // Version in policy must not be empty - if sessionPolicy.Version == "" { - writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, fmt.Errorf("Invalid session policy version")) - return - } - - claims[policy.SessionPolicyName] = base64.StdEncoding.EncodeToString([]byte(sessionPolicyStr)) + if err := claims.populateSessionPolicy(r.Form); err != nil { + writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, err) + return } secret, err := getTokenSigningKey() @@ -526,6 +558,14 @@ func (sts *stsAPIHandlers) AssumeRoleWithSSO(w http.ResponseWriter, r *http.Requ writeSTSErrorResponse(ctx, w, ErrSTSAccessDenied, err) return } + if newGlobalAuthZPluginFn() == nil { + // if authZ is not set - we expect the policies to be present. + if globalIAMSys.CurrentPolicies(p) == "" { + writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, + fmt.Errorf("None of the given policies (`%s`) are defined, credentials will not be generated", p)) + return + } + } } if !globalIAMSys.doesPolicyAllow(p, policy.Args{ @@ -547,7 +587,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithSSO(w http.ResponseWriter, r *http.Requ } // Call hook for site replication. - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemSTSAcc, STSCredential: &madmin.SRSTSCredential{ AccessKey: cred.AccessKey, @@ -609,7 +649,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithClientGrants(w http.ResponseWriter, r * func (sts *stsAPIHandlers) AssumeRoleWithLDAPIdentity(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, w, "AssumeRoleWithLDAPIdentity") - claims := make(map[string]interface{}) + claims := stsClaims{} defer logger.AuditLog(ctx, w, r, claims, stsLDAPPassword) // Parse the incoming form data. @@ -640,47 +680,35 @@ func (sts *stsAPIHandlers) AssumeRoleWithLDAPIdentity(w http.ResponseWriter, r * return } - sessionPolicyStr := r.Form.Get(stsPolicy) - // https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html - // The plain text that you use for both inline and managed session - // policies shouldn't exceed 2048 characters. 
- if len(sessionPolicyStr) > 2048 { - writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, fmt.Errorf("Session policy should not exceed 2048 characters")) + if err := claims.populateSessionPolicy(r.Form); err != nil { + writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, err) return } - if len(sessionPolicyStr) > 0 { - sessionPolicy, err := policy.ParseConfig(bytes.NewReader([]byte(sessionPolicyStr))) - if err != nil { - writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, err) - return - } - - // Version in policy must not be empty - if sessionPolicy.Version == "" { - writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, fmt.Errorf("Version needs to be specified in session policy")) - return - } - } - if !globalIAMSys.Initialized() { writeSTSErrorResponse(ctx, w, ErrSTSIAMNotInitialized, errIAMNotInitialized) return } - ldapUserDN, groupDistNames, err := globalIAMSys.LDAPConfig.Bind(ldapUsername, ldapPassword) + lookupResult, groupDistNames, err := globalIAMSys.LDAPConfig.Bind(ldapUsername, ldapPassword) if err != nil { err = fmt.Errorf("LDAP server error: %w", err) writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, err) return } + ldapUserDN := lookupResult.NormDN + ldapActualUserDN := lookupResult.ActualDN // Check if this user or their groups have a policy applied. - ldapPolicies, _ := globalIAMSys.PolicyDBGet(ldapUserDN, groupDistNames...) + ldapPolicies, err := globalIAMSys.PolicyDBGet(ldapUserDN, groupDistNames...) + if err != nil { + writeSTSErrorResponse(ctx, w, ErrSTSInternalError, err) + return + } if len(ldapPolicies) == 0 && newGlobalAuthZPluginFn() == nil { writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, fmt.Errorf("expecting a policy to be set for user `%s` or one of their groups: `%s` - rejecting this request", - ldapUserDN, strings.Join(groupDistNames, "`,`"))) + ldapActualUserDN, strings.Join(groupDistNames, "`,`"))) return } @@ -692,10 +720,15 @@ func (sts *stsAPIHandlers) AssumeRoleWithLDAPIdentity(w http.ResponseWriter, r * claims[expClaim] = UTCNow().Add(expiryDur).Unix() claims[ldapUser] = ldapUserDN + claims[ldapActualUser] = ldapActualUserDN claims[ldapUserN] = ldapUsername - - if len(sessionPolicyStr) > 0 { - claims[policy.SessionPolicyName] = base64.StdEncoding.EncodeToString([]byte(sessionPolicyStr)) + // Add lookup up LDAP attributes as claims. + for attrib, value := range lookupResult.Attributes { + claims[ldapAttribPrefix+attrib] = value + } + tokenRevokeType := r.Form.Get(stsRevokeTokenType) + if tokenRevokeType != "" { + claims[tokenRevokeTypeClaim] = tokenRevokeType } secret, err := getTokenSigningKey() @@ -728,7 +761,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithLDAPIdentity(w http.ResponseWriter, r * } // Call hook for site replication. 
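Besides reusing the session-policy helper, the LDAP path above now records both the normalized and the actual bind DN, copies any looked-up directory attributes into the token claims under the `ldapAttrib_` prefix, and tags the session with an optional revoke type. A sketch of that claim assembly with placeholder inputs (`normDN`, `actualDN`, and `attrs` correspond to `lookupResult.NormDN`, `lookupResult.ActualDN`, and `lookupResult.Attributes`, assumed here to be a map of attribute name to values):

```go
package sts

import "time"

// buildLDAPClaims assembles the claim set the patched
// AssumeRoleWithLDAPIdentity embeds in the STS token.
func buildLDAPClaims(username, normDN, actualDN string, attrs map[string][]string,
	revokeType string, expiry time.Duration,
) map[string]any {
	claims := map[string]any{
		"exp":            time.Now().UTC().Add(expiry).Unix(),
		"ldapUser":       normDN,   // normalized DN, used for policy mapping
		"ldapActualUser": actualDN, // DN exactly as returned by the directory
		"ldapUsername":   username, // short login name
	}
	for name, values := range attrs {
		claims["ldapAttrib_"+name] = values // e.g. ldapAttrib_sshPublicKey
	}
	if revokeType != "" {
		claims["tokenRevokeType"] = revokeType // enables selective revocation later
	}
	return claims
}
```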
- logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemSTSAcc, STSCredential: &madmin.SRSTSCredential{ AccessKey: cred.AccessKey, @@ -758,7 +791,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithLDAPIdentity(w http.ResponseWriter, r * func (sts *stsAPIHandlers) AssumeRoleWithCertificate(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, w, "AssumeRoleWithCertificate") - claims := make(map[string]interface{}) + claims := make(map[string]any) defer logger.AuditLog(ctx, w, r, claims) if !globalIAMSys.Initialized() { @@ -786,12 +819,26 @@ func (sts *stsAPIHandlers) AssumeRoleWithCertificate(w http.ResponseWriter, r *h // policy mapping would be ambiguous. // However, we can filter all CA certificates and only check // whether they client has sent exactly one (non-CA) leaf certificate. - peerCertificates := make([]*x509.Certificate, 0, len(r.TLS.PeerCertificates)) + const MaxIntermediateCAs = 10 + var ( + peerCertificates = make([]*x509.Certificate, 0, len(r.TLS.PeerCertificates)) + intermediates *x509.CertPool + numIntermediates int + ) for _, cert := range r.TLS.PeerCertificates { if cert.IsCA { - continue + numIntermediates++ + if numIntermediates > MaxIntermediateCAs { + writeSTSErrorResponse(ctx, w, ErrSTSTooManyIntermediateCAs, fmt.Errorf("client certificate contains more than %d intermediate CAs", MaxIntermediateCAs)) + return + } + if intermediates == nil { + intermediates = x509.NewCertPool() + } + intermediates.AddCert(cert) + } else { + peerCertificates = append(peerCertificates, cert) } - peerCertificates = append(peerCertificates, cert) } r.TLS.PeerCertificates = peerCertificates @@ -812,7 +859,8 @@ func (sts *stsAPIHandlers) AssumeRoleWithCertificate(w http.ResponseWriter, r *h KeyUsages: []x509.ExtKeyUsage{ x509.ExtKeyUsageClientAuth, }, - Roots: globalRootCAs, + Intermediates: intermediates, + Roots: globalRootCAs, }) if err != nil { writeSTSErrorResponse(ctx, w, ErrSTSInvalidClientCertificate, err) @@ -871,13 +919,18 @@ func (sts *stsAPIHandlers) AssumeRoleWithCertificate(w http.ResponseWriter, r *h } // Associate any service accounts to the certificate CN - parentUser := "tls:" + certificate.Subject.CommonName + parentUser := "tls" + getKeySeparator() + certificate.Subject.CommonName claims[expClaim] = UTCNow().Add(expiry).Unix() claims[subClaim] = certificate.Subject.CommonName claims[audClaim] = certificate.Subject.Organization claims[issClaim] = certificate.Issuer.CommonName claims[parentClaim] = parentUser + tokenRevokeType := r.Form.Get(stsRevokeTokenType) + if tokenRevokeType != "" { + claims[tokenRevokeTypeClaim] = tokenRevokeType + } + secretKey, err := getTokenSigningKey() if err != nil { writeSTSErrorResponse(ctx, w, ErrSTSInternalError, err) @@ -898,7 +951,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithCertificate(w http.ResponseWriter, r *h } // Call hook for site replication. 
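The certificate path above no longer discards CA certificates from the client chain: up to ten of them are collected into an `x509.CertPool` and passed to the verifier as intermediates, so clients that present their full chain (leaf plus intermediates) can still authenticate. A self-contained sketch of that split-and-verify step using only `crypto/x509` (`roots` plays the role of `globalRootCAs`):

```go
package sts

import (
	"crypto/x509"
	"errors"
	"fmt"
)

const maxIntermediateCAs = 10 // same cap the patch enforces

// splitPeerCertificates separates the client chain into one leaf certificate
// and an optional pool of intermediate CAs, rejecting oversized chains.
func splitPeerCertificates(peers []*x509.Certificate) (*x509.Certificate, *x509.CertPool, error) {
	var (
		leaf          *x509.Certificate
		intermediates *x509.CertPool
		numCAs        int
	)
	for _, cert := range peers {
		if cert.IsCA {
			numCAs++
			if numCAs > maxIntermediateCAs {
				return nil, nil, fmt.Errorf("client certificate contains more than %d intermediate CAs", maxIntermediateCAs)
			}
			if intermediates == nil {
				intermediates = x509.NewCertPool()
			}
			intermediates.AddCert(cert)
			continue
		}
		if leaf != nil {
			return nil, nil, errors.New("more than one leaf certificate in client chain")
		}
		leaf = cert
	}
	if leaf == nil {
		return nil, nil, errors.New("no leaf certificate in client chain")
	}
	return leaf, intermediates, nil
}

// verifyClientCert hands the intermediates to the standard verifier, roughly
// as the handler does with globalRootCAs as Roots.
func verifyClientCert(leaf *x509.Certificate, intermediates, roots *x509.CertPool) error {
	_, err := leaf.Verify(x509.VerifyOptions{
		KeyUsages:     []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
		Intermediates: intermediates,
		Roots:         roots,
	})
	return err
}
```

The single-leaf requirement in the sketch corresponds to the handler's existing check that exactly one non-CA certificate is presented.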
- logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemSTSAcc, STSCredential: &madmin.SRSTSCredential{ AccessKey: tmpCredentials.AccessKey, @@ -924,8 +977,10 @@ func (sts *stsAPIHandlers) AssumeRoleWithCertificate(w http.ResponseWriter, r *h func (sts *stsAPIHandlers) AssumeRoleWithCustomToken(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, w, "AssumeRoleWithCustomToken") - claims := make(map[string]interface{}) - defer logger.AuditLog(ctx, w, r, claims) + claims := make(map[string]any) + + auditLogFilterKeys := []string{stsToken} + defer logger.AuditLog(ctx, w, r, claims, auditLogFilterKeys...) if !globalIAMSys.Initialized() { writeSTSErrorResponse(ctx, w, ErrSTSIAMNotInitialized, errIAMNotInitialized) @@ -969,6 +1024,20 @@ func (sts *stsAPIHandlers) AssumeRoleWithCustomToken(w http.ResponseWriter, r *h return } + _, policyName, err := globalIAMSys.GetRolePolicy(roleArnStr) + if err != nil { + writeSTSErrorResponse(ctx, w, ErrSTSAccessDenied, err) + return + } + + if newGlobalAuthZPluginFn() == nil { // if authZ is not set - we expect the policyname to be present. + if globalIAMSys.CurrentPolicies(policyName) == "" { + writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, + fmt.Errorf("None of the given policies (`%s`) are defined, credentials will not be generated", policyName)) + return + } + } + res, err := authn.Authenticate(roleArn, token) if err != nil { writeSTSErrorResponse(ctx, w, ErrSTSInvalidParameterValue, err) @@ -994,13 +1063,17 @@ func (sts *stsAPIHandlers) AssumeRoleWithCustomToken(w http.ResponseWriter, r *h expiry = requestedDuration } - parentUser := "custom:" + res.Success.User + parentUser := "custom" + getKeySeparator() + res.Success.User // metadata map claims[expClaim] = UTCNow().Add(time.Duration(expiry) * time.Second).Unix() claims[subClaim] = parentUser claims[roleArnClaim] = roleArn.String() claims[parentClaim] = parentUser + tokenRevokeType := r.Form.Get(stsRevokeTokenType) + if tokenRevokeType != "" { + claims[tokenRevokeTypeClaim] = tokenRevokeType + } // Add all other claims from the plugin **without** replacing any // existing claims. @@ -1028,7 +1101,7 @@ func (sts *stsAPIHandlers) AssumeRoleWithCustomToken(w http.ResponseWriter, r *h } // Call hook for site replication. - logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ + replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{ Type: madmin.SRIAMItemSTSAcc, STSCredential: &madmin.SRSTSCredential{ AccessKey: tmpCredentials.AccessKey, diff --git a/cmd/sts-handlers_test.go b/cmd/sts-handlers_test.go index b21f780d74364..a883999e9084e 100644 --- a/cmd/sts-handlers_test.go +++ b/cmd/sts-handlers_test.go @@ -18,17 +18,23 @@ package cmd import ( + "bytes" "context" "fmt" + "io" "os" + "reflect" + "slices" "strings" "testing" "time" + "github.com/klauspost/compress/zip" "github.com/minio/madmin-go/v3" - minio "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7" cr "github.com/minio/minio-go/v7/pkg/credentials" "github.com/minio/minio-go/v7/pkg/set" + "github.com/minio/pkg/v3/ldap" ) func runAllIAMSTSTests(suite *TestSuiteIAM, c *check) { @@ -36,10 +42,13 @@ func runAllIAMSTSTests(suite *TestSuiteIAM, c *check) { // The STS for root test needs to be the first one after setup. 
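Both the SSO and custom-token paths above also gain a pre-flight check: when no external authorization plugin is configured, the policy mapped to the role ARN must already be defined, otherwise the request fails with `ErrSTSInvalidParameterValue` before any credential is minted. A small sketch of that guard, with `currentPolicies` and `authZPluginConfigured` standing in for `globalIAMSys.CurrentPolicies` and `newGlobalAuthZPluginFn() != nil`:

```go
package sts

import "fmt"

// ensureRolePolicyExists rejects STS requests whose role maps to a policy
// that is not defined, unless an external authZ plugin may resolve it.
func ensureRolePolicyExists(policyName string,
	currentPolicies func(string) string,
	authZPluginConfigured bool,
) error {
	if authZPluginConfigured {
		return nil // plugin may evaluate policies dynamically
	}
	if currentPolicies(policyName) == "" {
		return fmt.Errorf("none of the given policies (`%s`) are defined, credentials will not be generated", policyName)
	}
	return nil
}
```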
suite.TestSTSForRoot(c) suite.TestSTS(c) + suite.TestSTSPrivilegeEscalationBug2_2025_10_15(c, true) + suite.TestSTSPrivilegeEscalationBug2_2025_10_15(c, false) suite.TestSTSWithDenyDeleteVersion(c) suite.TestSTSWithTags(c) suite.TestSTSServiceAccountsWithUsername(c) suite.TestSTSWithGroupPolicy(c) + suite.TestSTSTokenRevoke(c) suite.TearDownSuite(c) } @@ -110,9 +119,12 @@ func (s *TestSuiteIAM) TestSTSServiceAccountsWithUsername(c *check) { c.Fatalf("policy add error: %v", err) } - err = s.adm.SetPolicy(ctx, policy, "dillon", false) + _, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{ + Policies: []string{policy}, + User: "dillon", + }) if err != nil { - c.Fatalf("Unable to set policy: %v", err) + c.Fatalf("Unable to attach policy: %v", err) } assumeRole := cr.STSAssumeRole{ @@ -180,7 +192,7 @@ func (s *TestSuiteIAM) TestSTSWithDenyDeleteVersion(c *check) { // Create policy, user and associate policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -212,7 +224,7 @@ func (s *TestSuiteIAM) TestSTSWithDenyDeleteVersion(c *check) { } ] } -`, bucket, bucket)) +`, bucket, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { @@ -225,9 +237,12 @@ func (s *TestSuiteIAM) TestSTSWithDenyDeleteVersion(c *check) { c.Fatalf("Unable to set user: %v", err) } - err = s.adm.SetPolicy(ctx, policy, accessKey, false) + _, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{ + Policies: []string{policy}, + User: accessKey, + }) if err != nil { - c.Fatalf("Unable to set policy: %v", err) + c.Fatalf("Unable to attach policy: %v", err) } // confirm that the user is able to access the bucket @@ -263,6 +278,110 @@ func (s *TestSuiteIAM) TestSTSWithDenyDeleteVersion(c *check) { c.mustNotDelete(ctx, minioClient, bucket, versions[0]) } +func (s *TestSuiteIAM) TestSTSPrivilegeEscalationBug2_2025_10_15(c *check, forRoot bool) { + ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout) + defer cancel() + + for i := range 3 { + err := s.client.MakeBucket(ctx, fmt.Sprintf("bucket%d", i+1), minio.MakeBucketOptions{}) + if err != nil { + c.Fatalf("bucket create error: %v", err) + } + defer func(i int) { + _ = s.client.RemoveBucket(ctx, fmt.Sprintf("bucket%d", i+1)) + }(i) + } + + allow2BucketsPolicyBytes := []byte(`{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ListBucket1AndBucket2", + "Effect": "Allow", + "Action": ["s3:ListBucket"], + "Resource": ["arn:aws:s3:::bucket1", "arn:aws:s3:::bucket2"] + }, + { + "Sid": "ReadWriteBucket1AndBucket2Objects", + "Effect": "Allow", + "Action": [ + "s3:DeleteObject", + "s3:DeleteObjectVersion", + "s3:GetObject", + "s3:GetObjectVersion", + "s3:PutObject" + ], + "Resource": ["arn:aws:s3:::bucket1/*", "arn:aws:s3:::bucket2/*"] + } + ] +}`) + + var value cr.Value + var err error + if forRoot { + assumeRole := cr.STSAssumeRole{ + Client: s.TestSuiteCommon.client, + STSEndpoint: s.endPoint, + Options: cr.STSAssumeRoleOptions{ + AccessKey: globalActiveCred.AccessKey, + SecretKey: globalActiveCred.SecretKey, + Policy: string(allow2BucketsPolicyBytes), + }, + } + value, err = assumeRole.Retrieve() + if err != nil { + c.Fatalf("err calling assumeRole: %v", err) + } + } else { + // Create a regular user and attach consoleAdmin policy + err := s.adm.AddUser(ctx, "foobar", "foobar123") + if err != nil { + c.Fatalf("could not create user") + } + + _, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{ + Policies: 
[]string{"consoleAdmin"}, + User: "foobar", + }) + if err != nil { + c.Fatalf("could not attach policy") + } + + assumeRole := cr.STSAssumeRole{ + Client: s.TestSuiteCommon.client, + STSEndpoint: s.endPoint, + Options: cr.STSAssumeRoleOptions{ + AccessKey: "foobar", + SecretKey: "foobar123", + Policy: string(allow2BucketsPolicyBytes), + }, + } + value, err = assumeRole.Retrieve() + if err != nil { + c.Fatalf("err calling assumeRole: %v", err) + } + } + restrictedClient := s.getUserClient(c, value.AccessKeyID, value.SecretAccessKey, value.SessionToken) + + buckets, err := restrictedClient.ListBuckets(ctx) + if err != nil { + c.Fatalf("err fetching buckets %s", err) + } + if len(buckets) != 2 || buckets[0].Name != "bucket1" || buckets[1].Name != "bucket2" { + c.Fatalf("restricted STS account should only have access to bucket1 and bucket2") + } + + // Try to escalate privileges + restrictedAdmClient := s.getAdminClient(c, value.AccessKeyID, value.SecretAccessKey, value.SessionToken) + _, err = restrictedAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{ + AccessKey: "newroot", + SecretKey: "newroot123", + }) + if err == nil { + c.Fatalf("restricted STS account was able to create service account bypassing sub-policy!") + } +} + func (s *TestSuiteIAM) TestSTSWithTags(c *check) { ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout) defer cancel() @@ -276,7 +395,7 @@ func (s *TestSuiteIAM) TestSTSWithTags(c *check) { // Create policy, user and associate policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -314,7 +433,7 @@ func (s *TestSuiteIAM) TestSTSWithTags(c *check) { } } ] -}`, bucket, bucket, bucket, bucket)) +}`, bucket, bucket, bucket, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -326,9 +445,12 @@ func (s *TestSuiteIAM) TestSTSWithTags(c *check) { c.Fatalf("Unable to set user: %v", err) } - err = s.adm.SetPolicy(ctx, policy, accessKey, false) + _, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{ + Policies: []string{policy}, + User: accessKey, + }) if err != nil { - c.Fatalf("Unable to set policy: %v", err) + c.Fatalf("Unable to attach policy: %v", err) } // confirm that the user is able to access the bucket @@ -387,7 +509,7 @@ func (s *TestSuiteIAM) TestSTS(c *check) { // Create policy, user and associate policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -402,7 +524,7 @@ func (s *TestSuiteIAM) TestSTS(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -414,9 +536,12 @@ func (s *TestSuiteIAM) TestSTS(c *check) { c.Fatalf("Unable to set user: %v", err) } - err = s.adm.SetPolicy(ctx, policy, accessKey, false) + _, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{ + Policies: []string{policy}, + User: accessKey, + }) if err != nil { - c.Fatalf("Unable to set policy: %v", err) + c.Fatalf("Unable to attach policy: %v", err) } // confirm that the user is able to access the bucket @@ -469,7 +594,7 @@ func (s *TestSuiteIAM) TestSTSWithGroupPolicy(c *check) { // Create policy, user and associate policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -484,7 +609,7 @@ func (s 
*TestSuiteIAM) TestSTSWithGroupPolicy(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -509,9 +634,12 @@ func (s *TestSuiteIAM) TestSTSWithGroupPolicy(c *check) { c.Fatalf("unable to add user to group: %v", err) } - err = s.adm.SetPolicy(ctx, policy, "test-group", true) + _, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{ + Policies: []string{policy}, + Group: "test-group", + }) if err != nil { - c.Fatalf("Unable to set policy: %v", err) + c.Fatalf("Unable to attach policy: %v", err) } // confirm that the user is able to access the bucket - permission comes @@ -621,7 +749,7 @@ func (s *TestSuiteIAM) TestSTSForRoot(c *check) { gotBuckets := set.NewStringSet() for _, b := range accInfo.Buckets { gotBuckets.Add(b.Name) - if !(b.Access.Read && b.Access.Write) { + if !b.Access.Read || !b.Access.Write { c.Fatalf("root user should have read and write access to bucket: %v", b.Name) } } @@ -636,6 +764,129 @@ func (s *TestSuiteIAM) TestSTSForRoot(c *check) { } } +// TestSTSTokenRevoke - tests the token revoke API +func (s *TestSuiteIAM) TestSTSTokenRevoke(c *check) { + ctx, cancel := context.WithTimeout(context.Background(), 100*testDefaultTimeout) + defer cancel() + + bucket := getRandomBucketName() + err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{}) + if err != nil { + c.Fatalf("bucket create error: %v", err) + } + + // Create policy, user and associate policy + policy := "mypolicy" + policyBytes := fmt.Appendf(nil, `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:ListBucket" + ], + "Resource": [ + "arn:aws:s3:::%s/*" + ] + } + ] +}`, bucket) + err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) + if err != nil { + c.Fatalf("policy add error: %v", err) + } + + accessKey, secretKey := mustGenerateCredentials(c) + err = s.adm.SetUser(ctx, accessKey, secretKey, madmin.AccountEnabled) + if err != nil { + c.Fatalf("Unable to set user: %v", err) + } + + _, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{ + Policies: []string{policy}, + User: accessKey, + }) + if err != nil { + c.Fatalf("Unable to attach policy: %v", err) + } + + cases := []struct { + tokenType string + fullRevoke bool + selfRevoke bool + }{ + {"", true, false}, // Case 1 + {"", true, true}, // Case 2 + {"type-1", false, false}, // Case 3 + {"type-2", false, true}, // Case 4 + {"type-2", true, true}, // Case 5 - repeat type 2 to ensure previous revoke does not affect it. + } + + for i, tc := range cases { + // Create STS user. + assumeRole := cr.STSAssumeRole{ + Client: s.TestSuiteCommon.client, + STSEndpoint: s.endPoint, + Options: cr.STSAssumeRoleOptions{ + AccessKey: accessKey, + SecretKey: secretKey, + TokenRevokeType: tc.tokenType, + }, + } + + value, err := assumeRole.Retrieve() + if err != nil { + c.Fatalf("err calling assumeRole: %v", err) + } + + minioClient, err := minio.New(s.endpoint, &minio.Options{ + Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken), + Secure: s.secure, + Transport: s.TestSuiteCommon.client.Transport, + }) + if err != nil { + c.Fatalf("Error initializing client: %v", err) + } + + // Validate that the client from sts creds can access the bucket. 
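The `TestSTSTokenRevoke` cases above create STS sessions tagged with a `TokenRevokeType` and then, in the steps that follow, revoke them through the admin API. A condensed usage sketch built from the same minio-go and madmin-go calls the test exercises (endpoint and keys are placeholders):

```go
package main

import (
	"context"
	"log"

	"github.com/minio/madmin-go/v3"
	cr "github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// 1. Assume a role and tag the session with a revoke type ("batch" here).
	assumeRole := cr.STSAssumeRole{
		STSEndpoint: "https://minio.example.com:9000",
		Options: cr.STSAssumeRoleOptions{
			AccessKey:       "myuser",
			SecretKey:       "myuser-secret",
			TokenRevokeType: "batch",
		},
	}
	if _, err := assumeRole.Retrieve(); err != nil {
		log.Fatalf("assume role: %v", err)
	}

	// 2. Later, revoke every outstanding "batch" token issued to that user.
	adm, err := madmin.New("minio.example.com:9000", "admin", "admin-secret", true)
	if err != nil {
		log.Fatalf("admin client: %v", err)
	}
	if err := adm.RevokeTokens(context.Background(), madmin.RevokeTokensReq{
		User:            "myuser",
		TokenRevokeType: "batch",
	}); err != nil {
		log.Fatalf("revoke tokens: %v", err)
	}
}
```

Omitting `TokenRevokeType` and setting `FullRevoke: true` instead revokes every STS token for the user, which is the first case the test table covers.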
+ c.mustListObjects(ctx, minioClient, bucket) + + // Set up revocation + user := accessKey + tokenType := tc.tokenType + reqAdmClient := s.adm + if tc.fullRevoke { + tokenType = "" + } + if tc.selfRevoke { + user = "" + tokenType = "" + reqAdmClient, err = madmin.NewWithOptions(s.endpoint, &madmin.Options{ + Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken), + Secure: s.secure, + }) + if err != nil { + c.Fatalf("Err creating user admin client: %v", err) + } + reqAdmClient.SetCustomTransport(s.TestSuiteCommon.client.Transport) + } + + err = reqAdmClient.RevokeTokens(ctx, madmin.RevokeTokensReq{ + User: user, + TokenRevokeType: tokenType, + FullRevoke: tc.fullRevoke, + }) + if err != nil { + c.Fatalf("Case %d: unexpected error: %v", i+1, err) + } + + // Validate that the client cannot access the bucket after revocation. + c.mustNotListObjects(ctx, minioClient, bucket) + } +} + // SetUpLDAP - expects to setup an LDAP test server using the test LDAP // container and canned data from https://github.com/minio/minio-ldap-testing func (s *TestSuiteIAM) SetUpLDAP(c *check, serverAddr string) { @@ -650,6 +901,7 @@ func (s *TestSuiteIAM) SetUpLDAP(c *check, serverAddr string) { "lookup_bind_password=admin", "user_dn_search_base_dn=dc=min,dc=io", "user_dn_search_filter=(uid=%s)", + "user_dn_attributes=sshPublicKey", "group_search_base_dn=ou=swengg,dc=min,dc=io", "group_search_filter=(&(objectclass=groupofnames)(member=%d))", } @@ -661,6 +913,36 @@ func (s *TestSuiteIAM) SetUpLDAP(c *check, serverAddr string) { s.RestartIAMSuite(c) } +// SetUpLDAPWithNonNormalizedBaseDN - expects to setup an LDAP test server using +// the test LDAP container and canned data from +// https://github.com/minio/minio-ldap-testing +// +// Sets up non-normalized base DN configuration for testing. +func (s *TestSuiteIAM) SetUpLDAPWithNonNormalizedBaseDN(c *check, serverAddr string) { + ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout) + defer cancel() + + configCmds := []string{ + "identity_ldap", + fmt.Sprintf("server_addr=%s", serverAddr), + "server_insecure=on", + "lookup_bind_dn=cn=admin,dc=min,dc=io", + "lookup_bind_password=admin", + // `DC` is intentionally capitalized here. + "user_dn_search_base_dn=DC=min,DC=io", + "user_dn_search_filter=(uid=%s)", + // `DC` is intentionally capitalized here. 
+ "group_search_base_dn=ou=swengg,DC=min,dc=io", + "group_search_filter=(&(objectclass=groupofnames)(member=%d))", + } + _, err := s.adm.SetConfigKV(ctx, strings.Join(configCmds, " ")) + if err != nil { + c.Fatalf("unable to setup LDAP for tests: %v", err) + } + + s.RestartIAMSuite(c) +} + const ( EnvTestLDAPServer = "_MINIO_LDAP_TEST_SERVER" ) @@ -675,104 +957,836 @@ func TestIAMWithLDAPServerSuite(t *testing.T) { ldapServer := os.Getenv(EnvTestLDAPServer) if ldapServer == "" { - c.Skip("Skipping LDAP test as no LDAP server is provided.") + c.Skipf("Skipping LDAP test as no LDAP server is provided via %s", EnvTestLDAPServer) } suite.SetUpSuite(c) suite.SetUpLDAP(c, ldapServer) suite.TestLDAPSTS(c) + suite.TestLDAPPolicyEntitiesLookup(c) + suite.TestLDAPUnicodeVariations(c) suite.TestLDAPSTSServiceAccounts(c) suite.TestLDAPSTSServiceAccountsWithUsername(c) suite.TestLDAPSTSServiceAccountsWithGroups(c) + suite.TestLDAPAttributesLookup(c) + suite.TestLDAPCyrillicUser(c) + suite.TestLDAPSlashDN(c) suite.TearDownSuite(c) }, ) } } -func (s *TestSuiteIAM) TestLDAPSTS(c *check) { - ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout) - defer cancel() - - bucket := getRandomBucketName() - err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{}) - if err != nil { - c.Fatalf("bucket create error: %v", err) - } +// This test is for a fix added to handle non-normalized base DN values in the +// LDAP configuration. It runs the existing LDAP sub-tests with a non-normalized +// LDAP configuration. +func TestIAMWithLDAPNonNormalizedBaseDNConfigServerSuite(t *testing.T) { + for i, testCase := range iamTestSuites { + t.Run( + fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.ServerTypeDescription), + func(t *testing.T) { + c := &check{t, testCase.serverType} + suite := testCase - // Create policy - policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:PutObject", - "s3:GetObject", - "s3:ListBucket" - ], - "Resource": [ - "arn:aws:s3:::%s/*" - ] - } - ] -}`, bucket)) - err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) - if err != nil { - c.Fatalf("policy add error: %v", err) - } + ldapServer := os.Getenv(EnvTestLDAPServer) + if ldapServer == "" { + c.Skipf("Skipping LDAP test as no LDAP server is provided via %s", EnvTestLDAPServer) + } - ldapID := cr.LDAPIdentity{ - Client: s.TestSuiteCommon.client, - STSEndpoint: s.endPoint, - LDAPUsername: "dillon", - LDAPPassword: "dillon", + suite.SetUpSuite(c) + suite.SetUpLDAPWithNonNormalizedBaseDN(c, ldapServer) + suite.TestLDAPSTS(c) + suite.TestLDAPPolicyEntitiesLookup(c) + suite.TestLDAPUnicodeVariations(c) + suite.TestLDAPSTSServiceAccounts(c) + suite.TestLDAPSTSServiceAccountsWithUsername(c) + suite.TestLDAPSTSServiceAccountsWithGroups(c) + suite.TestLDAPSlashDN(c) + suite.TearDownSuite(c) + }, + ) } +} - _, err = ldapID.Retrieve() - if err == nil { - c.Fatalf("Expected to fail to create STS cred with no associated policy!") - } +func TestIAMExportImportWithLDAP(t *testing.T) { + for i, testCase := range iamTestSuites { + t.Run( + fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.ServerTypeDescription), + func(t *testing.T) { + c := &check{t, testCase.serverType} + suite := testCase - // Attempting to set a non-existent policy should fail. 
- userDN := "uid=dillon,ou=people,ou=swengg,dc=min,dc=io" - err = s.adm.SetPolicy(ctx, policy+"x", userDN, false) - if err == nil { - c.Fatalf("should not be able to set non-existent policy") - } + ldapServer := os.Getenv(EnvTestLDAPServer) + if ldapServer == "" { + c.Skipf("Skipping LDAP test as no LDAP server is provided via %s", EnvTestLDAPServer) + } - err = s.adm.SetPolicy(ctx, policy, userDN, false) - if err != nil { - c.Fatalf("Unable to set policy: %v", err) - } + iamTestContentCases := []iamTestContent{ + { + policies: map[string][]byte{ + "mypolicy": []byte(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["s3:GetObject","s3:ListBucket","s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/*"]}]}`), + }, + ldapUserPolicyMappings: map[string][]string{ + "uid=dillon,ou=people,ou=swengg,dc=min,dc=io": {"mypolicy"}, + "uid=liza,ou=people,ou=swengg,dc=min,dc=io": {"consoleAdmin"}, + }, + ldapGroupPolicyMappings: map[string][]string{ + "cn=projectb,ou=groups,ou=swengg,dc=min,dc=io": {"mypolicy"}, + "cn=projecta,ou=groups,ou=swengg,dc=min,dc=io": {"consoleAdmin"}, + }, + }, + } - value, err := ldapID.Retrieve() - if err != nil { - c.Fatalf("Expected to generate STS creds, got err: %#v", err) + for caseNum, content := range iamTestContentCases { + suite.SetUpSuite(c) + suite.SetUpLDAP(c, ldapServer) + exportedContent := suite.TestIAMExport(c, caseNum, content) + suite.TearDownSuite(c) + suite.SetUpSuite(c) + suite.SetUpLDAP(c, ldapServer) + suite.TestIAMImport(c, exportedContent, caseNum, content) + suite.TearDownSuite(c) + } + }, + ) } +} - minioClient, err := minio.New(s.endpoint, &minio.Options{ - Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken), - Secure: s.secure, - Transport: s.TestSuiteCommon.client.Transport, - }) - if err != nil { - c.Fatalf("Error initializing client: %v", err) - } +func TestIAMImportAssetWithLDAP(t *testing.T) { + ctx, cancel := context.WithTimeout(t.Context(), testDefaultTimeout) + defer cancel() - // Validate that user listing does not return any entries - usersList, err := s.adm.ListUsers(ctx) - if err != nil { - c.Fatalf("list users should not fail: %v", err) - } - if len(usersList) != 1 { - c.Fatalf("expected user listing output: %v", usersList) - } - uinfo := usersList[userDN] - if uinfo.PolicyName != policy || uinfo.Status != madmin.AccountEnabled { - c.Fatalf("expected user listing content: %v", uinfo) - } + exportContentStrings := map[string]string{ + allPoliciesFile: `{"consoleAdmin":{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["admin:*"]},{"Effect":"Allow","Action":["kms:*"]},{"Effect":"Allow","Action":["s3:*"],"Resource":["arn:aws:s3:::*"]}]},"diagnostics":{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["admin:Prometheus","admin:Profiling","admin:ServerTrace","admin:ConsoleLog","admin:ServerInfo","admin:TopLocksInfo","admin:OBDInfo","admin:BandwidthMonitor"],"Resource":["arn:aws:s3:::*"]}]},"readonly":{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["s3:GetBucketLocation","s3:GetObject"],"Resource":["arn:aws:s3:::*"]}]},"readwrite":{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["s3:*"],"Resource":["arn:aws:s3:::*"]}]},"writeonly":{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Action":["s3:PutObject"],"Resource":["arn:aws:s3:::*"]}]}}`, + + // Built-in user should be imported without errors even if LDAP is + // enabled. 
+ allUsersFile: `{ + "foo": { + "secretKey": "foobar123", + "status": "enabled" + } +} +`, + // Built-in groups should be imported without errors even if LDAP is + // enabled. + allGroupsFile: `{ + "mygroup": { + "version": 1, + "status": "enabled", + "members": [ + "foo" + ], + "updatedAt": "2024-04-23T21:34:43.587429659Z" + } +} +`, + // The `cn=projecty,..` group below is not under a configured DN, but we + // should still import without an error. + allSvcAcctsFile: `{ + "u4ccRswj62HV3Ifwima7": { + "parent": "uid=svc.algorithm,OU=swengg,DC=min,DC=io", + "accessKey": "u4ccRswj62HV3Ifwima7", + "secretKey": "ZoEoZdLlzVbOlT9rbhD7ZN7TLyiYXSAlB79uGEge", + "groups": ["cn=project.c,ou=groups,OU=swengg,DC=min,DC=io", "cn=projecty,ou=groups,ou=hwengg,dc=min,dc=io"], + "claims": { + "accessKey": "u4ccRswj62HV3Ifwima7", + "ldapUser": "uid=svc.algorithm,ou=swengg,dc=min,dc=io", + "ldapUsername": "svc.algorithm", + "parent": "uid=svc.algorithm,ou=swengg,dc=min,dc=io", + "sa-policy": "inherited-policy" + }, + "sessionPolicy": null, + "status": "on", + "name": "", + "description": "" + } +} +`, + // Built-in user-to-policies mapping should be imported without errors + // even if LDAP is enabled. + userPolicyMappingsFile: `{ + "foo": { + "version": 0, + "policy": "readwrite", + "updatedAt": "2024-04-23T21:34:43.815519816Z" + } +} +`, + // Contains: + // + // 1. duplicate mapping with same policy, we should not error out; + // + // 2. non-LDAP group mapping, we should not error out; + groupPolicyMappingsFile: `{ + "cn=project.c,ou=groups,ou=swengg,DC=min,dc=io": { + "version": 0, + "policy": "consoleAdmin", + "updatedAt": "2024-04-17T23:54:28.442998301Z" + }, + "mygroup": { + "version": 0, + "policy": "consoleAdmin", + "updatedAt": "2024-04-23T21:34:43.66922872Z" + }, + "cn=project.c,ou=groups,OU=swengg,DC=min,DC=io": { + "version": 0, + "policy": "consoleAdmin", + "updatedAt": "2024-04-17T20:54:28.442998301Z" + } +} +`, + stsUserPolicyMappingsFile: `{ + "uid=dillon,ou=people,OU=swengg,DC=min,DC=io": { + "version": 0, + "policy": "consoleAdmin", + "updatedAt": "2024-04-17T23:54:10.606645642Z" + } +} +`, + } + exportContent := map[string][]byte{} + for k, v := range exportContentStrings { + exportContent[k] = []byte(v) + } + + var importContent []byte + { + var b bytes.Buffer + zipWriter := zip.NewWriter(&b) + rawDataFn := func(r io.Reader, filename string, sz int) error { + header, zerr := zip.FileInfoHeader(dummyFileInfo{ + name: filename, + size: int64(sz), + mode: 0o600, + modTime: time.Now(), + isDir: false, + sys: nil, + }) + if zerr != nil { + adminLogIf(ctx, zerr) + return nil + } + header.Method = zip.Deflate + zwriter, zerr := zipWriter.CreateHeader(header) + if zerr != nil { + adminLogIf(ctx, zerr) + return nil + } + if _, err := io.Copy(zwriter, r); err != nil { + adminLogIf(ctx, err) + } + return nil + } + for _, f := range iamExportFiles { + iamFile := pathJoin(iamAssetsDir, f) + + fileContent, ok := exportContent[f] + if !ok { + t.Fatalf("missing content for %s", f) + } + + if err := rawDataFn(bytes.NewReader(fileContent), iamFile, len(fileContent)); err != nil { + t.Fatalf("failed to write %s: %v", iamFile, err) + } + } + zipWriter.Close() + importContent = b.Bytes() + } + + for i, testCase := range iamTestSuites { + t.Run( + fmt.Sprintf("Test: %d, ServerType: %s", i+1, testCase.ServerTypeDescription), + func(t *testing.T) { + c := &check{t, testCase.serverType} + suite := testCase + + ldapServer := os.Getenv(EnvTestLDAPServer) + if ldapServer == "" { + c.Skipf("Skipping LDAP test as no 
LDAP server is provided via %s", EnvTestLDAPServer) + } + + suite.SetUpSuite(c) + suite.SetUpLDAP(c, ldapServer) + suite.TestIAMImportAssetContent(c, importContent) + suite.TearDownSuite(c) + }, + ) + } +} + +type iamTestContent struct { + policies map[string][]byte + ldapUserPolicyMappings map[string][]string + ldapGroupPolicyMappings map[string][]string +} + +func (s *TestSuiteIAM) TestIAMExport(c *check, caseNum int, content iamTestContent) []byte { + ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout) + defer cancel() + + for policy, policyBytes := range content.policies { + err := s.adm.AddCannedPolicy(ctx, policy, policyBytes) + if err != nil { + c.Fatalf("export %d: policy add error: %v", caseNum, err) + } + } + + for userDN, policies := range content.ldapUserPolicyMappings { + // No need to detach, we are starting from a clean slate after exporting. + _, err := s.adm.AttachPolicyLDAP(ctx, madmin.PolicyAssociationReq{ + Policies: policies, + User: userDN, + }) + if err != nil { + c.Fatalf("export %d: Unable to attach policy: %v", caseNum, err) + } + } + + for groupDN, policies := range content.ldapGroupPolicyMappings { + _, err := s.adm.AttachPolicyLDAP(ctx, madmin.PolicyAssociationReq{ + Policies: policies, + Group: groupDN, + }) + if err != nil { + c.Fatalf("export %d: Unable to attach group policy: %v", caseNum, err) + } + } + + contentReader, err := s.adm.ExportIAM(ctx) + if err != nil { + c.Fatalf("export %d: Unable to export IAM: %v", caseNum, err) + } + defer contentReader.Close() + + expContent, err := io.ReadAll(contentReader) + if err != nil { + c.Fatalf("export %d: Unable to read exported content: %v", caseNum, err) + } + + return expContent +} + +type dummyCloser struct { + io.Reader +} + +func (d dummyCloser) Close() error { return nil } + +func (s *TestSuiteIAM) TestIAMImportAssetContent(c *check, content []byte) { + ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout) + defer cancel() + + dummyCloser := dummyCloser{bytes.NewReader(content)} + err := s.adm.ImportIAM(ctx, dummyCloser) + if err != nil { + c.Fatalf("Unable to import IAM: %v", err) + } + + entRes, err := s.adm.GetLDAPPolicyEntities(ctx, madmin.PolicyEntitiesQuery{}) + if err != nil { + c.Fatalf("Unable to get policy entities: %v", err) + } + + expected := madmin.PolicyEntitiesResult{ + PolicyMappings: []madmin.PolicyEntities{ + { + Policy: "consoleAdmin", + Users: []string{"uid=dillon,ou=people,ou=swengg,dc=min,dc=io"}, + Groups: []string{"cn=project.c,ou=groups,ou=swengg,dc=min,dc=io"}, + }, + }, + } + + entRes.Timestamp = time.Time{} + if !reflect.DeepEqual(expected, entRes) { + c.Fatalf("policy entities mismatch: expected: %v, got: %v", expected, entRes) + } + + dn := "uid=svc.algorithm,ou=swengg,dc=min,dc=io" + res, err := s.adm.ListAccessKeysLDAP(ctx, dn, "") + if err != nil { + c.Fatalf("Unable to list access keys: %v", err) + } + + epochTime := time.Unix(0, 0).UTC() + expectedAccKeys := madmin.ListAccessKeysLDAPResp{ + ServiceAccounts: []madmin.ServiceAccountInfo{ + { + AccessKey: "u4ccRswj62HV3Ifwima7", + Expiration: &epochTime, + }, + }, + } + + if !reflect.DeepEqual(expectedAccKeys, res) { + c.Fatalf("access keys mismatch: expected: %v, got: %v", expectedAccKeys, res) + } + + accKeyInfo, err := s.adm.InfoServiceAccount(ctx, "u4ccRswj62HV3Ifwima7") + if err != nil { + c.Fatalf("Unable to get service account info: %v", err) + } + if accKeyInfo.ParentUser != "uid=svc.algorithm,ou=swengg,dc=min,dc=io" { + c.Fatalf("parent mismatch: expected: %s, got: 
%s", "uid=svc.algorithm,ou=swengg,dc=min,dc=io", accKeyInfo.ParentUser) + } +} + +func (s *TestSuiteIAM) TestIAMImport(c *check, exportedContent []byte, caseNum int, content iamTestContent) { + ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout) + defer cancel() + + dummyCloser := dummyCloser{bytes.NewReader(exportedContent)} + err := s.adm.ImportIAM(ctx, dummyCloser) + if err != nil { + c.Fatalf("import %d: Unable to import IAM: %v", caseNum, err) + } + + gotContent := iamTestContent{ + policies: make(map[string][]byte), + ldapUserPolicyMappings: make(map[string][]string), + ldapGroupPolicyMappings: make(map[string][]string), + } + policyContentMap, err := s.adm.ListCannedPolicies(ctx) + if err != nil { + c.Fatalf("import %d: Unable to list policies: %v", caseNum, err) + } + defaultCannedPolicies := set.CreateStringSet("consoleAdmin", "readwrite", "readonly", + "diagnostics", "writeonly") + for policy, policyBytes := range policyContentMap { + if defaultCannedPolicies.Contains(policy) { + continue + } + gotContent.policies[policy] = policyBytes + } + + policyQueryRes, err := s.adm.GetLDAPPolicyEntities(ctx, madmin.PolicyEntitiesQuery{}) + if err != nil { + c.Fatalf("import %d: Unable to get policy entities: %v", caseNum, err) + } + + for _, entity := range policyQueryRes.PolicyMappings { + m := gotContent.ldapUserPolicyMappings + for _, user := range entity.Users { + m[user] = append(m[user], entity.Policy) + } + m = gotContent.ldapGroupPolicyMappings + for _, group := range entity.Groups { + m[group] = append(m[group], entity.Policy) + } + } + + { + // We don't compare the values of the canned policies because server is + // re-encoding them. (FIXME?) + for k := range content.policies { + content.policies[k] = nil + gotContent.policies[k] = nil + } + if !reflect.DeepEqual(content.policies, gotContent.policies) { + c.Fatalf("import %d: policies mismatch: expected: %v, got: %v", caseNum, content.policies, gotContent.policies) + } + } + + if !reflect.DeepEqual(content.ldapUserPolicyMappings, gotContent.ldapUserPolicyMappings) { + c.Fatalf("import %d: user policy mappings mismatch: expected: %v, got: %v", caseNum, content.ldapUserPolicyMappings, gotContent.ldapUserPolicyMappings) + } + + if !reflect.DeepEqual(content.ldapGroupPolicyMappings, gotContent.ldapGroupPolicyMappings) { + c.Fatalf("import %d: group policy mappings mismatch: expected: %v, got: %v", caseNum, content.ldapGroupPolicyMappings, gotContent.ldapGroupPolicyMappings) + } +} + +func (s *TestSuiteIAM) TestLDAPSTS(c *check) { + ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout) + defer cancel() + + bucket := getRandomBucketName() + err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{}) + if err != nil { + c.Fatalf("bucket create error: %v", err) + } + + // Create policy + policy := "mypolicy" + policyBytes := fmt.Appendf(nil, `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:ListBucket" + ], + "Resource": [ + "arn:aws:s3:::%s/*" + ] + } + ] +}`, bucket) + err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) + if err != nil { + c.Fatalf("policy add error: %v", err) + } + + ldapID := cr.LDAPIdentity{ + Client: s.TestSuiteCommon.client, + STSEndpoint: s.endPoint, + LDAPUsername: "dillon", + LDAPPassword: "dillon", + } + + _, err = ldapID.Retrieve() + if err == nil { + c.Fatalf("Expected to fail to create STS cred with no associated policy!") + } + + // Attempting to set a non-existent policy 
should fail.
+	userDN := "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
+	_, err = s.adm.AttachPolicyLDAP(ctx, madmin.PolicyAssociationReq{
+		Policies: []string{policy + "x"},
+		User:     userDN,
+	})
+	if err == nil {
+		c.Fatalf("should not be able to attach non-existent policy")
+	}
+
+	userReq := madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		User:     userDN,
+	}
+
+	if _, err = s.adm.AttachPolicyLDAP(ctx, userReq); err != nil {
+		c.Fatalf("Unable to attach user policy: %v", err)
+	}
+
+	value, err := ldapID.Retrieve()
+	if err != nil {
+		c.Fatalf("Expected to generate STS creds, got err: %#v", err)
+	}
+
+	minioClient, err := minio.New(s.endpoint, &minio.Options{
+		Creds:     cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
+		Secure:    s.secure,
+		Transport: s.TestSuiteCommon.client.Transport,
+	})
+	if err != nil {
+		c.Fatalf("Error initializing client: %v", err)
+	}
+
+	// Validate that user listing returns exactly the one LDAP user with the attached policy.
+	usersList, err := s.adm.ListUsers(ctx)
+	if err != nil {
+		c.Fatalf("list users should not fail: %v", err)
+	}
+	if len(usersList) != 1 {
+		c.Fatalf("expected user listing output: %v", usersList)
+	}
+	uinfo := usersList[userDN]
+	if uinfo.PolicyName != policy || uinfo.Status != madmin.AccountEnabled {
+		c.Fatalf("expected user listing content: %v", uinfo)
+	}
+
+	// Validate that the client from sts creds can access the bucket.
+	c.mustListObjects(ctx, minioClient, bucket)
+
+	// Validate that the client cannot remove any objects
+	err = minioClient.RemoveObject(ctx, bucket, "someobject", minio.RemoveObjectOptions{})
+	if err.Error() != "Access Denied." {
+		c.Fatalf("unexpected non-access-denied err: %v", err)
+	}
+
+	if _, err = s.adm.DetachPolicyLDAP(ctx, userReq); err != nil {
+		c.Fatalf("Unable to detach user policy: %v", err)
+	}
+
+	_, err = ldapID.Retrieve()
+	if err == nil {
+		c.Fatalf("Expected to fail to create a user with no associated policy!")
+	}
+
+	// Set policy via group and validate policy assignment.
+	groupDN := "cn=projectb,ou=groups,ou=swengg,dc=min,dc=io"
+	groupReq := madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		Group:    groupDN,
+	}
+
+	if _, err = s.adm.AttachPolicyLDAP(ctx, groupReq); err != nil {
+		c.Fatalf("Unable to attach group policy: %v", err)
+	}
+
+	value, err = ldapID.Retrieve()
+	if err != nil {
+		c.Fatalf("Expected to generate STS creds, got err: %#v", err)
+	}
+
+	minioClient, err = minio.New(s.endpoint, &minio.Options{
+		Creds:     cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken),
+		Secure:    s.secure,
+		Transport: s.TestSuiteCommon.client.Transport,
+	})
+	if err != nil {
+		c.Fatalf("Error initializing client: %v", err)
+	}
+
+	// Validate that the client from sts creds can access the bucket.
+ c.mustListObjects(ctx, minioClient, bucket) + + // Validate that the client cannot remove any objects + err = minioClient.RemoveObject(ctx, bucket, "someobject", minio.RemoveObjectOptions{}) + c.Assert(err.Error(), "Access Denied.") + + if _, err = s.adm.DetachPolicyLDAP(ctx, groupReq); err != nil { + c.Fatalf("Unable to detach group policy: %v", err) + } +} + +func (s *TestSuiteIAM) TestLDAPUnicodeVariationsLegacyAPI(c *check) { + ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout) + defer cancel() + + bucket := getRandomBucketName() + err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{}) + if err != nil { + c.Fatalf("bucket create error: %v", err) + } + + // Create policy + policy := "mypolicy" + policyBytes := fmt.Appendf(nil, `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:ListBucket" + ], + "Resource": [ + "arn:aws:s3:::%s/*" + ] + } + ] +}`, bucket) + err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) + if err != nil { + c.Fatalf("policy add error: %v", err) + } + + ldapID := cr.LDAPIdentity{ + Client: s.TestSuiteCommon.client, + STSEndpoint: s.endPoint, + LDAPUsername: "svc.algorithm", + LDAPPassword: "example", + } + + _, err = ldapID.Retrieve() + if err == nil { + c.Fatalf("Expected to fail to create STS cred with no associated policy!") + } + + mustNormalizeDN := func(dn string) string { + normalizedDN, err := ldap.NormalizeDN(dn) + if err != nil { + c.Fatalf("normalize err: %v", err) + } + return normalizedDN + } + + actualUserDN := mustNormalizeDN("uid=svc.algorithm,OU=swengg,DC=min,DC=io") + + // \uFE52 is the unicode dot SMALL FULL STOP used below: + userDNWithUnicodeDot := "uid=svc﹒algorithm,OU=swengg,DC=min,DC=io" + + if err = s.adm.SetPolicy(ctx, policy, userDNWithUnicodeDot, false); err != nil { + c.Fatalf("Unable to set policy: %v", err) + } + + value, err := ldapID.Retrieve() + if err != nil { + c.Fatalf("Expected to generate STS creds, got err: %#v", err) + } + + usersList, err := s.adm.ListUsers(ctx) + if err != nil { + c.Fatalf("list users should not fail: %v", err) + } + if len(usersList) != 1 { + c.Fatalf("expected user listing output: %#v", usersList) + } + uinfo := usersList[actualUserDN] + if uinfo.PolicyName != policy || uinfo.Status != madmin.AccountEnabled { + c.Fatalf("expected user listing content: %v", uinfo) + } + + minioClient, err := minio.New(s.endpoint, &minio.Options{ + Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken), + Secure: s.secure, + Transport: s.TestSuiteCommon.client.Transport, + }) + if err != nil { + c.Fatalf("Error initializing client: %v", err) + } + + // Validate that the client from sts creds can access the bucket. + c.mustListObjects(ctx, minioClient, bucket) + + // Validate that the client cannot remove any objects + err = minioClient.RemoveObject(ctx, bucket, "someobject", minio.RemoveObjectOptions{}) + if err.Error() != "Access Denied." { + c.Fatalf("unexpected non-access-denied err: %v", err) + } + + // Remove the policy assignment on the user DN: + if err = s.adm.SetPolicy(ctx, "", userDNWithUnicodeDot, false); err != nil { + c.Fatalf("Unable to remove policy setting: %v", err) + } + + _, err = ldapID.Retrieve() + if err == nil { + c.Fatalf("Expected to fail to create a user with no associated policy!") + } + + // Set policy via group and validate policy assignment. 
+ actualGroupDN := mustNormalizeDN("cn=project.c,ou=groups,ou=swengg,dc=min,dc=io") + groupDNWithUnicodeDot := "cn=project﹒c,ou=groups,ou=swengg,dc=min,dc=io" + if err = s.adm.SetPolicy(ctx, policy, groupDNWithUnicodeDot, true); err != nil { + c.Fatalf("Unable to attach group policy: %v", err) + } + + value, err = ldapID.Retrieve() + if err != nil { + c.Fatalf("Expected to generate STS creds, got err: %#v", err) + } + + policyResult, err := s.adm.GetLDAPPolicyEntities(ctx, madmin.PolicyEntitiesQuery{ + Policy: []string{policy}, + }) + if err != nil { + c.Fatalf("GetLDAPPolicyEntities should not fail: %v", err) + } + { + // Check that the mapping we created exists. + idx := slices.IndexFunc(policyResult.PolicyMappings, func(e madmin.PolicyEntities) bool { + return e.Policy == policy && slices.Contains(e.Groups, actualGroupDN) + }) + if idx < 0 { + c.Fatalf("expected groupDN (%s) to be present in mapping list: %#v", actualGroupDN, policyResult) + } + } + + minioClient, err = minio.New(s.endpoint, &minio.Options{ + Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken), + Secure: s.secure, + Transport: s.TestSuiteCommon.client.Transport, + }) + if err != nil { + c.Fatalf("Error initializing client: %v", err) + } + + // Validate that the client from sts creds can access the bucket. + c.mustListObjects(ctx, minioClient, bucket) + + // Validate that the client cannot remove any objects + err = minioClient.RemoveObject(ctx, bucket, "someobject", minio.RemoveObjectOptions{}) + c.Assert(err.Error(), "Access Denied.") +} + +func (s *TestSuiteIAM) TestLDAPUnicodeVariations(c *check) { + ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout) + defer cancel() + + bucket := getRandomBucketName() + err := s.client.MakeBucket(ctx, bucket, minio.MakeBucketOptions{}) + if err != nil { + c.Fatalf("bucket create error: %v", err) + } + + // Create policy + policy := "mypolicy" + policyBytes := fmt.Appendf(nil, `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:ListBucket" + ], + "Resource": [ + "arn:aws:s3:::%s/*" + ] + } + ] +}`, bucket) + err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) + if err != nil { + c.Fatalf("policy add error: %v", err) + } + + ldapID := cr.LDAPIdentity{ + Client: s.TestSuiteCommon.client, + STSEndpoint: s.endPoint, + LDAPUsername: "svc.algorithm", + LDAPPassword: "example", + } + + _, err = ldapID.Retrieve() + if err == nil { + c.Fatalf("Expected to fail to create STS cred with no associated policy!") + } + + mustNormalizeDN := func(dn string) string { + normalizedDN, err := ldap.NormalizeDN(dn) + if err != nil { + c.Fatalf("normalize err: %v", err) + } + return normalizedDN + } + + actualUserDN := mustNormalizeDN("uid=svc.algorithm,OU=swengg,DC=min,DC=io") + + // \uFE52 is the unicode dot SMALL FULL STOP used below: + userDNWithUnicodeDot := "uid=svc﹒algorithm,OU=swengg,DC=min,DC=io" + + userReq := madmin.PolicyAssociationReq{ + Policies: []string{policy}, + User: userDNWithUnicodeDot, + } + + if _, err = s.adm.AttachPolicyLDAP(ctx, userReq); err != nil { + c.Fatalf("Unable to attach user policy: %v", err) + } + + value, err := ldapID.Retrieve() + if err != nil { + c.Fatalf("Expected to generate STS creds, got err: %#v", err) + } + + usersList, err := s.adm.ListUsers(ctx) + if err != nil { + c.Fatalf("list users should not fail: %v", err) + } + if len(usersList) != 1 { + c.Fatalf("expected user listing output: %#v", usersList) + } + uinfo := 
usersList[actualUserDN] + if uinfo.PolicyName != policy || uinfo.Status != madmin.AccountEnabled { + c.Fatalf("expected user listing content: %v", uinfo) + } + + minioClient, err := minio.New(s.endpoint, &minio.Options{ + Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken), + Secure: s.secure, + Transport: s.TestSuiteCommon.client.Transport, + }) + if err != nil { + c.Fatalf("Error initializing client: %v", err) + } // Validate that the client from sts creds can access the bucket. c.mustListObjects(ctx, minioClient, bucket) @@ -784,9 +1798,9 @@ func (s *TestSuiteIAM) TestLDAPSTS(c *check) { } // Remove the policy assignment on the user DN: - err = s.adm.SetPolicy(ctx, "", userDN, false) - if err != nil { - c.Fatalf("Unable to remove policy setting: %v", err) + + if _, err = s.adm.DetachPolicyLDAP(ctx, userReq); err != nil { + c.Fatalf("Unable to detach user policy: %v", err) } _, err = ldapID.Retrieve() @@ -795,10 +1809,15 @@ func (s *TestSuiteIAM) TestLDAPSTS(c *check) { } // Set policy via group and validate policy assignment. - groupDN := "cn=projectb,ou=groups,ou=swengg,dc=min,dc=io" - err = s.adm.SetPolicy(ctx, policy, groupDN, true) - if err != nil { - c.Fatalf("Unable to set group policy: %v", err) + actualGroupDN := mustNormalizeDN("cn=project.c,ou=groups,ou=swengg,dc=min,dc=io") + groupDNWithUnicodeDot := "cn=project﹒c,ou=groups,ou=swengg,dc=min,dc=io" + groupReq := madmin.PolicyAssociationReq{ + Policies: []string{policy}, + Group: groupDNWithUnicodeDot, + } + + if _, err = s.adm.AttachPolicyLDAP(ctx, groupReq); err != nil { + c.Fatalf("Unable to attach group policy: %v", err) } value, err = ldapID.Retrieve() @@ -806,6 +1825,22 @@ func (s *TestSuiteIAM) TestLDAPSTS(c *check) { c.Fatalf("Expected to generate STS creds, got err: %#v", err) } + policyResult, err := s.adm.GetLDAPPolicyEntities(ctx, madmin.PolicyEntitiesQuery{ + Policy: []string{policy}, + }) + if err != nil { + c.Fatalf("GetLDAPPolicyEntities should not fail: %v", err) + } + { + // Check that the mapping we created exists. 
+ idx := slices.IndexFunc(policyResult.PolicyMappings, func(e madmin.PolicyEntities) bool { + return e.Policy == policy && slices.Contains(e.Groups, actualGroupDN) + }) + if idx < 0 { + c.Fatalf("expected groupDN (%s) to be present in mapping list: %#v", actualGroupDN, policyResult) + } + } + minioClient, err = minio.New(s.endpoint, &minio.Options{ Creds: cr.NewStaticV4(value.AccessKeyID, value.SecretAccessKey, value.SessionToken), Secure: s.secure, @@ -821,6 +1856,10 @@ func (s *TestSuiteIAM) TestLDAPSTS(c *check) { // Validate that the client cannot remove any objects err = minioClient.RemoveObject(ctx, bucket, "someobject", minio.RemoveObjectOptions{}) c.Assert(err.Error(), "Access Denied.") + + if _, err = s.adm.DetachPolicyLDAP(ctx, groupReq); err != nil { + c.Fatalf("Unable to detach group policy: %v", err) + } } func (s *TestSuiteIAM) TestLDAPSTSServiceAccounts(c *check) { @@ -835,7 +1874,7 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccounts(c *check) { // Create policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -850,16 +1889,20 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccounts(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) } userDN := "uid=dillon,ou=people,ou=swengg,dc=min,dc=io" - err = s.adm.SetPolicy(ctx, policy, userDN, false) - if err != nil { - c.Fatalf("Unable to set policy: %v", err) + userReq := madmin.PolicyAssociationReq{ + Policies: []string{policy}, + User: userDN, + } + + if _, err = s.adm.AttachPolicyLDAP(ctx, userReq); err != nil { + c.Fatalf("Unable to attach user policy: %v", err) } ldapID := cr.LDAPIdentity{ @@ -914,6 +1957,11 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccounts(c *check) { // 6. Check that service account cannot be created for some other user. c.mustNotCreateSvcAccount(ctx, globalActiveCred.AccessKey, userAdmClient) + + // Detach the policy from the user + if _, err = s.adm.DetachPolicyLDAP(ctx, userReq); err != nil { + c.Fatalf("Unable to detach user policy: %v", err) + } } func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithUsername(c *check) { @@ -934,12 +1982,12 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithUsername(c *check) { { "Effect": "Allow", "Action": [ - "s3:PutObject", - "s3:GetObject", - "s3:ListBucket" + "s3:PutObject", + "s3:GetObject", + "s3:ListBucket" ], "Resource": [ - "arn:aws:s3:::${ldap:username}/*" + "arn:aws:s3:::${ldap:username}/*" ] } ] @@ -950,9 +1998,14 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithUsername(c *check) { } userDN := "uid=dillon,ou=people,ou=swengg,dc=min,dc=io" - err = s.adm.SetPolicy(ctx, policy, userDN, false) - if err != nil { - c.Fatalf("Unable to set policy: %v", err) + + userReq := madmin.PolicyAssociationReq{ + Policies: []string{policy}, + User: userDN, + } + + if _, err = s.adm.AttachPolicyLDAP(ctx, userReq); err != nil { + c.Fatalf("Unable to attach user policy: %v", err) } ldapID := cr.LDAPIdentity{ @@ -1003,6 +2056,10 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithUsername(c *check) { // 3. 
Check S3 access for download c.mustDownload(ctx, svcClient, bucket) + + if _, err = s.adm.DetachPolicyLDAP(ctx, userReq); err != nil { + c.Fatalf("Unable to detach user policy: %v", err) + } } // In this test, the parent users gets their permissions from a group, rather @@ -1019,7 +2076,7 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithGroups(c *check) { // Create policy policy := "mypolicy" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -1034,16 +2091,20 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithGroups(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) } groupDN := "cn=projecta,ou=groups,ou=swengg,dc=min,dc=io" - err = s.adm.SetPolicy(ctx, policy, groupDN, true) - if err != nil { - c.Fatalf("Unable to set policy: %v", err) + userReq := madmin.PolicyAssociationReq{ + Policies: []string{policy}, + Group: groupDN, + } + + if _, err = s.adm.AttachPolicyLDAP(ctx, userReq); err != nil { + c.Fatalf("Unable to attach user policy: %v", err) } ldapID := cr.LDAPIdentity{ @@ -1098,6 +2159,342 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithGroups(c *check) { // 6. Check that service account cannot be created for some other user. c.mustNotCreateSvcAccount(ctx, globalActiveCred.AccessKey, userAdmClient) + + // Detach the user policy + if _, err = s.adm.DetachPolicyLDAP(ctx, userReq); err != nil { + c.Fatalf("Unable to detach user policy: %v", err) + } +} + +func (s *TestSuiteIAM) TestLDAPCyrillicUser(c *check) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + userReq := madmin.PolicyAssociationReq{ + Policies: []string{"readwrite"}, + User: "uid=Пользователь,ou=people,ou=swengg,dc=min,dc=io", + } + + if _, err := s.adm.AttachPolicyLDAP(ctx, userReq); err != nil { + c.Fatalf("Unable to attach user policy: %v", err) + } + + cases := []struct { + username string + dn string + }{ + { + username: "Пользователь", + dn: "uid=Пользователь,ou=people,ou=swengg,dc=min,dc=io", + }, + } + + conn, err := globalIAMSys.LDAPConfig.LDAP.Connect() + if err != nil { + c.Fatalf("LDAP connect failed: %v", err) + } + defer conn.Close() + + for i, testCase := range cases { + ldapID := cr.LDAPIdentity{ + Client: s.TestSuiteCommon.client, + STSEndpoint: s.endPoint, + LDAPUsername: testCase.username, + LDAPPassword: "example", + } + + value, err := ldapID.Retrieve() + if err != nil { + c.Fatalf("Expected to generate STS creds, got err: %#v", err) + } + + // Retrieve the STS account's credential object. + u, ok := globalIAMSys.GetUser(ctx, value.AccessKeyID) + if !ok { + c.Fatalf("Expected to find user %s", value.AccessKeyID) + } + + if u.Credentials.AccessKey != value.AccessKeyID { + c.Fatalf("Expected access key %s, got %s", value.AccessKeyID, u.Credentials.AccessKey) + } + + // Retrieve the credential's claims. + secret, err := getTokenSigningKey() + if err != nil { + c.Fatalf("Error getting token signing key: %v", err) + } + claims, err := getClaimsFromTokenWithSecret(value.SessionToken, secret) + if err != nil { + c.Fatalf("Error getting claims from token: %v", err) + } + + // Validate claims. 
+ dnClaim := claims.MapClaims[ldapActualUser].(string) + if dnClaim != testCase.dn { + c.Fatalf("Test %d: unexpected dn claim: %s", i+1, dnClaim) + } + } + + if _, err = s.adm.DetachPolicyLDAP(ctx, userReq); err != nil { + c.Fatalf("Unable to detach user policy: %v", err) + } +} + +func (s *TestSuiteIAM) TestLDAPSlashDN(c *check) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + policyReq := madmin.PolicyAssociationReq{ + Policies: []string{"readwrite"}, + } + + cases := []struct { + username string + dn string + group string + }{ + { + username: "slashuser", + dn: "uid=slash/user,ou=people,ou=swengg,dc=min,dc=io", + }, + { + username: "dillon", + dn: "uid=dillon,ou=people,ou=swengg,dc=min,dc=io", + group: "cn=project/d,ou=groups,ou=swengg,dc=min,dc=io", + }, + } + + conn, err := globalIAMSys.LDAPConfig.LDAP.Connect() + if err != nil { + c.Fatalf("LDAP connect failed: %v", err) + } + defer conn.Close() + + for i, testCase := range cases { + if testCase.group != "" { + policyReq.Group = testCase.group + policyReq.User = "" + } else { + policyReq.User = testCase.dn + policyReq.Group = "" + } + + if _, err := s.adm.AttachPolicyLDAP(ctx, policyReq); err != nil { + c.Fatalf("Unable to attach policy: %v", err) + } + + ldapID := cr.LDAPIdentity{ + Client: s.TestSuiteCommon.client, + STSEndpoint: s.endPoint, + LDAPUsername: testCase.username, + LDAPPassword: testCase.username, + } + + value, err := ldapID.Retrieve() + if err != nil { + c.Fatalf("Expected to generate STS creds, got err: %#v", err) + } + + // Retrieve the STS account's credential object. + u, ok := globalIAMSys.GetUser(ctx, value.AccessKeyID) + if !ok { + c.Fatalf("Expected to find user %s", value.AccessKeyID) + } + + if u.Credentials.AccessKey != value.AccessKeyID { + c.Fatalf("Expected access key %s, got %s", value.AccessKeyID, u.Credentials.AccessKey) + } + + // Retrieve the credential's claims. + secret, err := getTokenSigningKey() + if err != nil { + c.Fatalf("Error getting token signing key: %v", err) + } + claims, err := getClaimsFromTokenWithSecret(value.SessionToken, secret) + if err != nil { + c.Fatalf("Error getting claims from token: %v", err) + } + + // Validate claims. 
+ dnClaim := claims.MapClaims[ldapActualUser].(string) + if dnClaim != testCase.dn { + c.Fatalf("Test %d: unexpected dn claim: %s", i+1, dnClaim) + } + + if _, err = s.adm.DetachPolicyLDAP(ctx, policyReq); err != nil { + c.Fatalf("Unable to detach user policy: %v", err) + } + } +} + +func (s *TestSuiteIAM) TestLDAPAttributesLookup(c *check) { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + groupDN := "cn=projectb,ou=groups,ou=swengg,dc=min,dc=io" + groupReq := madmin.PolicyAssociationReq{ + Policies: []string{"readwrite"}, + Group: groupDN, + } + + if _, err := s.adm.AttachPolicyLDAP(ctx, groupReq); err != nil { + c.Fatalf("Unable to attach user policy: %v", err) + } + + cases := []struct { + username string + dn string + expectedSSHKeyType string + }{ + { + username: "dillon", + dn: "uid=dillon,ou=people,ou=swengg,dc=min,dc=io", + expectedSSHKeyType: "ssh-ed25519", + }, + { + username: "liza", + dn: "uid=liza,ou=people,ou=swengg,dc=min,dc=io", + expectedSSHKeyType: "ssh-rsa", + }, + } + + conn, err := globalIAMSys.LDAPConfig.LDAP.Connect() + if err != nil { + c.Fatalf("LDAP connect failed: %v", err) + } + defer conn.Close() + + for i, testCase := range cases { + ldapID := cr.LDAPIdentity{ + Client: s.TestSuiteCommon.client, + STSEndpoint: s.endPoint, + LDAPUsername: testCase.username, + LDAPPassword: testCase.username, + } + + value, err := ldapID.Retrieve() + if err != nil { + c.Fatalf("Expected to generate STS creds, got err: %#v", err) + } + + // Retrieve the STS account's credential object. + u, ok := globalIAMSys.GetUser(ctx, value.AccessKeyID) + if !ok { + c.Fatalf("Expected to find user %s", value.AccessKeyID) + } + + if u.Credentials.AccessKey != value.AccessKeyID { + c.Fatalf("Expected access key %s, got %s", value.AccessKeyID, u.Credentials.AccessKey) + } + + // Retrieve the credential's claims. + secret, err := getTokenSigningKey() + if err != nil { + c.Fatalf("Error getting token signing key: %v", err) + } + claims, err := getClaimsFromTokenWithSecret(value.SessionToken, secret) + if err != nil { + c.Fatalf("Error getting claims from token: %v", err) + } + + // Validate claims. Check if the sshPublicKey claim is present. 
+ dnClaim := claims.MapClaims[ldapActualUser].(string) + if dnClaim != testCase.dn { + c.Fatalf("Test %d: unexpected dn claim: %s", i+1, dnClaim) + } + sshPublicKeyClaim := claims.MapClaims[ldapAttribPrefix+"sshPublicKey"].([]any)[0].(string) + if sshPublicKeyClaim == "" { + c.Fatalf("Test %d: expected sshPublicKey claim to be present", i+1) + } + parts := strings.Split(sshPublicKeyClaim, " ") + if parts[0] != testCase.expectedSSHKeyType { + c.Fatalf("Test %d: unexpected sshPublicKey type: %s", i+1, parts[0]) + } + } + + if _, err = s.adm.DetachPolicyLDAP(ctx, groupReq); err != nil { + c.Fatalf("Unable to detach group policy: %v", err) + } +} + +func (s *TestSuiteIAM) TestLDAPPolicyEntitiesLookup(c *check) { + ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout) + defer cancel() + + groupDN := "cn=projectb,ou=groups,ou=swengg,dc=min,dc=io" + groupPolicy := "readwrite" + groupReq := madmin.PolicyAssociationReq{ + Policies: []string{groupPolicy}, + Group: groupDN, + } + _, err := s.adm.AttachPolicyLDAP(ctx, groupReq) + if err != nil { + c.Fatalf("Unable to attach group policy: %v", err) + } + type caseTemplate struct { + inDN string + expectedOutDN string + expectedGroupDN string + expectedGroupPolicy string + } + cases := []caseTemplate{ + { + inDN: "uid=dillon,ou=people,ou=swengg,dc=min,dc=io", + expectedOutDN: "uid=dillon,ou=people,ou=swengg,dc=min,dc=io", + expectedGroupDN: groupDN, + expectedGroupPolicy: groupPolicy, + }, + } + + policy := "readonly" + for _, testCase := range cases { + userReq := madmin.PolicyAssociationReq{ + Policies: []string{policy}, + User: testCase.inDN, + } + _, err := s.adm.AttachPolicyLDAP(ctx, userReq) + if err != nil { + c.Fatalf("Unable to attach policy: %v", err) + } + + entities, err := s.adm.GetLDAPPolicyEntities(ctx, madmin.PolicyEntitiesQuery{ + Users: []string{testCase.inDN}, + Policy: []string{policy}, + }) + if err != nil { + c.Fatalf("Unable to fetch policy entities: %v", err) + } + + // switch statement to check all the conditions + switch { + case len(entities.UserMappings) != 1: + c.Fatalf("Expected to find exactly one user mapping") + case entities.UserMappings[0].User != testCase.expectedOutDN: + c.Fatalf("Expected user DN `%s`, found `%s`", testCase.expectedOutDN, entities.UserMappings[0].User) + case len(entities.UserMappings[0].Policies) != 1: + c.Fatalf("Expected exactly one policy attached to user") + case entities.UserMappings[0].Policies[0] != policy: + c.Fatalf("Expected attached policy `%s`, found `%s`", policy, entities.UserMappings[0].Policies[0]) + case len(entities.UserMappings[0].MemberOfMappings) != 1: + c.Fatalf("Expected exactly one group attached to user") + case entities.UserMappings[0].MemberOfMappings[0].Group != testCase.expectedGroupDN: + c.Fatalf("Expected attached group `%s`, found `%s`", testCase.expectedGroupDN, entities.UserMappings[0].MemberOfMappings[0].Group) + case len(entities.UserMappings[0].MemberOfMappings[0].Policies) != 1: + c.Fatalf("Expected exactly one policy attached to group") + case entities.UserMappings[0].MemberOfMappings[0].Policies[0] != testCase.expectedGroupPolicy: + c.Fatalf("Expected attached policy `%s`, found `%s`", testCase.expectedGroupPolicy, entities.UserMappings[0].MemberOfMappings[0].Policies[0]) + } + + _, err = s.adm.DetachPolicyLDAP(ctx, userReq) + if err != nil { + c.Fatalf("Unable to detach policy: %v", err) + } + } + + _, err = s.adm.DetachPolicyLDAP(ctx, groupReq) + if err != nil { + c.Fatalf("Unable to detach group policy: %v", err) + } } func (s 
*TestSuiteIAM) TestOpenIDSTS(c *check) { @@ -1130,7 +2527,7 @@ func (s *TestSuiteIAM) TestOpenIDSTS(c *check) { // Create policy - with name as one of the groups in OpenID the user is // a member of. policy := "projecta" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -1145,7 +2542,7 @@ func (s *TestSuiteIAM) TestOpenIDSTS(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -1235,7 +2632,7 @@ func (s *TestSuiteIAM) TestOpenIDSTSDurationSeconds(c *check) { {60, true}, {1800, false}, } { - policyBytes := []byte(fmt.Sprintf(policyTmpl, testCase.durSecs, bucket)) + policyBytes := fmt.Appendf(nil, policyTmpl, testCase.durSecs, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("Test %d: policy add error: %v", i+1, err) @@ -1295,7 +2692,7 @@ func (s *TestSuiteIAM) TestOpenIDSTSAddUser(c *check) { // Create policy - with name as one of the groups in OpenID the user is // a member of. policy := "projecta" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -1310,7 +2707,7 @@ func (s *TestSuiteIAM) TestOpenIDSTSAddUser(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -1385,7 +2782,7 @@ func (s *TestSuiteIAM) TestOpenIDServiceAcc(c *check) { // Create policy - with name as one of the groups in OpenID the user is // a member of. policy := "projecta" - policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -1400,7 +2797,7 @@ func (s *TestSuiteIAM) TestOpenIDServiceAcc(c *check) { ] } ] -}`, bucket)) +}`, bucket) err = s.adm.AddCannedPolicy(ctx, policy, policyBytes) if err != nil { c.Fatalf("policy add error: %v", err) @@ -2161,7 +3558,7 @@ func (s *TestSuiteIAM) TestOpenIDServiceAccWithRolePolicyUnderAMP(c *check) { svcAK, svcSK := mustGenerateCredentials(c) // This policy does not allow listing objects. 
- policyBytes := []byte(fmt.Sprintf(`{ + policyBytes := fmt.Appendf(nil, `{ "Version": "2012-10-17", "Statement": [ { @@ -2175,7 +3572,7 @@ func (s *TestSuiteIAM) TestOpenIDServiceAccWithRolePolicyUnderAMP(c *check) { ] } ] -}`, bucket)) +}`, bucket) cr, err := userAdmClient.AddServiceAccount(ctx, madmin.AddServiceAccountReq{ Policy: policyBytes, TargetUser: value.AccessKeyID, diff --git a/cmd/stserrorcode_string.go b/cmd/stserrorcode_string.go index 1774e98cb25b6..b119283505cb4 100644 --- a/cmd/stserrorcode_string.go +++ b/cmd/stserrorcode_string.go @@ -18,15 +18,16 @@ func _() { _ = x[ErrSTSMalformedPolicyDocument-7] _ = x[ErrSTSInsecureConnection-8] _ = x[ErrSTSInvalidClientCertificate-9] - _ = x[ErrSTSNotInitialized-10] - _ = x[ErrSTSIAMNotInitialized-11] - _ = x[ErrSTSUpstreamError-12] - _ = x[ErrSTSInternalError-13] + _ = x[ErrSTSTooManyIntermediateCAs-10] + _ = x[ErrSTSNotInitialized-11] + _ = x[ErrSTSIAMNotInitialized-12] + _ = x[ErrSTSUpstreamError-13] + _ = x[ErrSTSInternalError-14] } -const _STSErrorCode_name = "STSNoneSTSAccessDeniedSTSMissingParameterSTSInvalidParameterValueSTSWebIdentityExpiredTokenSTSClientGrantsExpiredTokenSTSInvalidClientGrantsTokenSTSMalformedPolicyDocumentSTSInsecureConnectionSTSInvalidClientCertificateSTSNotInitializedSTSIAMNotInitializedSTSUpstreamErrorSTSInternalError" +const _STSErrorCode_name = "STSNoneSTSAccessDeniedSTSMissingParameterSTSInvalidParameterValueSTSWebIdentityExpiredTokenSTSClientGrantsExpiredTokenSTSInvalidClientGrantsTokenSTSMalformedPolicyDocumentSTSInsecureConnectionSTSInvalidClientCertificateSTSTooManyIntermediateCAsSTSNotInitializedSTSIAMNotInitializedSTSUpstreamErrorSTSInternalError" -var _STSErrorCode_index = [...]uint16{0, 7, 22, 41, 65, 91, 118, 145, 171, 192, 219, 236, 256, 272, 288} +var _STSErrorCode_index = [...]uint16{0, 7, 22, 41, 65, 91, 118, 145, 171, 192, 219, 244, 261, 281, 297, 313} func (i STSErrorCode) String() string { if i < 0 || i >= STSErrorCode(len(_STSErrorCode_index)-1) { diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index 39970ccd0de58..0f903625c3244 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -66,7 +66,7 @@ import ( "github.com/minio/minio/internal/hash" "github.com/minio/minio/internal/logger" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) // TestMain to set up global env. @@ -83,6 +83,8 @@ func TestMain(m *testing.M) { SecretKey: auth.DefaultSecretKey, } + globalNodeAuthToken, _ = authenticateNode(auth.DefaultAccessKey, auth.DefaultSecretKey) + // disable ENVs which interfere with tests. for _, env := range []string{ crypto.EnvKMSAutoEncryption, @@ -100,16 +102,16 @@ func TestMain(m *testing.M) { // Disable printing console messages during tests. color.Output = io.Discard // Disable Error logging in testing. - logger.DisableErrorLog = true + logger.DisableLog = true // Uncomment the following line to see trace logs during unit tests. // logger.AddTarget(console.New()) // Set system resources to maximum. - setMaxResources() + setMaxResources(serverCtxt{}) // Initialize globalConsoleSys system - globalConsoleSys = NewConsoleLogger(context.Background()) + globalConsoleSys = NewConsoleLogger(context.Background(), io.Discard) globalInternodeTransport = NewInternodeHTTPTransport(0)() @@ -217,6 +219,24 @@ func prepareErasure(ctx context.Context, nDisks int) (ObjectLayer, []string, err return nil, nil, err } + // Wait up to 10 seconds for disks to come online. 
+ pools := obj.(*erasureServerPools) + t := time.Now() + for _, pool := range pools.serverPools { + for _, sets := range pool.erasureDisks { + for _, s := range sets { + if !s.IsLocal() { + for !s.IsOnline() { + time.Sleep(100 * time.Millisecond) + if time.Since(t) > 10*time.Second { + return nil, nil, errors.New("timeout waiting for disk to come online") + } + } + } + } + } + } + return obj, fsDirs, nil } @@ -282,7 +302,7 @@ func nextSuffix() string { } // isSameType - compares two object types via reflect.TypeOf -func isSameType(obj1, obj2 interface{}) bool { +func isSameType(obj1, obj2 any) bool { return reflect.TypeOf(obj1) == reflect.TypeOf(obj2) } @@ -328,7 +348,9 @@ func initTestServerWithBackend(ctx context.Context, t TestErrHandler, testServer // Test Server needs to start before formatting of disks. // Get credential. credentials := globalActiveCred - + if !globalReplicationPool.IsSet() { + globalReplicationPool.Set(nil) + } testServer.Obj = objLayer testServer.rawDiskPaths = disks testServer.Disks = mustGetPoolEndpoints(0, disks...) @@ -520,8 +542,8 @@ func truncateChunkByHalfSigv4(req *http.Request) (*http.Request, error) { return nil, err } - newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n", - hexChunkSize, chunkSignature)) + newChunkHdr := fmt.Appendf(nil, "%s"+s3ChunkSignatureStr+"%s\r\n", + hexChunkSize, chunkSignature) newChunk, err := io.ReadAll(bufReader) if err != nil { return nil, err @@ -542,8 +564,8 @@ func malformDataSigV4(req *http.Request, newByte byte) (*http.Request, error) { return nil, err } - newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n", - hexChunkSize, chunkSignature)) + newChunkHdr := fmt.Appendf(nil, "%s"+s3ChunkSignatureStr+"%s\r\n", + hexChunkSize, chunkSignature) newChunk, err := io.ReadAll(bufReader) if err != nil { return nil, err @@ -568,9 +590,9 @@ func malformChunkSizeSigV4(req *http.Request, badSize int64) (*http.Request, err } n := badSize - newHexChunkSize := []byte(fmt.Sprintf("%x", n)) - newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n", - newHexChunkSize, chunkSignature)) + newHexChunkSize := fmt.Appendf(nil, "%x", n) + newChunkHdr := fmt.Appendf(nil, "%s"+s3ChunkSignatureStr+"%s\r\n", + newHexChunkSize, chunkSignature) newChunk, err := io.ReadAll(bufReader) if err != nil { return nil, err @@ -617,8 +639,8 @@ func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTi for _, k := range headers { buf.WriteString(k) buf.WriteByte(':') - switch { - case k == "host": + switch k { + case "host": buf.WriteString(req.URL.Host) fallthrough default: @@ -729,7 +751,7 @@ func newTestStreamingRequest(method, urlStr string, dataLength, chunkSize int64, func assembleStreamingChunks(req *http.Request, body io.ReadSeeker, chunkSize int64, secretKey, signature string, currTime time.Time) (*http.Request, error, ) { - regionStr := globalSite.Region + regionStr := globalSite.Region() var stream []byte var buffer []byte body.Seek(0, 0) @@ -769,7 +791,6 @@ func assembleStreamingChunks(req *http.Request, body io.ReadSeeker, chunkSize in if n <= 0 { break } - } req.Body = io.NopCloser(bytes.NewReader(stream)) return req, nil @@ -837,7 +858,7 @@ func preSignV4(req *http.Request, accessKeyID, secretAccessKey string, expires i return errors.New("Presign cannot be generated without access and secret keys") } - region := globalSite.Region + region := globalSite.Region() date := UTCNow() scope := getScope(date, region) credential := fmt.Sprintf("%s/%s", accessKeyID, scope) @@ -965,15 +986,15 
@@ func signRequestV4(req *http.Request, accessKey, secretKey string) error { } sort.Strings(headers) - region := globalSite.Region + region := globalSite.Region() // Get canonical headers. var buf bytes.Buffer for _, k := range headers { buf.WriteString(k) buf.WriteByte(':') - switch { - case k == "host": + switch k { + case "host": buf.WriteString(req.URL.Host) fallthrough default: @@ -1065,8 +1086,8 @@ func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeek // Save for subsequent use var hashedPayload string var md5Base64 string - switch { - case body == nil: + switch body { + case nil: hashedPayload = getSHA256Hash([]byte{}) default: payloadBytes, err := io.ReadAll(body) @@ -1378,7 +1399,7 @@ func getListObjectVersionsURL(endPoint, bucketName, prefix, maxKeys, encodingTyp } // return URL for listing objects in the bucket with V2 API. -func getListObjectsV2URL(endPoint, bucketName, prefix, maxKeys, fetchOwner, encodingType string) string { +func getListObjectsV2URL(endPoint, bucketName, prefix, maxKeys, fetchOwner, encodingType, delimiter string) string { queryValue := url.Values{} queryValue.Set("list-type", "2") // Enables list objects V2 URL. if maxKeys != "" { @@ -1390,7 +1411,13 @@ func getListObjectsV2URL(endPoint, bucketName, prefix, maxKeys, fetchOwner, enco if encodingType != "" { queryValue.Set("encoding-type", encodingType) } - return makeTestTargetURL(endPoint, bucketName, prefix, queryValue) + if prefix != "" { + queryValue.Set("prefix", prefix) + } + if delimiter != "" { + queryValue.Set("delimiter", delimiter) + } + return makeTestTargetURL(endPoint, bucketName, "", queryValue) } // return URL for a new multipart upload. @@ -1466,7 +1493,7 @@ func getListenNotificationURL(endPoint, bucketName string, prefixes, suffixes, e // getRandomDisks - Creates a slice of N random disks, each of the form - minio-XXX func getRandomDisks(n int) ([]string, error) { var erasureDisks []string - for i := 0; i < n; i++ { + for range n { path, err := os.MkdirTemp(globalTestTmpDir, "minio-") if err != nil { // Remove directories created so far. @@ -1513,7 +1540,7 @@ func removeRoots(roots []string) { // initializes the specified API endpoints for the tests. // initializes the root and returns its path. // return credentials. -func initAPIHandlerTest(ctx context.Context, obj ObjectLayer, endpoints []string) (string, http.Handler, error) { +func initAPIHandlerTest(ctx context.Context, obj ObjectLayer, endpoints []string, makeBucketOptions MakeBucketOptions) (string, http.Handler, error) { initAllSubsystems(ctx) initConfigSubsystem(ctx, obj) @@ -1522,9 +1549,8 @@ func initAPIHandlerTest(ctx context.Context, obj ObjectLayer, endpoints []string // get random bucket name. bucketName := getRandomBucketName() - // Create bucket. - err := obj.MakeBucket(context.Background(), bucketName, MakeBucketOptions{}) + err := obj.MakeBucket(context.Background(), bucketName, makeBucketOptions) if err != nil { // failed to create newbucket, return err. return "", nil, err @@ -1608,7 +1634,7 @@ func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketN t.Fatal(failTestStr(anonTestStr, fmt.Sprintf("Object API Nil Test expected to fail with %d, but failed with %d", accessDenied, rec.Code))) } - // HEAD HTTTP request doesn't contain response body. + // HEAD HTTP request doesn't contain response body. if anonReq.Method != http.MethodHead { // read the response body. 
var actualContent []byte @@ -1664,7 +1690,7 @@ func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketN } } -// ExecObjectLayerAPINilTest - Sets the object layer to `nil`, and calls rhe registered object layer API endpoint, +// ExecObjectLayerAPINilTest - Sets the object layer to `nil`, and calls the registered object layer API endpoint, // and assert the error response. The purpose is to validate the API handlers response when the object layer is uninitialized. // Usage hint: Should be used at the end of the API end points tests (ex: check the last few lines of `testAPIListObjectPartsHandler`), // need a sample HTTP request to be sent as argument so that the relevant handler is called, the handler registration is expected @@ -1714,9 +1740,17 @@ func ExecObjectLayerAPINilTest(t TestErrHandler, bucketName, objectName, instanc } } +type ExecObjectLayerAPITestArgs struct { + t *testing.T + objAPITest objAPITestType + endpoints []string + init func() + makeBucketOptions MakeBucketOptions +} + // ExecObjectLayerAPITest - executes object layer API tests. // Creates single node and Erasure ObjectLayer instance, registers the specified API end points and runs test for both the layers. -func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints []string) { +func ExecObjectLayerAPITest(args ExecObjectLayerAPITestArgs) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1726,24 +1760,28 @@ func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints [ objLayer, fsDir, err := prepareFS(ctx) if err != nil { - t.Fatalf("Initialization of object layer failed for single node setup: %s", err) + args.t.Fatalf("Initialization of object layer failed for single node setup: %s", err) } - bucketFS, fsAPIRouter, err := initAPIHandlerTest(ctx, objLayer, endpoints) + bucketFS, fsAPIRouter, err := initAPIHandlerTest(ctx, objLayer, args.endpoints, args.makeBucketOptions) if err != nil { - t.Fatalf("Initialization of API handler tests failed: %s", err) + args.t.Fatalf("Initialization of API handler tests failed: %s", err) + } + + if args.init != nil { + args.init() } // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil { - t.Fatalf("Unable to initialize server config. %s", err) + args.t.Fatalf("Unable to initialize server config. %s", err) } credentials := globalActiveCred // Executing the object layer tests for single node setup. - objAPITest(objLayer, ErasureSDStr, bucketFS, fsAPIRouter, credentials, t) + args.objAPITest(objLayer, ErasureSDStr, bucketFS, fsAPIRouter, credentials, args.t) // reset globals. // this is to make sure that the tests are not affected by modified value. 
@@ -1751,23 +1789,27 @@ func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints [ objLayer, erasureDisks, err := prepareErasure16(ctx) if err != nil { - t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err) + args.t.Fatalf("Initialization of object layer failed for Erasure setup: %s", err) } defer objLayer.Shutdown(ctx) - bucketErasure, erAPIRouter, err := initAPIHandlerTest(ctx, objLayer, endpoints) + bucketErasure, erAPIRouter, err := initAPIHandlerTest(ctx, objLayer, args.endpoints, args.makeBucketOptions) if err != nil { - t.Fatalf("Initialization of API handler tests failed: %s", err) + args.t.Fatalf("Initialization of API handler tests failed: %s", err) + } + + if args.init != nil { + args.init() } // initialize the server and obtain the credentials and root. // credentials are necessary to sign the HTTP request. if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil { - t.Fatalf("Unable to initialize server config. %s", err) + args.t.Fatalf("Unable to initialize server config. %s", err) } // Executing the object layer tests for Erasure. - objAPITest(objLayer, ErasureTestStr, bucketErasure, erAPIRouter, credentials, t) + args.objAPITest(objLayer, ErasureTestStr, bucketErasure, erAPIRouter, credentials, args.t) // clean up the temporary test backend. removeRoots(append(erasureDisks, fsDir)) @@ -1776,8 +1818,8 @@ func ExecObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints [ // ExecExtendedObjectLayerTest will execute the tests with combinations of encrypted & compressed. // This can be used to test functionality when reading and writing data. func ExecExtendedObjectLayerAPITest(t *testing.T, objAPITest objAPITestType, endpoints []string) { - execExtended(t, func(t *testing.T) { - ExecObjectLayerAPITest(t, objAPITest, endpoints) + execExtended(t, func(t *testing.T, init func(), makeBucketOptions MakeBucketOptions) { + ExecObjectLayerAPITest(ExecObjectLayerAPITestArgs{t: t, objAPITest: objAPITest, endpoints: endpoints, init: init, makeBucketOptions: makeBucketOptions}) }) } @@ -1890,7 +1932,7 @@ func ExecObjectLayerTestWithDirs(t TestErrHandler, objTest objTestTypeWithDirs) // ExecObjectLayerDiskAlteredTest - executes object layer tests while altering // disks in between tests. Creates Erasure ObjectLayer instance and runs test for Erasure layer. func ExecObjectLayerDiskAlteredTest(t *testing.T, objTest objTestDiskNotFoundType) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() objLayer, fsDirs, err := prepareErasure16(ctx) @@ -1914,7 +1956,7 @@ type objTestStaleFilesType func(obj ObjectLayer, instanceType string, dirs []str // ExecObjectLayerStaleFilesTest - executes object layer tests those leaves stale // files/directories under .minio/tmp. Creates Erasure ObjectLayer instance and runs test for Erasure layer. 
func ExecObjectLayerStaleFilesTest(t *testing.T, objTest objTestStaleFilesType) { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) defer cancel() nDisks := 16 @@ -2077,7 +2119,7 @@ func generateTLSCertKey(host string) ([]byte, []byte, error) { return nil, nil, fmt.Errorf("Missing host parameter") } - publicKey := func(priv interface{}) interface{} { + publicKey := func(priv any) any { switch k := priv.(type) { case *rsa.PrivateKey: return &k.PublicKey @@ -2088,7 +2130,7 @@ func generateTLSCertKey(host string) ([]byte, []byte, error) { } } - pemBlockForKey := func(priv interface{}) *pem.Block { + pemBlockForKey := func(priv any) *pem.Block { switch k := priv.(type) { case *rsa.PrivateKey: return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} @@ -2104,7 +2146,7 @@ func generateTLSCertKey(host string) ([]byte, []byte, error) { } } - var priv interface{} + var priv any var err error priv, err = rsa.GenerateKey(crand.Reader, rsaBits) if err != nil { @@ -2133,8 +2175,8 @@ func generateTLSCertKey(host string) ([]byte, []byte, error) { BasicConstraintsValid: true, } - hosts := strings.Split(host, ",") - for _, h := range hosts { + hosts := strings.SplitSeq(host, ",") + for h := range hosts { if ip := net.ParseIP(h); ip != nil { template.IPAddresses = append(template.IPAddresses, ip) } else { @@ -2201,12 +2243,12 @@ func getEndpointsLocalAddr(endpointServerPools EndpointServerPools) string { } // fetches a random number between range min-max. -func getRandomRange(min, max int, seed int64) int { +func getRandomRange(minN, maxN int, seed int64) int { // special value -1 means no explicit seeding. - if seed != -1 { - rand.Seed(seed) + if seed == -1 { + return rand.New(rand.NewSource(time.Now().UnixNano())).Intn(maxN-minN) + minN } - return rand.Intn(max-min) + min + return rand.New(rand.NewSource(seed)).Intn(maxN-minN) + minN } // Randomizes the order of bytes in the byte array @@ -2232,7 +2274,7 @@ func TestToErrIsNil(t *testing.T) { if toStorageErr(nil) != nil { t.Errorf("Test expected to return nil, failed instead got a non-nil value %s", toStorageErr(nil)) } - ctx := context.Background() + ctx := t.Context() if toAPIError(ctx, nil) != noError { t.Errorf("Test expected error code to be ErrNone, failed instead provided %s", toAPIError(ctx, nil).Code) } @@ -2348,7 +2390,7 @@ func unzipArchive(zipFilePath, targetDir string) error { if err != nil { return err } - for _, file := range zipReader.Reader.File { + for _, file := range zipReader.File { zippedFile, err := file.Open() if err != nil { return err diff --git a/cmd/testdata/dillon_test_key.pub b/cmd/testdata/dillon_test_key.pub new file mode 100644 index 0000000000000..dc22abbd78ce1 --- /dev/null +++ b/cmd/testdata/dillon_test_key.pub @@ -0,0 +1 @@ +ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDVGk/SRz4fwTPK0+Ra7WYUGf3o08YkpI0yTMPpHwYoq dillon@example.io diff --git a/cmd/testdata/invalid_test_key.pub b/cmd/testdata/invalid_test_key.pub new file mode 100644 index 0000000000000..182a4766e7417 --- /dev/null +++ b/cmd/testdata/invalid_test_key.pub @@ -0,0 +1 @@ +ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDES4saDDRpoHDVmiYESEQrCYhw8EK7Utj/A/lqxiqZlP6Il3aN2fWu6uJQdWAovZxNeXUf8LIujisW1mJWGZPql0SLKVq6IZ707OAGmKA59IXfF5onRoU9+K4UDL7BJFfix6/3F5OV2WB3ChFrOrXhJ0CZ0sVAfGcV4q72kS19YjZNX3fqCc2HF8UQEaZGKIkw5MtdZI9a1P2bqnPuPGJybRFUzyoQXPge45QT5jnpcsAXOuXcGxbjuqaaHXFNTSKAkCU93TcjAbqUMkTz2mnFz/MnrKJTECN3Fy0GPCCQ5dxmG8p8DyMiNl7JYkX2r3XYgxmioCzkcg8fDs5p0CaQcipu+MA7iK7APKq7v4Zr/wNltXHI3DE9S8J88Hxb2FZAyEhCRfcgGmCVfoZxVNCRHNkGYzfe63BkxtnseUCzpYEhKv02H5u9rjFpdMY37kDfHDVqBbgutdMij+tQAEp1kyqi6TQL+4XHjPHkLaeekW07yB+VI90dK1A9dzTpOvE= liza@example.io diff --git a/cmd/testdata/undeleteable-object.tgz b/cmd/testdata/undeleteable-object.tgz new file mode 100644 index 0000000000000..b6abc0a94327d Binary files /dev/null and b/cmd/testdata/undeleteable-object.tgz differ diff --git a/cmd/tier-handlers.go b/cmd/tier-handlers.go index b588753ec0fde..8c81487491a16 100644 --- a/cmd/tier-handlers.go +++ b/cmd/tier-handlers.go @@ -27,7 +27,7 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/config/storageclass" "github.com/minio/mux" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/policy" ) var ( @@ -198,12 +198,14 @@ func (api adminAPIHandlers) RemoveTierHandler(w http.ResponseWriter, r *http.Req vars := mux.Vars(r) tier := vars["tier"] + force := r.Form.Get("force") == "true" + if err := globalTierConfigMgr.Reload(ctx, objAPI); err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } - if err := globalTierConfigMgr.Remove(ctx, tier); err != nil { + if err := globalTierConfigMgr.Remove(ctx, tier, force); err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return } diff --git a/cmd/tier-last-day-stats_gen.go b/cmd/tier-last-day-stats_gen.go index b996a7e117ca4..24e4dde778543 100644 --- a/cmd/tier-last-day-stats_gen.go +++ b/cmd/tier-last-day-stats_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) @@ -17,21 +17,19 @@ func (z *DailyAllTierStats) DecodeMsg(dc *msgp.Reader) (err error) { if (*z) == nil { (*z) = make(DailyAllTierStats, zb0004) } else if len((*z)) > 0 { - for key := range *z { - delete((*z), key) - } + clear((*z)) } + var field []byte + _ = field for zb0004 > 0 { zb0004-- var zb0001 string - var zb0002 lastDayTierStats zb0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err) return } - var field []byte - _ = field + var zb0002 lastDayTierStats var zb0005 uint32 zb0005, err = dc.ReadMapHeader() if err != nil { @@ -163,21 +161,19 @@ func (z *DailyAllTierStats) UnmarshalMsg(bts []byte) (o []byte, err error) { if (*z) == nil { (*z) = make(DailyAllTierStats, zb0004) } else if len((*z)) > 0 { - for key := range *z { - delete((*z), key) - } + clear((*z)) } + var field []byte + _ = field for zb0004 > 0 { - var zb0001 string var zb0002 lastDayTierStats zb0004-- + var zb0001 string zb0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err) return } - var field []byte - _ = field var zb0005 uint32 zb0005, bts, err = msgp.ReadMapHeaderBytes(bts) if err != nil { diff --git a/cmd/tier-last-day-stats_gen_test.go b/cmd/tier-last-day-stats_gen_test.go index 572e7d14799bc..590c2c4c49aa4 100644 --- a/cmd/tier-last-day-stats_gen_test.go +++ b/cmd/tier-last-day-stats_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package cmd + import ( "bytes" "testing" diff --git a/cmd/tier-sweeper.go b/cmd/tier-sweeper.go index f48c99718bb27..189c06663d8c2 100644 --- a/cmd/tier-sweeper.go +++ b/cmd/tier-sweeper.go @@ -143,13 +143,9 @@ type jentry struct { } func deleteObjectFromRemoteTier(ctx context.Context, objName, rvID, tierName string) error { - w, err := globalTierConfigMgr.getDriver(tierName) + w, err := globalTierConfigMgr.getDriver(ctx, tierName) if err != nil { return err } - err = w.Remove(ctx, objName, remoteVersionID(rvID)) - if err != nil { - return err - } - return nil + return w.Remove(ctx, objName, remoteVersionID(rvID)) } diff --git a/cmd/tier.go b/cmd/tier.go index f785ce8457b80..b53095261c81d 100644 --- a/cmd/tier.go +++ b/cmd/tier.go @@ -22,7 +22,9 @@ import ( "context" "encoding/base64" "encoding/binary" + "errors" "fmt" + "maps" "math/rand" "net/http" "path" @@ -34,7 +36,6 @@ import ( "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/hash" "github.com/minio/minio/internal/kms" - "github.com/minio/minio/internal/logger" "github.com/prometheus/client_golang/prometheus" ) @@ -64,6 +65,12 @@ var ( Message: "Specified remote backend is not empty", StatusCode: http.StatusBadRequest, } + + errTierInvalidConfig = AdminError{ + Code: "XMinioAdminTierInvalidConfig", + Message: "Unable to setup remote tier, check tier configuration", + StatusCode: http.StatusBadRequest, + } ) const ( @@ -158,17 +165,17 @@ var ( } ) -func (t *tierMetrics) Report() []Metric { - metrics := getHistogramMetrics(t.histogram, tierTTLBMD, true) +func (t *tierMetrics) Report() []MetricV2 { + metrics := getHistogramMetrics(t.histogram, tierTTLBMD, true, true) t.RLock() defer t.RUnlock() for tier, stat := range t.requestsCount { - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: tierRequestsSuccessMD, Value: float64(stat.success), VariableLabels: map[string]string{"tier": tier}, }) - metrics = append(metrics, Metric{ + metrics = append(metrics, MetricV2{ Description: tierRequestsFailureMD, Value: float64(stat.failure), VariableLabels: map[string]string{"tier": tier}, @@ -219,7 +226,7 @@ func (config *TierConfigMgr) Add(ctx context.Context, tier madmin.TierConfig, ig return errTierAlreadyExists } - d, err := newWarmBackend(ctx, tier) + d, err := newWarmBackend(ctx, tier, true) if err != nil { return err } @@ -242,15 +249,20 @@ func (config *TierConfigMgr) Add(ctx context.Context, tier madmin.TierConfig, ig } // Remove removes tier if it is empty. -func (config *TierConfigMgr) Remove(ctx context.Context, tier string) error { - d, err := config.getDriver(tier) +func (config *TierConfigMgr) Remove(ctx context.Context, tier string, force bool) error { + d, err := config.getDriver(ctx, tier) if err != nil { + if errors.Is(err, errTierNotFound) { + return nil + } return err } - if inuse, err := d.InUse(ctx); err != nil { - return err - } else if inuse { - return errTierBackendNotEmpty + if !force { + if inuse, err := d.InUse(ctx); err != nil { + return err + } else if inuse { + return errTierBackendNotEmpty + } } config.Lock() delete(config.Tiers, tier) @@ -262,7 +274,7 @@ func (config *TierConfigMgr) Remove(ctx context.Context, tier string) error { // Verify verifies if tier's config is valid by performing all supported // operations on the corresponding warmbackend. 
func (config *TierConfigMgr) Verify(ctx context.Context, tier string) error { - d, err := config.getDriver(tier) + d, err := config.getDriver(ctx, tier) if err != nil { return err } @@ -359,7 +371,7 @@ func (config *TierConfigMgr) Edit(ctx context.Context, tierName string, creds ma cfg.MinIO.SecretKey = creds.SecretKey } - d, err := newWarmBackend(ctx, cfg) + d, err := newWarmBackend(ctx, cfg, true) if err != nil { return err } @@ -383,7 +395,7 @@ func (config *TierConfigMgr) Bytes() ([]byte, error) { } // getDriver returns a warmBackend interface object initialized with remote tier config matching tierName -func (config *TierConfigMgr) getDriver(tierName string) (d WarmBackend, err error) { +func (config *TierConfigMgr) getDriver(ctx context.Context, tierName string) (d WarmBackend, err error) { config.Lock() defer config.Unlock() @@ -399,7 +411,7 @@ func (config *TierConfigMgr) getDriver(tierName string) (d WarmBackend, err erro if !ok { return nil, errTierNotFound } - d, err = newWarmBackend(context.TODO(), t) + d, err = newWarmBackend(ctx, t, false) if err != nil { return nil, err } @@ -463,6 +475,10 @@ func (config *TierConfigMgr) configReader(ctx context.Context) (*PutObjReader, * // Reload updates config by reloading remote tier config from config store. func (config *TierConfigMgr) Reload(ctx context.Context, objAPI ObjectLayer) error { newConfig, err := loadTierConfig(ctx, objAPI) + + config.Lock() + defer config.Unlock() + switch err { case nil: break @@ -475,20 +491,12 @@ func (config *TierConfigMgr) Reload(ctx context.Context, objAPI ObjectLayer) err return err } - config.Lock() - defer config.Unlock() // Reset drivercache built using current config - for k := range config.drivercache { - delete(config.drivercache, k) - } + clear(config.drivercache) // Remove existing tier configs - for k := range config.Tiers { - delete(config.Tiers, k) - } + clear(config.Tiers) // Copy over the new tier configs - for tier, cfg := range newConfig.Tiers { - config.Tiers[tier] = cfg - } + maps.Copy(config.Tiers, newConfig.Tiers) config.lastRefreshedAt = UTCNow() return nil } @@ -534,7 +542,7 @@ func (config *TierConfigMgr) refreshTierConfig(ctx context.Context, objAPI Objec case <-t.C: err := config.Reload(ctx, objAPI) if err != nil { - logger.LogIf(ctx, err) + tierLogIf(ctx, err) } } t.Reset(tierCfgRefresh + randInterval()) @@ -553,7 +561,7 @@ func loadTierConfig(ctx context.Context, objAPI ObjectLayer) (*TierConfigMgr, er } if len(data) <= 4 { - return nil, fmt.Errorf("tierConfigInit: no data") + return nil, errors.New("tierConfigInit: no data") } // Read header diff --git a/cmd/tier_gen.go b/cmd/tier_gen.go index f30a39a9797a8..38efbcad9a097 100644 --- a/cmd/tier_gen.go +++ b/cmd/tier_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package cmd + import ( "github.com/minio/madmin-go/v3" "github.com/tinylib/msgp/msgp" @@ -35,19 +35,17 @@ func (z *TierConfigMgr) DecodeMsg(dc *msgp.Reader) (err error) { if z.Tiers == nil { z.Tiers = make(map[string]madmin.TierConfig, zb0002) } else if len(z.Tiers) > 0 { - for key := range z.Tiers { - delete(z.Tiers, key) - } + clear(z.Tiers) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 madmin.TierConfig za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Tiers") return } + var za0002 madmin.TierConfig err = za0002.DecodeMsg(dc) if err != nil { err = msgp.WrapError(err, "Tiers", za0001) @@ -140,14 +138,12 @@ func (z *TierConfigMgr) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.Tiers == nil { z.Tiers = make(map[string]madmin.TierConfig, zb0002) } else if len(z.Tiers) > 0 { - for key := range z.Tiers { - delete(z.Tiers, key) - } + clear(z.Tiers) } for zb0002 > 0 { - var za0001 string var za0002 madmin.TierConfig zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Tiers") diff --git a/cmd/tier_gen_test.go b/cmd/tier_gen_test.go index 174c215b1c960..f6bdf5cfd15c2 100644 --- a/cmd/tier_gen_test.go +++ b/cmd/tier_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "bytes" "testing" diff --git a/cmd/tier_test.go b/cmd/tier_test.go index 9cf62b8d9b533..2b2345c2ac264 100644 --- a/cmd/tier_test.go +++ b/cmd/tier_test.go @@ -27,10 +27,10 @@ func TestTierMetrics(t *testing.T) { globalTierMetrics.Observe(tier, 200*time.Millisecond) expSuccess := 10 expFailure := 5 - for i := 0; i < expSuccess; i++ { + for range expSuccess { globalTierMetrics.logSuccess(tier) } - for i := 0; i < expFailure; i++ { + for range expFailure { globalTierMetrics.logFailure(tier) } metrics := globalTierMetrics.Report() diff --git a/cmd/typed-errors.go b/cmd/typed-errors.go index ded5735bb2036..58f2590e43cdb 100644 --- a/cmd/typed-errors.go +++ b/cmd/typed-errors.go @@ -75,6 +75,9 @@ var errNoSuchServiceAccount = errors.New("Specified service account does not exi // error returned when temporary account is not found var errNoSuchTempAccount = errors.New("Specified temporary account does not exist") +// error returned when access key is not found +var errNoSuchAccessKey = errors.New("Specified access key does not exist") + // error returned in IAM subsystem when an account doesn't exist. 
var errNoSuchAccount = errors.New("Specified account does not exist") @@ -119,3 +122,12 @@ var errInvalidMaxParts = errors.New("Part number is greater than the maximum all // error returned for session policies > 2048 var errSessionPolicyTooLarge = errors.New("Session policy should not exceed 2048 characters") + +// error returned in SFTP when user used public key without certificate +var errSftpPublicKeyWithoutCert = errors.New("public key authentication without certificate is not accepted") + +// error returned in SFTP when user used certificate which does not contain principal(s) +var errSftpCertWithoutPrincipals = errors.New("certificates without principal(s) are not accepted") + +// error returned when group name contains reserved characters +var errGroupNameContainsReservedChars = errors.New("Group name contains reserved characters '=' or ','") diff --git a/cmd/untar.go b/cmd/untar.go index afbae59f81010..0f8c428a6cd8d 100644 --- a/cmd/untar.go +++ b/cmd/untar.go @@ -36,8 +36,8 @@ import ( "github.com/klauspost/compress/s2" "github.com/klauspost/compress/zstd" gzip "github.com/klauspost/pgzip" - "github.com/minio/minio/internal/logger" - "github.com/pierrec/lz4" + xioutil "github.com/minio/minio/internal/ioutil" + "github.com/pierrec/lz4/v4" ) // Max bzip2 concurrency across calls. 50% of GOMAXPROCS. @@ -183,7 +183,6 @@ func untar(ctx context.Context, r io.Reader, putObject func(reader io.Reader, in header, err := tarReader.Next() switch { - // if no more files are found return case err == io.EOF: wg.Wait() @@ -227,13 +226,10 @@ func untar(ctx context.Context, r io.Reader, putObject func(reader io.Reader, in // Do small files async n++ - if header.Size <= smallFileThreshold { + if header.Size <= xioutil.MediumBlock { asyncWriters <- struct{}{} - b := poolBuf128k.Get().([]byte) - if cap(b) < int(header.Size) { - b = make([]byte, smallFileThreshold) - } - b = b[:header.Size] + bufp := xioutil.ODirectPoolMedium.Get() + b := (*bufp)[:header.Size] if _, err := io.ReadFull(tarReader, b); err != nil { return err } @@ -244,12 +240,11 @@ func untar(ctx context.Context, r io.Reader, putObject func(reader io.Reader, in rc.Close() <-asyncWriters wg.Done() - //nolint:staticcheck // SA6002 we are fine with the tiny alloc - poolBuf128k.Put(b) + xioutil.ODirectPoolMedium.Put(bufp) }() if err := putObject(&rc, fi, name); err != nil { if o.ignoreErrs { - logger.LogIf(ctx, err) + s3LogIf(ctx, err) return } asyncErrMu.Lock() @@ -273,7 +268,7 @@ func untar(ctx context.Context, r io.Reader, putObject func(reader io.Reader, in if err := putObject(&rc, header.FileInfo(), name); err != nil { rc.Close() if o.ignoreErrs { - logger.LogIf(ctx, err) + s3LogIf(ctx, err) continue } return err diff --git a/cmd/update.go b/cmd/update.go index 0d1fc03ad4b14..ff3fcd0387599 100644 --- a/cmd/update.go +++ b/cmd/update.go @@ -38,8 +38,8 @@ import ( "github.com/klauspost/compress/zstd" xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/env" - xnet "github.com/minio/pkg/v2/net" + "github.com/minio/pkg/v3/env" + xnet "github.com/minio/pkg/v3/net" "github.com/minio/selfupdate" gopsutilcpu "github.com/shirou/gopsutil/v3/cpu" "github.com/valyala/bytebufferpool" @@ -50,8 +50,13 @@ const ( updateTimeout = 10 * time.Second ) -// For windows our files have .exe additionally. -var minioReleaseWindowsInfoURL = MinioReleaseURL + "minio.exe.sha256sum" +var ( + // Newer official download info URLs appear earlier below. 
+ minioReleaseInfoURL = MinioReleaseURL + "minio.sha256sum" + + // For windows our files have .exe additionally. + minioReleaseWindowsInfoURL = MinioReleaseURL + "minio.exe.sha256sum" +) // minioVersionToReleaseTime - parses a standard official release // MinIO version string. @@ -142,7 +147,7 @@ func IsDocker() bool { } // Log error, as we will not propagate it to caller - logger.LogIf(GlobalContext, err) + internalLogIf(GlobalContext, err) return err == nil } @@ -172,7 +177,7 @@ func IsBOSH() bool { } // Log error, as we will not propagate it to caller - logger.LogIf(GlobalContext, err) + internalLogIf(GlobalContext, err) return err == nil } @@ -189,7 +194,7 @@ func getHelmVersion(helmInfoFilePath string) string { if !osIsNotExist(err) { reqInfo := (&logger.ReqInfo{}).AppendTags("helmInfoFilePath", helmInfoFilePath) ctx := logger.SetReqInfo(GlobalContext, reqInfo) - logger.LogIf(ctx, err) + internalLogIf(ctx, err) } return "" } @@ -420,7 +425,7 @@ func parseReleaseData(data string) (sha256Sum []byte, releaseTime time.Time, rel func getUpdateTransport(timeout time.Duration) http.RoundTripper { var updateTransport http.RoundTripper = &http.Transport{ Proxy: http.ProxyFromEnvironment, - DialContext: xhttp.NewCustomDialContext(timeout, globalTCPOptions), + DialContext: xhttp.NewInternodeDialContext(timeout, globalTCPOptions), IdleConnTimeout: timeout, TLSHandshakeTimeout: timeout, ExpectContinueTimeout: timeout, @@ -440,15 +445,15 @@ func getLatestReleaseTime(u *url.URL, timeout time.Duration, mode string) (sha25 } sha256Sum, releaseTime, _, err = parseReleaseData(data) - return + return sha256Sum, releaseTime, err } const ( // Kubernetes deployment doc link. - kubernetesDeploymentDoc = "https://min.io/docs/minio/kubernetes/upstream/index.html#quickstart-for-kubernetes" + kubernetesDeploymentDoc = "https://docs.min.io/community/minio-object-store/operations/deployments/kubernetes.html" // Mesos deployment doc link. - mesosDeploymentDoc = "https://min.io/docs/minio/kubernetes/upstream/index.html#quickstart-for-kubernetes" + mesosDeploymentDoc = "https://docs.min.io/community/minio-object-store/operations/deployments/kubernetes.html" ) func getDownloadURL(releaseTag string) (downloadURL string) { diff --git a/cmd/update_fips.go b/cmd/update_fips.go deleted file mode 100644 index d14c1d5ce89f2..0000000000000 --- a/cmd/update_fips.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build fips -// +build fips - -// Copyright (c) 2015-2021 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package cmd - -// Newer official download info URLs appear earlier below. 
-var minioReleaseInfoURL = MinioReleaseURL + "minio.fips.sha256sum" diff --git a/cmd/update_nofips.go b/cmd/update_nofips.go deleted file mode 100644 index baeabc6e3717b..0000000000000 --- a/cmd/update_nofips.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build !fips -// +build !fips - -// Copyright (c) 2015-2021 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package cmd - -// Newer official download info URLs appear earlier below. -var minioReleaseInfoURL = MinioReleaseURL + "minio.sha256sum" diff --git a/cmd/update_test.go b/cmd/update_test.go index d36dcd696b235..e1af9e38c9aa1 100644 --- a/cmd/update_test.go +++ b/cmd/update_test.go @@ -98,12 +98,6 @@ func TestReleaseTagToNFromTimeConversion(t *testing.T) { } func TestDownloadURL(t *testing.T) { - sci := globalIsCICD - globalIsCICD = false - defer func() { - globalIsCICD = sci - }() - minioVersion1 := releaseTimeToReleaseTag(UTCNow()) durl := getDownloadURL(minioVersion1) if IsDocker() { @@ -164,9 +158,6 @@ func TestUserAgent(t *testing.T) { } for i, testCase := range testCases { - sci := globalIsCICD - globalIsCICD = false - if testCase.envName != "" { t.Setenv(testCase.envName, testCase.envValue) if testCase.envName == "MESOS_CONTAINER_NAME" { @@ -182,7 +173,6 @@ func TestUserAgent(t *testing.T) { if !strings.Contains(str, expectedStr) { t.Errorf("Test %d: expected: %s, got: %s", i+1, expectedStr, str) } - globalIsCICD = sci os.Unsetenv("MARATHON_APP_LABEL_DCOS_PACKAGE_VERSION") os.Unsetenv(testCase.envName) } @@ -190,12 +180,6 @@ func TestUserAgent(t *testing.T) { // Tests if the environment we are running is in DCOS. func TestIsDCOS(t *testing.T) { - sci := globalIsCICD - globalIsCICD = false - defer func() { - globalIsCICD = sci - }() - t.Setenv("MESOS_CONTAINER_NAME", "mesos-1111") dcos := IsDCOS() if !dcos { @@ -210,12 +194,6 @@ func TestIsDCOS(t *testing.T) { // Tests if the environment we are running is in kubernetes. func TestIsKubernetes(t *testing.T) { - sci := globalIsCICD - globalIsCICD = false - defer func() { - globalIsCICD = sci - }() - t.Setenv("KUBERNETES_SERVICE_HOST", "10.11.148.5") kubernetes := IsKubernetes() if !kubernetes { @@ -232,7 +210,7 @@ func TestIsKubernetes(t *testing.T) { // Tests if the environment we are running is Helm chart. func TestGetHelmVersion(t *testing.T) { createTempFile := func(content string) string { - tmpfile, err := os.CreateTemp("", "helm-testfile-") + tmpfile, err := os.CreateTemp(t.TempDir(), "helm-testfile-") if err != nil { t.Fatalf("Unable to create temporary file. %s", err) } @@ -290,7 +268,7 @@ func TestDownloadReleaseData(t *testing.T) { }{ {httpServer1.URL, "", nil}, {httpServer2.URL, "fbe246edbd382902db9a4035df7dce8cb441357d minio.RELEASE.2016-10-07T01-16-39Z\n", nil}, - {httpServer3.URL, "", fmt.Errorf("Error downloading URL " + httpServer3.URL + ". 
Response: 404 Not Found")}, + {httpServer3.URL, "", fmt.Errorf("Error downloading URL %s. Response: 404 Not Found", httpServer3.URL)}, } for _, testCase := range testCases { diff --git a/cmd/user-provider-utils.go b/cmd/user-provider-utils.go new file mode 100644 index 0000000000000..086b8de8ec54e --- /dev/null +++ b/cmd/user-provider-utils.go @@ -0,0 +1,141 @@ +// Copyright (c) 2015-2023 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package cmd + +import ( + "context" + "strings" + + "github.com/minio/madmin-go/v3" + "github.com/minio/minio/internal/auth" +) + +// getUserWithProvider - returns the appropriate internal username based on the user provider. +// if validate is true, an error is returned if the user does not exist. +func getUserWithProvider(ctx context.Context, userProvider, user string, validate bool) (string, error) { + switch userProvider { + case madmin.BuiltinProvider: + if validate { + if _, ok := globalIAMSys.GetUser(ctx, user); !ok { + return "", errNoSuchUser + } + } + return user, nil + case madmin.LDAPProvider: + if globalIAMSys.GetUsersSysType() != LDAPUsersSysType { + return "", errIAMActionNotAllowed + } + res, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(user) + if res == nil { + err = errNoSuchUser + } + if err != nil { + if validate { + return "", err + } + if !globalIAMSys.LDAPConfig.ParsesAsDN(user) { + return "", errNoSuchUser + } + } + return res.NormDN, nil + default: + return "", errIAMActionNotAllowed + } +} + +// guessUserProvider - guesses the user provider based on the access key and claims. +func guessUserProvider(credentials auth.Credentials) string { + if !credentials.IsServiceAccount() && !credentials.IsTemp() { + return madmin.BuiltinProvider // regular users are always internal + } + + claims := credentials.Claims + if _, ok := claims[ldapUser]; ok { + return madmin.LDAPProvider // ldap users + } + + if _, ok := claims[subClaim]; ok { + providerPrefix, _, found := strings.Cut(credentials.ParentUser, getKeySeparator()) + if found { + return providerPrefix // this is true for certificate and custom providers + } + return madmin.OpenIDProvider // openid users are already hashed, so no separator + } + + return madmin.BuiltinProvider // default to internal +} + +// getProviderInfoFromClaims - returns the provider info from the claims. 
+func populateProviderInfoFromClaims(claims map[string]any, provider string, resp *madmin.InfoAccessKeyResp) { + resp.UserProvider = provider + switch provider { + case madmin.LDAPProvider: + resp.LDAPSpecificInfo = getLDAPInfoFromClaims(claims) + case madmin.OpenIDProvider: + resp.OpenIDSpecificInfo = getOpenIDInfoFromClaims(claims) + } +} + +func getOpenIDCfgNameFromClaims(claims map[string]any) (string, bool) { + roleArn := claims[roleArnClaim] + + s := globalServerConfig.Clone() + configs, err := globalIAMSys.OpenIDConfig.GetConfigList(s) + if err != nil { + return "", false + } + for _, cfg := range configs { + if cfg.RoleARN == roleArn { + return cfg.Name, true + } + } + return "", false +} + +func getOpenIDInfoFromClaims(claims map[string]any) madmin.OpenIDSpecificAccessKeyInfo { + info := madmin.OpenIDSpecificAccessKeyInfo{} + + cfgName, ok := getOpenIDCfgNameFromClaims(claims) + if !ok { + return info + } + + info.ConfigName = cfgName + if displayNameClaim := globalIAMSys.OpenIDConfig.GetUserReadableClaim(cfgName); displayNameClaim != "" { + name, _ := claims[displayNameClaim].(string) + info.DisplayName = name + info.DisplayNameClaim = displayNameClaim + } + if idClaim := globalIAMSys.OpenIDConfig.GetUserIDClaim(cfgName); idClaim != "" { + id, _ := claims[idClaim].(string) + info.UserID = id + info.UserIDClaim = idClaim + } + + return info +} + +func getLDAPInfoFromClaims(claims map[string]any) madmin.LDAPSpecificAccessKeyInfo { + info := madmin.LDAPSpecificAccessKeyInfo{} + + if name, ok := claims[ldapUser].(string); ok { + info.Username = name + } + + return info +} diff --git a/cmd/utils.go b/cmd/utils.go index c50a27f97aa40..21fe5c61a7efb 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -20,13 +20,15 @@ package cmd import ( "bytes" "context" + "crypto/md5" "crypto/tls" + "encoding/base64" "encoding/json" "encoding/xml" "errors" "fmt" "io" - "net" + "maps" "net/http" "net/url" "os" @@ -35,21 +37,23 @@ import ( "runtime" "runtime/pprof" "runtime/trace" + "slices" "strings" "sync" "time" - "github.com/coreos/go-oidc" + "github.com/coreos/go-oidc/v3/oidc" "github.com/dustin/go-humanize" "github.com/felixge/fgprof" "github.com/minio/madmin-go/v3" + xaudit "github.com/minio/madmin-go/v3/logger/audit" "github.com/minio/minio-go/v7" miniogopolicy "github.com/minio/minio-go/v7/pkg/policy" "github.com/minio/minio/internal/config" "github.com/minio/minio/internal/config/api" xtls "github.com/minio/minio/internal/config/identity/tls" - "github.com/minio/minio/internal/deadlineconn" - "github.com/minio/minio/internal/fips" + "github.com/minio/minio/internal/config/storageclass" + "github.com/minio/minio/internal/crypto" "github.com/minio/minio/internal/handlers" "github.com/minio/minio/internal/hash" xhttp "github.com/minio/minio/internal/http" @@ -58,10 +62,9 @@ import ( "github.com/minio/minio/internal/logger/message/audit" "github.com/minio/minio/internal/rest" "github.com/minio/mux" - "github.com/minio/pkg/v2/certs" - "github.com/minio/pkg/v2/env" - pkgAudit "github.com/minio/pkg/v2/logger/message/audit" - xnet "github.com/minio/pkg/v2/net" + "github.com/minio/pkg/v3/certs" + "github.com/minio/pkg/v3/env" + xnet "github.com/minio/pkg/v3/net" "golang.org/x/oauth2" ) @@ -216,9 +219,7 @@ func path2BucketObject(s string) (bucket, prefix string) { // If input is nil an empty map is returned, not nil. 
func cloneMSS(v map[string]string) map[string]string { r := make(map[string]string, len(v)) - for k, v := range v { - r[k] = v - } + maps.Copy(r, v) return r } @@ -235,7 +236,7 @@ func nopCharsetConverter(label string, input io.Reader) (io.Reader, error) { } // xmlDecoder provide decoded value in xml. -func xmlDecoder(body io.Reader, v interface{}, size int64) error { +func xmlDecoder(body io.Reader, v any, size int64) error { var lbody io.Reader if size > 0 { lbody = io.LimitReader(body, size) @@ -255,10 +256,28 @@ func xmlDecoder(body io.Reader, v interface{}, size int64) error { return err } -// hasContentMD5 returns true if Content-MD5 header is set. -func hasContentMD5(h http.Header) bool { - _, ok := h[xhttp.ContentMD5] - return ok +// validateLengthAndChecksum returns if a content checksum is set, +// and will replace r.Body with a reader that checks the provided checksum +func validateLengthAndChecksum(r *http.Request) bool { + if mdFive := r.Header.Get(xhttp.ContentMD5); mdFive != "" { + want, err := base64.StdEncoding.DecodeString(mdFive) + if err != nil { + return false + } + r.Body = hash.NewChecker(r.Body, md5.New(), want, r.ContentLength) + return true + } + cs, err := hash.GetContentChecksum(r.Header) + if err != nil { + return false + } + if cs == nil || !cs.Type.IsSet() { + return false + } + if cs.Valid() && !cs.Type.Trailing() { + r.Body = hash.NewChecker(r.Body, cs.Type.Hasher(), cs.Raw, r.ContentLength) + } + return true } // http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html @@ -412,7 +431,12 @@ func startProfiler(profilerType string) (minioProfiler, error) { return nil, err } stop := fgprof.Start(f, fgprof.FormatPprof) + startedAt := time.Now() prof.stopFn = func() ([]byte, error) { + if elapsed := time.Since(startedAt); elapsed < 100*time.Millisecond { + // Light hack around https://github.com/felixge/fgprof/pull/34 + time.Sleep(100*time.Millisecond - elapsed) + } err := stop() if err != nil { return nil, err @@ -572,13 +596,8 @@ func ToS3ETag(etag string) string { // GetDefaultConnSettings returns default HTTP connection settings. func GetDefaultConnSettings() xhttp.ConnSettings { - lookupHost := globalDNSCache.LookupHost - if IsKubernetes() || IsDocker() { - lookupHost = nil - } - return xhttp.ConnSettings{ - LookupHost: lookupHost, + LookupHost: globalDNSCache.LookupHost, DialTimeout: rest.DefaultTimeout, RootCAs: globalRootCAs, TCPOptions: globalTCPOptions, @@ -588,55 +607,28 @@ func GetDefaultConnSettings() xhttp.ConnSettings { // NewInternodeHTTPTransport returns a transport for internode MinIO // connections. 
func NewInternodeHTTPTransport(maxIdleConnsPerHost int) func() http.RoundTripper { - lookupHost := globalDNSCache.LookupHost - if IsKubernetes() || IsDocker() { - lookupHost = nil - } - return xhttp.ConnSettings{ - LookupHost: lookupHost, + LookupHost: globalDNSCache.LookupHost, DialTimeout: rest.DefaultTimeout, RootCAs: globalRootCAs, - CipherSuites: fips.TLSCiphers(), - CurvePreferences: fips.TLSCurveIDs(), + CipherSuites: crypto.TLSCiphers(), + CurvePreferences: crypto.TLSCurveIDs(), EnableHTTP2: false, TCPOptions: globalTCPOptions, }.NewInternodeHTTPTransport(maxIdleConnsPerHost) } -// NewCustomHTTPProxyTransport is used only for proxied requests, specifically -// only supports HTTP/1.1 -func NewCustomHTTPProxyTransport() func() *http.Transport { - lookupHost := globalDNSCache.LookupHost - if IsKubernetes() || IsDocker() { - lookupHost = nil - } - - return xhttp.ConnSettings{ - LookupHost: lookupHost, - DialTimeout: rest.DefaultTimeout, - RootCAs: globalRootCAs, - CipherSuites: fips.TLSCiphers(), - CurvePreferences: fips.TLSCurveIDs(), - EnableHTTP2: false, - TCPOptions: globalTCPOptions, - }.NewCustomHTTPProxyTransport() -} - // NewHTTPTransportWithClientCerts returns a new http configuration // used while communicating with the cloud backends. -func NewHTTPTransportWithClientCerts(clientCert, clientKey string) *http.Transport { - lookupHost := globalDNSCache.LookupHost - if IsKubernetes() || IsDocker() { - lookupHost = nil - } - +func NewHTTPTransportWithClientCerts(clientCert, clientKey string) http.RoundTripper { s := xhttp.ConnSettings{ - LookupHost: lookupHost, - DialTimeout: defaultDialTimeout, - RootCAs: globalRootCAs, - TCPOptions: globalTCPOptions, - EnableHTTP2: false, + LookupHost: globalDNSCache.LookupHost, + DialTimeout: defaultDialTimeout, + RootCAs: globalRootCAs, + CipherSuites: crypto.TLSCiphersBackwardCompatible(), + CurvePreferences: crypto.TLSCurveIDs(), + TCPOptions: globalTCPOptions, + EnableHTTP2: false, } if clientCert != "" && clientKey != "" { @@ -644,12 +636,16 @@ func NewHTTPTransportWithClientCerts(clientCert, clientKey string) *http.Transpo defer cancel() transport, err := s.NewHTTPTransportWithClientCerts(ctx, clientCert, clientKey) if err != nil { - logger.LogIf(ctx, fmt.Errorf("Unable to load client key and cert, please check your client certificate configuration: %w", err)) + internalLogIf(ctx, fmt.Errorf("Unable to load client key and cert, please check your client certificate configuration: %w", err)) + } + if transport == nil { + // Client certs are not readable return default transport. + return s.NewHTTPTransportWithTimeout(1 * time.Minute) } return transport } - return s.NewHTTPTransportWithTimeout(1 * time.Minute) + return globalRemoteTargetTransport } // NewHTTPTransport returns a new http configuration @@ -663,56 +659,27 @@ const defaultDialTimeout = 5 * time.Second // NewHTTPTransportWithTimeout allows setting a timeout. 
func NewHTTPTransportWithTimeout(timeout time.Duration) *http.Transport { - lookupHost := globalDNSCache.LookupHost - if IsKubernetes() || IsDocker() { - lookupHost = nil - } - return xhttp.ConnSettings{ - DialContext: newCustomDialContext(), - LookupHost: lookupHost, - DialTimeout: defaultDialTimeout, - RootCAs: globalRootCAs, - TCPOptions: globalTCPOptions, - EnableHTTP2: false, + LookupHost: globalDNSCache.LookupHost, + DialTimeout: defaultDialTimeout, + RootCAs: globalRootCAs, + TCPOptions: globalTCPOptions, + CipherSuites: crypto.TLSCiphersBackwardCompatible(), + CurvePreferences: crypto.TLSCurveIDs(), + EnableHTTP2: false, }.NewHTTPTransportWithTimeout(timeout) } -// newCustomDialContext setups a custom dialer for any external communication and proxies. -func newCustomDialContext() xhttp.DialContext { - return func(ctx context.Context, network, addr string) (net.Conn, error) { - dialer := &net.Dialer{ - Timeout: 15 * time.Second, - KeepAlive: 30 * time.Second, - } - - conn, err := dialer.DialContext(ctx, network, addr) - if err != nil { - return nil, err - } - - dconn := deadlineconn.New(conn). - WithReadDeadline(globalConnReadDeadline). - WithWriteDeadline(globalConnWriteDeadline) - - return dconn, nil - } -} - // NewRemoteTargetHTTPTransport returns a new http configuration // used while communicating with the remote replication targets. func NewRemoteTargetHTTPTransport(insecure bool) func() *http.Transport { - lookupHost := globalDNSCache.LookupHost - if IsKubernetes() || IsDocker() { - lookupHost = nil - } - return xhttp.ConnSettings{ - DialContext: newCustomDialContext(), - LookupHost: lookupHost, - RootCAs: globalRootCAs, - TCPOptions: globalTCPOptions, - EnableHTTP2: false, + LookupHost: globalDNSCache.LookupHost, + RootCAs: globalRootCAs, + CipherSuites: crypto.TLSCiphersBackwardCompatible(), + CurvePreferences: crypto.TLSCurveIDs(), + TCPOptions: globalTCPOptions, + EnableHTTP2: false, }.NewRemoteTargetHTTPTransport(insecure) } @@ -722,7 +689,7 @@ func NewRemoteTargetHTTPTransport(insecure bool) func() *http.Transport { func ceilFrac(numerator, denominator int64) (ceil int64) { if denominator == 0 { // do nothing on invalid input - return + return ceil } // Make denominator positive if denominator < 0 { @@ -733,7 +700,7 @@ func ceilFrac(numerator, denominator int64) (ceil int64) { if numerator > 0 && numerator%denominator != 0 { ceil++ } - return + return ceil } // cleanMinioInternalMetadataKeys removes X-Amz-Meta- prefix from minio internal @@ -876,14 +843,11 @@ func lcp(strs []string, pre bool) string { return "" } // maximum possible length - maxl := xfixl - if strl < maxl { - maxl = strl - } + maxl := min(strl, xfixl) // compare letters if pre { // prefix, iterate left to right - for i := 0; i < maxl; i++ { + for i := range maxl { if xfix[i] != str[i] { xfix = xfix[:i] break @@ -891,7 +855,7 @@ func lcp(strs []string, pre bool) string { } } else { // suffix, iterate right to left - for i := 0; i < maxl; i++ { + for i := range maxl { xi := xfixl - i - 1 si := strl - i - 1 if xfix[xi] != str[si] { @@ -968,7 +932,7 @@ type AuditLogOptions struct { Object string VersionID string Error string - Tags map[string]interface{} + Tags map[string]string } // sends audit logs for internal subsystem activity @@ -976,27 +940,24 @@ func auditLogInternal(ctx context.Context, opts AuditLogOptions) { if len(logger.AuditTargets()) == 0 { return } + entry := audit.NewEntry(globalDeploymentID()) entry.Trigger = opts.Event entry.Event = opts.Event entry.Error = opts.Error entry.API.Name = 
opts.APIName entry.API.Bucket = opts.Bucket - entry.API.Objects = []pkgAudit.ObjectVersion{{ObjectName: opts.Object, VersionID: opts.VersionID}} + entry.API.Objects = []xaudit.ObjectVersion{{ObjectName: opts.Object, VersionID: opts.VersionID}} entry.API.Status = opts.Status - if len(opts.Tags) > 0 { - entry.Tags = make(map[string]interface{}, len(opts.Tags)) - for k, v := range opts.Tags { - entry.Tags[k] = v - } - } else { - entry.Tags = make(map[string]interface{}) + entry.Tags = make(map[string]any, len(opts.Tags)) + for k, v := range opts.Tags { + entry.Tags[k] = v } // Merge tag information if found - this is currently needed for tags // set during decommissioning. if reqInfo := logger.GetReqInfo(ctx); reqInfo != nil { - reqInfo.PopulateTagsMap(entry.Tags) + reqInfo.PopulateTagsMap(opts.Tags) } ctx = logger.SetAuditEntry(ctx, &entry) logger.AuditLog(ctx, nil, nil, nil) @@ -1021,11 +982,11 @@ func newTLSConfig(getCert certs.GetCertificateFunc) *tls.Config { } if secureCiphers := env.Get(api.EnvAPISecureCiphers, config.EnableOn) == config.EnableOn; secureCiphers { - tlsConfig.CipherSuites = fips.TLSCiphers() + tlsConfig.CipherSuites = crypto.TLSCiphers() } else { - tlsConfig.CipherSuites = fips.TLSCiphersBackwardCompatible() + tlsConfig.CipherSuites = crypto.TLSCiphersBackwardCompatible() } - tlsConfig.CurvePreferences = fips.TLSCurveIDs() + tlsConfig.CurvePreferences = crypto.TLSCurveIDs() return tlsConfig } @@ -1189,16 +1150,40 @@ func ptr[T any](a T) *T { return &a } -func max(a, b int) int { - if a > b { - return a +// sleepContext sleeps for d duration or until ctx is done. +func sleepContext(ctx context.Context, d time.Duration) error { + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(d): + } + return nil +} + +// helper type to return either item or error. +type itemOrErr[V any] struct { + Item V + Err error +} + +func filterStorageClass(ctx context.Context, s string) string { + // Veeam 14.0 and later clients are not compatible with custom storage classes. + if globalVeeamForceSC != "" && s != storageclass.STANDARD && s != storageclass.RRS && isVeeamClient(ctx) { + return globalVeeamForceSC } - return b + return s +} + +type ordered interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr | ~float32 | ~float64 | string } -func min(a, b int) int { - if a < b { - return a +// mapKeysSorted returns the map keys as a sorted slice. +func mapKeysSorted[Map ~map[K]V, K ordered, V any](m Map) []K { + res := make([]K, 0, len(m)) + for k := range m { + res = append(res, k) } - return b + slices.Sort(res) + return res } diff --git a/cmd/utils_test.go b/cmd/utils_test.go index bdbf17f4dc024..6d4e26a38b897 100644 --- a/cmd/utils_test.go +++ b/cmd/utils_test.go @@ -163,7 +163,6 @@ func TestPath2BucketObjectName(t *testing.T) { // Validate all test cases. 
for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { bucketName, objectName := path2BucketObject(testCase.path) if bucketName != testCase.bucket { diff --git a/cmd/veeam-sos-api.go b/cmd/veeam-sos-api.go index 61f5f38a41f14..33ff9e1327648 100644 --- a/cmd/veeam-sos-api.go +++ b/cmd/veeam-sos-api.go @@ -22,8 +22,11 @@ import ( "context" "encoding/xml" "io" + "os" + "strings" "github.com/minio/madmin-go/v3" + "github.com/minio/minio/internal/logger" ) // From Veeam-SOSAPI_1.0_Document_v1.02d.pdf @@ -83,6 +86,11 @@ type apiEndpoints struct { STSEndpoint string `xml:"STSEndpoint"` } +// globalVeeamForceSC is set by the environment variable _MINIO_VEEAM_FORCE_SC +// This will override the storage class returned by the storage backend if it is non-standard +// and we detect a Veeam client by checking the User Agent. +var globalVeeamForceSC = os.Getenv("_MINIO_VEEAM_FORCE_SC") + type systemInfo struct { XMLName xml.Name `xml:"SystemInfo" json:"-"` ProtocolVersion string `xml:"ProtocolVersion"` @@ -115,6 +123,7 @@ type capacityInfo struct { const ( systemXMLObject = ".system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/system.xml" capacityXMLObject = ".system-d26a9498-cb7c-4a87-a44a-8ae204f5ba6c/capacity.xml" + veeamAgentSubstr = "APN/1.0 Veeam/1.0" ) func isVeeamSOSAPIObject(object string) bool { @@ -126,6 +135,12 @@ func isVeeamSOSAPIObject(object string) bool { } } +// isVeeamClient - returns true if the request is from Veeam client. +func isVeeamClient(ctx context.Context) bool { + ri := logger.GetReqInfo(ctx) + return ri != nil && strings.Contains(ri.UserAgent, veeamAgentSubstr) +} + func veeamSOSAPIHeadObject(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) { gr, err := veeamSOSAPIGetObject(ctx, bucket, object, nil, opts) if gr != nil { @@ -156,7 +171,7 @@ func veeamSOSAPIGetObject(ctx context.Context, bucket, object string, rs *HTTPRa } q, _ := globalBucketQuotaSys.Get(ctx, bucket) - binfo, _ := globalBucketQuotaSys.GetBucketUsageInfo(bucket) + binfo := globalBucketQuotaSys.GetBucketUsageInfo(ctx, bucket) ci := capacityInfo{ Used: int64(binfo.Size), diff --git a/cmd/warm-backend-azure.go b/cmd/warm-backend-azure.go index 256835670b638..bb087e749cca8 100644 --- a/cmd/warm-backend-azure.go +++ b/cmd/warm-backend-azure.go @@ -19,28 +19,39 @@ package cmd import ( "context" - "encoding/base64" "errors" "fmt" "io" "net/http" - "net/url" "strings" - "time" - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/Azure/go-autorest/autorest/adal" - "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/azure-sdk-for-go/sdk/azcore" + "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob" "github.com/minio/madmin-go/v3" ) type warmBackendAzure struct { - serviceURL azblob.ServiceURL + clnt *azblob.Client Bucket string Prefix string StorageClass string } +func (az *warmBackendAzure) tier() *blob.AccessTier { + if az.StorageClass == "" { + return nil + } + for _, t := range blob.PossibleAccessTierValues() { + if strings.EqualFold(az.StorageClass, string(t)) { + return &t + } + } + return nil +} + func (az *warmBackendAzure) getDest(object string) string { destObj := object if az.Prefix != "" { @@ -49,150 +60,123 @@ func (az *warmBackendAzure) getDest(object string) string { return destObj } -func (az *warmBackendAzure) tier() azblob.AccessTierType { - for _, 
t := range azblob.PossibleAccessTierTypeValues() { - if strings.EqualFold(az.StorageClass, string(t)) { - return t - } +func (az *warmBackendAzure) PutWithMeta(ctx context.Context, object string, r io.Reader, length int64, meta map[string]string) (remoteVersionID, error) { + azMeta := map[string]*string{} + for k, v := range meta { + azMeta[k] = to.Ptr(v) } - return azblob.AccessTierType("") + resp, err := az.clnt.UploadStream(ctx, az.Bucket, az.getDest(object), io.LimitReader(r, length), &azblob.UploadStreamOptions{ + Concurrency: 4, + AccessTier: az.tier(), // set tier if specified + Metadata: azMeta, + }) + if err != nil { + return "", azureToObjectError(err, az.Bucket, az.getDest(object)) + } + vid := "" + if resp.VersionID != nil { + vid = *resp.VersionID + } + return remoteVersionID(vid), nil } -// FIXME: add support for remote version ID in Azure remote tier and remove -// this. Currently it's a no-op. - func (az *warmBackendAzure) Put(ctx context.Context, object string, r io.Reader, length int64) (remoteVersionID, error) { - blobURL := az.serviceURL.NewContainerURL(az.Bucket).NewBlockBlobURL(az.getDest(object)) - // set tier if specified - - if az.StorageClass != "" { - if _, err := blobURL.SetTier(ctx, az.tier(), azblob.LeaseAccessConditions{}, azblob.RehydratePriorityStandard); err != nil { - return "", azureToObjectError(err, az.Bucket, object) - } - } - res, err := azblob.UploadStreamToBlockBlob(ctx, r, blobURL, azblob.UploadStreamToBlockBlobOptions{}) - if err != nil { - return "", azureToObjectError(err, az.Bucket, object) - } - return remoteVersionID(res.Version()), nil + return az.PutWithMeta(ctx, object, r, length, map[string]string{}) } func (az *warmBackendAzure) Get(ctx context.Context, object string, rv remoteVersionID, opts WarmBackendGetOpts) (r io.ReadCloser, err error) { if opts.startOffset < 0 { return nil, InvalidRange{} } - blobURL := az.serviceURL.NewContainerURL(az.Bucket).NewBlobURL(az.getDest(object)) - blob, err := blobURL.Download(ctx, opts.startOffset, opts.length, azblob.BlobAccessConditions{}, false, azblob.ClientProvidedKeyOptions{}) + resp, err := az.clnt.DownloadStream(ctx, az.Bucket, az.getDest(object), &azblob.DownloadStreamOptions{ + Range: blob.HTTPRange{Offset: opts.startOffset, Count: opts.length}, + }) if err != nil { - return nil, azureToObjectError(err, az.Bucket, object) + return nil, azureToObjectError(err, az.Bucket, az.getDest(object)) } - rc := blob.Body(azblob.RetryReaderOptions{}) - return rc, nil + return resp.Body, nil } func (az *warmBackendAzure) Remove(ctx context.Context, object string, rv remoteVersionID) error { - blob := az.serviceURL.NewContainerURL(az.Bucket).NewBlobURL(az.getDest(object)) - _, err := blob.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) - return azureToObjectError(err, az.Bucket, object) + _, err := az.clnt.DeleteBlob(ctx, az.Bucket, az.getDest(object), &azblob.DeleteBlobOptions{}) + return azureToObjectError(err, az.Bucket, az.getDest(object)) } func (az *warmBackendAzure) InUse(ctx context.Context) (bool, error) { - containerURL := az.serviceURL.NewContainerURL(az.Bucket) - resp, err := containerURL.ListBlobsHierarchySegment(ctx, azblob.Marker{}, "/", azblob.ListBlobsSegmentOptions{ - Prefix: az.Prefix, - MaxResults: int32(1), + maxResults := int32(1) + pager := az.clnt.NewListBlobsFlatPager(az.Bucket, &azblob.ListBlobsFlatOptions{ + Prefix: &az.Prefix, + MaxResults: &maxResults, }) - if err != nil { - return false, azureToObjectError(err, az.Bucket, az.Prefix) - } - if 
len(resp.Segment.BlobPrefixes) > 0 || len(resp.Segment.BlobItems) > 0 { - return true, nil - } - return false, nil -} - -func newCredentialFromSP(conf madmin.TierAzure) (azblob.Credential, error) { - oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, conf.SPAuth.TenantID) - if err != nil { - return nil, err - } - spt, err := adal.NewServicePrincipalToken(*oauthConfig, conf.SPAuth.ClientID, conf.SPAuth.ClientSecret, azure.PublicCloud.ResourceIdentifiers.Storage) - if err != nil { - return nil, err + if !pager.More() { + return false, nil } - // Refresh obtains a fresh token - err = spt.Refresh() + resp, err := pager.NextPage(ctx) if err != nil { - return nil, err - } - - tc := azblob.NewTokenCredential(spt.Token().AccessToken, func(tc azblob.TokenCredential) time.Duration { - err := spt.Refresh() - if err != nil { - return 0 - } - // set the new token value - tc.SetToken(spt.Token().AccessToken) - - // get the next token before the current one expires - nextRenewal := float64(time.Until(spt.Token().Expires())) * 0.8 - if nextRenewal <= 0 { - nextRenewal = float64(time.Second) + if strings.Contains(err.Error(), "no more pages") { + return false, nil } + return false, azureToObjectError(err, az.Bucket, az.Prefix) + } - return time.Duration(nextRenewal) - }) - - return tc, nil + return len(resp.Segment.BlobItems) > 0, nil } -func newWarmBackendAzure(conf madmin.TierAzure, _ string) (*warmBackendAzure, error) { - var ( - credential azblob.Credential - err error - ) +type azureConf struct { + madmin.TierAzure +} +func (conf azureConf) Validate() error { switch { case conf.AccountName == "": - return nil, errors.New("the account name is required") + return errors.New("the account name is required") case conf.AccountKey != "" && (conf.SPAuth.TenantID != "" || conf.SPAuth.ClientID != "" || conf.SPAuth.ClientSecret != ""): - return nil, errors.New("multiple authentication mechanisms are provided") + return errors.New("multiple authentication mechanisms are provided") case conf.AccountKey == "" && (conf.SPAuth.TenantID == "" || conf.SPAuth.ClientID == "" || conf.SPAuth.ClientSecret == ""): - return nil, errors.New("no authentication mechanism was provided") + return errors.New("no authentication mechanism was provided") } if conf.Bucket == "" { - return nil, errors.New("no bucket name was provided") + return errors.New("no bucket name was provided") } - if conf.IsSPEnabled() { - credential, err = newCredentialFromSP(conf) - } else { - credential, err = azblob.NewSharedKeyCredential(conf.AccountName, conf.AccountKey) - } - if err != nil { - if _, ok := err.(base64.CorruptInputError); ok { - return nil, errors.New("invalid Azure credentials") - } + return nil +} + +func (conf azureConf) NewClient() (clnt *azblob.Client, clntErr error) { + if err := conf.Validate(); err != nil { return nil, err } - p := azblob.NewPipeline(credential, azblob.PipelineOptions{}) - var u *url.URL - if conf.Endpoint != "" { - u, err = url.Parse(conf.Endpoint) - if err != nil { - return nil, err - } - } else { - u, err = url.Parse(fmt.Sprintf("https://%s.blob.core.windows.net", conf.AccountName)) + + ep := conf.Endpoint + if ep == "" { + ep = fmt.Sprintf("https://%s.blob.core.windows.net", conf.AccountName) + } + + if conf.IsSPEnabled() { + credential, err := azidentity.NewClientSecretCredential(conf.SPAuth.TenantID, conf.SPAuth.ClientID, conf.SPAuth.ClientSecret, &azidentity.ClientSecretCredentialOptions{}) if err != nil { return nil, err } + return azblob.NewClient(ep, credential, 
&azblob.ClientOptions{}) + } + credential, err := azblob.NewSharedKeyCredential(conf.AccountName, conf.AccountKey) + if err != nil { + return nil, err + } + return azblob.NewClientWithSharedKeyCredential(ep, credential, &azblob.ClientOptions{}) +} + +func newWarmBackendAzure(conf madmin.TierAzure, _ string) (*warmBackendAzure, error) { + clnt, err := azureConf{conf}.NewClient() + if err != nil { + return nil, err } - serviceURL := azblob.NewServiceURL(*u, p) + return &warmBackendAzure{ - serviceURL: serviceURL, + clnt: clnt, Bucket: conf.Bucket, Prefix: strings.TrimSuffix(conf.Prefix, slashSeparator), StorageClass: conf.StorageClass, @@ -214,15 +198,15 @@ func azureToObjectError(err error, params ...string) error { object = params[1] } - azureErr, ok := err.(azblob.StorageError) + azureErr, ok := err.(*azcore.ResponseError) if !ok { // We don't interpret non Azure errors. As azure errors will // have StatusCode to help to convert to object errors. return err } - serviceCode := string(azureErr.ServiceCode()) - statusCode := azureErr.Response().StatusCode + serviceCode := azureErr.ErrorCode + statusCode := azureErr.StatusCode return azureCodesToObjectError(err, serviceCode, statusCode, bucket, object) } diff --git a/cmd/warm-backend-gcs.go b/cmd/warm-backend-gcs.go index c2fbde3446a6b..cbfcd422442c1 100644 --- a/cmd/warm-backend-gcs.go +++ b/cmd/warm-backend-gcs.go @@ -47,16 +47,17 @@ func (gcs *warmBackendGCS) getDest(object string) string { return destObj } -// FIXME: add support for remote version ID in GCS remote tier and remove this. -// Currently it's a no-op. - -func (gcs *warmBackendGCS) Put(ctx context.Context, key string, data io.Reader, length int64) (remoteVersionID, error) { +func (gcs *warmBackendGCS) PutWithMeta(ctx context.Context, key string, data io.Reader, length int64, meta map[string]string) (remoteVersionID, error) { object := gcs.client.Bucket(gcs.Bucket).Object(gcs.getDest(key)) - // TODO: set storage class w := object.NewWriter(ctx) if gcs.StorageClass != "" { - w.ObjectAttrs.StorageClass = gcs.StorageClass + w.StorageClass = gcs.StorageClass } + w.Metadata = meta + if _, err := xioutil.Copy(w, data); err != nil { + return "", gcsToObjectError(err, gcs.Bucket, key) + } + if _, err := xioutil.Copy(w, data); err != nil { return "", gcsToObjectError(err, gcs.Bucket, key) } @@ -64,6 +65,12 @@ func (gcs *warmBackendGCS) Put(ctx context.Context, key string, data io.Reader, return "", w.Close() } +// FIXME: add support for remote version ID in GCS remote tier and remove this. +// Currently it's a no-op. +func (gcs *warmBackendGCS) Put(ctx context.Context, key string, data io.Reader, length int64) (remoteVersionID, error) { + return gcs.PutWithMeta(ctx, key, data, length, map[string]string{}) +} + func (gcs *warmBackendGCS) Get(ctx context.Context, key string, rv remoteVersionID, opts WarmBackendGetOpts) (r io.ReadCloser, err error) { // GCS storage decompresses a gzipped object by default and returns the data. 
// Refer to https://cloud.google.com/storage/docs/transcoding#decompressive_transcoding @@ -102,7 +109,7 @@ func (gcs *warmBackendGCS) InUse(ctx context.Context) (bool, error) { return false, nil } -func newWarmBackendGCS(conf madmin.TierGCS, _ string) (*warmBackendGCS, error) { +func newWarmBackendGCS(conf madmin.TierGCS, tier string) (*warmBackendGCS, error) { // Validation code if conf.Creds == "" { return nil, errors.New("empty credentials unsupported") @@ -117,7 +124,11 @@ func newWarmBackendGCS(conf madmin.TierGCS, _ string) (*warmBackendGCS, error) { return nil, err } - client, err := storage.NewClient(context.Background(), option.WithCredentialsJSON(credsJSON), option.WithScopes(storage.ScopeReadWrite)) + client, err := storage.NewClient(context.Background(), + option.WithCredentialsJSON(credsJSON), + option.WithScopes(storage.ScopeReadWrite), + option.WithUserAgent(fmt.Sprintf("gcs-tier-%s", tier)+SlashSeparator+ReleaseTag), + ) if err != nil { return nil, err } diff --git a/cmd/warm-backend-minio.go b/cmd/warm-backend-minio.go index bbbfb764f29db..56ca5120168bf 100644 --- a/cmd/warm-backend-minio.go +++ b/cmd/warm-backend-minio.go @@ -18,11 +18,13 @@ package cmd import ( + "context" "errors" "fmt" + "io" + "math" "net/url" "strings" - "time" "github.com/minio/madmin-go/v3" minio "github.com/minio/minio-go/v7" @@ -35,6 +37,65 @@ type warmBackendMinIO struct { var _ WarmBackend = (*warmBackendMinIO)(nil) +const ( + maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 + maxPartsCount = 10000 + maxPartSize = 1024 * 1024 * 1024 * 5 + minPartSize = 1024 * 1024 * 128 // chosen by us to be optimal for HDDs +) + +// optimalPartInfo - calculate the optimal part info for a given +// object size. +// +// NOTE: Assumption here is that for any object to be uploaded to any S3 compatible +// object storage it will have the following parameters as constants. +// +// maxPartsCount - 10000 +// maxMultipartPutObjectSize - 5TiB +func optimalPartSize(objectSize int64) (partSize int64, err error) { + // object size is '-1' set it to 5TiB. + if objectSize == -1 { + objectSize = maxMultipartPutObjectSize + } + + // object size is larger than supported maximum. + if objectSize > maxMultipartPutObjectSize { + err = errors.New("entity too large") + return partSize, err + } + + configuredPartSize := minPartSize + // Use floats for part size for all calculations to avoid + // overflows during float64 to int64 conversions. + partSizeFlt := float64(objectSize / maxPartsCount) + partSizeFlt = math.Ceil(partSizeFlt/float64(configuredPartSize)) * float64(configuredPartSize) + + // Part size. 
+ partSize = int64(partSizeFlt) + if partSize == 0 { + return minPartSize, nil + } + return partSize, nil +} + +func (m *warmBackendMinIO) PutWithMeta(ctx context.Context, object string, r io.Reader, length int64, meta map[string]string) (remoteVersionID, error) { + partSize, err := optimalPartSize(length) + if err != nil { + return remoteVersionID(""), err + } + res, err := m.client.PutObject(ctx, m.Bucket, m.getDest(object), r, length, minio.PutObjectOptions{ + StorageClass: m.StorageClass, + PartSize: uint64(partSize), + DisableContentSha256: true, + UserMetadata: meta, + }) + return remoteVersionID(res.VersionID), m.ToObjectError(err, object) +} + +func (m *warmBackendMinIO) Put(ctx context.Context, object string, r io.Reader, length int64) (remoteVersionID, error) { + return m.PutWithMeta(ctx, object, r, length, map[string]string{}) +} + func newWarmBackendMinIO(conf madmin.TierMinIO, tier string) (*warmBackendMinIO, error) { // Validation of credentials if conf.AccessKey == "" || conf.SecretKey == "" { @@ -51,14 +112,11 @@ func newWarmBackendMinIO(conf madmin.TierMinIO, tier string) (*warmBackendMinIO, } creds := credentials.NewStaticV4(conf.AccessKey, conf.SecretKey, "") - - getRemoteTierTargetInstanceTransportOnce.Do(func() { - getRemoteTierTargetInstanceTransport = NewHTTPTransportWithTimeout(10 * time.Minute) - }) opts := &minio.Options{ - Creds: creds, - Secure: u.Scheme == "https", - Transport: getRemoteTierTargetInstanceTransport, + Creds: creds, + Secure: u.Scheme == "https", + Transport: globalRemoteTargetTransport, + TrailingHeaders: true, } client, err := minio.New(u.Host, opts) if err != nil { diff --git a/cmd/warm-backend-s3.go b/cmd/warm-backend-s3.go index 28b314c36ddee..5905923b7d3bc 100644 --- a/cmd/warm-backend-s3.go +++ b/cmd/warm-backend-s3.go @@ -25,20 +25,12 @@ import ( "net/http" "net/url" "strings" - "sync" - "time" "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" ) -// getRemoteTierTargetInstanceTransport contains a singleton roundtripper. 
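The optimalPartSize helper introduced above rounds the per-part size up to a multiple of minPartSize (128 MiB) so that even a 5 TiB object stays well under maxPartsCount. A rough, runnable illustration of that rounding, not part of the patch, reusing only the constants shown above:

package main

import (
	"fmt"
	"math"
)

const (
	maxPartsCount = 10000
	minPartSize   = 128 << 20 // 128 MiB, same value as in warm-backend-minio.go above
)

func main() {
	// 1 GiB and 5 TiB objects: the first needs a single 128 MiB slab per part,
	// the second rounds up to five slabs (640 MiB parts -> 8192 parts, under the 10000 cap).
	for _, objectSize := range []int64{1 << 30, 5 << 40} {
		raw := float64(objectSize / maxPartsCount)
		part := int64(math.Ceil(raw/float64(minPartSize)) * float64(minPartSize))
		if part == 0 {
			part = minPartSize
		}
		fmt.Printf("object %d bytes -> part size %d MiB\n", objectSize, part>>20)
	}
}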
-var ( - getRemoteTierTargetInstanceTransport http.RoundTripper - getRemoteTierTargetInstanceTransportOnce sync.Once -) - type warmBackendS3 struct { client *minio.Client core *minio.Core @@ -64,14 +56,19 @@ func (s3 *warmBackendS3) getDest(object string) string { return destObj } -func (s3 *warmBackendS3) Put(ctx context.Context, object string, r io.Reader, length int64) (remoteVersionID, error) { +func (s3 *warmBackendS3) PutWithMeta(ctx context.Context, object string, r io.Reader, length int64, meta map[string]string) (remoteVersionID, error) { res, err := s3.client.PutObject(ctx, s3.Bucket, s3.getDest(object), r, length, minio.PutObjectOptions{ SendContentMd5: true, StorageClass: s3.StorageClass, + UserMetadata: meta, }) return remoteVersionID(res.VersionID), s3.ToObjectError(err, object) } +func (s3 *warmBackendS3) Put(ctx context.Context, object string, r io.Reader, length int64) (remoteVersionID, error) { + return s3.PutWithMeta(ctx, object, r, length, map[string]string{}) +} + func (s3 *warmBackendS3) Get(ctx context.Context, object string, rv remoteVersionID, opts WarmBackendGetOpts) (io.ReadCloser, error) { gopts := minio.GetObjectOptions{} @@ -162,13 +159,11 @@ func newWarmBackendS3(conf madmin.TierS3, tier string) (*warmBackendS3, error) { default: return nil, errors.New("insufficient parameters for S3 backend authentication") } - getRemoteTierTargetInstanceTransportOnce.Do(func() { - getRemoteTierTargetInstanceTransport = NewHTTPTransportWithTimeout(10 * time.Minute) - }) opts := &minio.Options{ Creds: creds, Secure: u.Scheme == "https", - Transport: getRemoteTierTargetInstanceTransport, + Transport: globalRemoteTargetTransport, + Region: conf.Region, } client, err := minio.New(u.Host, opts) if err != nil { diff --git a/cmd/warm-backend.go b/cmd/warm-backend.go index f161c037ead63..91a936004d021 100644 --- a/cmd/warm-backend.go +++ b/cmd/warm-backend.go @@ -18,11 +18,11 @@ package cmd import ( - "bytes" "context" "errors" "fmt" "io" + "strings" "github.com/minio/madmin-go/v3" xhttp "github.com/minio/minio/internal/http" @@ -38,6 +38,7 @@ type WarmBackendGetOpts struct { // WarmBackend provides interface to be implemented by remote tier backends type WarmBackend interface { Put(ctx context.Context, object string, r io.Reader, length int64) (remoteVersionID, error) + PutWithMeta(ctx context.Context, object string, r io.Reader, length int64, meta map[string]string) (remoteVersionID, error) Get(ctx context.Context, object string, rv remoteVersionID, opts WarmBackendGetOpts) (io.ReadCloser, error) Remove(ctx context.Context, object string, rv remoteVersionID) error InUse(ctx context.Context) (bool, error) @@ -48,8 +49,7 @@ const probeObject = "probeobject" // checkWarmBackend checks if tier config credentials have sufficient privileges // to perform all operations defined in the WarmBackend interface. func checkWarmBackend(ctx context.Context, w WarmBackend) error { - var empty bytes.Reader - remoteVersionID, err := w.Put(ctx, probeObject, &empty, 0) + remoteVersionID, err := w.Put(ctx, probeObject, strings.NewReader("MinIO"), 5) if err != nil { if _, ok := err.(BackendDown); ok { return err @@ -131,7 +131,7 @@ type remoteVersionID string // newWarmBackend instantiates the tier type specific WarmBackend, runs // checkWarmBackend on it. 
-func newWarmBackend(ctx context.Context, tier madmin.TierConfig) (d WarmBackend, err error) { +func newWarmBackend(ctx context.Context, tier madmin.TierConfig, probe bool) (d WarmBackend, err error) { switch tier.Type { case madmin.S3: d, err = newWarmBackendS3(*tier.S3, tier.Name) @@ -145,12 +145,15 @@ func newWarmBackend(ctx context.Context, tier madmin.TierConfig) (d WarmBackend, return nil, errTierTypeUnsupported } if err != nil { - return nil, errTierTypeUnsupported + tierLogIf(ctx, err) + return nil, errTierInvalidConfig } - err = checkWarmBackend(ctx, d) - if err != nil { - return nil, err + if probe { + if err = checkWarmBackend(ctx, d); err != nil { + return nil, err + } } + return d, nil } diff --git a/cmd/xl-storage-disk-id-check.go b/cmd/xl-storage-disk-id-check.go index 8d24b3234a4bc..89fac54936be4 100644 --- a/cmd/xl-storage-disk-id-check.go +++ b/cmd/xl-storage-disk-id-check.go @@ -23,6 +23,7 @@ import ( "fmt" "io" "math/rand" + "path" "runtime" "strconv" "strings" @@ -32,6 +33,7 @@ import ( "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/cachevalue" + "github.com/minio/minio/internal/grid" xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/logger" ) @@ -69,6 +71,9 @@ const ( storageMetricReadMultiple storageMetricDeleteAbandonedParts storageMetricDiskInfo + storageMetricDeleteBulk + storageMetricRenamePart + storageMetricReadParts // .... add more @@ -79,13 +84,13 @@ const ( type xlStorageDiskIDCheck struct { totalWrites atomic.Uint64 totalDeletes atomic.Uint64 - totalErrsAvailability atomic.Uint64 // Captures all data availability errors such as permission denied, faulty disk and timeout errors. + totalErrsAvailability atomic.Uint64 // Captures all data availability errors such as faulty disk, timeout errors. totalErrsTimeout atomic.Uint64 // Captures all timeout only errors // apiCalls should be placed first so alignment is guaranteed for atomic operations. apiCalls [storageMetricLast]uint64 apiLatencies [storageMetricLast]*lockedLastMinuteLatency - diskID string + diskID atomic.Pointer[string] storage *xlStorage health *diskHealthTracker healthCheck bool @@ -98,7 +103,7 @@ type xlStorageDiskIDCheck struct { func (p *xlStorageDiskIDCheck) getMetrics() DiskMetrics { p.metricsCache.InitOnce(5*time.Second, cachevalue.Opts{}, - func() (DiskMetrics, error) { + func(ctx context.Context) (DiskMetrics, error) { diskMetric := DiskMetrics{ LastMinute: make(map[string]AccElem, len(p.apiLatencies)), APICalls: make(map[string]uint64, len(p.apiCalls)), @@ -113,7 +118,7 @@ func (p *xlStorageDiskIDCheck) getMetrics() DiskMetrics { }, ) - diskMetric, _ := p.metricsCache.Get() + diskMetric, _ := p.metricsCache.GetWithCtx(context.Background()) // Do not need this value to be cached. 
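// Illustrative sketch (not part of the patch): the diskID field above changes from a
// plain string to atomic.Pointer[string] so concurrent health and staleness checks can
// read it without a mutex while SetDiskID swaps it atomically. A minimal stand-alone
// version of that pattern; the tracker type and names here are hypothetical.
package main

import (
	"fmt"
	"sync/atomic"
)

type diskTracker struct {
	diskID atomic.Pointer[string]
}

func newDiskTracker() *diskTracker {
	t := &diskTracker{}
	empty := ""
	// Store a valid pointer up front so loads never dereference nil.
	t.diskID.Store(&empty)
	return t
}

func (t *diskTracker) SetDiskID(id string) { t.diskID.Store(&id) }

// IsStale is a lock-free read, safe to call from many goroutines concurrently.
func (t *diskTracker) IsStale(storedID string) bool {
	return *t.diskID.Load() != storedID
}

func main() {
	t := newDiskTracker()
	t.SetDiskID("disk-1")
	fmt.Println(t.IsStale("disk-1"), t.IsStale("disk-2"))
}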
diskMetric.TotalErrorsTimeout = p.totalErrsTimeout.Load() diskMetric.TotalErrorsAvailability = p.totalErrsAvailability.Load() @@ -154,7 +159,7 @@ func (e *lockedLastMinuteLatency) addSize(value time.Duration, sz int64) { a.Total = atomic.LoadInt64(&old.Total) a.N = atomic.LoadInt64(&old.N) e.mu.Lock() - e.lastMinuteLatency.addAll(t-1, a) + e.addAll(t-1, a) e.mu.Unlock() acc = newAcc } else { @@ -172,7 +177,7 @@ func (e *lockedLastMinuteLatency) addSize(value time.Duration, sz int64) { func (e *lockedLastMinuteLatency) total() AccElem { e.mu.Lock() defer e.mu.Unlock() - return e.lastMinuteLatency.getTotal() + return e.getTotal() } func newXLStorageDiskIDCheck(storage *xlStorage, healthCheck bool) *xlStorageDiskIDCheck { @@ -182,6 +187,7 @@ func newXLStorageDiskIDCheck(storage *xlStorage, healthCheck bool) *xlStorageDis healthCheck: healthCheck && globalDriveMonitoring, metricsCache: cachevalue.New[DiskMetrics](), } + xl.SetDiskID(emptyDiskID) xl.totalWrites.Store(xl.storage.getWriteAttribute()) xl.totalDeletes.Store(xl.storage.getDeleteAttribute()) @@ -204,7 +210,7 @@ func (p *xlStorageDiskIDCheck) IsOnline() bool { if err != nil { return false } - return storedDiskID == p.diskID + return storedDiskID == *p.diskID.Load() } func (p *xlStorageDiskIDCheck) LastConn() time.Time { @@ -245,18 +251,10 @@ func (p *xlStorageDiskIDCheck) NSScanner(ctx context.Context, cache dataUsageCac return p.storage.NSScanner(ctx, cache, updates, scanMode, weSleep) } -func (p *xlStorageDiskIDCheck) SetFormatData(b []byte) { - p.storage.SetFormatData(b) -} - func (p *xlStorageDiskIDCheck) GetDiskLoc() (poolIdx, setIdx, diskIdx int) { return p.storage.GetDiskLoc() } -func (p *xlStorageDiskIDCheck) SetDiskLoc(poolIdx, setIdx, diskIdx int) { - p.storage.SetDiskLoc(poolIdx, setIdx, diskIdx) -} - func (p *xlStorageDiskIDCheck) Close() error { p.diskCancel() return p.storage.Close() @@ -267,11 +265,11 @@ func (p *xlStorageDiskIDCheck) GetDiskID() (string, error) { } func (p *xlStorageDiskIDCheck) SetDiskID(id string) { - p.diskID = id + p.diskID.Store(&id) } func (p *xlStorageDiskIDCheck) checkDiskStale() error { - if p.diskID == "" { + if *p.diskID.Load() == emptyDiskID { // For empty disk-id we allow the call as the server might be // coming up and trying to read format.json or create format.json return nil @@ -281,7 +279,7 @@ func (p *xlStorageDiskIDCheck) checkDiskStale() error { // return any error generated while reading `format.json` return err } - if err == nil && p.diskID == storedDiskID { + if err == nil && *p.diskID.Load() == storedDiskID { return nil } // not the same disk we remember, take it offline. @@ -294,7 +292,7 @@ func (p *xlStorageDiskIDCheck) DiskInfo(ctx context.Context, opts DiskInfoOption } si := p.updateStorageMetrics(storageMetricDiskInfo) - defer si(&err) + defer si(0, &err) if opts.NoOp { if opts.Metrics { @@ -335,7 +333,8 @@ func (p *xlStorageDiskIDCheck) DiskInfo(ctx context.Context, opts DiskInfoOption // check cached diskID against backend // only if its non-empty. - if p.diskID != "" && p.diskID != info.ID { + cachedID := *p.diskID.Load() + if cachedID != "" && cachedID != info.ID { return info, errDiskNotFound } return info, nil @@ -346,7 +345,7 @@ func (p *xlStorageDiskIDCheck) MakeVolBulk(ctx context.Context, volumes ...strin if err != nil { return err } - defer done(&err) + defer done(0, &err) w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) return w.Run(func() error { return p.storage.MakeVolBulk(ctx, volumes...) 
}) @@ -357,7 +356,7 @@ func (p *xlStorageDiskIDCheck) MakeVol(ctx context.Context, volume string) (err if err != nil { return err } - defer done(&err) + defer done(0, &err) w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) return w.Run(func() error { return p.storage.MakeVol(ctx, volume) }) @@ -368,7 +367,7 @@ func (p *xlStorageDiskIDCheck) ListVols(ctx context.Context) (vi []VolInfo, err if err != nil { return nil, err } - defer done(&err) + defer done(0, &err) return p.storage.ListVols(ctx) } @@ -378,7 +377,7 @@ func (p *xlStorageDiskIDCheck) StatVol(ctx context.Context, volume string) (vol if err != nil { return vol, err } - defer done(&err) + defer done(0, &err) return xioutil.WithDeadline[VolInfo](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) (result VolInfo, err error) { return p.storage.StatVol(ctx, volume) @@ -390,7 +389,7 @@ func (p *xlStorageDiskIDCheck) DeleteVol(ctx context.Context, volume string, for if err != nil { return err } - defer done(&err) + defer done(0, &err) w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) return w.Run(func() error { return p.storage.DeleteVol(ctx, volume, forceDelete) }) @@ -401,7 +400,7 @@ func (p *xlStorageDiskIDCheck) ListDir(ctx context.Context, origvolume, volume, if err != nil { return nil, err } - defer done(&err) + defer done(0, &err) return p.storage.ListDir(ctx, origvolume, volume, dirPath, count) } @@ -412,7 +411,9 @@ func (p *xlStorageDiskIDCheck) ReadFile(ctx context.Context, volume string, path if err != nil { return 0, err } - defer done(&err) + defer func() { + done(n, &err) + }() return xioutil.WithDeadline[int64](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) (result int64, err error) { return p.storage.ReadFile(ctx, volume, path, offset, buf, verifier) @@ -425,7 +426,7 @@ func (p *xlStorageDiskIDCheck) AppendFile(ctx context.Context, volume string, pa if err != nil { return err } - defer done(&err) + defer done(int64(len(buf)), &err) w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) return w.Run(func() error { @@ -438,7 +439,7 @@ func (p *xlStorageDiskIDCheck) CreateFile(ctx context.Context, origvolume, volum if err != nil { return err } - defer done(&err) + defer done(size, &err) return p.storage.CreateFile(ctx, origvolume, volume, path, size, io.NopCloser(reader)) } @@ -448,50 +449,79 @@ func (p *xlStorageDiskIDCheck) ReadFileStream(ctx context.Context, volume, path if err != nil { return nil, err } - defer done(&err) + defer done(length, &err) return xioutil.WithDeadline[io.ReadCloser](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) (result io.ReadCloser, err error) { return p.storage.ReadFileStream(ctx, volume, path, offset, length) }) } +func (p *xlStorageDiskIDCheck) RenamePart(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string, meta []byte, skipParent string) (err error) { + ctx, done, err := p.TrackDiskHealth(ctx, storageMetricRenamePart, srcVolume, srcPath, dstVolume, dstPath) + if err != nil { + return err + } + defer done(0, &err) + + w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) + return w.Run(func() error { + return p.storage.RenamePart(ctx, srcVolume, srcPath, dstVolume, dstPath, meta, skipParent) + }) +} + func (p *xlStorageDiskIDCheck) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) (err error) { ctx, done, err := p.TrackDiskHealth(ctx, storageMetricRenameFile, srcVolume, srcPath, dstVolume, dstPath) if err != nil { return err } - defer 
done(&err) + defer done(0, &err) w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) return w.Run(func() error { return p.storage.RenameFile(ctx, srcVolume, srcPath, dstVolume, dstPath) }) } -func (p *xlStorageDiskIDCheck) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string, opts RenameOptions) (sign uint64, err error) { +func (p *xlStorageDiskIDCheck) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string, opts RenameOptions) (res RenameDataResp, err error) { ctx, done, err := p.TrackDiskHealth(ctx, storageMetricRenameData, srcPath, fi.DataDir, dstVolume, dstPath) if err != nil { - return 0, err + return res, err } defer func() { if err == nil && !skipAccessChecks(dstVolume) { p.storage.setWriteAttribute(p.totalWrites.Add(1)) } - done(&err) + done(0, &err) }() - return xioutil.WithDeadline[uint64](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) (result uint64, err error) { + // Copy inline data to a new buffer to function with deadlines. + if len(fi.Data) > 0 { + fi.Data = append(grid.GetByteBufferCap(len(fi.Data))[:0], fi.Data...) + } + return xioutil.WithDeadline[RenameDataResp](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) (res RenameDataResp, err error) { + if len(fi.Data) > 0 { + defer grid.PutByteBuffer(fi.Data) + } return p.storage.RenameData(ctx, srcVolume, srcPath, fi, dstVolume, dstPath, opts) }) } -func (p *xlStorageDiskIDCheck) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) (err error) { +func (p *xlStorageDiskIDCheck) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) (*CheckPartsResp, error) { ctx, done, err := p.TrackDiskHealth(ctx, storageMetricCheckParts, volume, path) + if err != nil { + return nil, err + } + defer done(0, &err) + + return p.storage.CheckParts(ctx, volume, path, fi) +} + +func (p *xlStorageDiskIDCheck) DeleteBulk(ctx context.Context, volume string, paths ...string) (err error) { + ctx, done, err := p.TrackDiskHealth(ctx, storageMetricDeleteBulk, append([]string{volume}, paths...)...) if err != nil { return err } - defer done(&err) + defer done(0, &err) - w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) - return w.Run(func() error { return p.storage.CheckParts(ctx, volume, path, fi) }) + return p.storage.DeleteBulk(ctx, volume, paths...) 
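// Illustrative sketch (not part of the patch): RenameData above copies fi.Data into a
// pooled buffer before entering the deadline wrapper, because the wrapper may return to
// the caller on timeout while the storage call is still running, and the background work
// must then own its bytes rather than borrow the caller's slice. The withDeadline helper
// below is a simplified stand-in for xioutil.WithDeadline, and plain make/copy stands in
// for the byte-buffer pool.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// withDeadline runs fn in a goroutine and abandons it (without cancelling the work)
// once the deadline expires.
func withDeadline(d time.Duration, fn func() error) error {
	errCh := make(chan error, 1)
	go func() { errCh <- fn() }()
	select {
	case err := <-errCh:
		return err
	case <-time.After(d):
		return context.DeadlineExceeded
	}
}

func writeInline(data []byte) error {
	// Copy so the slow write below never reads memory the caller may reuse
	// after a deadline-exceeded return.
	owned := append(make([]byte, 0, len(data)), data...)
	return withDeadline(50*time.Millisecond, func() error {
		time.Sleep(10 * time.Millisecond) // simulated disk write
		_ = owned
		return nil
	})
}

func main() {
	buf := []byte("inline xl.meta payload")
	err := writeInline(buf)
	fmt.Println(err == nil || errors.Is(err, context.DeadlineExceeded))
}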
} func (p *xlStorageDiskIDCheck) Delete(ctx context.Context, volume string, path string, deleteOpts DeleteOptions) (err error) { @@ -499,7 +529,7 @@ func (p *xlStorageDiskIDCheck) Delete(ctx context.Context, volume string, path s if err != nil { return err } - defer done(&err) + defer done(0, &err) w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) return w.Run(func() error { return p.storage.Delete(ctx, volume, path, deleteOpts) }) @@ -546,7 +576,7 @@ func (p *xlStorageDiskIDCheck) DeleteVersions(ctx context.Context, volume string p.storage.setDeleteAttribute(p.totalDeletes.Add(permanentDeletes)) } } - done(&err) + done(0, &err) }() errs = p.storage.DeleteVersions(ctx, volume, versions, opts) @@ -560,12 +590,12 @@ func (p *xlStorageDiskIDCheck) DeleteVersions(ctx context.Context, volume string return errs } -func (p *xlStorageDiskIDCheck) VerifyFile(ctx context.Context, volume, path string, fi FileInfo) (err error) { +func (p *xlStorageDiskIDCheck) VerifyFile(ctx context.Context, volume, path string, fi FileInfo) (*CheckPartsResp, error) { ctx, done, err := p.TrackDiskHealth(ctx, storageMetricVerifyFile, volume, path) if err != nil { - return err + return nil, err } - defer done(&err) + defer done(0, &err) return p.storage.VerifyFile(ctx, volume, path, fi) } @@ -575,7 +605,7 @@ func (p *xlStorageDiskIDCheck) WriteAll(ctx context.Context, volume string, path if err != nil { return err } - defer done(&err) + defer done(int64(len(b)), &err) w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) return w.Run(func() error { return p.storage.WriteAll(ctx, volume, path, b) }) @@ -587,7 +617,7 @@ func (p *xlStorageDiskIDCheck) DeleteVersion(ctx context.Context, volume, path s return err } defer func() { - defer done(&err) + defer done(0, &err) if err == nil && !skipAccessChecks(volume) { if opts.UndoWrite { @@ -614,7 +644,7 @@ func (p *xlStorageDiskIDCheck) UpdateMetadata(ctx context.Context, volume, path if err != nil { return err } - defer done(&err) + defer done(0, &err) w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) return w.Run(func() error { return p.storage.UpdateMetadata(ctx, volume, path, fi, opts) }) @@ -625,7 +655,7 @@ func (p *xlStorageDiskIDCheck) WriteMetadata(ctx context.Context, origvolume, vo if err != nil { return err } - defer done(&err) + defer done(0, &err) w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) return w.Run(func() error { return p.storage.WriteMetadata(ctx, origvolume, volume, path, fi) }) @@ -636,7 +666,7 @@ func (p *xlStorageDiskIDCheck) ReadVersion(ctx context.Context, origvolume, volu if err != nil { return fi, err } - defer done(&err) + defer done(0, &err) return xioutil.WithDeadline[FileInfo](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) (result FileInfo, err error) { return p.storage.ReadVersion(ctx, origvolume, volume, path, versionID, opts) @@ -648,7 +678,11 @@ func (p *xlStorageDiskIDCheck) ReadAll(ctx context.Context, volume string, path if err != nil { return nil, err } - defer done(&err) + var sz int + defer func() { + sz = len(buf) + done(int64(sz), &err) + }() return xioutil.WithDeadline[[]byte](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) (result []byte, err error) { return p.storage.ReadAll(ctx, volume, path) @@ -660,7 +694,9 @@ func (p *xlStorageDiskIDCheck) ReadXL(ctx context.Context, volume string, path s if err != nil { return RawFileInfo{}, err } - defer done(&err) + defer func() { + done(int64(len(rf.Buf)), &err) + }() return 
xioutil.WithDeadline[RawFileInfo](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) (result RawFileInfo, err error) { return p.storage.ReadXL(ctx, volume, path, readData) @@ -672,11 +708,21 @@ func (p *xlStorageDiskIDCheck) StatInfoFile(ctx context.Context, volume, path st if err != nil { return nil, err } - defer done(&err) + defer done(0, &err) return p.storage.StatInfoFile(ctx, volume, path, glob) } +func (p *xlStorageDiskIDCheck) ReadParts(ctx context.Context, volume string, partMetaPaths ...string) ([]*ObjectPartInfo, error) { + ctx, done, err := p.TrackDiskHealth(ctx, storageMetricReadParts, volume, path.Dir(partMetaPaths[0])) + if err != nil { + return nil, err + } + defer done(0, &err) + + return p.storage.ReadParts(ctx, volume, partMetaPaths...) +} + // ReadMultiple will read multiple files and send each files as response. // Files are read and returned in the given order. // The resp channel is closed before the call returns. @@ -687,7 +733,7 @@ func (p *xlStorageDiskIDCheck) ReadMultiple(ctx context.Context, req ReadMultipl xioutil.SafeClose(resp) return err } - defer done(&err) + defer done(0, &err) return p.storage.ReadMultiple(ctx, req, resp) } @@ -699,19 +745,20 @@ func (p *xlStorageDiskIDCheck) CleanAbandonedData(ctx context.Context, volume st if err != nil { return err } - defer done(&err) + defer done(0, &err) w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) return w.Run(func() error { return p.storage.CleanAbandonedData(ctx, volume, path) }) } -func storageTrace(s storageMetric, startTime time.Time, duration time.Duration, path string, err string, custom map[string]string) madmin.TraceInfo { +func storageTrace(s storageMetric, startTime time.Time, duration time.Duration, path string, size int64, err string, custom map[string]string) madmin.TraceInfo { return madmin.TraceInfo{ TraceType: madmin.TraceStorage, Time: startTime, NodeName: globalLocalNodeName, FuncName: "storage." + s.String(), Duration: duration, + Bytes: size, Path: path, Error: err, Custom: custom, @@ -731,10 +778,10 @@ func scannerTrace(s scannerMetric, startTime time.Time, duration time.Duration, } // Update storage metrics -func (p *xlStorageDiskIDCheck) updateStorageMetrics(s storageMetric, paths ...string) func(err *error) { +func (p *xlStorageDiskIDCheck) updateStorageMetrics(s storageMetric, paths ...string) func(sz int64, err *error) { startTime := time.Now() trace := globalTrace.NumSubscribers(madmin.TraceStorage) > 0 - return func(errp *error) { + return func(sz int64, errp *error) { duration := time.Since(startTime) var err error @@ -744,9 +791,6 @@ func (p *xlStorageDiskIDCheck) updateStorageMetrics(s storageMetric, paths ...st atomic.AddUint64(&p.apiCalls[s], 1) if IsErr(err, []error{ - errVolumeAccessDenied, - errFileAccessDenied, - errDiskAccessDenied, errFaultyDisk, errFaultyRemoteDisk, context.DeadlineExceeded, @@ -768,7 +812,7 @@ func (p *xlStorageDiskIDCheck) updateStorageMetrics(s storageMetric, paths ...st } custom["total-errs-timeout"] = strconv.FormatUint(p.totalErrsTimeout.Load(), 10) custom["total-errs-availability"] = strconv.FormatUint(p.totalErrsAvailability.Load(), 10) - globalTrace.Publish(storageTrace(s, startTime, duration, strings.Join(paths, " "), errStr, custom)) + globalTrace.Publish(storageTrace(s, startTime, duration, strings.Join(paths, " "), sz, errStr, custom)) } } } @@ -825,7 +869,7 @@ func (h *healthDiskCtxValue) logSuccess() { // noopDoneFunc is a no-op done func. // Can be reused. 
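// Illustrative sketch (not part of the patch): the done callbacks above change from
// func(*error) to func(int64, *error) so each storage call can report how many bytes it
// moved along with its latency and error outcome. A small self-contained version of that
// deferred-closure pattern; the recorder type and names are hypothetical.
package main

import (
	"fmt"
	"time"
)

type opRecorder struct {
	calls int
	bytes int64
	errs  int
}

// track returns a closure to be deferred by the caller; it captures the start time and
// records size and error outcome when the call finishes.
func (r *opRecorder) track(op string) func(sz int64, errp *error) {
	start := time.Now()
	return func(sz int64, errp *error) {
		r.calls++
		r.bytes += sz
		if errp != nil && *errp != nil {
			r.errs++
		}
		fmt.Printf("%s took %s, %d bytes\n", op, time.Since(start), sz)
	}
}

func readFile(r *opRecorder, buf []byte) (err error) {
	done := r.track("ReadFile")
	// A named return lets the deferred closure observe the final error.
	defer done(int64(len(buf)), &err)
	return nil
}

func main() {
	r := &opRecorder{}
	_ = readFile(r, make([]byte, 4096))
	fmt.Printf("calls=%d bytes=%d errs=%d\n", r.calls, r.bytes, r.errs)
}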
-var noopDoneFunc = func(_ *error) {} +var noopDoneFunc = func(_ int64, _ *error) {} // TrackDiskHealth for this request. // When a non-nil error is returned 'done' MUST be called @@ -834,7 +878,7 @@ var noopDoneFunc = func(_ *error) {} // is either nil or io.EOF the disk is considered good. // So if unsure if the disk status is ok, return nil as a parameter to done. // Shadowing will work as long as return error is named: https://go.dev/play/p/sauq86SsTN2 -func (p *xlStorageDiskIDCheck) TrackDiskHealth(ctx context.Context, s storageMetric, paths ...string) (c context.Context, done func(*error), err error) { +func (p *xlStorageDiskIDCheck) TrackDiskHealth(ctx context.Context, s storageMetric, paths ...string) (c context.Context, done func(int64, *error), err error) { done = noopDoneFunc if contextCanceled(ctx) { return ctx, done, ctx.Err() @@ -867,7 +911,7 @@ func (p *xlStorageDiskIDCheck) TrackDiskHealth(ctx context.Context, s storageMet ctx = context.WithValue(ctx, healthDiskCtxKey{}, &healthDiskCtxValue{lastSuccess: &p.health.lastSuccess}) si := p.updateStorageMetrics(s, paths...) var once sync.Once - return ctx, func(errp *error) { + return ctx, func(sz int64, errp *error) { p.health.waiting.Add(-1) once.Do(func() { if errp != nil { @@ -876,7 +920,7 @@ func (p *xlStorageDiskIDCheck) TrackDiskHealth(ctx context.Context, s storageMet p.health.logSuccess() } } - si(errp) + si(sz, errp) }) }, nil } @@ -910,7 +954,8 @@ func (p *xlStorageDiskIDCheck) monitorDiskStatus(spent time.Duration, fn string) }) if err == nil { - logger.Event(context.Background(), "node(%s): Read/Write/Delete successful, bringing drive %s online", globalLocalNodeName, p.storage.String()) + logger.Event(context.Background(), "healthcheck", + "node(%s): Read/Write/Delete successful, bringing drive %s online", globalLocalNodeName, p.storage.String()) p.health.status.Store(diskHealthOK) p.health.waiting.Add(-1) return @@ -969,7 +1014,7 @@ func (p *xlStorageDiskIDCheck) monitorDiskWritable(ctx context.Context) { goOffline := func(err error, spent time.Duration) { if p.health.status.CompareAndSwap(diskHealthOK, diskHealthFaulty) { - logger.LogAlwaysIf(ctx, fmt.Errorf("node(%s): taking drive %s offline: %v", globalLocalNodeName, p.storage.String(), err)) + storageLogAlwaysIf(ctx, fmt.Errorf("node(%s): taking drive %s offline: %v", globalLocalNodeName, p.storage.String(), err)) p.health.waiting.Add(1) go p.monitorDiskStatus(spent, fn) } diff --git a/cmd/xl-storage-format-utils.go b/cmd/xl-storage-format-utils.go index b9b0fab2b7927..13c6836e63b7f 100644 --- a/cmd/xl-storage-format-utils.go +++ b/cmd/xl-storage-format-utils.go @@ -23,22 +23,40 @@ import ( "github.com/zeebo/xxh3" ) -func getFileInfoVersions(xlMetaBuf []byte, volume, path string, allParts bool) (FileInfoVersions, error) { - fivs, err := getAllFileInfoVersions(xlMetaBuf, volume, path, allParts) +// getFileInfoVersions partitions this object's versions such that, +// - fivs.Versions has all the non-free versions +// - fivs.FreeVersions has all the free versions +// +// if inclFreeVersions is true all the versions are in fivs.Versions, free and non-free versions alike. +// +// Note: Only the scanner requires fivs.Versions to have exclusively non-free versions. This is used while enforcing NewerNoncurrentVersions lifecycle element. 
+func getFileInfoVersions(xlMetaBuf []byte, volume, path string, inclFreeVersions bool) (FileInfoVersions, error) { + fivs, err := getAllFileInfoVersions(xlMetaBuf, volume, path, true) if err != nil { return fivs, err } + + // If inclFreeVersions is false, partition the versions in fivs.Versions + // such that finally fivs.Versions has + // all the non-free versions and fivs.FreeVersions has all the free + // versions. n := 0 for _, fi := range fivs.Versions { - // Filter our tier object delete marker - if !fi.TierFreeVersion() { - fivs.Versions[n] = fi - n++ + // filter our tier object delete marker + if fi.TierFreeVersion() { + if !inclFreeVersions { + fivs.FreeVersions = append(fivs.FreeVersions, fi) + } } else { - fivs.FreeVersions = append(fivs.FreeVersions, fi) + if !inclFreeVersions { + fivs.Versions[n] = fi + } + n++ } } - fivs.Versions = fivs.Versions[:n] + if !inclFreeVersions { + fivs.Versions = fivs.Versions[:n] + } // Update numversions for i := range fivs.Versions { fivs.Versions[i].NumVersions = n @@ -85,7 +103,12 @@ func getAllFileInfoVersions(xlMetaBuf []byte, volume, path string, allParts bool }, nil } -func getFileInfo(xlMetaBuf []byte, volume, path, versionID string, data, allParts bool) (FileInfo, error) { +type fileInfoOpts struct { + InclFreeVersions bool + Data bool +} + +func getFileInfo(xlMetaBuf []byte, volume, path, versionID string, opts fileInfoOpts) (FileInfo, error) { var fi FileInfo var err error var inData xlMetaInlineData @@ -93,7 +116,7 @@ func getFileInfo(xlMetaBuf []byte, volume, path, versionID string, data, allPart return FileInfo{}, e } else if buf != nil { inData = data - fi, err = buf.ToFileInfo(volume, path, versionID, allParts) + fi, err = buf.ToFileInfo(volume, path, versionID, true) if len(buf) != 0 && errors.Is(err, errFileNotFound) { // This special case is needed to handle len(xlMeta.versions) == 0 return FileInfo{ @@ -122,15 +145,16 @@ func getFileInfo(xlMetaBuf []byte, volume, path, versionID string, data, allPart }, nil } inData = xlMeta.data - fi, err = xlMeta.ToFileInfo(volume, path, versionID, false, allParts) + fi, err = xlMeta.ToFileInfo(volume, path, versionID, opts.InclFreeVersions, true) } - if !data || err != nil { + if !opts.Data || err != nil { return fi, err } versionID = fi.VersionID if versionID == "" { versionID = nullVersionID } + fi.Data = inData.find(versionID) if len(fi.Data) == 0 { // PR #11758 used DataDir, preserve it @@ -141,22 +165,6 @@ func getFileInfo(xlMetaBuf []byte, volume, path, versionID string, data, allPart return fi, nil } -// getXLDiskLoc will return the pool/set/disk id if it can be located in the object layer. -// Will return -1 for unknown values. -func getXLDiskLoc(diskID string) (poolIdx, setIdx, diskIdx int) { - if api := newObjectLayerFn(); api != nil { - if globalIsErasureSD { - return 0, 0, 0 - } - if ep, ok := api.(*erasureServerPools); ok { - if pool, set, disk, err := ep.getPoolAndSet(diskID); err == nil { - return pool, set, disk - } - } - } - return -1, -1, -1 -} - // hashDeterministicString will return a deterministic hash for the map values. // Trivial collisions are avoided, but this is by no means a strong hash. 
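// Illustrative sketch (not part of the patch): getFileInfoVersions above walks the
// decoded versions once and, unless inclFreeVersions is set, compacts the non-free
// versions to the front of the slice while collecting the free versions separately.
// The same single-pass partition pattern in isolation; the version type and the free
// flag are stand-ins for FileInfo and TierFreeVersion.
package main

import "fmt"

type version struct {
	id   string
	free bool
}

// partition keeps non-free versions (in order) in the original slice and returns the
// free versions; it reuses the backing array, so the non-free side needs no allocation.
func partition(versions []version) (nonFree, free []version) {
	n := 0
	for _, v := range versions {
		if v.free {
			free = append(free, v)
			continue
		}
		versions[n] = v
		n++
	}
	return versions[:n], free
}

func main() {
	vs := []version{{"a", false}, {"b", true}, {"c", false}}
	nonFree, free := partition(vs)
	fmt.Println(len(nonFree), len(free)) // 2 1
}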
func hashDeterministicString(m map[string]string) uint64 { diff --git a/cmd/xl-storage-format-utils_test.go b/cmd/xl-storage-format-utils_test.go index dfbb43408c333..12f36315357bc 100644 --- a/cmd/xl-storage-format-utils_test.go +++ b/cmd/xl-storage-format-utils_test.go @@ -18,6 +18,7 @@ package cmd import ( + "slices" "sort" "testing" "time" @@ -67,7 +68,7 @@ func Test_hashDeterministicString(t *testing.T) { const n = 100 want := hashDeterministicString(tt.arg) m := tt.arg - for i := 0; i < n; i++ { + for range n { if got := hashDeterministicString(m); got != want { t.Errorf("hashDeterministicString() = %v, want %v", got, want) } @@ -145,8 +146,8 @@ func TestGetFileInfoVersions(t *testing.T) { } xl := xlMetaV2{} var versions []FileInfo - var freeVersionIDs []string - for i := 0; i < 5; i++ { + var allVersionIDs, freeVersionIDs []string + for i := range 5 { fi := basefi fi.VersionID = mustGetUUID() fi.DataDir = mustGetUUID() @@ -167,18 +168,31 @@ func TestGetFileInfoVersions(t *testing.T) { // delete this version leading to a free version xl.DeleteVersion(fi) freeVersionIDs = append(freeVersionIDs, fi.TierFreeVersionID()) + allVersionIDs = append(allVersionIDs, fi.TierFreeVersionID()) } else { versions = append(versions, fi) + allVersionIDs = append(allVersionIDs, fi.VersionID) } } buf, err := xl.AppendTo(nil) if err != nil { t.Fatalf("Failed to serialize xlmeta %v", err) } - fivs, err := getFileInfoVersions(buf, basefi.Volume, basefi.Name, true) + fivs, err := getFileInfoVersions(buf, basefi.Volume, basefi.Name, false) if err != nil { t.Fatalf("getFileInfoVersions failed: %v", err) } + chkNumVersions := func(fis []FileInfo) bool { + for i := 0; i < len(fis)-1; i++ { + if fis[i].NumVersions != fis[i+1].NumVersions { + return false + } + } + return true + } + if !chkNumVersions(fivs.Versions) { + t.Fatalf("Expected all versions to have the same NumVersions") + } sort.Slice(versions, func(i, j int) bool { if versions[i].IsLatest { @@ -194,6 +208,9 @@ func TestGetFileInfoVersions(t *testing.T) { if fi.VersionID != versions[i].VersionID { t.Fatalf("getFileInfoVersions: versions don't match at %d, version id expected %s but got %s", i, fi.VersionID, versions[i].VersionID) } + if fi.NumVersions != len(fivs.Versions) { + t.Fatalf("getFileInfoVersions: version with %s version id expected to have %d as NumVersions but got %d", fi.VersionID, len(fivs.Versions), fi.NumVersions) + } } for i, free := range fivs.FreeVersions { @@ -201,4 +218,20 @@ func TestGetFileInfoVersions(t *testing.T) { t.Fatalf("getFileInfoVersions: free versions don't match at %d, version id expected %s but got %s", i, free.VersionID, freeVersionIDs[i]) } } + + // versions are stored in xl-meta sorted in descending order of their ModTime + slices.Reverse(allVersionIDs) + + fivs, err = getFileInfoVersions(buf, basefi.Volume, basefi.Name, true) + if err != nil { + t.Fatalf("getFileInfoVersions failed: %v", err) + } + if !chkNumVersions(fivs.Versions) { + t.Fatalf("Expected all versions to have the same NumVersions") + } + for i, fi := range fivs.Versions { + if fi.VersionID != allVersionIDs[i] { + t.Fatalf("getFileInfoVersions: all versions don't match at %d expected %s but got %s", i, allVersionIDs[i], fi.VersionID) + } + } } diff --git a/cmd/xl-storage-format-v1.go b/cmd/xl-storage-format-v1.go index a9d0023f33703..ae9ed631907b9 100644 --- a/cmd/xl-storage-format-v1.go +++ b/cmd/xl-storage-format-v1.go @@ -26,7 +26,6 @@ import ( "github.com/cespare/xxhash/v2" jsoniter "github.com/json-iterator/go" - 
"github.com/minio/minio/internal/logger" ) // XL constants. @@ -56,6 +55,8 @@ func isXLMetaErasureInfoValid(data, parity int) bool { return ((data >= parity) && (data > 0) && (parity >= 0)) } +//msgp:clearomitted + //go:generate msgp -file=$GOFILE -unexported // A xlMetaV1Object represents `xl.meta` metadata header. @@ -160,13 +161,14 @@ const ( // ObjectPartInfo Info of each part kept in the multipart metadata // file after CompleteMultipartUpload() is called. type ObjectPartInfo struct { - ETag string `json:"etag,omitempty"` - Number int `json:"number"` - Size int64 `json:"size"` // Size of the part on the disk. - ActualSize int64 `json:"actualSize"` // Original size of the part without compression or encryption bytes. - ModTime time.Time `json:"modTime"` // Date and time at which the part was uploaded. - Index []byte `json:"index,omitempty" msg:"index,omitempty"` - Checksums map[string]string `json:"crc,omitempty" msg:"crc,omitempty"` // Content Checksums + ETag string `json:"etag,omitempty" msg:"e"` + Number int `json:"number" msg:"n"` + Size int64 `json:"size" msg:"s"` // Size of the part on the disk. + ActualSize int64 `json:"actualSize" msg:"as"` // Original size of the part without compression or encryption bytes. + ModTime time.Time `json:"modTime" msg:"mt"` // Date and time at which the part was uploaded. + Index []byte `json:"index,omitempty" msg:"i,omitempty"` + Checksums map[string]string `json:"crc,omitempty" msg:"crc,omitempty"` // Content Checksums + Error string `json:"error,omitempty" msg:"err,omitempty"` // only set while reading part meta from drive. } // ChecksumInfo - carries checksums of individual scattered parts per disk. @@ -210,7 +212,7 @@ func (c *ChecksumInfo) UnmarshalJSON(data []byte) error { } if !c.Algorithm.Available() { - logger.LogIf(GlobalContext, errBitrotHashAlgoInvalid) + internalLogIf(GlobalContext, errBitrotHashAlgoInvalid) return errBitrotHashAlgoInvalid } return nil diff --git a/cmd/xl-storage-format-v1_gen.go b/cmd/xl-storage-format-v1_gen.go index c25d01263669e..02d484254a848 100644 --- a/cmd/xl-storage-format-v1_gen.go +++ b/cmd/xl-storage-format-v1_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package cmd + import ( "github.com/tinylib/msgp/msgp" ) @@ -561,6 +561,8 @@ func (z *ObjectPartInfo) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err) return } + var zb0001Mask uint8 /* 3 bits */ + _ = zb0001Mask for zb0001 > 0 { zb0001-- field, err = dc.ReadMapKeyPtr() @@ -569,42 +571,43 @@ func (z *ObjectPartInfo) DecodeMsg(dc *msgp.Reader) (err error) { return } switch msgp.UnsafeString(field) { - case "ETag": + case "e": z.ETag, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "ETag") return } - case "Number": + case "n": z.Number, err = dc.ReadInt() if err != nil { err = msgp.WrapError(err, "Number") return } - case "Size": + case "s": z.Size, err = dc.ReadInt64() if err != nil { err = msgp.WrapError(err, "Size") return } - case "ActualSize": + case "as": z.ActualSize, err = dc.ReadInt64() if err != nil { err = msgp.WrapError(err, "ActualSize") return } - case "ModTime": + case "mt": z.ModTime, err = dc.ReadTime() if err != nil { err = msgp.WrapError(err, "ModTime") return } - case "index": + case "i": z.Index, err = dc.ReadBytes(z.Index) if err != nil { err = msgp.WrapError(err, "Index") return } + zb0001Mask |= 0x1 case "crc": var zb0002 uint32 zb0002, err = dc.ReadMapHeader() @@ -615,19 +618,17 @@ func (z *ObjectPartInfo) DecodeMsg(dc *msgp.Reader) (err error) { if z.Checksums == nil { z.Checksums = make(map[string]string, zb0002) } else if len(z.Checksums) > 0 { - for key := range z.Checksums { - delete(z.Checksums, key) - } + clear(z.Checksums) } for zb0002 > 0 { zb0002-- var za0001 string - var za0002 string za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Checksums") return } + var za0002 string za0002, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Checksums", za0001) @@ -635,6 +636,14 @@ func (z *ObjectPartInfo) DecodeMsg(dc *msgp.Reader) (err error) { } z.Checksums[za0001] = za0002 } + zb0001Mask |= 0x2 + case "err": + z.Error, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Error") + return + } + zb0001Mask |= 0x4 default: err = dc.Skip() if err != nil { @@ -643,14 +652,26 @@ func (z *ObjectPartInfo) DecodeMsg(dc *msgp.Reader) (err error) { } } } + // Clear omitted fields. 
+ if zb0001Mask != 0x7 { + if (zb0001Mask & 0x1) == 0 { + z.Index = nil + } + if (zb0001Mask & 0x2) == 0 { + z.Checksums = nil + } + if (zb0001Mask & 0x4) == 0 { + z.Error = "" + } + } return } // EncodeMsg implements msgp.Encodable func (z *ObjectPartInfo) EncodeMsg(en *msgp.Writer) (err error) { - // omitempty: check for empty values - zb0001Len := uint32(7) - var zb0001Mask uint8 /* 7 bits */ + // check for omitted fields + zb0001Len := uint32(8) + var zb0001Mask uint8 /* 8 bits */ _ = zb0001Mask if z.Index == nil { zb0001Len-- @@ -660,96 +681,113 @@ func (z *ObjectPartInfo) EncodeMsg(en *msgp.Writer) (err error) { zb0001Len-- zb0001Mask |= 0x40 } + if z.Error == "" { + zb0001Len-- + zb0001Mask |= 0x80 + } // variable map header, size zb0001Len err = en.Append(0x80 | uint8(zb0001Len)) if err != nil { return } - if zb0001Len == 0 { - return - } - // write "ETag" - err = en.Append(0xa4, 0x45, 0x54, 0x61, 0x67) - if err != nil { - return - } - err = en.WriteString(z.ETag) - if err != nil { - err = msgp.WrapError(err, "ETag") - return - } - // write "Number" - err = en.Append(0xa6, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72) - if err != nil { - return - } - err = en.WriteInt(z.Number) - if err != nil { - err = msgp.WrapError(err, "Number") - return - } - // write "Size" - err = en.Append(0xa4, 0x53, 0x69, 0x7a, 0x65) - if err != nil { - return - } - err = en.WriteInt64(z.Size) - if err != nil { - err = msgp.WrapError(err, "Size") - return - } - // write "ActualSize" - err = en.Append(0xaa, 0x41, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65) - if err != nil { - return - } - err = en.WriteInt64(z.ActualSize) - if err != nil { - err = msgp.WrapError(err, "ActualSize") - return - } - // write "ModTime" - err = en.Append(0xa7, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65) - if err != nil { - return - } - err = en.WriteTime(z.ModTime) - if err != nil { - err = msgp.WrapError(err, "ModTime") - return - } - if (zb0001Mask & 0x20) == 0 { // if not empty - // write "index" - err = en.Append(0xa5, 0x69, 0x6e, 0x64, 0x65, 0x78) + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // write "e" + err = en.Append(0xa1, 0x65) if err != nil { return } - err = en.WriteBytes(z.Index) + err = en.WriteString(z.ETag) if err != nil { - err = msgp.WrapError(err, "Index") + err = msgp.WrapError(err, "ETag") return } - } - if (zb0001Mask & 0x40) == 0 { // if not empty - // write "crc" - err = en.Append(0xa3, 0x63, 0x72, 0x63) + // write "n" + err = en.Append(0xa1, 0x6e) if err != nil { return } - err = en.WriteMapHeader(uint32(len(z.Checksums))) + err = en.WriteInt(z.Number) if err != nil { - err = msgp.WrapError(err, "Checksums") + err = msgp.WrapError(err, "Number") return } - for za0001, za0002 := range z.Checksums { - err = en.WriteString(za0001) + // write "s" + err = en.Append(0xa1, 0x73) + if err != nil { + return + } + err = en.WriteInt64(z.Size) + if err != nil { + err = msgp.WrapError(err, "Size") + return + } + // write "as" + err = en.Append(0xa2, 0x61, 0x73) + if err != nil { + return + } + err = en.WriteInt64(z.ActualSize) + if err != nil { + err = msgp.WrapError(err, "ActualSize") + return + } + // write "mt" + err = en.Append(0xa2, 0x6d, 0x74) + if err != nil { + return + } + err = en.WriteTime(z.ModTime) + if err != nil { + err = msgp.WrapError(err, "ModTime") + return + } + if (zb0001Mask & 0x20) == 0 { // if not omitted + // write "i" + err = en.Append(0xa1, 0x69) + if err != nil { + return + } + err = en.WriteBytes(z.Index) + if err != nil { + err = msgp.WrapError(err, "Index") + return 
+ } + } + if (zb0001Mask & 0x40) == 0 { // if not omitted + // write "crc" + err = en.Append(0xa3, 0x63, 0x72, 0x63) + if err != nil { + return + } + err = en.WriteMapHeader(uint32(len(z.Checksums))) if err != nil { err = msgp.WrapError(err, "Checksums") return } - err = en.WriteString(za0002) + for za0001, za0002 := range z.Checksums { + err = en.WriteString(za0001) + if err != nil { + err = msgp.WrapError(err, "Checksums") + return + } + err = en.WriteString(za0002) + if err != nil { + err = msgp.WrapError(err, "Checksums", za0001) + return + } + } + } + if (zb0001Mask & 0x80) == 0 { // if not omitted + // write "err" + err = en.Append(0xa3, 0x65, 0x72, 0x72) + if err != nil { + return + } + err = en.WriteString(z.Error) if err != nil { - err = msgp.WrapError(err, "Checksums", za0001) + err = msgp.WrapError(err, "Error") return } } @@ -760,9 +798,9 @@ func (z *ObjectPartInfo) EncodeMsg(en *msgp.Writer) (err error) { // MarshalMsg implements msgp.Marshaler func (z *ObjectPartInfo) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // omitempty: check for empty values - zb0001Len := uint32(7) - var zb0001Mask uint8 /* 7 bits */ + // check for omitted fields + zb0001Len := uint32(8) + var zb0001Mask uint8 /* 8 bits */ _ = zb0001Mask if z.Index == nil { zb0001Len-- @@ -772,38 +810,48 @@ func (z *ObjectPartInfo) MarshalMsg(b []byte) (o []byte, err error) { zb0001Len-- zb0001Mask |= 0x40 } + if z.Error == "" { + zb0001Len-- + zb0001Mask |= 0x80 + } // variable map header, size zb0001Len o = append(o, 0x80|uint8(zb0001Len)) - if zb0001Len == 0 { - return - } - // string "ETag" - o = append(o, 0xa4, 0x45, 0x54, 0x61, 0x67) - o = msgp.AppendString(o, z.ETag) - // string "Number" - o = append(o, 0xa6, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72) - o = msgp.AppendInt(o, z.Number) - // string "Size" - o = append(o, 0xa4, 0x53, 0x69, 0x7a, 0x65) - o = msgp.AppendInt64(o, z.Size) - // string "ActualSize" - o = append(o, 0xaa, 0x41, 0x63, 0x74, 0x75, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65) - o = msgp.AppendInt64(o, z.ActualSize) - // string "ModTime" - o = append(o, 0xa7, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65) - o = msgp.AppendTime(o, z.ModTime) - if (zb0001Mask & 0x20) == 0 { // if not empty - // string "index" - o = append(o, 0xa5, 0x69, 0x6e, 0x64, 0x65, 0x78) - o = msgp.AppendBytes(o, z.Index) - } - if (zb0001Mask & 0x40) == 0 { // if not empty - // string "crc" - o = append(o, 0xa3, 0x63, 0x72, 0x63) - o = msgp.AppendMapHeader(o, uint32(len(z.Checksums))) - for za0001, za0002 := range z.Checksums { - o = msgp.AppendString(o, za0001) - o = msgp.AppendString(o, za0002) + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // string "e" + o = append(o, 0xa1, 0x65) + o = msgp.AppendString(o, z.ETag) + // string "n" + o = append(o, 0xa1, 0x6e) + o = msgp.AppendInt(o, z.Number) + // string "s" + o = append(o, 0xa1, 0x73) + o = msgp.AppendInt64(o, z.Size) + // string "as" + o = append(o, 0xa2, 0x61, 0x73) + o = msgp.AppendInt64(o, z.ActualSize) + // string "mt" + o = append(o, 0xa2, 0x6d, 0x74) + o = msgp.AppendTime(o, z.ModTime) + if (zb0001Mask & 0x20) == 0 { // if not omitted + // string "i" + o = append(o, 0xa1, 0x69) + o = msgp.AppendBytes(o, z.Index) + } + if (zb0001Mask & 0x40) == 0 { // if not omitted + // string "crc" + o = append(o, 0xa3, 0x63, 0x72, 0x63) + o = msgp.AppendMapHeader(o, uint32(len(z.Checksums))) + for za0001, za0002 := range z.Checksums { + o = msgp.AppendString(o, za0001) + o = msgp.AppendString(o, za0002) + } + } + if (zb0001Mask & 0x80) == 0 { 
// if not omitted + // string "err" + o = append(o, 0xa3, 0x65, 0x72, 0x72) + o = msgp.AppendString(o, z.Error) } } return @@ -819,6 +867,8 @@ func (z *ObjectPartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err) return } + var zb0001Mask uint8 /* 3 bits */ + _ = zb0001Mask for zb0001 > 0 { zb0001-- field, bts, err = msgp.ReadMapKeyZC(bts) @@ -827,42 +877,43 @@ func (z *ObjectPartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { return } switch msgp.UnsafeString(field) { - case "ETag": + case "e": z.ETag, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "ETag") return } - case "Number": + case "n": z.Number, bts, err = msgp.ReadIntBytes(bts) if err != nil { err = msgp.WrapError(err, "Number") return } - case "Size": + case "s": z.Size, bts, err = msgp.ReadInt64Bytes(bts) if err != nil { err = msgp.WrapError(err, "Size") return } - case "ActualSize": + case "as": z.ActualSize, bts, err = msgp.ReadInt64Bytes(bts) if err != nil { err = msgp.WrapError(err, "ActualSize") return } - case "ModTime": + case "mt": z.ModTime, bts, err = msgp.ReadTimeBytes(bts) if err != nil { err = msgp.WrapError(err, "ModTime") return } - case "index": + case "i": z.Index, bts, err = msgp.ReadBytesBytes(bts, z.Index) if err != nil { err = msgp.WrapError(err, "Index") return } + zb0001Mask |= 0x1 case "crc": var zb0002 uint32 zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) @@ -873,14 +924,12 @@ func (z *ObjectPartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.Checksums == nil { z.Checksums = make(map[string]string, zb0002) } else if len(z.Checksums) > 0 { - for key := range z.Checksums { - delete(z.Checksums, key) - } + clear(z.Checksums) } for zb0002 > 0 { - var za0001 string var za0002 string zb0002-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Checksums") @@ -893,6 +942,14 @@ func (z *ObjectPartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { } z.Checksums[za0001] = za0002 } + zb0001Mask |= 0x2 + case "err": + z.Error, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Error") + return + } + zb0001Mask |= 0x4 default: bts, err = msgp.Skip(bts) if err != nil { @@ -901,19 +958,32 @@ func (z *ObjectPartInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { } } } + // Clear omitted fields. 
+ if zb0001Mask != 0x7 { + if (zb0001Mask & 0x1) == 0 { + z.Index = nil + } + if (zb0001Mask & 0x2) == 0 { + z.Checksums = nil + } + if (zb0001Mask & 0x4) == 0 { + z.Error = "" + } + } o = bts return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *ObjectPartInfo) Msgsize() (s int) { - s = 1 + 5 + msgp.StringPrefixSize + len(z.ETag) + 7 + msgp.IntSize + 5 + msgp.Int64Size + 11 + msgp.Int64Size + 8 + msgp.TimeSize + 6 + msgp.BytesPrefixSize + len(z.Index) + 4 + msgp.MapHeaderSize + s = 1 + 2 + msgp.StringPrefixSize + len(z.ETag) + 2 + msgp.IntSize + 2 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.TimeSize + 2 + msgp.BytesPrefixSize + len(z.Index) + 4 + msgp.MapHeaderSize if z.Checksums != nil { for za0001, za0002 := range z.Checksums { _ = za0002 s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) } } + s += 4 + msgp.StringPrefixSize + len(z.Error) return } @@ -1354,19 +1424,17 @@ func (z *xlMetaV1Object) DecodeMsg(dc *msgp.Reader) (err error) { if z.Meta == nil { z.Meta = make(map[string]string, zb0003) } else if len(z.Meta) > 0 { - for key := range z.Meta { - delete(z.Meta, key) - } + clear(z.Meta) } for zb0003 > 0 { zb0003-- var za0001 string - var za0002 string za0001, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Meta") return } + var za0002 string za0002, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Meta", za0001) @@ -1674,14 +1742,12 @@ func (z *xlMetaV1Object) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.Meta == nil { z.Meta = make(map[string]string, zb0003) } else if len(z.Meta) > 0 { - for key := range z.Meta { - delete(z.Meta, key) - } + clear(z.Meta) } for zb0003 > 0 { - var za0001 string var za0002 string zb0003-- + var za0001 string za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Meta") diff --git a/cmd/xl-storage-format-v1_gen_test.go b/cmd/xl-storage-format-v1_gen_test.go index 0b66c8938fdba..fba5a0252e323 100644 --- a/cmd/xl-storage-format-v1_gen_test.go +++ b/cmd/xl-storage-format-v1_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
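// Illustrative sketch (not part of the patch): the regenerated msgp code above follows
// the //msgp:clearomitted directive added in this patch - while decoding it sets a bit
// per optional field it actually sees, then resets any field whose bit is still zero, so
// values left over in a reused struct cannot leak into the decoded result. A hand-written
// miniature of that bitmask idea; the record type and field keys are hypothetical.
package main

import "fmt"

type record struct {
	Index []byte
	Error string
}

const (
	sawIndex = 1 << iota
	sawError
)

// decodeInto simulates decoding the given fields into a possibly reused struct,
// clearing whatever optional fields the input did not mention.
func decodeInto(dst *record, fields map[string]string) {
	var mask uint8
	for k, v := range fields {
		switch k {
		case "i":
			dst.Index = []byte(v)
			mask |= sawIndex
		case "err":
			dst.Error = v
			mask |= sawError
		}
	}
	// Clear omitted fields, mirroring the generated "Clear omitted fields" blocks.
	if mask&sawIndex == 0 {
		dst.Index = nil
	}
	if mask&sawError == 0 {
		dst.Error = ""
	}
}

func main() {
	r := record{Index: []byte("stale"), Error: "stale"}
	decodeInto(&r, map[string]string{"err": "disk error"})
	fmt.Printf("%q %q\n", r.Index, r.Error) // "" "disk error"
}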
+package cmd + import ( "bytes" "testing" diff --git a/cmd/xl-storage-format-v2-legacy.go b/cmd/xl-storage-format-v2-legacy.go index 02e017c2bed23..ec2132279acca 100644 --- a/cmd/xl-storage-format-v2-legacy.go +++ b/cmd/xl-storage-format-v2-legacy.go @@ -29,6 +29,9 @@ func (x *xlMetaV2VersionHeader) unmarshalV(v uint8, bts []byte) (o []byte, err e switch v { case 1: return x.unmarshalV1(bts) + case 2: + x2 := xlMetaV2VersionHeaderV2{xlMetaV2VersionHeader: x} + return x2.UnmarshalMsg(bts) case xlHeaderVersion: return x.UnmarshalMsg(bts) } @@ -41,28 +44,28 @@ func (x *xlMetaV2VersionHeader) unmarshalV1(bts []byte) (o []byte, err error) { zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { err = msgp.WrapError(err) - return + return o, err } if zb0001 != 4 { err = msgp.ArrayError{Wanted: 4, Got: zb0001} - return + return o, err } bts, err = msgp.ReadExactBytes(bts, (x.VersionID)[:]) if err != nil { err = msgp.WrapError(err, "VersionID") - return + return o, err } x.ModTime, bts, err = msgp.ReadInt64Bytes(bts) if err != nil { err = msgp.WrapError(err, "ModTime") - return + return o, err } { var zb0002 uint8 zb0002, bts, err = msgp.ReadUint8Bytes(bts) if err != nil { err = msgp.WrapError(err, "Type") - return + return o, err } x.Type = VersionType(zb0002) } @@ -71,12 +74,12 @@ func (x *xlMetaV2VersionHeader) unmarshalV1(bts []byte) (o []byte, err error) { zb0003, bts, err = msgp.ReadUint8Bytes(bts) if err != nil { err = msgp.WrapError(err, "Flags") - return + return o, err } x.Flags = xlFlags(zb0003) } o = bts - return + return o, err } // unmarshalV unmarshals with a specific metadata version. @@ -123,3 +126,107 @@ func (j *xlMetaV2Version) unmarshalV(v uint8, bts []byte) (o []byte, err error) } return o, err } + +// xlMetaV2VersionHeaderV2 is a version 2 of xlMetaV2VersionHeader before EcN and EcM were added. 
+type xlMetaV2VersionHeaderV2 struct { + *xlMetaV2VersionHeader +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *xlMetaV2VersionHeaderV2) UnmarshalMsg(bts []byte) (o []byte, err error) { + z.EcN, z.EcN = 0, 0 + var zb0001 uint32 + zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return o, err + } + if zb0001 != 5 { + err = msgp.ArrayError{Wanted: 5, Got: zb0001} + return o, err + } + bts, err = msgp.ReadExactBytes(bts, (z.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return o, err + } + z.ModTime, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "ModTime") + return o, err + } + bts, err = msgp.ReadExactBytes(bts, (z.Signature)[:]) + if err != nil { + err = msgp.WrapError(err, "Signature") + return o, err + } + { + var zb0002 uint8 + zb0002, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Type") + return o, err + } + z.Type = VersionType(zb0002) + } + { + var zb0003 uint8 + zb0003, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Flags") + return o, err + } + z.Flags = xlFlags(zb0003) + } + o = bts + return o, err +} + +// DecodeMsg implements msgp.Decodable +func (z *xlMetaV2VersionHeaderV2) DecodeMsg(dc *msgp.Reader) (err error) { + z.EcN, z.EcN = 0, 0 + var zb0001 uint32 + zb0001, err = dc.ReadArrayHeader() + if err != nil { + err = msgp.WrapError(err) + return err + } + if zb0001 != 5 { + err = msgp.ArrayError{Wanted: 5, Got: zb0001} + return err + } + err = dc.ReadExactBytes((z.VersionID)[:]) + if err != nil { + err = msgp.WrapError(err, "VersionID") + return err + } + z.ModTime, err = dc.ReadInt64() + if err != nil { + err = msgp.WrapError(err, "ModTime") + return err + } + err = dc.ReadExactBytes((z.Signature)[:]) + if err != nil { + err = msgp.WrapError(err, "Signature") + return err + } + { + var zb0002 uint8 + zb0002, err = dc.ReadUint8() + if err != nil { + err = msgp.WrapError(err, "Type") + return err + } + z.Type = VersionType(zb0002) + } + { + var zb0003 uint8 + zb0003, err = dc.ReadUint8() + if err != nil { + err = msgp.WrapError(err, "Flags") + return err + } + z.Flags = xlFlags(zb0003) + } + return err +} diff --git a/cmd/xl-storage-format-v2.go b/cmd/xl-storage-format-v2.go index 6bfb3ac8ac1b1..7d26b13e5955c 100644 --- a/cmd/xl-storage-format-v2.go +++ b/cmd/xl-storage-format-v2.go @@ -25,44 +25,20 @@ import ( "fmt" "io" "sort" - "strconv" "strings" - "sync" "time" "github.com/cespare/xxhash/v2" "github.com/google/uuid" jsoniter "github.com/json-iterator/go" + "github.com/minio/minio/internal/bpool" "github.com/minio/minio/internal/bucket/lifecycle" "github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/config/storageclass" xhttp "github.com/minio/minio/internal/http" - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/env" "github.com/tinylib/msgp/msgp" ) -// Reject creating new versions when a single object is cross maxObjectVersions -var maxObjectVersions = 10000 - -func init() { - v := env.Get("_MINIO_OBJECT_MAX_VERSIONS", "") - if v != "" { - maxv, err := strconv.Atoi(v) - if err != nil { - logger.Info("invalid _MINIO_OBJECT_MAX_VERSIONS value: %s, defaulting to '10000'", v) - maxObjectVersions = 10000 - } else { - if maxv < 10 { - logger.Info("invalid _MINIO_OBJECT_MAX_VERSIONS value: %s, minimum allowed is '10' defaulting to '10000'", v) - maxObjectVersions = 10000 - } else { - maxObjectVersions = maxv - } - } - } -} - var ( // 
XL header specifies the format xlHeader = [4]byte{'X', 'L', '2', ' '} @@ -71,6 +47,8 @@ var ( xlVersionCurrent [4]byte ) +//msgp:clearomitted + //go:generate msgp -file=$GOFILE -unexported //go:generate stringer -type VersionType,ErasureAlgo -output=xl-storage-format-v2_string.go $GOFILE @@ -274,28 +252,41 @@ type xlMetaV2VersionHeader struct { Signature [4]byte Type VersionType Flags xlFlags + EcN, EcM uint8 // Note that these will be 0/0 for non-v2 objects and older xl.meta } func (x xlMetaV2VersionHeader) String() string { - return fmt.Sprintf("Type: %s, VersionID: %s, Signature: %s, ModTime: %s, Flags: %s", + return fmt.Sprintf("Type: %s, VersionID: %s, Signature: %s, ModTime: %s, Flags: %s, N: %d, M: %d", x.Type.String(), hex.EncodeToString(x.VersionID[:]), hex.EncodeToString(x.Signature[:]), time.Unix(0, x.ModTime), x.Flags.String(), + x.EcN, x.EcM, ) } // matchesNotStrict returns whether x and o have both have non-zero version, // their versions match and their type match. // If they have zero version, modtime must match. -func (x xlMetaV2VersionHeader) matchesNotStrict(o xlMetaV2VersionHeader) bool { +func (x xlMetaV2VersionHeader) matchesNotStrict(o xlMetaV2VersionHeader) (ok bool) { + ok = x.VersionID == o.VersionID && x.Type == o.Type && x.matchesEC(o) if x.VersionID == [16]byte{} { - return x.VersionID == o.VersionID && - x.Type == o.Type && o.ModTime == x.ModTime + ok = ok && o.ModTime == x.ModTime } - return x.VersionID == o.VersionID && - x.Type == o.Type + return ok +} + +func (x xlMetaV2VersionHeader) matchesEC(o xlMetaV2VersionHeader) bool { + if x.hasEC() && o.hasEC() { + return x.EcN == o.EcN && x.EcM == o.EcM + } // if no EC header this is an older object + return true +} + +// hasEC will return true if the version has erasure coding information. +func (x xlMetaV2VersionHeader) hasEC() bool { + return x.EcM > 0 && x.EcN > 0 } // sortsBefore can be used as a tiebreaker for stable sorting/selecting. @@ -375,12 +366,18 @@ func (j *xlMetaV2Version) header() xlMetaV2VersionHeader { if j.Type == ObjectType && j.ObjectV2.InlineData() { flags |= xlFlagInlineData } + var ecM, ecN uint8 + if j.Type == ObjectType && j.ObjectV2 != nil { + ecM, ecN = uint8(j.ObjectV2.ErasureM), uint8(j.ObjectV2.ErasureN) + } return xlMetaV2VersionHeader{ VersionID: j.getVersionID(), ModTime: j.getModTime().UnixNano(), Signature: j.getSignature(), Type: j.Type, Flags: flags, + EcN: ecN, + EcM: ecM, } } @@ -464,8 +461,8 @@ func (j *xlMetaV2Version) ToFileInfo(volume, path string, allParts bool) (fi Fil } const ( - xlHeaderVersion = 2 - xlMetaVersion = 2 + xlHeaderVersion = 3 + xlMetaVersion = 3 ) func (j xlMetaV2DeleteMarker) ToFileInfo(volume, path string) (FileInfo, error) { @@ -706,18 +703,17 @@ func (j xlMetaV2Object) ToFileInfo(volume, path string, allParts bool) (FileInfo const metaDataReadDefault = 4 << 10 // Return used metadata byte slices here. -var metaDataPool = sync.Pool{New: func() interface{} { return make([]byte, 0, metaDataReadDefault) }} +var metaDataPool = bpool.Pool[[]byte]{New: func() []byte { return make([]byte, 0, metaDataReadDefault) }} // metaDataPoolGet will return a byte slice with capacity at least metaDataReadDefault. // It will be length 0. func metaDataPoolGet() []byte { - return metaDataPool.Get().([]byte)[:0] + return metaDataPool.Get()[:0] } // metaDataPoolPut will put an unused small buffer back into the pool. 
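// Illustrative sketch (not part of the patch): metaDataPool above moves from a raw
// sync.Pool to a generic internal pool type, which removes the type assertion at every
// Get call site. A minimal generic facade with the same shape, written only to show the
// idea; the real internal/bpool package may handle slice boxing and sizing differently.
package main

import (
	"fmt"
	"sync"
)

// Pool is a tiny generic facade over sync.Pool.
type Pool[T any] struct {
	New func() T
	p   sync.Pool
}

func (p *Pool[T]) Get() T {
	if v := p.p.Get(); v != nil {
		return v.(T)
	}
	return p.New()
}

func (p *Pool[T]) Put(v T) { p.p.Put(v) }

var bufPool = Pool[[]byte]{New: func() []byte { return make([]byte, 0, 4<<10) }}

func main() {
	buf := bufPool.Get()[:0]
	buf = append(buf, "xl.meta"...)
	fmt.Println(cap(buf) >= 4<<10)
	bufPool.Put(buf)
}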
func metaDataPoolPut(buf []byte) { if cap(buf) >= metaDataReadDefault && cap(buf) < metaDataReadDefault*4 { - //nolint:staticcheck // SA6002 we are fine with the tiny alloc metaDataPool.Put(buf) } } @@ -790,10 +786,7 @@ func readXLMetaNoData(r io.Reader, size int64) ([]byte, error) { } // CRC is variable length, so we need to truncate exactly that. - wantMax := want + msgp.Uint32Size - if wantMax > size { - wantMax = size - } + wantMax := min(want+msgp.Uint32Size, size) if err := readMore(wantMax); err != nil { return nil, err } @@ -829,7 +822,7 @@ func decodeXLHeaders(buf []byte) (versions int, headerV, metaV uint8, b []byte, return 0, 0, 0, buf, err } if hdrVer > xlHeaderVersion { - return 0, 0, 0, buf, fmt.Errorf("decodeXLHeaders: Unknown xl header version %d", metaVer) + return 0, 0, 0, buf, fmt.Errorf("decodeXLHeaders: Unknown xl header version %d", hdrVer) } if metaVer > xlMetaVersion { return 0, 0, 0, buf, fmt.Errorf("decodeXLHeaders: Unknown xl meta version %d", metaVer) @@ -850,7 +843,7 @@ func decodeXLHeaders(buf []byte) (versions int, headerV, metaV uint8, b []byte, // Any non-nil error is returned. func decodeVersions(buf []byte, versions int, fn func(idx int, hdr, meta []byte) error) (err error) { var tHdr, tMeta []byte // Zero copy bytes - for i := 0; i < versions; i++ { + for i := range versions { tHdr, buf, err = msgp.ReadBytesZC(buf) if err != nil { return err @@ -962,7 +955,7 @@ func (x *xlMetaV2) loadIndexed(buf xlMetaBuf, data xlMetaInlineData) error { x.metaV = metaV if err = x.data.validate(); err != nil { x.data.repair() - logger.LogIf(GlobalContext, fmt.Errorf("xlMetaV2.loadIndexed: data validation failed: %v. %d entries after repair", err, x.data.entries())) + storageLogIf(GlobalContext, fmt.Errorf("xlMetaV2.loadIndexed: data validation failed: %v. %d entries after repair", err, x.data.entries())) } return decodeVersions(buf, versions, func(i int, hdr, meta []byte) error { ver := &x.versions[i] @@ -972,6 +965,50 @@ func (x *xlMetaV2) loadIndexed(buf xlMetaBuf, data xlMetaInlineData) error { } ver.meta = meta + // Fix inconsistent compression index due to https://github.com/minio/minio/pull/20575 + // First search marshaled content for encoded values. + // We have bumped metaV to make this check cheaper. + if metaV < 3 && ver.header.Type == ObjectType && bytes.Contains(meta, []byte("\xa7PartIdx")) && + bytes.Contains(meta, []byte("\xbcX-Minio-Internal-compression\xc4\x15klauspost/compress/s2")) { + // Likely candidate... + version, err := x.getIdx(i) + if err == nil { + // Check write date... + // RELEASE.2023-12-02T10-51-33Z -> RELEASE.2024-10-29T16-01-48Z + const dateStart = 1701471618 + const dateEnd = 1730156418 + if version.WrittenByVersion > dateStart && version.WrittenByVersion < dateEnd && + version.ObjectV2 != nil && len(version.ObjectV2.PartIndices) > 0 { + var changed bool + clearField := true + for i, sz := range version.ObjectV2.PartActualSizes { + if len(version.ObjectV2.PartIndices) > i { + // 8<<20 is current 'compMinIndexSize', but we detach it in case it should change in the future. + if sz <= 8<<20 && len(version.ObjectV2.PartIndices[i]) > 0 { + changed = true + version.ObjectV2.PartIndices[i] = nil + } + clearField = clearField && len(version.ObjectV2.PartIndices[i]) == 0 + } + } + if changed { + // All empty, clear. + if clearField { + version.ObjectV2.PartIndices = nil + } + + // Reindex since it was changed. + meta, err := version.MarshalMsg(make([]byte, 0, len(ver.meta)+10)) + if err == nil { + // Override both if fine. 
+ ver.header = version.header() + ver.meta = meta + } + } + } + } + } + // Fix inconsistent x-minio-internal-replication-timestamp by loading and reindexing. if metaV < 2 && ver.header.Type == DeleteType { // load (and convert) version. @@ -1029,7 +1066,7 @@ func (x *xlMetaV2) loadLegacy(buf []byte) error { x.data = buf if err = x.data.validate(); err != nil { x.data.repair() - logger.LogIf(GlobalContext, fmt.Errorf("xlMetaV2.Load: data validation failed: %v. %d entries after repair", err, x.data.entries())) + storageLogIf(GlobalContext, fmt.Errorf("xlMetaV2.Load: data validation failed: %v. %d entries after repair", err, x.data.entries())) } default: return errors.New("unknown minor metadata version") @@ -1111,8 +1148,8 @@ func (x *xlMetaV2) addVersion(ver xlMetaV2Version) error { return err } - // returns error if we have exceeded maxObjectVersions - if len(x.versions)+1 > maxObjectVersions { + // returns error if we have exceeded configured object max versions + if int64(len(x.versions)+1) > globalAPIConfig.getObjectMaxVersions() { return errMaxVersionsExceeded } @@ -1256,7 +1293,7 @@ func (x *xlMetaV2) setIdx(idx int, ver xlMetaV2Version) (err error) { // getDataDirs will return all data directories in the metadata // as well as all version ids used for inline data. func (x *xlMetaV2) getDataDirs() ([]string, error) { - dds := make([]string, len(x.versions)*2) + dds := make([]string, 0, len(x.versions)*2) for i, ver := range x.versions { if ver.header.Type == DeleteType { continue @@ -1344,13 +1381,13 @@ func (x *xlMetaV2) DeleteVersion(fi FileInfo) (string, error) { updateVersion = fi.MarkDeleted } else { // for replication scenario - if fi.Deleted && fi.VersionPurgeStatus() != Complete { + if fi.Deleted && fi.VersionPurgeStatus() != replication.VersionPurgeComplete { if !fi.VersionPurgeStatus().Empty() || fi.DeleteMarkerReplicationStatus().Empty() { updateVersion = true } } // object or delete-marker versioned delete is not complete - if !fi.VersionPurgeStatus().Empty() && fi.VersionPurgeStatus() != Complete { + if !fi.VersionPurgeStatus().Empty() && fi.VersionPurgeStatus() != replication.VersionPurgeComplete { updateVersion = true } } @@ -1418,7 +1455,7 @@ func (x *xlMetaV2) DeleteVersion(fi FileInfo) (string, error) { return "", err } x.versions = append(x.versions[:i], x.versions[i+1:]...) 
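The compressed-index repair above avoids decoding every version by first scanning the raw msgpack buffer for two byte patterns. The magic bytes follow directly from the msgpack encoding rules: 0xa7 is the fixstr header (0xa0 | length) for the 7-byte key "PartIdx", 0xbc is the fixstr header for the 28-byte key "X-Minio-Internal-compression", and 0xc4 0x15 is a bin8 header for the 21-byte value "klauspost/compress/s2" (MetaSys values are []byte). A small self-contained sketch, not part of the patch, that reproduces those prefixes:

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// fixstr header: 0xa0 | len, valid for strings up to 31 bytes.
	key := msgp.AppendString(nil, "PartIdx")
	fmt.Printf("% x\n", key) // a7 50 61 72 74 49 64 78  -> "\xa7PartIdx"

	name := msgp.AppendString(nil, "X-Minio-Internal-compression")
	fmt.Printf("%#x\n", name[0]) // 0xbc (0xa0 | 28)

	// []byte values are encoded as bin8: 0xc4 <length> <payload>.
	val := msgp.AppendBytes(nil, []byte("klauspost/compress/s2"))
	fmt.Printf("% x\n", val[:2]) // c4 15

	// The cheap pre-filter is then a plain substring search on the buffer.
	raw := append(name, val...)
	fmt.Println(bytes.Contains(raw, []byte("\xbcX-Minio-Internal-compression\xc4\x15")))
}

Only versions that pass this byte-level check (and the WrittenByVersion date window) are unmarshalled and, if needed, rewritten without the bogus part indices.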
- if fi.MarkDeleted && (fi.VersionPurgeStatus().Empty() || (fi.VersionPurgeStatus() != Complete)) { + if fi.MarkDeleted && (fi.VersionPurgeStatus().Empty() || (fi.VersionPurgeStatus() != replication.VersionPurgeComplete)) { err = x.addVersion(ventry) } else if fi.Deleted && uv.String() == emptyUUID { return "", x.addVersion(ventry) @@ -1634,7 +1671,7 @@ func (x *xlMetaV2) AddVersion(fi FileInfo) error { } ventry.ObjectV2.PartNumbers[i] = fi.Parts[i].Number ventry.ObjectV2.PartActualSizes[i] = fi.Parts[i].ActualSize - if len(ventry.ObjectV2.PartIndices) > 0 { + if len(ventry.ObjectV2.PartIndices) > i { ventry.ObjectV2.PartIndices[i] = fi.Parts[i].Index } } @@ -1768,7 +1805,7 @@ func (x xlMetaV2) ToFileInfo(volume, path, versionID string, inclFreeVers, allPa if versionID != "" && versionID != nullVersionID { uv, err = uuid.Parse(versionID) if err != nil { - logger.LogIf(GlobalContext, fmt.Errorf("invalid versionID specified %s", versionID)) + storageLogIf(GlobalContext, fmt.Errorf("invalid versionID specified %s", versionID)) return fi, errFileVersionNotFound } } @@ -1941,7 +1978,6 @@ func mergeXLV2Versions(quorum int, strict bool, requestedVersions int, versions if !latest.header.FreeVersion() { nVersions++ } - } else { // Find latest. var latestCount int @@ -1978,6 +2014,11 @@ func mergeXLV2Versions(quorum int, strict bool, requestedVersions int, versions continue } if !strict { + // we must match EC, when we are not strict. + if !a.header.matchesEC(ver.header) { + continue + } + a.header.Signature = [4]byte{} } x[a.header]++ @@ -2074,7 +2115,7 @@ func (x xlMetaBuf) ToFileInfo(volume, path, versionID string, allParts bool) (fi if versionID != "" && versionID != nullVersionID { uv, err = uuid.Parse(versionID) if err != nil { - logger.LogIf(GlobalContext, fmt.Errorf("invalid versionID specified %s", versionID)) + storageLogIf(GlobalContext, fmt.Errorf("invalid versionID specified %s", versionID)) return fi, errFileVersionNotFound } } diff --git a/cmd/xl-storage-format-v2_gen.go b/cmd/xl-storage-format-v2_gen.go index 1813fde66f20d..e9d00cd024d50 100644 --- a/cmd/xl-storage-format-v2_gen.go +++ b/cmd/xl-storage-format-v2_gen.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package cmd + import ( "github.com/tinylib/msgp/msgp" ) @@ -276,6 +276,8 @@ func (z *xlMetaDataDirDecoder) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err) return } + var zb0001Mask uint8 /* 1 bits */ + _ = zb0001Mask for zb0001 > 0 { zb0001-- field, err = dc.ReadMapKeyPtr() @@ -327,6 +329,7 @@ func (z *xlMetaDataDirDecoder) DecodeMsg(dc *msgp.Reader) (err error) { } } } + zb0001Mask |= 0x1 default: err = dc.Skip() if err != nil { @@ -335,12 +338,17 @@ func (z *xlMetaDataDirDecoder) DecodeMsg(dc *msgp.Reader) (err error) { } } } + // Clear omitted fields. 
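Two small slice fixes in the preceding hunks are easy to read past. In getDataDirs, make([]string, len(x.versions)*2) pre-filled the result with empty strings, so entries appended later in the function ended up behind a run of blanks; allocating with zero length and a capacity hint keeps the pre-sizing without the phantom entries. In AddVersion, guarding the per-part write with len(ventry.ObjectV2.PartIndices) > i rather than > 0 avoids an out-of-range index when fewer indices than parts exist. A minimal illustration of the first pitfall, with placeholder values:

package main

import "fmt"

func main() {
	// Length 2: append grows the slice past two zero-value strings.
	withLen := make([]string, 2)
	withLen = append(withLen, "datadir-1")
	fmt.Printf("%q\n", withLen) // ["" "" "datadir-1"]

	// Length 0, capacity 2: the same allocation, no phantom entries.
	withCap := make([]string, 0, 2)
	withCap = append(withCap, "datadir-1")
	fmt.Printf("%q\n", withCap) // ["datadir-1"]
}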
+ if (zb0001Mask & 0x1) == 0 { + z.ObjectV2 = nil + } + return } // EncodeMsg implements msgp.Encodable func (z *xlMetaDataDirDecoder) EncodeMsg(en *msgp.Writer) (err error) { - // omitempty: check for empty values + // check for omitted fields zb0001Len := uint32(1) var zb0001Mask uint8 /* 1 bits */ _ = zb0001Mask @@ -353,10 +361,7 @@ func (z *xlMetaDataDirDecoder) EncodeMsg(en *msgp.Writer) (err error) { if err != nil { return } - if zb0001Len == 0 { - return - } - if (zb0001Mask & 0x1) == 0 { // if not empty + if (zb0001Mask & 0x1) == 0 { // if not omitted // write "V2Obj" err = en.Append(0xa5, 0x56, 0x32, 0x4f, 0x62, 0x6a) if err != nil { @@ -387,7 +392,7 @@ func (z *xlMetaDataDirDecoder) EncodeMsg(en *msgp.Writer) (err error) { // MarshalMsg implements msgp.Marshaler func (z *xlMetaDataDirDecoder) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // omitempty: check for empty values + // check for omitted fields zb0001Len := uint32(1) var zb0001Mask uint8 /* 1 bits */ _ = zb0001Mask @@ -397,10 +402,7 @@ func (z *xlMetaDataDirDecoder) MarshalMsg(b []byte) (o []byte, err error) { } // variable map header, size zb0001Len o = append(o, 0x80|uint8(zb0001Len)) - if zb0001Len == 0 { - return - } - if (zb0001Mask & 0x1) == 0 { // if not empty + if (zb0001Mask & 0x1) == 0 { // if not omitted // string "V2Obj" o = append(o, 0xa5, 0x56, 0x32, 0x4f, 0x62, 0x6a) if z.ObjectV2 == nil { @@ -425,6 +427,8 @@ func (z *xlMetaDataDirDecoder) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err) return } + var zb0001Mask uint8 /* 1 bits */ + _ = zb0001Mask for zb0001 > 0 { zb0001-- field, bts, err = msgp.ReadMapKeyZC(bts) @@ -475,6 +479,7 @@ func (z *xlMetaDataDirDecoder) UnmarshalMsg(bts []byte) (o []byte, err error) { } } } + zb0001Mask |= 0x1 default: bts, err = msgp.Skip(bts) if err != nil { @@ -483,6 +488,11 @@ func (z *xlMetaDataDirDecoder) UnmarshalMsg(bts []byte) (o []byte, err error) { } } } + // Clear omitted fields. + if (zb0001Mask & 0x1) == 0 { + z.ObjectV2 = nil + } + o = bts return } @@ -508,6 +518,8 @@ func (z *xlMetaV2DeleteMarker) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err) return } + var zb0001Mask uint8 /* 1 bits */ + _ = zb0001Mask for zb0001 > 0 { zb0001-- field, err = dc.ReadMapKeyPtr() @@ -538,19 +550,17 @@ func (z *xlMetaV2DeleteMarker) DecodeMsg(dc *msgp.Reader) (err error) { if z.MetaSys == nil { z.MetaSys = make(map[string][]byte, zb0002) } else if len(z.MetaSys) > 0 { - for key := range z.MetaSys { - delete(z.MetaSys, key) - } + clear(z.MetaSys) } for zb0002 > 0 { zb0002-- var za0002 string - var za0003 []byte za0002, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "MetaSys") return } + var za0003 []byte za0003, err = dc.ReadBytes(za0003) if err != nil { err = msgp.WrapError(err, "MetaSys", za0002) @@ -558,6 +568,7 @@ func (z *xlMetaV2DeleteMarker) DecodeMsg(dc *msgp.Reader) (err error) { } z.MetaSys[za0002] = za0003 } + zb0001Mask |= 0x1 default: err = dc.Skip() if err != nil { @@ -566,12 +577,17 @@ func (z *xlMetaV2DeleteMarker) DecodeMsg(dc *msgp.Reader) (err error) { } } } + // Clear omitted fields. 
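The "Clear omitted fields." blocks that show up throughout this regenerated file are the effect of the //msgp:clearomitted directive added to xl-storage-format-v2.go earlier in the patch: the decoders now record which optional fields were actually present (the zb0001Mask bits) and zero out the ones that were not, so decoding into a reused struct cannot leave stale MetaSys, ObjectV2, or PartIndices values behind. A hedged sketch of how the directive is typically applied; the Record type is hypothetical and not part of this patch:

package example

//go:generate msgp
//msgp:clearomitted

// Record is a stand-in type. With clearomitted in effect, the generated
// UnmarshalMsg/DecodeMsg set Notes to nil whenever the "notes" key is
// absent from the wire message, instead of keeping whatever an earlier
// decode stored in a reused Record.
type Record struct {
	Name  string            `msg:"name"`
	Notes map[string]string `msg:"notes,omitempty"`
}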
+ if (zb0001Mask & 0x1) == 0 { + z.MetaSys = nil + } + return } // EncodeMsg implements msgp.Encodable func (z *xlMetaV2DeleteMarker) EncodeMsg(en *msgp.Writer) (err error) { - // omitempty: check for empty values + // check for omitted fields zb0001Len := uint32(3) var zb0001Mask uint8 /* 3 bits */ _ = zb0001Mask @@ -584,51 +600,52 @@ func (z *xlMetaV2DeleteMarker) EncodeMsg(en *msgp.Writer) (err error) { if err != nil { return } - if zb0001Len == 0 { - return - } - // write "ID" - err = en.Append(0xa2, 0x49, 0x44) - if err != nil { - return - } - err = en.WriteBytes((z.VersionID)[:]) - if err != nil { - err = msgp.WrapError(err, "VersionID") - return - } - // write "MTime" - err = en.Append(0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) - if err != nil { - return - } - err = en.WriteInt64(z.ModTime) - if err != nil { - err = msgp.WrapError(err, "ModTime") - return - } - if (zb0001Mask & 0x4) == 0 { // if not empty - // write "MetaSys" - err = en.Append(0xa7, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x79, 0x73) + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // write "ID" + err = en.Append(0xa2, 0x49, 0x44) if err != nil { return } - err = en.WriteMapHeader(uint32(len(z.MetaSys))) + err = en.WriteBytes((z.VersionID)[:]) if err != nil { - err = msgp.WrapError(err, "MetaSys") + err = msgp.WrapError(err, "VersionID") return } - for za0002, za0003 := range z.MetaSys { - err = en.WriteString(za0002) + // write "MTime" + err = en.Append(0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.ModTime) + if err != nil { + err = msgp.WrapError(err, "ModTime") + return + } + if (zb0001Mask & 0x4) == 0 { // if not omitted + // write "MetaSys" + err = en.Append(0xa7, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x79, 0x73) if err != nil { - err = msgp.WrapError(err, "MetaSys") return } - err = en.WriteBytes(za0003) + err = en.WriteMapHeader(uint32(len(z.MetaSys))) if err != nil { - err = msgp.WrapError(err, "MetaSys", za0002) + err = msgp.WrapError(err, "MetaSys") return } + for za0002, za0003 := range z.MetaSys { + err = en.WriteString(za0002) + if err != nil { + err = msgp.WrapError(err, "MetaSys") + return + } + err = en.WriteBytes(za0003) + if err != nil { + err = msgp.WrapError(err, "MetaSys", za0002) + return + } + } } } return @@ -637,7 +654,7 @@ func (z *xlMetaV2DeleteMarker) EncodeMsg(en *msgp.Writer) (err error) { // MarshalMsg implements msgp.Marshaler func (z *xlMetaV2DeleteMarker) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // omitempty: check for empty values + // check for omitted fields zb0001Len := uint32(3) var zb0001Mask uint8 /* 3 bits */ _ = zb0001Mask @@ -647,22 +664,23 @@ func (z *xlMetaV2DeleteMarker) MarshalMsg(b []byte) (o []byte, err error) { } // variable map header, size zb0001Len o = append(o, 0x80|uint8(zb0001Len)) - if zb0001Len == 0 { - return - } - // string "ID" - o = append(o, 0xa2, 0x49, 0x44) - o = msgp.AppendBytes(o, (z.VersionID)[:]) - // string "MTime" - o = append(o, 0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) - o = msgp.AppendInt64(o, z.ModTime) - if (zb0001Mask & 0x4) == 0 { // if not empty - // string "MetaSys" - o = append(o, 0xa7, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x79, 0x73) - o = msgp.AppendMapHeader(o, uint32(len(z.MetaSys))) - for za0002, za0003 := range z.MetaSys { - o = msgp.AppendString(o, za0002) - o = msgp.AppendBytes(o, za0003) + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // string "ID" + o = append(o, 0xa2, 0x49, 0x44) + o = msgp.AppendBytes(o, (z.VersionID)[:]) + // string "MTime" 
+ o = append(o, 0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) + o = msgp.AppendInt64(o, z.ModTime) + if (zb0001Mask & 0x4) == 0 { // if not omitted + // string "MetaSys" + o = append(o, 0xa7, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x79, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.MetaSys))) + for za0002, za0003 := range z.MetaSys { + o = msgp.AppendString(o, za0002) + o = msgp.AppendBytes(o, za0003) + } } } return @@ -678,6 +696,8 @@ func (z *xlMetaV2DeleteMarker) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err) return } + var zb0001Mask uint8 /* 1 bits */ + _ = zb0001Mask for zb0001 > 0 { zb0001-- field, bts, err = msgp.ReadMapKeyZC(bts) @@ -708,14 +728,12 @@ func (z *xlMetaV2DeleteMarker) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.MetaSys == nil { z.MetaSys = make(map[string][]byte, zb0002) } else if len(z.MetaSys) > 0 { - for key := range z.MetaSys { - delete(z.MetaSys, key) - } + clear(z.MetaSys) } for zb0002 > 0 { - var za0002 string var za0003 []byte zb0002-- + var za0002 string za0002, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "MetaSys") @@ -728,6 +746,7 @@ func (z *xlMetaV2DeleteMarker) UnmarshalMsg(bts []byte) (o []byte, err error) { } z.MetaSys[za0002] = za0003 } + zb0001Mask |= 0x1 default: bts, err = msgp.Skip(bts) if err != nil { @@ -736,6 +755,11 @@ func (z *xlMetaV2DeleteMarker) UnmarshalMsg(bts []byte) (o []byte, err error) { } } } + // Clear omitted fields. + if (zb0001Mask & 0x1) == 0 { + z.MetaSys = nil + } + o = bts return } @@ -762,6 +786,8 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err) return } + var zb0001Mask uint8 /* 1 bits */ + _ = zb0001Mask for zb0001 > 0 { zb0001-- field, err = dc.ReadMapKeyPtr() @@ -879,7 +905,7 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "PartETags") return } - if cap(z.PartETags) >= int(zb0006) { + if z.PartETags != nil && cap(z.PartETags) >= int(zb0006) { z.PartETags = (z.PartETags)[:zb0006] } else { z.PartETags = make([]string, zb0006) @@ -926,7 +952,7 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "PartActualSizes") return } - if cap(z.PartActualSizes) >= int(zb0008) { + if z.PartActualSizes != nil && cap(z.PartActualSizes) >= int(zb0008) { z.PartActualSizes = (z.PartActualSizes)[:zb0008] } else { z.PartActualSizes = make([]int64, zb0008) @@ -958,6 +984,7 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) { return } } + zb0001Mask |= 0x1 case "Size": z.Size, err = dc.ReadInt64() if err != nil { @@ -988,19 +1015,17 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) { if z.MetaSys == nil { z.MetaSys = make(map[string][]byte, zb0010) } else if len(z.MetaSys) > 0 { - for key := range z.MetaSys { - delete(z.MetaSys, key) - } + clear(z.MetaSys) } for zb0010 > 0 { zb0010-- var za0009 string - var za0010 []byte za0009, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "MetaSys") return } + var za0010 []byte za0010, err = dc.ReadBytes(za0010) if err != nil { err = msgp.WrapError(err, "MetaSys", za0009) @@ -1027,19 +1052,17 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) { if z.MetaUser == nil { z.MetaUser = make(map[string]string, zb0011) } else if len(z.MetaUser) > 0 { - for key := range z.MetaUser { - delete(z.MetaUser, key) - } + clear(z.MetaUser) } for zb0011 > 0 { zb0011-- var za0011 string - var za0012 string za0011, err = dc.ReadString() if err != nil { err = 
msgp.WrapError(err, "MetaUser") return } + var za0012 string za0012, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "MetaUser", za0011) @@ -1056,12 +1079,17 @@ func (z *xlMetaV2Object) DecodeMsg(dc *msgp.Reader) (err error) { } } } + // Clear omitted fields. + if (zb0001Mask & 0x1) == 0 { + z.PartIndices = nil + } + return } // EncodeMsg implements msgp.Encodable func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) { - // omitempty: check for empty values + // check for omitted fields zb0001Len := uint32(18) var zb0001Mask uint32 /* 18 bits */ _ = zb0001Mask @@ -1074,283 +1102,284 @@ func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) { if err != nil { return } - if zb0001Len == 0 { - return - } - // write "ID" - err = en.Append(0xa2, 0x49, 0x44) - if err != nil { - return - } - err = en.WriteBytes((z.VersionID)[:]) - if err != nil { - err = msgp.WrapError(err, "VersionID") - return - } - // write "DDir" - err = en.Append(0xa4, 0x44, 0x44, 0x69, 0x72) - if err != nil { - return - } - err = en.WriteBytes((z.DataDir)[:]) - if err != nil { - err = msgp.WrapError(err, "DataDir") - return - } - // write "EcAlgo" - err = en.Append(0xa6, 0x45, 0x63, 0x41, 0x6c, 0x67, 0x6f) - if err != nil { - return - } - err = en.WriteUint8(uint8(z.ErasureAlgorithm)) - if err != nil { - err = msgp.WrapError(err, "ErasureAlgorithm") - return - } - // write "EcM" - err = en.Append(0xa3, 0x45, 0x63, 0x4d) - if err != nil { - return - } - err = en.WriteInt(z.ErasureM) - if err != nil { - err = msgp.WrapError(err, "ErasureM") - return - } - // write "EcN" - err = en.Append(0xa3, 0x45, 0x63, 0x4e) - if err != nil { - return - } - err = en.WriteInt(z.ErasureN) - if err != nil { - err = msgp.WrapError(err, "ErasureN") - return - } - // write "EcBSize" - err = en.Append(0xa7, 0x45, 0x63, 0x42, 0x53, 0x69, 0x7a, 0x65) - if err != nil { - return - } - err = en.WriteInt64(z.ErasureBlockSize) - if err != nil { - err = msgp.WrapError(err, "ErasureBlockSize") - return - } - // write "EcIndex" - err = en.Append(0xa7, 0x45, 0x63, 0x49, 0x6e, 0x64, 0x65, 0x78) - if err != nil { - return - } - err = en.WriteInt(z.ErasureIndex) - if err != nil { - err = msgp.WrapError(err, "ErasureIndex") - return - } - // write "EcDist" - err = en.Append(0xa6, 0x45, 0x63, 0x44, 0x69, 0x73, 0x74) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.ErasureDist))) - if err != nil { - err = msgp.WrapError(err, "ErasureDist") - return - } - for za0003 := range z.ErasureDist { - err = en.WriteUint8(z.ErasureDist[za0003]) + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // write "ID" + err = en.Append(0xa2, 0x49, 0x44) if err != nil { - err = msgp.WrapError(err, "ErasureDist", za0003) return } - } - // write "CSumAlgo" - err = en.Append(0xa8, 0x43, 0x53, 0x75, 0x6d, 0x41, 0x6c, 0x67, 0x6f) - if err != nil { - return - } - err = en.WriteUint8(uint8(z.BitrotChecksumAlgo)) - if err != nil { - err = msgp.WrapError(err, "BitrotChecksumAlgo") - return - } - // write "PartNums" - err = en.Append(0xa8, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.PartNumbers))) - if err != nil { - err = msgp.WrapError(err, "PartNumbers") - return - } - for za0004 := range z.PartNumbers { - err = en.WriteInt(z.PartNumbers[za0004]) + err = en.WriteBytes((z.VersionID)[:]) if err != nil { - err = msgp.WrapError(err, "PartNumbers", za0004) + err = msgp.WrapError(err, "VersionID") return } - } - // write "PartETags" - err = 
en.Append(0xa9, 0x50, 0x61, 0x72, 0x74, 0x45, 0x54, 0x61, 0x67, 0x73) - if err != nil { - return - } - if z.PartETags == nil { // allownil: if nil - err = en.WriteNil() + // write "DDir" + err = en.Append(0xa4, 0x44, 0x44, 0x69, 0x72) if err != nil { return } - } else { - err = en.WriteArrayHeader(uint32(len(z.PartETags))) + err = en.WriteBytes((z.DataDir)[:]) + if err != nil { + err = msgp.WrapError(err, "DataDir") + return + } + // write "EcAlgo" + err = en.Append(0xa6, 0x45, 0x63, 0x41, 0x6c, 0x67, 0x6f) + if err != nil { + return + } + err = en.WriteUint8(uint8(z.ErasureAlgorithm)) if err != nil { - err = msgp.WrapError(err, "PartETags") + err = msgp.WrapError(err, "ErasureAlgorithm") return } - for za0005 := range z.PartETags { - err = en.WriteString(z.PartETags[za0005]) + // write "EcM" + err = en.Append(0xa3, 0x45, 0x63, 0x4d) + if err != nil { + return + } + err = en.WriteInt(z.ErasureM) + if err != nil { + err = msgp.WrapError(err, "ErasureM") + return + } + // write "EcN" + err = en.Append(0xa3, 0x45, 0x63, 0x4e) + if err != nil { + return + } + err = en.WriteInt(z.ErasureN) + if err != nil { + err = msgp.WrapError(err, "ErasureN") + return + } + // write "EcBSize" + err = en.Append(0xa7, 0x45, 0x63, 0x42, 0x53, 0x69, 0x7a, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.ErasureBlockSize) + if err != nil { + err = msgp.WrapError(err, "ErasureBlockSize") + return + } + // write "EcIndex" + err = en.Append(0xa7, 0x45, 0x63, 0x49, 0x6e, 0x64, 0x65, 0x78) + if err != nil { + return + } + err = en.WriteInt(z.ErasureIndex) + if err != nil { + err = msgp.WrapError(err, "ErasureIndex") + return + } + // write "EcDist" + err = en.Append(0xa6, 0x45, 0x63, 0x44, 0x69, 0x73, 0x74) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.ErasureDist))) + if err != nil { + err = msgp.WrapError(err, "ErasureDist") + return + } + for za0003 := range z.ErasureDist { + err = en.WriteUint8(z.ErasureDist[za0003]) if err != nil { - err = msgp.WrapError(err, "PartETags", za0005) + err = msgp.WrapError(err, "ErasureDist", za0003) return } } - } - // write "PartSizes" - err = en.Append(0xa9, 0x50, 0x61, 0x72, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x73) - if err != nil { - return - } - err = en.WriteArrayHeader(uint32(len(z.PartSizes))) - if err != nil { - err = msgp.WrapError(err, "PartSizes") - return - } - for za0006 := range z.PartSizes { - err = en.WriteInt64(z.PartSizes[za0006]) + // write "CSumAlgo" + err = en.Append(0xa8, 0x43, 0x53, 0x75, 0x6d, 0x41, 0x6c, 0x67, 0x6f) if err != nil { - err = msgp.WrapError(err, "PartSizes", za0006) return } - } - // write "PartASizes" - err = en.Append(0xaa, 0x50, 0x61, 0x72, 0x74, 0x41, 0x53, 0x69, 0x7a, 0x65, 0x73) - if err != nil { - return - } - if z.PartActualSizes == nil { // allownil: if nil - err = en.WriteNil() + err = en.WriteUint8(uint8(z.BitrotChecksumAlgo)) if err != nil { + err = msgp.WrapError(err, "BitrotChecksumAlgo") return } - } else { - err = en.WriteArrayHeader(uint32(len(z.PartActualSizes))) + // write "PartNums" + err = en.Append(0xa8, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x73) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.PartNumbers))) if err != nil { - err = msgp.WrapError(err, "PartActualSizes") + err = msgp.WrapError(err, "PartNumbers") return } - for za0007 := range z.PartActualSizes { - err = en.WriteInt64(z.PartActualSizes[za0007]) + for za0004 := range z.PartNumbers { + err = en.WriteInt(z.PartNumbers[za0004]) if err != nil { - err = msgp.WrapError(err, "PartActualSizes", za0007) 
+ err = msgp.WrapError(err, "PartNumbers", za0004) return } } - } - if (zb0001Mask & 0x2000) == 0 { // if not empty - // write "PartIdx" - err = en.Append(0xa7, 0x50, 0x61, 0x72, 0x74, 0x49, 0x64, 0x78) + // write "PartETags" + err = en.Append(0xa9, 0x50, 0x61, 0x72, 0x74, 0x45, 0x54, 0x61, 0x67, 0x73) + if err != nil { + return + } + if z.PartETags == nil { // allownil: if nil + err = en.WriteNil() + if err != nil { + return + } + } else { + err = en.WriteArrayHeader(uint32(len(z.PartETags))) + if err != nil { + err = msgp.WrapError(err, "PartETags") + return + } + for za0005 := range z.PartETags { + err = en.WriteString(z.PartETags[za0005]) + if err != nil { + err = msgp.WrapError(err, "PartETags", za0005) + return + } + } + } + // write "PartSizes" + err = en.Append(0xa9, 0x50, 0x61, 0x72, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x73) if err != nil { return } - err = en.WriteArrayHeader(uint32(len(z.PartIndices))) + err = en.WriteArrayHeader(uint32(len(z.PartSizes))) if err != nil { - err = msgp.WrapError(err, "PartIndices") + err = msgp.WrapError(err, "PartSizes") return } - for za0008 := range z.PartIndices { - err = en.WriteBytes(z.PartIndices[za0008]) + for za0006 := range z.PartSizes { + err = en.WriteInt64(z.PartSizes[za0006]) if err != nil { - err = msgp.WrapError(err, "PartIndices", za0008) + err = msgp.WrapError(err, "PartSizes", za0006) return } } - } - // write "Size" - err = en.Append(0xa4, 0x53, 0x69, 0x7a, 0x65) - if err != nil { - return - } - err = en.WriteInt64(z.Size) - if err != nil { - err = msgp.WrapError(err, "Size") - return - } - // write "MTime" - err = en.Append(0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) - if err != nil { - return - } - err = en.WriteInt64(z.ModTime) - if err != nil { - err = msgp.WrapError(err, "ModTime") - return - } - // write "MetaSys" - err = en.Append(0xa7, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x79, 0x73) - if err != nil { - return - } - if z.MetaSys == nil { // allownil: if nil - err = en.WriteNil() + // write "PartASizes" + err = en.Append(0xaa, 0x50, 0x61, 0x72, 0x74, 0x41, 0x53, 0x69, 0x7a, 0x65, 0x73) if err != nil { return } - } else { - err = en.WriteMapHeader(uint32(len(z.MetaSys))) - if err != nil { - err = msgp.WrapError(err, "MetaSys") - return + if z.PartActualSizes == nil { // allownil: if nil + err = en.WriteNil() + if err != nil { + return + } + } else { + err = en.WriteArrayHeader(uint32(len(z.PartActualSizes))) + if err != nil { + err = msgp.WrapError(err, "PartActualSizes") + return + } + for za0007 := range z.PartActualSizes { + err = en.WriteInt64(z.PartActualSizes[za0007]) + if err != nil { + err = msgp.WrapError(err, "PartActualSizes", za0007) + return + } + } } - for za0009, za0010 := range z.MetaSys { - err = en.WriteString(za0009) + if (zb0001Mask & 0x2000) == 0 { // if not omitted + // write "PartIdx" + err = en.Append(0xa7, 0x50, 0x61, 0x72, 0x74, 0x49, 0x64, 0x78) if err != nil { - err = msgp.WrapError(err, "MetaSys") return } - err = en.WriteBytes(za0010) + err = en.WriteArrayHeader(uint32(len(z.PartIndices))) if err != nil { - err = msgp.WrapError(err, "MetaSys", za0009) + err = msgp.WrapError(err, "PartIndices") return } + for za0008 := range z.PartIndices { + err = en.WriteBytes(z.PartIndices[za0008]) + if err != nil { + err = msgp.WrapError(err, "PartIndices", za0008) + return + } + } } - } - // write "MetaUsr" - err = en.Append(0xa7, 0x4d, 0x65, 0x74, 0x61, 0x55, 0x73, 0x72) - if err != nil { - return - } - if z.MetaUser == nil { // allownil: if nil - err = en.WriteNil() + // write "Size" + err = en.Append(0xa4, 0x53, 0x69, 
0x7a, 0x65) if err != nil { return } - } else { - err = en.WriteMapHeader(uint32(len(z.MetaUser))) + err = en.WriteInt64(z.Size) if err != nil { - err = msgp.WrapError(err, "MetaUser") + err = msgp.WrapError(err, "Size") return } - for za0011, za0012 := range z.MetaUser { - err = en.WriteString(za0011) + // write "MTime" + err = en.Append(0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteInt64(z.ModTime) + if err != nil { + err = msgp.WrapError(err, "ModTime") + return + } + // write "MetaSys" + err = en.Append(0xa7, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x79, 0x73) + if err != nil { + return + } + if z.MetaSys == nil { // allownil: if nil + err = en.WriteNil() if err != nil { - err = msgp.WrapError(err, "MetaUser") return } - err = en.WriteString(za0012) + } else { + err = en.WriteMapHeader(uint32(len(z.MetaSys))) if err != nil { - err = msgp.WrapError(err, "MetaUser", za0011) + err = msgp.WrapError(err, "MetaSys") return } + for za0009, za0010 := range z.MetaSys { + err = en.WriteString(za0009) + if err != nil { + err = msgp.WrapError(err, "MetaSys") + return + } + err = en.WriteBytes(za0010) + if err != nil { + err = msgp.WrapError(err, "MetaSys", za0009) + return + } + } + } + // write "MetaUsr" + err = en.Append(0xa7, 0x4d, 0x65, 0x74, 0x61, 0x55, 0x73, 0x72) + if err != nil { + return + } + if z.MetaUser == nil { // allownil: if nil + err = en.WriteNil() + if err != nil { + return + } + } else { + err = en.WriteMapHeader(uint32(len(z.MetaUser))) + if err != nil { + err = msgp.WrapError(err, "MetaUser") + return + } + for za0011, za0012 := range z.MetaUser { + err = en.WriteString(za0011) + if err != nil { + err = msgp.WrapError(err, "MetaUser") + return + } + err = en.WriteString(za0012) + if err != nil { + err = msgp.WrapError(err, "MetaUser", za0011) + return + } + } } } return @@ -1359,7 +1388,7 @@ func (z *xlMetaV2Object) EncodeMsg(en *msgp.Writer) (err error) { // MarshalMsg implements msgp.Marshaler func (z *xlMetaV2Object) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // omitempty: check for empty values + // check for omitted fields zb0001Len := uint32(18) var zb0001Mask uint32 /* 18 bits */ _ = zb0001Mask @@ -1369,105 +1398,106 @@ func (z *xlMetaV2Object) MarshalMsg(b []byte) (o []byte, err error) { } // variable map header, size zb0001Len o = msgp.AppendMapHeader(o, zb0001Len) - if zb0001Len == 0 { - return - } - // string "ID" - o = append(o, 0xa2, 0x49, 0x44) - o = msgp.AppendBytes(o, (z.VersionID)[:]) - // string "DDir" - o = append(o, 0xa4, 0x44, 0x44, 0x69, 0x72) - o = msgp.AppendBytes(o, (z.DataDir)[:]) - // string "EcAlgo" - o = append(o, 0xa6, 0x45, 0x63, 0x41, 0x6c, 0x67, 0x6f) - o = msgp.AppendUint8(o, uint8(z.ErasureAlgorithm)) - // string "EcM" - o = append(o, 0xa3, 0x45, 0x63, 0x4d) - o = msgp.AppendInt(o, z.ErasureM) - // string "EcN" - o = append(o, 0xa3, 0x45, 0x63, 0x4e) - o = msgp.AppendInt(o, z.ErasureN) - // string "EcBSize" - o = append(o, 0xa7, 0x45, 0x63, 0x42, 0x53, 0x69, 0x7a, 0x65) - o = msgp.AppendInt64(o, z.ErasureBlockSize) - // string "EcIndex" - o = append(o, 0xa7, 0x45, 0x63, 0x49, 0x6e, 0x64, 0x65, 0x78) - o = msgp.AppendInt(o, z.ErasureIndex) - // string "EcDist" - o = append(o, 0xa6, 0x45, 0x63, 0x44, 0x69, 0x73, 0x74) - o = msgp.AppendArrayHeader(o, uint32(len(z.ErasureDist))) - for za0003 := range z.ErasureDist { - o = msgp.AppendUint8(o, z.ErasureDist[za0003]) - } - // string "CSumAlgo" - o = append(o, 0xa8, 0x43, 0x53, 0x75, 0x6d, 0x41, 0x6c, 0x67, 0x6f) - o = 
msgp.AppendUint8(o, uint8(z.BitrotChecksumAlgo)) - // string "PartNums" - o = append(o, 0xa8, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.PartNumbers))) - for za0004 := range z.PartNumbers { - o = msgp.AppendInt(o, z.PartNumbers[za0004]) - } - // string "PartETags" - o = append(o, 0xa9, 0x50, 0x61, 0x72, 0x74, 0x45, 0x54, 0x61, 0x67, 0x73) - if z.PartETags == nil { // allownil: if nil - o = msgp.AppendNil(o) - } else { - o = msgp.AppendArrayHeader(o, uint32(len(z.PartETags))) - for za0005 := range z.PartETags { - o = msgp.AppendString(o, z.PartETags[za0005]) + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // string "ID" + o = append(o, 0xa2, 0x49, 0x44) + o = msgp.AppendBytes(o, (z.VersionID)[:]) + // string "DDir" + o = append(o, 0xa4, 0x44, 0x44, 0x69, 0x72) + o = msgp.AppendBytes(o, (z.DataDir)[:]) + // string "EcAlgo" + o = append(o, 0xa6, 0x45, 0x63, 0x41, 0x6c, 0x67, 0x6f) + o = msgp.AppendUint8(o, uint8(z.ErasureAlgorithm)) + // string "EcM" + o = append(o, 0xa3, 0x45, 0x63, 0x4d) + o = msgp.AppendInt(o, z.ErasureM) + // string "EcN" + o = append(o, 0xa3, 0x45, 0x63, 0x4e) + o = msgp.AppendInt(o, z.ErasureN) + // string "EcBSize" + o = append(o, 0xa7, 0x45, 0x63, 0x42, 0x53, 0x69, 0x7a, 0x65) + o = msgp.AppendInt64(o, z.ErasureBlockSize) + // string "EcIndex" + o = append(o, 0xa7, 0x45, 0x63, 0x49, 0x6e, 0x64, 0x65, 0x78) + o = msgp.AppendInt(o, z.ErasureIndex) + // string "EcDist" + o = append(o, 0xa6, 0x45, 0x63, 0x44, 0x69, 0x73, 0x74) + o = msgp.AppendArrayHeader(o, uint32(len(z.ErasureDist))) + for za0003 := range z.ErasureDist { + o = msgp.AppendUint8(o, z.ErasureDist[za0003]) + } + // string "CSumAlgo" + o = append(o, 0xa8, 0x43, 0x53, 0x75, 0x6d, 0x41, 0x6c, 0x67, 0x6f) + o = msgp.AppendUint8(o, uint8(z.BitrotChecksumAlgo)) + // string "PartNums" + o = append(o, 0xa8, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.PartNumbers))) + for za0004 := range z.PartNumbers { + o = msgp.AppendInt(o, z.PartNumbers[za0004]) + } + // string "PartETags" + o = append(o, 0xa9, 0x50, 0x61, 0x72, 0x74, 0x45, 0x54, 0x61, 0x67, 0x73) + if z.PartETags == nil { // allownil: if nil + o = msgp.AppendNil(o) + } else { + o = msgp.AppendArrayHeader(o, uint32(len(z.PartETags))) + for za0005 := range z.PartETags { + o = msgp.AppendString(o, z.PartETags[za0005]) + } } - } - // string "PartSizes" - o = append(o, 0xa9, 0x50, 0x61, 0x72, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.PartSizes))) - for za0006 := range z.PartSizes { - o = msgp.AppendInt64(o, z.PartSizes[za0006]) - } - // string "PartASizes" - o = append(o, 0xaa, 0x50, 0x61, 0x72, 0x74, 0x41, 0x53, 0x69, 0x7a, 0x65, 0x73) - if z.PartActualSizes == nil { // allownil: if nil - o = msgp.AppendNil(o) - } else { - o = msgp.AppendArrayHeader(o, uint32(len(z.PartActualSizes))) - for za0007 := range z.PartActualSizes { - o = msgp.AppendInt64(o, z.PartActualSizes[za0007]) + // string "PartSizes" + o = append(o, 0xa9, 0x50, 0x61, 0x72, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.PartSizes))) + for za0006 := range z.PartSizes { + o = msgp.AppendInt64(o, z.PartSizes[za0006]) } - } - if (zb0001Mask & 0x2000) == 0 { // if not empty - // string "PartIdx" - o = append(o, 0xa7, 0x50, 0x61, 0x72, 0x74, 0x49, 0x64, 0x78) - o = msgp.AppendArrayHeader(o, uint32(len(z.PartIndices))) - for za0008 := range z.PartIndices { - o = msgp.AppendBytes(o, z.PartIndices[za0008]) + // string 
"PartASizes" + o = append(o, 0xaa, 0x50, 0x61, 0x72, 0x74, 0x41, 0x53, 0x69, 0x7a, 0x65, 0x73) + if z.PartActualSizes == nil { // allownil: if nil + o = msgp.AppendNil(o) + } else { + o = msgp.AppendArrayHeader(o, uint32(len(z.PartActualSizes))) + for za0007 := range z.PartActualSizes { + o = msgp.AppendInt64(o, z.PartActualSizes[za0007]) + } } - } - // string "Size" - o = append(o, 0xa4, 0x53, 0x69, 0x7a, 0x65) - o = msgp.AppendInt64(o, z.Size) - // string "MTime" - o = append(o, 0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) - o = msgp.AppendInt64(o, z.ModTime) - // string "MetaSys" - o = append(o, 0xa7, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x79, 0x73) - if z.MetaSys == nil { // allownil: if nil - o = msgp.AppendNil(o) - } else { - o = msgp.AppendMapHeader(o, uint32(len(z.MetaSys))) - for za0009, za0010 := range z.MetaSys { - o = msgp.AppendString(o, za0009) - o = msgp.AppendBytes(o, za0010) + if (zb0001Mask & 0x2000) == 0 { // if not omitted + // string "PartIdx" + o = append(o, 0xa7, 0x50, 0x61, 0x72, 0x74, 0x49, 0x64, 0x78) + o = msgp.AppendArrayHeader(o, uint32(len(z.PartIndices))) + for za0008 := range z.PartIndices { + o = msgp.AppendBytes(o, z.PartIndices[za0008]) + } } - } - // string "MetaUsr" - o = append(o, 0xa7, 0x4d, 0x65, 0x74, 0x61, 0x55, 0x73, 0x72) - if z.MetaUser == nil { // allownil: if nil - o = msgp.AppendNil(o) - } else { - o = msgp.AppendMapHeader(o, uint32(len(z.MetaUser))) - for za0011, za0012 := range z.MetaUser { - o = msgp.AppendString(o, za0011) - o = msgp.AppendString(o, za0012) + // string "Size" + o = append(o, 0xa4, 0x53, 0x69, 0x7a, 0x65) + o = msgp.AppendInt64(o, z.Size) + // string "MTime" + o = append(o, 0xa5, 0x4d, 0x54, 0x69, 0x6d, 0x65) + o = msgp.AppendInt64(o, z.ModTime) + // string "MetaSys" + o = append(o, 0xa7, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x79, 0x73) + if z.MetaSys == nil { // allownil: if nil + o = msgp.AppendNil(o) + } else { + o = msgp.AppendMapHeader(o, uint32(len(z.MetaSys))) + for za0009, za0010 := range z.MetaSys { + o = msgp.AppendString(o, za0009) + o = msgp.AppendBytes(o, za0010) + } + } + // string "MetaUsr" + o = append(o, 0xa7, 0x4d, 0x65, 0x74, 0x61, 0x55, 0x73, 0x72) + if z.MetaUser == nil { // allownil: if nil + o = msgp.AppendNil(o) + } else { + o = msgp.AppendMapHeader(o, uint32(len(z.MetaUser))) + for za0011, za0012 := range z.MetaUser { + o = msgp.AppendString(o, za0011) + o = msgp.AppendString(o, za0012) + } } } return @@ -1483,6 +1513,8 @@ func (z *xlMetaV2Object) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err) return } + var zb0001Mask uint8 /* 1 bits */ + _ = zb0001Mask for zb0001 > 0 { zb0001-- field, bts, err = msgp.ReadMapKeyZC(bts) @@ -1596,7 +1628,7 @@ func (z *xlMetaV2Object) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "PartETags") return } - if cap(z.PartETags) >= int(zb0006) { + if z.PartETags != nil && cap(z.PartETags) >= int(zb0006) { z.PartETags = (z.PartETags)[:zb0006] } else { z.PartETags = make([]string, zb0006) @@ -1639,7 +1671,7 @@ func (z *xlMetaV2Object) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "PartActualSizes") return } - if cap(z.PartActualSizes) >= int(zb0008) { + if z.PartActualSizes != nil && cap(z.PartActualSizes) >= int(zb0008) { z.PartActualSizes = (z.PartActualSizes)[:zb0008] } else { z.PartActualSizes = make([]int64, zb0008) @@ -1671,6 +1703,7 @@ func (z *xlMetaV2Object) UnmarshalMsg(bts []byte) (o []byte, err error) { return } } + zb0001Mask |= 0x1 case "Size": z.Size, bts, err = msgp.ReadInt64Bytes(bts) if err != 
nil { @@ -1697,14 +1730,12 @@ func (z *xlMetaV2Object) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.MetaSys == nil { z.MetaSys = make(map[string][]byte, zb0010) } else if len(z.MetaSys) > 0 { - for key := range z.MetaSys { - delete(z.MetaSys, key) - } + clear(z.MetaSys) } for zb0010 > 0 { - var za0009 string var za0010 []byte zb0010-- + var za0009 string za0009, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "MetaSys") @@ -1732,14 +1763,12 @@ func (z *xlMetaV2Object) UnmarshalMsg(bts []byte) (o []byte, err error) { if z.MetaUser == nil { z.MetaUser = make(map[string]string, zb0011) } else if len(z.MetaUser) > 0 { - for key := range z.MetaUser { - delete(z.MetaUser, key) - } + clear(z.MetaUser) } for zb0011 > 0 { - var za0011 string var za0012 string zb0011-- + var za0011 string za0011, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "MetaUser") @@ -1761,6 +1790,11 @@ func (z *xlMetaV2Object) UnmarshalMsg(bts []byte) (o []byte, err error) { } } } + // Clear omitted fields. + if (zb0001Mask & 0x1) == 0 { + z.PartIndices = nil + } + o = bts return } @@ -1802,6 +1836,8 @@ func (z *xlMetaV2Version) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err) return } + var zb0001Mask uint8 /* 3 bits */ + _ = zb0001Mask for zb0001 > 0 { zb0001-- field, err = dc.ReadMapKeyPtr() @@ -1838,6 +1874,7 @@ func (z *xlMetaV2Version) DecodeMsg(dc *msgp.Reader) (err error) { return } } + zb0001Mask |= 0x1 case "V2Obj": if dc.IsNil() { err = dc.ReadNil() @@ -1856,6 +1893,7 @@ func (z *xlMetaV2Version) DecodeMsg(dc *msgp.Reader) (err error) { return } } + zb0001Mask |= 0x2 case "DelObj": if dc.IsNil() { err = dc.ReadNil() @@ -1874,6 +1912,7 @@ func (z *xlMetaV2Version) DecodeMsg(dc *msgp.Reader) (err error) { return } } + zb0001Mask |= 0x4 case "v": z.WrittenByVersion, err = dc.ReadUint64() if err != nil { @@ -1888,12 +1927,24 @@ func (z *xlMetaV2Version) DecodeMsg(dc *msgp.Reader) (err error) { } } } + // Clear omitted fields. 
+ if zb0001Mask != 0x7 { + if (zb0001Mask & 0x1) == 0 { + z.ObjectV1 = nil + } + if (zb0001Mask & 0x2) == 0 { + z.ObjectV2 = nil + } + if (zb0001Mask & 0x4) == 0 { + z.DeleteMarker = nil + } + } return } // EncodeMsg implements msgp.Encodable func (z *xlMetaV2Version) EncodeMsg(en *msgp.Writer) (err error) { - // omitempty: check for empty values + // check for omitted fields zb0001Len := uint32(5) var zb0001Mask uint8 /* 5 bits */ _ = zb0001Mask @@ -1914,93 +1965,94 @@ func (z *xlMetaV2Version) EncodeMsg(en *msgp.Writer) (err error) { if err != nil { return } - if zb0001Len == 0 { - return - } - // write "Type" - err = en.Append(0xa4, 0x54, 0x79, 0x70, 0x65) - if err != nil { - return - } - err = en.WriteUint8(uint8(z.Type)) - if err != nil { - err = msgp.WrapError(err, "Type") - return - } - if (zb0001Mask & 0x2) == 0 { // if not empty - // write "V1Obj" - err = en.Append(0xa5, 0x56, 0x31, 0x4f, 0x62, 0x6a) + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // write "Type" + err = en.Append(0xa4, 0x54, 0x79, 0x70, 0x65) if err != nil { return } - if z.ObjectV1 == nil { - err = en.WriteNil() + err = en.WriteUint8(uint8(z.Type)) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + if (zb0001Mask & 0x2) == 0 { // if not omitted + // write "V1Obj" + err = en.Append(0xa5, 0x56, 0x31, 0x4f, 0x62, 0x6a) if err != nil { return } - } else { - err = z.ObjectV1.EncodeMsg(en) - if err != nil { - err = msgp.WrapError(err, "ObjectV1") - return + if z.ObjectV1 == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.ObjectV1.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "ObjectV1") + return + } } } - } - if (zb0001Mask & 0x4) == 0 { // if not empty - // write "V2Obj" - err = en.Append(0xa5, 0x56, 0x32, 0x4f, 0x62, 0x6a) - if err != nil { - return - } - if z.ObjectV2 == nil { - err = en.WriteNil() + if (zb0001Mask & 0x4) == 0 { // if not omitted + // write "V2Obj" + err = en.Append(0xa5, 0x56, 0x32, 0x4f, 0x62, 0x6a) if err != nil { return } - } else { - err = z.ObjectV2.EncodeMsg(en) + if z.ObjectV2 == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.ObjectV2.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "ObjectV2") + return + } + } + } + if (zb0001Mask & 0x8) == 0 { // if not omitted + // write "DelObj" + err = en.Append(0xa6, 0x44, 0x65, 0x6c, 0x4f, 0x62, 0x6a) if err != nil { - err = msgp.WrapError(err, "ObjectV2") return } + if z.DeleteMarker == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = z.DeleteMarker.EncodeMsg(en) + if err != nil { + err = msgp.WrapError(err, "DeleteMarker") + return + } + } } - } - if (zb0001Mask & 0x8) == 0 { // if not empty - // write "DelObj" - err = en.Append(0xa6, 0x44, 0x65, 0x6c, 0x4f, 0x62, 0x6a) + // write "v" + err = en.Append(0xa1, 0x76) if err != nil { return } - if z.DeleteMarker == nil { - err = en.WriteNil() - if err != nil { - return - } - } else { - err = z.DeleteMarker.EncodeMsg(en) - if err != nil { - err = msgp.WrapError(err, "DeleteMarker") - return - } + err = en.WriteUint64(z.WrittenByVersion) + if err != nil { + err = msgp.WrapError(err, "WrittenByVersion") + return } } - // write "v" - err = en.Append(0xa1, 0x76) - if err != nil { - return - } - err = en.WriteUint64(z.WrittenByVersion) - if err != nil { - err = msgp.WrapError(err, "WrittenByVersion") - return - } return } // MarshalMsg implements msgp.Marshaler func (z *xlMetaV2Version) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, 
z.Msgsize()) - // omitempty: check for empty values + // check for omitted fields zb0001Len := uint32(5) var zb0001Mask uint8 /* 5 bits */ _ = zb0001Mask @@ -2018,54 +2070,55 @@ func (z *xlMetaV2Version) MarshalMsg(b []byte) (o []byte, err error) { } // variable map header, size zb0001Len o = append(o, 0x80|uint8(zb0001Len)) - if zb0001Len == 0 { - return - } - // string "Type" - o = append(o, 0xa4, 0x54, 0x79, 0x70, 0x65) - o = msgp.AppendUint8(o, uint8(z.Type)) - if (zb0001Mask & 0x2) == 0 { // if not empty - // string "V1Obj" - o = append(o, 0xa5, 0x56, 0x31, 0x4f, 0x62, 0x6a) - if z.ObjectV1 == nil { - o = msgp.AppendNil(o) - } else { - o, err = z.ObjectV1.MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "ObjectV1") - return + + // skip if no fields are to be emitted + if zb0001Len != 0 { + // string "Type" + o = append(o, 0xa4, 0x54, 0x79, 0x70, 0x65) + o = msgp.AppendUint8(o, uint8(z.Type)) + if (zb0001Mask & 0x2) == 0 { // if not omitted + // string "V1Obj" + o = append(o, 0xa5, 0x56, 0x31, 0x4f, 0x62, 0x6a) + if z.ObjectV1 == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.ObjectV1.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "ObjectV1") + return + } } } - } - if (zb0001Mask & 0x4) == 0 { // if not empty - // string "V2Obj" - o = append(o, 0xa5, 0x56, 0x32, 0x4f, 0x62, 0x6a) - if z.ObjectV2 == nil { - o = msgp.AppendNil(o) - } else { - o, err = z.ObjectV2.MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "ObjectV2") - return + if (zb0001Mask & 0x4) == 0 { // if not omitted + // string "V2Obj" + o = append(o, 0xa5, 0x56, 0x32, 0x4f, 0x62, 0x6a) + if z.ObjectV2 == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.ObjectV2.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "ObjectV2") + return + } } } - } - if (zb0001Mask & 0x8) == 0 { // if not empty - // string "DelObj" - o = append(o, 0xa6, 0x44, 0x65, 0x6c, 0x4f, 0x62, 0x6a) - if z.DeleteMarker == nil { - o = msgp.AppendNil(o) - } else { - o, err = z.DeleteMarker.MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "DeleteMarker") - return + if (zb0001Mask & 0x8) == 0 { // if not omitted + // string "DelObj" + o = append(o, 0xa6, 0x44, 0x65, 0x6c, 0x4f, 0x62, 0x6a) + if z.DeleteMarker == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.DeleteMarker.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "DeleteMarker") + return + } } } + // string "v" + o = append(o, 0xa1, 0x76) + o = msgp.AppendUint64(o, z.WrittenByVersion) } - // string "v" - o = append(o, 0xa1, 0x76) - o = msgp.AppendUint64(o, z.WrittenByVersion) return } @@ -2079,6 +2132,8 @@ func (z *xlMetaV2Version) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err) return } + var zb0001Mask uint8 /* 3 bits */ + _ = zb0001Mask for zb0001 > 0 { zb0001-- field, bts, err = msgp.ReadMapKeyZC(bts) @@ -2114,6 +2169,7 @@ func (z *xlMetaV2Version) UnmarshalMsg(bts []byte) (o []byte, err error) { return } } + zb0001Mask |= 0x1 case "V2Obj": if msgp.IsNil(bts) { bts, err = msgp.ReadNilBytes(bts) @@ -2131,6 +2187,7 @@ func (z *xlMetaV2Version) UnmarshalMsg(bts []byte) (o []byte, err error) { return } } + zb0001Mask |= 0x2 case "DelObj": if msgp.IsNil(bts) { bts, err = msgp.ReadNilBytes(bts) @@ -2148,6 +2205,7 @@ func (z *xlMetaV2Version) UnmarshalMsg(bts []byte) (o []byte, err error) { return } } + zb0001Mask |= 0x4 case "v": z.WrittenByVersion, bts, err = msgp.ReadUint64Bytes(bts) if err != nil { @@ -2162,6 +2220,18 @@ func (z *xlMetaV2Version) UnmarshalMsg(bts []byte) (o []byte, err error) 
{ } } } + // Clear omitted fields. + if zb0001Mask != 0x7 { + if (zb0001Mask & 0x1) == 0 { + z.ObjectV1 = nil + } + if (zb0001Mask & 0x2) == 0 { + z.ObjectV2 = nil + } + if (zb0001Mask & 0x4) == 0 { + z.DeleteMarker = nil + } + } o = bts return } @@ -2198,8 +2268,8 @@ func (z *xlMetaV2VersionHeader) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err) return } - if zb0001 != 5 { - err = msgp.ArrayError{Wanted: 5, Got: zb0001} + if zb0001 != 7 { + err = msgp.ArrayError{Wanted: 7, Got: zb0001} return } err = dc.ReadExactBytes((z.VersionID)[:]) @@ -2235,13 +2305,23 @@ func (z *xlMetaV2VersionHeader) DecodeMsg(dc *msgp.Reader) (err error) { } z.Flags = xlFlags(zb0003) } + z.EcN, err = dc.ReadUint8() + if err != nil { + err = msgp.WrapError(err, "EcN") + return + } + z.EcM, err = dc.ReadUint8() + if err != nil { + err = msgp.WrapError(err, "EcM") + return + } return } // EncodeMsg implements msgp.Encodable func (z *xlMetaV2VersionHeader) EncodeMsg(en *msgp.Writer) (err error) { - // array header, size 5 - err = en.Append(0x95) + // array header, size 7 + err = en.Append(0x97) if err != nil { return } @@ -2270,19 +2350,31 @@ func (z *xlMetaV2VersionHeader) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "Flags") return } + err = en.WriteUint8(z.EcN) + if err != nil { + err = msgp.WrapError(err, "EcN") + return + } + err = en.WriteUint8(z.EcM) + if err != nil { + err = msgp.WrapError(err, "EcM") + return + } return } // MarshalMsg implements msgp.Marshaler func (z *xlMetaV2VersionHeader) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // array header, size 5 - o = append(o, 0x95) + // array header, size 7 + o = append(o, 0x97) o = msgp.AppendBytes(o, (z.VersionID)[:]) o = msgp.AppendInt64(o, z.ModTime) o = msgp.AppendBytes(o, (z.Signature)[:]) o = msgp.AppendUint8(o, uint8(z.Type)) o = msgp.AppendUint8(o, uint8(z.Flags)) + o = msgp.AppendUint8(o, z.EcN) + o = msgp.AppendUint8(o, z.EcM) return } @@ -2294,8 +2386,8 @@ func (z *xlMetaV2VersionHeader) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err) return } - if zb0001 != 5 { - err = msgp.ArrayError{Wanted: 5, Got: zb0001} + if zb0001 != 7 { + err = msgp.ArrayError{Wanted: 7, Got: zb0001} return } bts, err = msgp.ReadExactBytes(bts, (z.VersionID)[:]) @@ -2331,12 +2423,22 @@ func (z *xlMetaV2VersionHeader) UnmarshalMsg(bts []byte) (o []byte, err error) { } z.Flags = xlFlags(zb0003) } + z.EcN, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "EcN") + return + } + z.EcM, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "EcM") + return + } o = bts return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *xlMetaV2VersionHeader) Msgsize() (s int) { - s = 1 + msgp.ArrayHeaderSize + (16 * (msgp.ByteSize)) + msgp.Int64Size + msgp.ArrayHeaderSize + (4 * (msgp.ByteSize)) + msgp.Uint8Size + msgp.Uint8Size + s = 1 + msgp.ArrayHeaderSize + (16 * (msgp.ByteSize)) + msgp.Int64Size + msgp.ArrayHeaderSize + (4 * (msgp.ByteSize)) + msgp.Uint8Size + msgp.Uint8Size + msgp.Uint8Size + msgp.Uint8Size return } diff --git a/cmd/xl-storage-format-v2_gen_test.go b/cmd/xl-storage-format-v2_gen_test.go index afcc74dae6ac6..fa7a667007e6a 100644 --- a/cmd/xl-storage-format-v2_gen_test.go +++ b/cmd/xl-storage-format-v2_gen_test.go @@ -1,7 +1,7 @@ -package cmd - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
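The wire change to xlMetaV2VersionHeader above is compact but worth spelling out: the header is serialized as a msgpack fixarray, whose one-byte prefix is 0x90 | length, so growing it from five to seven elements turns the literal 0x95 into 0x97, with the two new uint8 EC fields (EcN, EcM) appended at the tail. The xlHeaderVersion bump from 2 to 3 earlier in the patch is presumably what lets readers tell the two layouts apart. A tiny check of the prefixes, for illustration only:

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// msgpack fixarray prefix: 0x90 | n, for arrays of up to 15 elements.
	fmt.Printf("%#x\n", msgp.AppendArrayHeader(nil, 5)[0]) // 0x95 - old header layout
	fmt.Printf("%#x\n", msgp.AppendArrayHeader(nil, 7)[0]) // 0x97 - layout with EcN/EcM
}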
+package cmd + import ( "bytes" "testing" diff --git a/cmd/xl-storage-format-v2_test.go b/cmd/xl-storage-format-v2_test.go index 29bfa5f9d0c5f..6a9b15a3f4d6e 100644 --- a/cmd/xl-storage-format-v2_test.go +++ b/cmd/xl-storage-format-v2_test.go @@ -21,8 +21,8 @@ import ( "bufio" "bytes" "compress/gzip" - "context" "encoding/base64" + "encoding/binary" "encoding/json" "fmt" "io" @@ -429,7 +429,7 @@ func Benchmark_mergeXLV2Versions(b *testing.B) { b.ReportAllocs() b.ResetTimer() b.SetBytes(855) // number of versions... - for i := 0; i < b.N; i++ { + for b.Loop() { mergeXLV2Versions(8, false, 0, vers...) } }) @@ -438,7 +438,7 @@ func Benchmark_mergeXLV2Versions(b *testing.B) { b.ReportAllocs() b.ResetTimer() b.SetBytes(855) // number of versions... - for i := 0; i < b.N; i++ { + for b.Loop() { mergeXLV2Versions(8, false, 1, vers...) } }) @@ -447,7 +447,7 @@ func Benchmark_mergeXLV2Versions(b *testing.B) { b.ReportAllocs() b.ResetTimer() b.SetBytes(855) // number of versions... - for i := 0; i < b.N; i++ { + for b.Loop() { mergeXLV2Versions(8, false, 1, vers...) } }) @@ -469,7 +469,7 @@ func Benchmark_xlMetaV2Shallow_Load(b *testing.B) { b.ReportAllocs() b.ResetTimer() b.SetBytes(855) // number of versions... - for i := 0; i < b.N; i++ { + for b.Loop() { err = xl.Load(data) if err != nil { b.Fatal(err) @@ -490,7 +490,7 @@ func Benchmark_xlMetaV2Shallow_Load(b *testing.B) { b.ReportAllocs() b.ResetTimer() b.SetBytes(855) // number of versions... - for i := 0; i < b.N; i++ { + for b.Loop() { err = xl.Load(data) if err != nil { b.Fatal(err) @@ -583,6 +583,28 @@ func Test_xlMetaV2Shallow_Load(t *testing.T) { } // t.Logf("data := %#v\n", data) }) + // Test compressed index consistency fix + t.Run("comp-index", func(t *testing.T) { + // This file has a compressed index, due to https://github.com/minio/minio/pull/20575 + // We ensure it is rewritten without an index. + // We compare this against the signature of the files stored without a version. + data, err := base64.StdEncoding.DecodeString(`WEwyIAEAAwDGAAACKgMCAcQml8QQAAAAAAAAAAAAAAAAAAAAANMYGu+UIK7akcQEofwXhAECCAjFAfyDpFR5cGUBpVYyT2Jq3gASoklExBAAAAAAAAAAAAAAAAAAAAAApEREaXLEEFTyKFqhkkXVoWn+8R1Lr2ymRWNBbGdvAaNFY00Io0VjTginRWNCU2l6ZdIAEAAAp0VjSW5kZXgBpkVjRGlzdNwAEAECAwQFBgcICQoLDA0ODxCoQ1N1bUFsZ28BqFBhcnROdW1zkgECqVBhcnRFVGFnc8CpUGFydFNpemVzktIAFtgq0gAGvb+qUGFydEFTaXplc5LSAFKb69IAGZg0p1BhcnRJZHiSxFqKm+4h9J7JCYCAgAFEABSPlBzH5g6z9gah3wOPnwLDlAGeD+os0xbjFd8O8w+TBoM8rz6bHO0KzQWtBu4GwgGSBocH6QPUSu8J5A/8gwSWtQPOtgL0euoMmAPEAKRTaXpl0gAdlemlTVRpbWXTGBrvlCCu2pGnTWV0YVN5c4K8WC1NaW5pby1JbnRlcm5hbC1hY3R1YWwtc2l6ZcQHNzA5MTIzMbxYLU1pbmlvLUludGVybmFsLWNvbXByZXNzaW9uxBVrbGF1c3Bvc3QvY29tcHJlc3MvczKnTWV0YVVzcoKsY29udGVudC10eXBlqHRleHQvY3N2pGV0YWfZIjEzYmYyMDU0NGVjN2VmY2YxNzhiYWRmNjc4NzNjODg2LTKhds5mYYMqzv8Vdtk=`) + if err != nil { + t.Fatal(err) + } + var xl xlMetaV2 + err = xl.Load(data) + if err != nil { + t.Fatal(err) + } + for _, v := range xl.versions { + // Signature should match + if binary.BigEndian.Uint32(v.header.Signature[:]) != 0x8e5a6406 { + t.Log(v.header.String()) + t.Fatalf("invalid signature 0x%x", binary.BigEndian.Uint32(v.header.Signature[:])) + } + } + }) } func Test_xlMetaV2Shallow_LoadTimeStamp(t *testing.T) { @@ -996,7 +1018,7 @@ func Test_mergeXLV2Versions2(t *testing.T) { for _, test := range testCases { t.Run(test.name, func(t *testing.T) { // Run multiple times, shuffling the input order. 
- for i := int64(0); i < 50; i++ { + for i := range int64(50) { t.Run(fmt.Sprint(i), func(t *testing.T) { rng := rand.New(rand.NewSource(i)) rng.Shuffle(len(test.input), func(i, j int) { @@ -1045,7 +1067,7 @@ func Test_mergeEntryChannels(t *testing.T) { } // Shuffle... - for i := 0; i < 100; i++ { + for i := range 100 { rng := rand.New(rand.NewSource(int64(i))) rng.Shuffle(len(vers), func(i, j int) { vers[i], vers[j] = vers[j], vers[i] @@ -1059,7 +1081,7 @@ func Test_mergeEntryChannels(t *testing.T) { entries = append(entries, ch) } out := make(chan metaCacheEntry, 1) - err := mergeEntryChannels(context.Background(), entries, out, 1) + err := mergeEntryChannels(t.Context(), entries, out, 1) if err != nil { t.Fatal(err) } @@ -1152,7 +1174,7 @@ func benchmarkManyPartsOptionally(b *testing.B, allParts bool) { b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { _, err = buf.ToFileInfo("volume", "path", "", allParts) if err != nil { b.Fatal(err) diff --git a/cmd/xl-storage-format_test.go b/cmd/xl-storage-format_test.go index 19bad9d138b71..12f4b052e0b63 100644 --- a/cmd/xl-storage-format_test.go +++ b/cmd/xl-storage-format_test.go @@ -136,7 +136,7 @@ func getSampleXLMeta(totalParts int) xlMetaV1Object { xlMeta.Erasure.Checksums = make([]ChecksumInfo, totalParts) // total number of parts. xlMeta.Parts = make([]ObjectPartInfo, totalParts) - for i := 0; i < totalParts; i++ { + for i := range totalParts { // hard coding hash and algo value for the checksum, Since we are benchmarking the parsing of xl.meta the magnitude doesn't affect the test, // The magnitude doesn't make a difference, only the size does. xlMeta.AddTestObjectCheckSum(i+1, BLAKE2b512, "a23f5eff248c4372badd9f3b2455a285cd4ca86c3d9a570b091d3fc5cd7ca6d9484bbea3f8c5d8d4f84daae96874419eda578fd736455334afbac2c924b3915a") @@ -225,7 +225,6 @@ func compareXLMetaV1(t *testing.T, unMarshalXLMeta, jsoniterXLMeta xlMetaV1Objec if val != jsoniterVal { t.Errorf("Expected the value for Meta data key \"%s\" to be \"%s\", but got \"%s\".", key, val, jsoniterVal) } - } } @@ -379,7 +378,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { b.Run(fmt.Sprint(size, "-versions"), func(b *testing.B) { var xl xlMetaV2 ids := make([]string, size) - for i := 0; i < size; i++ { + for i := range size { fi.VersionID = mustGetUUID() fi.DataDir = mustGetUUID() ids[i] = fi.VersionID @@ -398,7 +397,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { b.SetBytes(int64(size)) b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { // Load... xl = xlMetaV2{} err := xl.Load(enc) @@ -425,7 +424,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { b.SetBytes(int64(size)) b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { // Load... xl = xlMetaV2{} err := xl.Load(enc) @@ -450,7 +449,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { b.SetBytes(int64(size)) b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { // Load... xl = xlMetaV2{} err := xl.Load(enc) @@ -477,7 +476,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { b.SetBytes(int64(size)) b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { // Load... xl = xlMetaV2{} err := xl.Load(enc) @@ -495,7 +494,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { b.SetBytes(int64(size)) b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { // Load... 
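The test churn in these hunks is mostly a sweep to newer Go testing idioms: for b.Loop() replaces the classic for i := 0; i < b.N; i++ benchmark loop, t.Context() replaces context.Background(), and counting loops become for i := range n. b.Loop and t.Context require Go 1.24 or newer; range over an integer needs Go 1.22. A compact sketch of all three, using throwaway names:

package example

import "testing"

func BenchmarkSketch(b *testing.B) {
	data := make([]byte, 1024) // setup before the loop is excluded from the measurement

	// b.Loop handles the iteration count and timer for us.
	for b.Loop() {
		_ = append([]byte(nil), data...)
	}
}

func TestSketch(t *testing.T) {
	// t.Context returns a context that is canceled as the test finishes.
	ctx := t.Context()

	for i := range 3 { // counts 0, 1, 2
		if ctx.Err() != nil {
			t.Fatalf("context canceled early at iteration %d", i)
		}
	}
}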
xl = xlMetaV2{} err := xl.Load(enc) @@ -513,7 +512,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { b.SetBytes(int64(size)) b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { buf, _, _ := isIndexedMetaV2(enc) if buf == nil { b.Fatal("buf == nil") @@ -528,7 +527,7 @@ func BenchmarkXlMetaV2Shallow(b *testing.B) { b.SetBytes(int64(size)) b.ResetTimer() b.ReportAllocs() - for i := 0; i < b.N; i++ { + for b.Loop() { buf, _, _ := isIndexedMetaV2(enc) if buf == nil { b.Fatal("buf == nil") diff --git a/cmd/xl-storage-meta-inline.go b/cmd/xl-storage-meta-inline.go index d8259c5553231..06ed65e8d941d 100644 --- a/cmd/xl-storage-meta-inline.go +++ b/cmd/xl-storage-meta-inline.go @@ -20,8 +20,8 @@ package cmd import ( "errors" "fmt" + "slices" - "github.com/minio/minio/internal/logger" "github.com/tinylib/msgp/msgp" ) @@ -57,7 +57,7 @@ func (x xlMetaInlineData) find(key string) []byte { if err != nil || sz == 0 { return nil } - for i := uint32(0); i < sz; i++ { + for range sz { var found []byte found, buf, err = msgp.ReadMapKeyZC(buf) if err != nil || sz == 0 { @@ -92,7 +92,7 @@ func (x xlMetaInlineData) validate() error { return fmt.Errorf("xlMetaInlineData: %w", err) } - for i := uint32(0); i < sz; i++ { + for i := range sz { var key []byte key, buf, err = msgp.ReadMapKeyZC(buf) if err != nil { @@ -132,7 +132,7 @@ func (x *xlMetaInlineData) repair() { // Remove all current data keys := make([][]byte, 0, sz) vals := make([][]byte, 0, sz) - for i := uint32(0); i < sz; i++ { + for range sz { var key, val []byte key, buf, err = msgp.ReadMapKeyZC(buf) if err != nil { @@ -166,7 +166,7 @@ func (x xlMetaInlineData) list() ([]string, error) { return nil, err } keys := make([]string, 0, sz) - for i := uint32(0); i < sz; i++ { + for i := range sz { var key []byte key, buf, err = msgp.ReadMapKeyZC(buf) if err != nil { @@ -232,7 +232,7 @@ func (x *xlMetaInlineData) replace(key string, value []byte) { // Version plus header... plSize := 1 + msgp.MapHeaderSize replaced := false - for i := uint32(0); i < sz; i++ { + for range sz { var found, foundVal []byte var err error found, buf, err = msgp.ReadMapKeyZC(buf) @@ -277,7 +277,7 @@ func (x *xlMetaInlineData) rename(oldKey, newKey string) bool { // Version plus header... plSize := 1 + msgp.MapHeaderSize found := false - for i := uint32(0); i < sz; i++ { + for range sz { var foundKey, foundVal []byte var err error foundKey, buf, err = msgp.ReadMapKeyZC(buf) @@ -330,19 +330,14 @@ func (x *xlMetaInlineData) remove(keys ...string) bool { } } else { removeKey = func(s []byte) bool { - for _, key := range keys { - if key == string(s) { - return true - } - } - return false + return slices.Contains(keys, string(s)) } } // Version plus header... plSize := 1 + msgp.MapHeaderSize found := false - for i := uint32(0); i < sz; i++ { + for range sz { var foundKey, foundVal []byte var err error foundKey, buf, err = msgp.ReadMapKeyZC(buf) @@ -379,24 +374,24 @@ func (x *xlMetaInlineData) remove(keys ...string) bool { // xlMetaV2TrimData will trim any data from the metadata without unmarshalling it. // If any error occurs the unmodified data is returned. func xlMetaV2TrimData(buf []byte) []byte { - metaBuf, min, maj, err := checkXL2V1(buf) + metaBuf, maj, minor, err := checkXL2V1(buf) if err != nil { return buf } - if maj == 1 && min < 1 { + if maj == 1 && minor < 1 { // First version to carry data. 
return buf } // Skip header _, metaBuf, err = msgp.ReadBytesZC(metaBuf) if err != nil { - logger.LogIf(GlobalContext, err) + storageLogIf(GlobalContext, err) return buf } // Skip CRC - if maj > 1 || min >= 2 { + if maj > 1 || minor >= 2 { _, metaBuf, err = msgp.ReadUint32Bytes(metaBuf) - logger.LogIf(GlobalContext, err) + storageLogIf(GlobalContext, err) } // = input - current pos ends := len(buf) - len(metaBuf) diff --git a/cmd/xl-storage.go b/cmd/xl-storage.go index 2d9d63695ed97..35d7ac73360af 100644 --- a/cmd/xl-storage.go +++ b/cmd/xl-storage.go @@ -25,11 +25,11 @@ import ( "errors" "fmt" "io" - "net/url" "os" pathutil "path" "path/filepath" "runtime" + "slices" "strconv" "strings" "sync" @@ -43,19 +43,18 @@ import ( "github.com/klauspost/filepathx" "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/bucket/lifecycle" + "github.com/minio/minio/internal/bucket/replication" "github.com/minio/minio/internal/cachevalue" "github.com/minio/minio/internal/config/storageclass" + "github.com/minio/minio/internal/disk" xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/logger" "github.com/pkg/xattr" - "github.com/zeebo/xxh3" ) const ( nullVersionID = "null" - // Largest streams threshold per shard. - largestFileThreshold = 64 * humanize.MiByte // Optimized for HDDs // Small file threshold below which data accompanies metadata from storage layer. smallFileThreshold = 128 * humanize.KiByte // Optimized for NVMe/SSDs @@ -68,6 +67,9 @@ const ( // XL metadata file carries per object metadata. xlStorageFormatFile = "xl.meta" + + // XL metadata file backup file carries previous per object metadata. + xlStorageFormatFileBackup = "xl.meta.bkp" ) var alignedBuf []byte @@ -105,9 +107,6 @@ type xlStorage struct { diskID string - // Indexes, will be -1 until assigned a set. - poolIndex, setIndex, diskIndex int - formatFileInfo os.FileInfo formatFile string formatLegacy bool @@ -119,6 +118,10 @@ type xlStorage struct { nrRequests uint64 major, minor uint32 + fsType string + + immediatePurge chan string + immediatePurgeCancel context.CancelFunc // mutex to prevent concurrent read operations overloading walks. rotational bool @@ -195,15 +198,6 @@ func getValidPath(path string) (string, error) { return path, nil } -// Initialize a new storage disk. -func newLocalXLStorage(path string) (*xlStorage, error) { - u := url.URL{Path: path} - return newXLStorage(Endpoint{ - URL: &u, - IsLocal: true, - }, true) -} - // Make Erasure backend meta volumes. func makeFormatErasureMetaVolumes(disk StorageAPI) error { if disk == nil { @@ -221,45 +215,45 @@ func makeFormatErasureMetaVolumes(disk StorageAPI) error { // Initialize a new storage disk. 
func newXLStorage(ep Endpoint, cleanUp bool) (s *xlStorage, err error) { + immediatePurgeQueue := 100000 + if globalIsTesting || globalIsCICD { + immediatePurgeQueue = 1 + } + + ctx, cancel := context.WithCancel(GlobalContext) + s = &xlStorage{ - drivePath: ep.Path, - endpoint: ep, - globalSync: globalFSOSync, - diskInfoCache: cachevalue.New[DiskInfo](), - poolIndex: -1, - setIndex: -1, - diskIndex: -1, + drivePath: ep.Path, + endpoint: ep, + globalSync: globalFSOSync, + diskInfoCache: cachevalue.New[DiskInfo](), + immediatePurge: make(chan string, immediatePurgeQueue), + immediatePurgeCancel: cancel, } + defer func() { + if cleanUp && err == nil { + go s.cleanupTrashImmediateCallers(ctx) + } + }() + s.drivePath, err = getValidPath(ep.Path) if err != nil { s.drivePath = ep.Path return s, err } - info, err := disk.GetInfo(s.drivePath, true) + info, rootDrive, err := getDiskInfo(s.drivePath) if err != nil { return s, err } + s.major = info.Major s.minor = info.Minor + s.fsType = info.FSType - if !globalIsCICD && !globalIsErasureSD { - var rootDrive bool - if globalRootDiskThreshold > 0 { - // Use MINIO_ROOTDISK_THRESHOLD_SIZE to figure out if - // this disk is a root disk. treat those disks with - // size less than or equal to the threshold as rootDrives. - rootDrive = info.Total <= globalRootDiskThreshold - } else { - rootDrive, err = disk.IsRootDisk(s.drivePath, SlashSeparator) - if err != nil { - return nil, err - } - } - if rootDrive { - return s, errDriveIsRoot - } + if rootDrive { + return s, errDriveIsRoot } // Sanitize before setting it @@ -302,7 +296,19 @@ func newXLStorage(ep Endpoint, cleanUp bool) (s *xlStorage, err error) { if err = json.Unmarshal(s.formatData, &format); err != nil { return s, errCorruptedFormat } - s.diskID = format.Erasure.This + m, n, err := findDiskIndexByDiskID(format, format.Erasure.This) + if err != nil { + return s, err + } + diskID := format.Erasure.This + if m != ep.SetIdx || n != ep.DiskIdx { + storageLogOnceIf(context.Background(), + fmt.Errorf("unexpected drive ordering on pool: %s: found drive at (set=%s, drive=%s), expected at (set=%s, drive=%s): %s(%s): %w", + humanize.Ordinal(ep.PoolIdx+1), humanize.Ordinal(m+1), humanize.Ordinal(n+1), humanize.Ordinal(ep.SetIdx+1), humanize.Ordinal(ep.DiskIdx+1), + s, s.diskID, errInconsistentDisk), "drive-order-format-json") + return s, errInconsistentDisk + } + s.diskID = diskID s.formatLastCheck = time.Now() s.formatLegacy = format.Erasure.DistributionAlgo == formatErasureVersionV2DistributionAlgoV1 } @@ -311,21 +317,70 @@ func newXLStorage(ep Endpoint, cleanUp bool) (s *xlStorage, err error) { // oDirect off. 
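newXLStorage now cross-checks drive ordering: the disk ID stored in `format.json` is located inside the erasure set layout and compared with the set/drive indexes recorded on the endpoint, and a mismatch fails with `errInconsistentDisk`. The following is a simplified, self-contained sketch of that ordering check; the layout and IDs are invented, and the real `findDiskIndexByDiskID` operates on the full format struct rather than a plain slice:

package main

import (
	"errors"
	"fmt"
)

var errInconsistentDisk = errors.New("inconsistent drive found")

// findDiskIndex locates a disk ID inside a sets-by-drives layout.
func findDiskIndex(sets [][]string, id string) (set, disk int, err error) {
	for m, s := range sets {
		for n, diskID := range s {
			if diskID == id {
				return m, n, nil
			}
		}
	}
	return -1, -1, fmt.Errorf("disk id %q not found in format", id)
}

func main() {
	// Invented layout: 2 sets of 2 drives each.
	sets := [][]string{
		{"id-a", "id-b"},
		{"id-c", "id-d"},
	}
	foundSet, foundDisk, err := findDiskIndex(sets, "id-d")
	if err != nil {
		panic(err)
	}
	expectedSet, expectedDisk := 1, 0 // what the endpoint configuration claims
	if foundSet != expectedSet || foundDisk != expectedDisk {
		fmt.Printf("%v: found at (set=%d, drive=%d), expected (set=%d, drive=%d)\n",
			errInconsistentDisk, foundSet, foundDisk, expectedSet, expectedDisk)
		return
	}
	fmt.Println("drive ordering OK")
}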
if globalIsErasureSD || !disk.ODirectPlatform { s.oDirect = false - } else if err := s.checkODirectDiskSupport(); err == nil { + } else if err := s.checkODirectDiskSupport(info.FSType); err == nil { s.oDirect = true } else { return s, err } + // Initialize DiskInfo cache + s.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{}, + func(ctx context.Context) (DiskInfo, error) { + dcinfo := DiskInfo{} + di, root, err := getDiskInfo(s.drivePath) + if err != nil { + return dcinfo, err + } + dcinfo.RootDisk = root + dcinfo.Major = di.Major + dcinfo.Minor = di.Minor + dcinfo.Total = di.Total + dcinfo.Free = di.Free + dcinfo.Used = di.Used + dcinfo.UsedInodes = di.Files - di.Ffree + dcinfo.FreeInodes = di.Ffree + dcinfo.FSType = di.FSType + if root { + return dcinfo, errDriveIsRoot + } + + diskID, err := s.GetDiskID() + // Healing is 'true' when + // - if we found an unformatted disk (no 'format.json') + // - if we found healing tracker 'healing.bin' + dcinfo.Healing = errors.Is(err, errUnformattedDisk) + if !dcinfo.Healing { + if hi := s.Healing(); hi != nil && !hi.Finished { + dcinfo.Healing = true + } + } + + dcinfo.ID = diskID + return dcinfo, err + }, + ) + // Success. return s, nil } // getDiskInfo returns given disk information. -func getDiskInfo(drivePath string) (di disk.Info, err error) { +func getDiskInfo(drivePath string) (di disk.Info, rootDrive bool, err error) { if err = checkPathLength(drivePath); err == nil { di, err = disk.GetInfo(drivePath, false) + + if !globalIsCICD && !globalIsErasureSD { + if globalRootDiskThreshold > 0 { + // Use MINIO_ROOTDISK_THRESHOLD_SIZE to figure out if + // this disk is a root disk. treat those disks with + // size less than or equal to the threshold as rootDrives. + rootDrive = di.Total <= globalRootDiskThreshold + } else { + rootDrive, err = disk.IsRootDisk(drivePath, SlashSeparator) + } + } } + switch { case osIsNotExist(err): err = errDiskNotFound @@ -335,7 +390,7 @@ func getDiskInfo(drivePath string) (di disk.Info, err error) { err = errFaultyDisk } - return di, err + return di, rootDrive, err } // Implements stringer compatible interface. @@ -351,7 +406,8 @@ func (s *xlStorage) Endpoint() Endpoint { return s.endpoint } -func (*xlStorage) Close() error { +func (s *xlStorage) Close() error { + s.immediatePurgeCancel() return nil } @@ -369,24 +425,7 @@ func (s *xlStorage) IsLocal() bool { // Retrieve location indexes. func (s *xlStorage) GetDiskLoc() (poolIdx, setIdx, diskIdx int) { - // If unset, see if we can locate it. - if s.poolIndex < 0 || s.setIndex < 0 || s.diskIndex < 0 { - return getXLDiskLoc(s.diskID) - } - return s.poolIndex, s.setIndex, s.diskIndex -} - -func (s *xlStorage) SetFormatData(b []byte) { - s.Lock() - defer s.Unlock() - s.formatData = b -} - -// Set location indexes. 
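The DiskInfo probe above is now registered once with a one-second cache (`cachevalue` is a MinIO-internal package), so concurrent DiskInfo() callers share a recent result instead of each hitting the filesystem. A rough, standard-library-only sketch of that single-value TTL cache pattern, with a trivial stand-in for the expensive probe:

package main

import (
	"fmt"
	"sync"
	"time"
)

type cached[T any] struct {
	mu      sync.Mutex
	ttl     time.Duration
	fetched time.Time
	value   T
	fetch   func() (T, error)
}

func (c *cached[T]) get() (T, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if !c.fetched.IsZero() && time.Since(c.fetched) < c.ttl {
		return c.value, nil // serve the recent result
	}
	v, err := c.fetch()
	if err != nil {
		return v, err
	}
	c.value, c.fetched = v, time.Now()
	return v, nil
}

func main() {
	calls := 0
	probe := &cached[int]{ttl: time.Second, fetch: func() (int, error) {
		calls++
		return 42, nil // stand-in for an expensive disk/statfs probe
	}}
	v1, _ := probe.get()
	v2, _ := probe.get() // served from the cache within the TTL
	fmt.Println(v1, v2, calls) // 42 42 1
}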
-func (s *xlStorage) SetDiskLoc(poolIdx, setIdx, diskIdx int) { - s.poolIndex = poolIdx - s.setIndex = setIdx - s.diskIndex = diskIdx + return s.endpoint.PoolIdx, s.endpoint.SetIdx, s.endpoint.DiskIdx } func (s *xlStorage) Healing() *healingTracker { @@ -394,22 +433,37 @@ func (s *xlStorage) Healing() *healingTracker { bucketMetaPrefix, healingTrackerFilename) b, err := os.ReadFile(healingFile) if err != nil { + if !errors.Is(err, os.ErrNotExist) { + internalLogIf(GlobalContext, fmt.Errorf("unable to read %s: %w", healingFile, err)) + } + return nil + } + if len(b) == 0 { + internalLogIf(GlobalContext, fmt.Errorf("%s is empty", healingFile)) + // 'healing.bin' might be truncated return nil } h := newHealingTracker() _, err = h.UnmarshalMsg(b) - logger.LogIf(GlobalContext, err) + internalLogIf(GlobalContext, err) return h } // checkODirectDiskSupport asks the disk to write some data // with O_DIRECT support, return an error if any and return // errUnsupportedDisk if there is no O_DIRECT support -func (s *xlStorage) checkODirectDiskSupport() error { +func (s *xlStorage) checkODirectDiskSupport(fsType string) error { if !disk.ODirectPlatform { return errUnsupportedDisk } + // We know XFS already supports O_DIRECT no need to check. + if fsType == "XFS" { + return nil + } + + // For all other FS pay the price of not using our recommended filesystem. + // Check if backend is writable and supports O_DIRECT uuid := mustGetUUID() filePath := pathJoin(s.drivePath, minioMetaTmpDeletedBucket, ".writable-check-"+uuid+".tmp") @@ -499,7 +553,8 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates } // Check if the current bucket has replication configuration - if rcfg, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, cache.Info.Name); err == nil { + var rcfg *replication.Config + if rcfg, _, err = globalBucketMetadataSys.GetReplicationConfig(ctx, cache.Info.Name); err == nil { if rcfg.HasActiveRules("", true) { tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, cache.Info.Name) if err == nil { @@ -511,6 +566,13 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates } } + // Check if bucket is object locked. + lr, err := globalBucketObjectLockSys.Get(cache.Info.Name) + if err != nil { + scannerLogOnceIf(ctx, err, cache.Info.Name) + return cache, err + } + vcfg, _ := globalBucketVersioningSys.Get(cache.Info.Name) // return initialized object layer @@ -529,7 +591,7 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates cache.Info.updates = updates - dataUsageInfo, err := scanDataFolder(ctx, disks, s.drivePath, cache, func(item scannerItem) (sizeSummary, error) { + dataUsageInfo, err := scanDataFolder(ctx, disks, s, cache, func(item scannerItem) (sizeSummary, error) { // Look for `xl.meta/xl.json' at the leaf. 
if !strings.HasSuffix(item.Path, SlashSeparator+xlStorageFormatFile) && !strings.HasSuffix(item.Path, SlashSeparator+xlStorageFormatFileV1) { @@ -561,6 +623,11 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates return sizeSummary{}, errSkipFile } + versioned := vcfg != nil && vcfg.Versioned(item.objectPath()) + objInfos := make([]ObjectInfo, len(fivs.Versions)) + for i, fi := range fivs.Versions { + objInfos[i] = fi.ToObjectInfo(item.bucket, item.objectPath(), versioned) + } sizeS := sizeSummary{} for _, tier := range globalTierConfigMgr.ListTiers() { if sizeS.tiers == nil { @@ -573,35 +640,14 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates sizeS.tiers[storageclass.RRS] = tierStats{} } - done := globalScannerMetrics.time(scannerMetricApplyAll) - objInfos, err := item.applyVersionActions(ctx, objAPI, fivs.Versions, globalExpiryState) - done() - if err != nil { res["err"] = err.Error() return sizeSummary{}, errSkipFile } - versioned := vcfg != nil && vcfg.Versioned(item.objectPath()) - - var objDeleted bool - for _, oi := range objInfos { - done = globalScannerMetrics.time(scannerMetricApplyVersion) - var sz int64 - objDeleted, sz = item.applyActions(ctx, objAPI, oi, &sizeS) - done() - - // DeleteAllVersionsAction: The object and all its - // versions are expired and - // doesn't contribute toward data usage. - if objDeleted { - break - } - actualSz, err := oi.GetActualSize() - if err != nil { - continue - } - + var objPresent bool + item.applyActions(ctx, objAPI, objInfos, lr, &sizeS, func(oi ObjectInfo, sz, actualSz int64, sizeS *sizeSummary) { + objPresent = true if oi.DeleteMarker { sizeS.deleteMarkers++ } @@ -614,7 +660,7 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates // tracking deleted transitioned objects switch { case oi.DeleteMarker, oi.TransitionedObject.FreeVersion: - continue + return } tier := oi.StorageClass if tier == "" { @@ -628,12 +674,12 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates sizeS.tiers[tier] = st.add(oi.tierStats()) } } - } + }) // apply tier sweep action on free versions for _, freeVersion := range fivs.FreeVersions { oi := freeVersion.ToObjectInfo(item.bucket, item.objectPath(), versioned) - done = globalScannerMetrics.time(scannerMetricTierObjSweep) + done := globalScannerMetrics.time(scannerMetricTierObjSweep) globalExpiryState.enqueueFreeVersion(oi) done() } @@ -669,7 +715,7 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates } } } - if objDeleted { + if !objPresent { // we return errIgnoreFileContrib to signal this function's // callers to skip this object's contribution towards // usage. @@ -724,33 +770,8 @@ func (s *xlStorage) setWriteAttribute(writeCount uint64) error { // DiskInfo provides current information about disk space usage, // total free inodes and underlying filesystem. 
-func (s *xlStorage) DiskInfo(_ context.Context, _ DiskInfoOptions) (info DiskInfo, err error) { - s.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{}, - func() (DiskInfo, error) { - dcinfo := DiskInfo{} - di, err := getDiskInfo(s.drivePath) - if err != nil { - return dcinfo, err - } - dcinfo.Major = di.Major - dcinfo.Minor = di.Minor - dcinfo.Total = di.Total - dcinfo.Free = di.Free - dcinfo.Used = di.Used - dcinfo.UsedInodes = di.Files - di.Ffree - dcinfo.FreeInodes = di.Ffree - dcinfo.FSType = di.FSType - diskID, err := s.GetDiskID() - // Healing is 'true' when - // - if we found an unformatted disk (no 'format.json') - // - if we found healing tracker 'healing.bin' - dcinfo.Healing = errors.Is(err, errUnformattedDisk) || (s.Healing() != nil) - dcinfo.ID = diskID - return dcinfo, err - }, - ) - - info, err = s.diskInfoCache.Get() +func (s *xlStorage) DiskInfo(ctx context.Context, _ DiskInfoOptions) (info DiskInfo, err error) { + info, err = s.diskInfoCache.GetWithCtx(ctx) info.NRRequests = s.nrRequests info.Rotational = s.rotational info.MountPath = s.drivePath @@ -785,12 +806,12 @@ func (s *xlStorage) checkFormatJSON() (os.FileInfo, error) { } else if osIsPermission(err) { return nil, errDiskAccessDenied } - logger.LogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors + storageLogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors return nil, errCorruptedBackend } else if osIsPermission(err) { return nil, errDiskAccessDenied } - logger.LogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors + storageLogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors return nil, errCorruptedBackend } return fi, nil @@ -836,30 +857,44 @@ func (s *xlStorage) GetDiskID() (string, error) { } else if osIsPermission(err) { return "", errDiskAccessDenied } - logger.LogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors + storageLogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors return "", errCorruptedBackend } else if osIsPermission(err) { return "", errDiskAccessDenied } - logger.LogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors + storageLogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors return "", errCorruptedBackend } format := &formatErasureV3{} json := jsoniter.ConfigCompatibleWithStandardLibrary if err = json.Unmarshal(b, &format); err != nil { - logger.LogOnceIf(GlobalContext, err, "check-format-json") // log unexpected errors + bugLogIf(GlobalContext, err) // log unexpected errors return "", errCorruptedFormat } + m, n, err := findDiskIndexByDiskID(format, format.Erasure.This) + if err != nil { + return "", err + } + + diskID = format.Erasure.This + ep := s.endpoint + if m != ep.SetIdx || n != ep.DiskIdx { + storageLogOnceIf(GlobalContext, + fmt.Errorf("unexpected drive ordering on pool: %s: found drive at (set=%s, drive=%s), expected at (set=%s, drive=%s): %s(%s): %w", + humanize.Ordinal(ep.PoolIdx+1), humanize.Ordinal(m+1), humanize.Ordinal(n+1), humanize.Ordinal(ep.SetIdx+1), humanize.Ordinal(ep.DiskIdx+1), + s, s.diskID, errInconsistentDisk), "drive-order-format-json") + return "", errInconsistentDisk + } s.Lock() - defer s.Unlock() - s.formatData = b - s.diskID = format.Erasure.This + s.diskID = diskID s.formatLegacy = format.Erasure.DistributionAlgo == formatErasureVersionV2DistributionAlgoV1 s.formatFileInfo = fi + s.formatData = b s.formatLastCheck = time.Now() - return s.diskID, nil + s.Unlock() + return 
diskID, nil } // Make a volume entry. @@ -1054,34 +1089,37 @@ func (s *xlStorage) deleteVersions(ctx context.Context, volume, path string, fis return err } - discard := true + s.RLock() + legacy := s.formatLegacy + s.RUnlock() var legacyJSON bool - buf, _, err := s.readAllData(ctx, volume, volumeDir, pathJoin(volumeDir, path, xlStorageFormatFile), discard) - if err != nil { - if !errors.Is(err, errFileNotFound) { - return err + buf, err := xioutil.WithDeadline[[]byte](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) ([]byte, error) { + buf, _, err := s.readAllDataWithDMTime(ctx, volume, volumeDir, pathJoin(volumeDir, path, xlStorageFormatFile)) + if err != nil && !errors.Is(err, errFileNotFound) { + return nil, err } - s.RLock() - legacy := s.formatLegacy - s.RUnlock() - if legacy { - buf, _, err = s.readAllData(ctx, volume, volumeDir, pathJoin(volumeDir, path, xlStorageFormatFileV1), discard) + if errors.Is(err, errFileNotFound) && legacy { + buf, _, err = s.readAllDataWithDMTime(ctx, volume, volumeDir, pathJoin(volumeDir, path, xlStorageFormatFileV1)) if err != nil { - return err + return nil, err } legacyJSON = true } - } - if len(buf) == 0 { - if errors.Is(err, errFileNotFound) && !skipAccessChecks(volume) { - if aerr := Access(volumeDir); aerr != nil && osIsNotExist(aerr) { - return errVolumeNotFound + if len(buf) == 0 { + if errors.Is(err, errFileNotFound) && !skipAccessChecks(volume) { + if aerr := Access(volumeDir); aerr != nil && osIsNotExist(aerr) { + return nil, errVolumeNotFound + } + return nil, errFileNotFound } } - return errFileNotFound + return buf, nil + }) + if err != nil { + return err } if legacyJSON { @@ -1155,17 +1193,43 @@ func (s *xlStorage) DeleteVersions(ctx context.Context, volume string, versions errs[i] = ctx.Err() continue } - w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) - if err := w.Run(func() error { return s.deleteVersions(ctx, volume, fiv.Name, fiv.Versions...) }); err != nil { - errs[i] = err - } + errs[i] = s.deleteVersions(ctx, volume, fiv.Name, fiv.Versions...) diskHealthCheckOK(ctx, errs[i]) } return errs } -func (s *xlStorage) moveToTrash(filePath string, recursive, immediatePurge bool) (err error) { +func (s *xlStorage) cleanupTrashImmediateCallers(ctx context.Context) { + for { + select { + case <-ctx.Done(): + return + case entry := <-s.immediatePurge: + // Add deadlines such that immediate purge is not + // perpetually hung here. + w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) + w.Run(func() error { + return removeAll(entry) + }) + } + } +} + +const almostFilledPercent = 0.05 + +func (s *xlStorage) diskAlmostFilled() bool { + info, err := s.diskInfoCache.Get() + if err != nil { + return false + } + if info.Used == 0 || info.UsedInodes == 0 { + return false + } + return (float64(info.Free)/float64(info.Used)) < almostFilledPercent || (float64(info.FreeInodes)/float64(info.UsedInodes)) < almostFilledPercent +} + +func (s *xlStorage) moveToTrashNoDeadline(filePath string, recursive, immediatePurge bool) (err error) { pathUUID := mustGetUUID() targetPath := pathutil.Join(s.drivePath, minioMetaTmpDeletedBucket, pathUUID) @@ -1175,6 +1239,13 @@ func (s *xlStorage) moveToTrash(filePath string, recursive, immediatePurge bool) err = Rename(filePath, targetPath) } + var targetPath2 string + if immediatePurge && HasSuffix(filePath, SlashSeparator) { + // With immediate purge also attempt deleting for `__XL_DIR__` folder/directory objects. 
+ targetPath2 = pathutil.Join(s.drivePath, minioMetaTmpDeletedBucket, mustGetUUID()) + renameAll(encodeDirObject(filePath), targetPath2, pathutil.Join(s.drivePath, minioMetaBucket)) + } + // ENOSPC is a valid error from rename(); remove instead of rename in that case if errors.Is(err, errDiskFull) || isSysErrNoSpace(err) { if recursive { @@ -1189,14 +1260,45 @@ func (s *xlStorage) moveToTrash(filePath string, recursive, immediatePurge bool) return err } + if !immediatePurge && s.diskAlmostFilled() { + immediatePurge = true + } + // immediately purge the target if immediatePurge { - removeAll(targetPath) + for _, target := range []string{ + targetPath, + targetPath2, + } { + if target == "" { + continue + } + select { + case s.immediatePurge <- target: + default: + // Too much back pressure, we will perform the delete + // blocking at this point we need to serialize operations. + removeAll(target) + } + } } - return nil } +func (s *xlStorage) readAllData(ctx context.Context, volume, volumeDir string, filePath string) (buf []byte, err error) { + return xioutil.WithDeadline[[]byte](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) ([]byte, error) { + data, _, err := s.readAllDataWithDMTime(ctx, volume, volumeDir, filePath) + return data, err + }) +} + +func (s *xlStorage) moveToTrash(filePath string, recursive, immediatePurge bool) (err error) { + w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) + return w.Run(func() (err error) { + return s.moveToTrashNoDeadline(filePath, recursive, immediatePurge) + }) +} + // DeleteVersion - deletes FileInfo metadata for path at `xl.meta`. forceDelMarker // will force creating a new `xl.meta` to create a new delete marker func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi FileInfo, forceDelMarker bool, opts DeleteOptions) (err error) { @@ -1219,7 +1321,7 @@ func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi F } var legacyJSON bool - buf, _, err := s.readAllData(ctx, volume, volumeDir, pathJoin(filePath, xlStorageFormatFile), true) + buf, err := s.readAllData(ctx, volume, volumeDir, pathJoin(filePath, xlStorageFormatFile)) if err != nil { if !errors.Is(err, errFileNotFound) { return err @@ -1300,7 +1402,11 @@ func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi F return err } - return s.WriteAll(ctx, volume, pathJoin(path, xlStorageFormatFile), buf) + return s.writeAllMeta(ctx, volume, pathJoin(path, xlStorageFormatFile), buf, true) + } + + if opts.UndoWrite && opts.OldDataDir != "" { + return renameAll(pathJoin(filePath, opts.OldDataDir, xlStorageFormatFileBackup), pathJoin(filePath, xlStorageFormatFile), filePath) } return s.deleteFile(volumeDir, pathJoin(volumeDir, path, xlStorageFormatFile), true, false) @@ -1312,12 +1418,21 @@ func (s *xlStorage) UpdateMetadata(ctx context.Context, volume, path string, fi return errInvalidArgument } + volumeDir, err := s.getVolDir(volume) + if err != nil { + return err + } + + // Validate file path length, before reading. 
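moveToTrash now defers actual removal to a background goroutine: entries are pushed onto the buffered `immediatePurge` channel drained by `cleanupTrashImmediateCallers`, and when the queue is full the caller deletes inline rather than block. A minimal sketch of that queue-or-delete-inline pattern; the type names, queue depth, and sample path below are illustrative only:

package main

import (
	"context"
	"fmt"
	"os"
)

type purger struct {
	queue  chan string
	cancel context.CancelFunc
}

func newPurger(depth int) *purger {
	ctx, cancel := context.WithCancel(context.Background())
	p := &purger{queue: make(chan string, depth), cancel: cancel}
	go func() {
		for {
			select {
			case <-ctx.Done():
				return
			case path := <-p.queue:
				_ = os.RemoveAll(path) // background purge of a trash entry
			}
		}
	}()
	return p
}

func (p *purger) purge(path string) {
	select {
	case p.queue <- path: // queued for asynchronous removal
	default:
		_ = os.RemoveAll(path) // queue full: delete inline to avoid an unbounded backlog
	}
}

func main() {
	p := newPurger(2)
	defer p.cancel()
	p.purge("/tmp/example-trash-entry") // hypothetical path; RemoveAll of a missing path is a no-op
	fmt.Println("queued (or removed inline)")
}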
+ filePath := pathJoin(volumeDir, path) + if err = checkPathLength(filePath); err != nil { + return err + } + buf, err := s.ReadAll(ctx, volume, pathJoin(path, xlStorageFormatFile)) if err != nil { - if err == errFileNotFound { - if fi.VersionID != "" { - return errFileVersionNotFound - } + if err == errFileNotFound && fi.VersionID != "" { + return errFileVersionNotFound } return err } @@ -1342,7 +1457,7 @@ func (s *xlStorage) UpdateMetadata(ctx context.Context, volume, path string, fi } defer metaDataPoolPut(wbuf) - return s.writeAll(ctx, volume, pathJoin(path, xlStorageFormatFile), wbuf, !opts.NoPersistence) + return s.writeAllMeta(ctx, volume, pathJoin(path, xlStorageFormatFile), wbuf, !opts.NoPersistence) } // WriteMetadata - writes FileInfo metadata for path at `xl.meta` @@ -1374,8 +1489,8 @@ func (s *xlStorage) WriteMetadata(ctx context.Context, origvolume, volume, path // First writes for special situations do not write to stable storage. // this is currently used by // - emphemeral objects such as objects created during listObjects() calls - // - newMultipartUpload() call.. - return s.writeAll(ctx, volume, pathJoin(path, xlStorageFormatFile), buf, false) + ok := volume == minioMetaMultipartBucket // - newMultipartUpload() call must be synced to drives. + return s.writeAll(ctx, volume, pathJoin(path, xlStorageFormatFile), buf, ok, "") } buf, err := s.ReadAll(ctx, volume, pathJoin(path, xlStorageFormatFile)) @@ -1471,7 +1586,7 @@ func (s *xlStorage) readRaw(ctx context.Context, volume, volumeDir, filePath str xlPath := pathJoin(filePath, xlStorageFormatFile) if readData { - buf, dmTime, err = s.readAllData(ctx, volume, volumeDir, xlPath, false) + buf, dmTime, err = s.readAllDataWithDMTime(ctx, volume, volumeDir, xlPath) } else { buf, dmTime, err = s.readMetadataWithDMTime(ctx, xlPath) if err != nil { @@ -1491,7 +1606,7 @@ func (s *xlStorage) readRaw(ctx context.Context, volume, volumeDir, filePath str s.RUnlock() if err != nil && errors.Is(err, errFileNotFound) && legacy { - buf, dmTime, err = s.readAllData(ctx, volume, volumeDir, pathJoin(filePath, xlStorageFormatFileV1), false) + buf, dmTime, err = s.readAllDataWithDMTime(ctx, volume, volumeDir, pathJoin(filePath, xlStorageFormatFileV1)) if err != nil { return nil, time.Time{}, err } @@ -1529,8 +1644,9 @@ func (s *xlStorage) ReadXL(ctx context.Context, volume, path string, readData bo // ReadOptions optional inputs for ReadVersion type ReadOptions struct { - ReadData bool - Healing bool + InclFreeVersions bool + ReadData bool + Healing bool } // ReadVersion - reads metadata and returns FileInfo at path `xl.meta` @@ -1574,7 +1690,10 @@ func (s *xlStorage) ReadVersion(ctx context.Context, origvolume, volume, path, v return fi, err } - fi, err = getFileInfo(buf, volume, path, versionID, readData, true) + fi, err = getFileInfo(buf, volume, path, versionID, fileInfoOpts{ + Data: opts.ReadData, + InclFreeVersions: opts.InclFreeVersions, + }) if err != nil { return fi, err } @@ -1590,7 +1709,7 @@ func (s *xlStorage) ReadVersion(ctx context.Context, origvolume, volume, path, v // If written with header we are fine. return fi, nil } - if fi.Size == 0 || !(fi.VersionID != "" && fi.VersionID != nullVersionID) { + if fi.Size == 0 || (fi.VersionID == "" || fi.VersionID == nullVersionID) { // If versioned we have no conflicts. 
fi.SetInlineData() return fi, nil @@ -1611,42 +1730,31 @@ func (s *xlStorage) ReadVersion(ctx context.Context, origvolume, volume, path, v fi.Data = nil } + attemptInline := fi.TransitionStatus == "" && fi.DataDir != "" && len(fi.Parts) == 1 // Reading data for small objects when // - object has not yet transitioned - // - object size lesser than 128KiB // - object has maximum of 1 parts - if fi.TransitionStatus == "" && - fi.DataDir != "" && fi.Size <= smallFileThreshold && - len(fi.Parts) == 1 { - partPath := fmt.Sprintf("part.%d", fi.Parts[0].Number) - dataPath := pathJoin(volumeDir, path, fi.DataDir, partPath) - fi.Data, _, err = s.readAllData(ctx, volume, volumeDir, dataPath, false) - if err != nil { - return FileInfo{}, err - } - } - } - - if !skipAccessChecks(volume) && !opts.Healing && fi.TransitionStatus == "" && !fi.InlineData() && len(fi.Data) == 0 && fi.DataDir != "" && fi.DataDir != emptyUUID && fi.VersionPurgeStatus().Empty() { - // Verify if the dataDir is present or not when the data - // is not inlined to make sure we return correct errors - // during HeadObject(). - - // Healing must not come here and return error, since healing - // deals with dataDirs directly, let healing fix things automatically. - if lerr := Access(pathJoin(volumeDir, path, fi.DataDir)); lerr != nil { - if os.IsNotExist(lerr) { - // Data dir is missing we must return errFileCorrupted - return FileInfo{}, errFileCorrupt + if attemptInline { + inlineBlock := globalStorageClass.InlineBlock() + if inlineBlock <= 0 { + inlineBlock = 128 * humanize.KiByte + } + + canInline := fi.ShardFileSize(fi.Parts[0].ActualSize) <= inlineBlock + if canInline { + dataPath := pathJoin(volumeDir, path, fi.DataDir, fmt.Sprintf("part.%d", fi.Parts[0].Number)) + fi.Data, err = s.readAllData(ctx, volume, volumeDir, dataPath) + if err != nil { + return FileInfo{}, err + } } - return FileInfo{}, osErrToFileErr(lerr) } } return fi, nil } -func (s *xlStorage) readAllData(ctx context.Context, volume, volumeDir string, filePath string, discard bool) (buf []byte, dmTime time.Time, err error) { +func (s *xlStorage) readAllDataWithDMTime(ctx context.Context, volume, volumeDir string, filePath string) (buf []byte, dmTime time.Time, err error) { if filePath == "" { return nil, dmTime, errFileNotFound } @@ -1690,14 +1798,6 @@ func (s *xlStorage) readAllData(ctx context.Context, volume, volumeDir string, f } return nil, dmTime, err } - - if discard { - // This discard is mostly true for DELETEEs - // so we need to make sure we do not keep - // page-cache references after. - defer disk.Fdatasync(f) - } - defer f.Close() // Get size for precise allocation. @@ -1749,8 +1849,7 @@ func (s *xlStorage) ReadAll(ctx context.Context, volume string, path string) (bu return nil, err } - buf, _, err = s.readAllData(ctx, volume, volumeDir, filePath, false) - return buf, err + return s.readAllData(ctx, volume, volumeDir, filePath) } // ReadFile reads exactly len(buf) bytes into buf. 
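ReadVersion previously inlined data for any single-part object up to a fixed 128 KiB threshold; it now asks the storage-class configuration for the inline block size and compares the erasure shard size against it, falling back to 128 KiB when unset. A small sketch of that decision; thresholds are illustrative and the real check uses fi.ShardFileSize of the part's actual size:

package main

import "fmt"

const defaultInlineBlock = 128 << 10 // 128 KiB fallback, as in the diff

func shouldInline(transitioned bool, parts int, shardFileSize, inlineBlock int64) bool {
	if transitioned || parts != 1 {
		return false
	}
	if inlineBlock <= 0 {
		inlineBlock = defaultInlineBlock
	}
	return shardFileSize <= inlineBlock
}

func main() {
	fmt.Println(shouldInline(false, 1, 64<<10, 0))  // true: small single-part object
	fmt.Println(shouldInline(false, 3, 64<<10, 0))  // false: multipart object
	fmt.Println(shouldInline(false, 1, 512<<10, 0)) // false: larger than the inline block
}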
It returns the @@ -1873,14 +1972,17 @@ func (s *xlStorage) openFileDirect(path string, mode int) (f *os.File, err error return w, nil } -func (s *xlStorage) openFileSync(filePath string, mode int) (f *os.File, err error) { - return s.openFile(filePath, mode|writeMode) +func (s *xlStorage) openFileSync(filePath string, mode int, skipParent string) (f *os.File, err error) { + return s.openFile(filePath, mode|writeMode, skipParent) } -func (s *xlStorage) openFile(filePath string, mode int) (f *os.File, err error) { +func (s *xlStorage) openFile(filePath string, mode int, skipParent string) (f *os.File, err error) { + if skipParent == "" { + skipParent = s.drivePath + } // Create top level directories if they don't exist. // with mode 0777 mkdir honors system umask. - if err = mkdirAll(pathutil.Dir(filePath), 0o777, s.drivePath); err != nil { + if err = mkdirAll(pathutil.Dir(filePath), 0o777, skipParent); err != nil { return nil, osErrToFileErr(err) } @@ -1983,18 +2085,9 @@ func (s *xlStorage) ReadFileStream(ctx context.Context, volume, path string, off return nil, err } } - return &sendFileReader{Reader: io.LimitReader(file, length), Closer: file}, nil } -// closeWrapper converts a function to an io.Closer -type closeWrapper func() error - -// Close calls the wrapped function. -func (c closeWrapper) Close() error { - return c() -} - // CreateFile - creates the file. func (s *xlStorage) CreateFile(ctx context.Context, origvolume, volume, path string, fileSize int64, r io.Reader) (err error) { if origvolume != "" { @@ -2032,18 +2125,22 @@ func (s *xlStorage) CreateFile(ctx context.Context, origvolume, volume, path str } }() - return s.writeAllDirect(ctx, filePath, fileSize, r, os.O_CREATE|os.O_WRONLY|os.O_EXCL) + return s.writeAllDirect(ctx, filePath, fileSize, r, os.O_CREATE|os.O_WRONLY|os.O_EXCL, volumeDir, false) } -func (s *xlStorage) writeAllDirect(ctx context.Context, filePath string, fileSize int64, r io.Reader, flags int) (err error) { +func (s *xlStorage) writeAllDirect(ctx context.Context, filePath string, fileSize int64, r io.Reader, flags int, skipParent string, truncate bool) (err error) { if contextCanceled(ctx) { return ctx.Err() } + if skipParent == "" { + skipParent = s.drivePath + } + // Create top level directories if they don't exist. // with mode 0777 mkdir honors system umask. parentFilePath := pathutil.Dir(filePath) - if err = mkdirAll(parentFilePath, 0o777, s.drivePath); err != nil { + if err = mkdirAll(parentFilePath, 0o777, skipParent); err != nil { return osErrToFileErr(err) } @@ -2061,15 +2158,11 @@ func (s *xlStorage) writeAllDirect(ctx context.Context, filePath string, fileSiz var bufp *[]byte switch { - case fileSize > 0 && fileSize >= largestFileThreshold: - // use a larger 4MiB buffer for a really large streams. 
- bufp = xioutil.ODirectPoolXLarge.Get().(*[]byte) - defer xioutil.ODirectPoolXLarge.Put(bufp) - case fileSize <= smallFileThreshold: - bufp = xioutil.ODirectPoolSmall.Get().(*[]byte) + case fileSize <= xioutil.SmallBlock: + bufp = xioutil.ODirectPoolSmall.Get() defer xioutil.ODirectPoolSmall.Put(bufp) default: - bufp = xioutil.ODirectPoolLarge.Get().(*[]byte) + bufp = xioutil.ODirectPoolLarge.Get() defer xioutil.ODirectPoolLarge.Put(bufp) } @@ -2085,9 +2178,15 @@ func (s *xlStorage) writeAllDirect(ctx context.Context, filePath string, fileSiz } if written < fileSize && fileSize >= 0 { + if truncate { + w.Truncate(0) // zero-in the file size to indicate that its unreadable + } w.Close() return errLessData } else if written > fileSize && fileSize >= 0 { + if truncate { + w.Truncate(0) // zero-in the file size to indicate that its unreadable + } w.Close() return errMoreData } @@ -2109,7 +2208,8 @@ func (s *xlStorage) writeAllDirect(ctx context.Context, filePath string, fileSiz return w.Close() } -func (s *xlStorage) writeAll(ctx context.Context, volume string, path string, b []byte, sync bool) (err error) { +// writeAllMeta - writes all metadata to a temp file and then links it to the final destination. +func (s *xlStorage) writeAllMeta(ctx context.Context, volume string, path string, b []byte, sync bool) (err error) { if contextCanceled(ctx) { return ctx.Err() } @@ -2124,38 +2224,55 @@ func (s *xlStorage) writeAll(ctx context.Context, volume string, path string, b return err } + tmpVolumeDir, err := s.getVolDir(minioMetaTmpBucket) + if err != nil { + return err + } + + tmpFilePath := pathJoin(tmpVolumeDir, mustGetUUID()) + defer func() { + if err != nil { + Remove(tmpFilePath) + } + }() + + if err = s.writeAllInternal(ctx, tmpFilePath, b, sync, tmpVolumeDir); err != nil { + return err + } + + return renameAll(tmpFilePath, filePath, volumeDir) +} + +// Create or truncate an existing file before writing +func (s *xlStorage) writeAllInternal(ctx context.Context, filePath string, b []byte, sync bool, skipParent string) (err error) { flags := os.O_CREATE | os.O_WRONLY | os.O_TRUNC var w *os.File if sync { - // Perform directIO along with fdatasync for larger xl.meta, mostly when + // Perform DirectIO along with fdatasync for larger xl.meta, mostly when // xl.meta has "inlined data" we prefer writing O_DIRECT and then doing // fdatasync() at the end instead of opening the file with O_DSYNC. // // This is an optimization mainly to ensure faster I/O. if len(b) > xioutil.DirectioAlignSize { r := bytes.NewReader(b) - return s.writeAllDirect(ctx, filePath, r.Size(), r, flags) + return s.writeAllDirect(ctx, filePath, r.Size(), r, flags, skipParent, true) } - w, err = s.openFileSync(filePath, flags) + w, err = s.openFileSync(filePath, flags, skipParent) } else { - w, err = s.openFile(filePath, flags) + w, err = s.openFile(filePath, flags, skipParent) } if err != nil { return err } - n, err := w.Write(b) + _, err = w.Write(b) if err != nil { + w.Truncate(0) // to indicate that we did partial write. 
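writeAllMeta above writes metadata to a temporary file under the tmp volume and only then renames it into place, so a crash mid-write can no longer leave a truncated `xl.meta` at the final path. A minimal standard-library sketch of the same write-then-rename pattern; the real code additionally honors the sync flag and switches to O_DIRECT for large payloads:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func writeFileAtomic(path string, data []byte) error {
	tmp, err := os.CreateTemp(filepath.Dir(path), ".tmp-meta-*")
	if err != nil {
		return err
	}
	tmpName := tmp.Name()
	defer os.Remove(tmpName) // harmless if the rename below already succeeded
	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Sync(); err != nil { // flush before the rename makes it visible
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmpName, path) // atomic replace on POSIX filesystems
}

func main() {
	dir, err := os.MkdirTemp("", "meta-example")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	target := filepath.Join(dir, "xl.meta")
	if err := writeFileAtomic(target, []byte("example metadata")); err != nil {
		panic(err)
	}
	fmt.Println("wrote", target)
}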
w.Close() return err } - if n != len(b) { - w.Close() - return io.ErrShortWrite - } - // Dealing with error returns from close() - 'man 2 close' // // A careful programmer will check the return value of close(), since it is quite possible that @@ -2167,89 +2284,144 @@ func (s *xlStorage) writeAll(ctx context.Context, volume string, path string, b return w.Close() } -func (s *xlStorage) WriteAll(ctx context.Context, volume string, path string, b []byte) (err error) { - return s.writeAll(ctx, volume, path, b, true) -} +func (s *xlStorage) writeAll(ctx context.Context, volume string, path string, b []byte, sync bool, skipParent string) (err error) { + if contextCanceled(ctx) { + return ctx.Err() + } -// AppendFile - append a byte array at path, if file doesn't exist at -// path this call explicitly creates it. -func (s *xlStorage) AppendFile(ctx context.Context, volume string, path string, buf []byte) (err error) { volumeDir, err := s.getVolDir(volume) if err != nil { return err } - if !skipAccessChecks(volume) { - // Stat a volume entry. - if err = Access(volumeDir); err != nil { - return convertAccessError(err, errVolumeAccessDenied) - } - } - filePath := pathJoin(volumeDir, path) if err = checkPathLength(filePath); err != nil { return err } - var w *os.File - // Create file if not found. Not doing O_DIRECT here to avoid the code that does buffer aligned writes. - // AppendFile() is only used by healing code to heal objects written in old format. - w, err = s.openFileSync(filePath, os.O_CREATE|os.O_APPEND|os.O_WRONLY) - if err != nil { - return err - } - defer w.Close() + return s.writeAllInternal(ctx, filePath, b, sync, skipParent) +} - n, err := w.Write(buf) - if err != nil { - return err +func (s *xlStorage) WriteAll(ctx context.Context, volume string, path string, b []byte) (err error) { + // Specific optimization to avoid re-read from the drives for `format.json` + // in-case the caller is a network operation. + if volume == minioMetaBucket && path == formatConfigFile { + s.Lock() + s.formatData = b + s.Unlock() } - if n != len(buf) { - return io.ErrShortWrite + volumeDir, err := s.getVolDir(volume) + if err != nil { + return err } - return nil + return s.writeAll(ctx, volume, path, b, true, volumeDir) } -// CheckParts check if path has necessary parts available. -func (s *xlStorage) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) error { +// AppendFile - append a byte array at path, if file doesn't exist at +// path this call explicitly creates it. +func (s *xlStorage) AppendFile(ctx context.Context, volume string, path string, buf []byte) (err error) { volumeDir, err := s.getVolDir(volume) if err != nil { return err } - for _, part := range fi.Parts { - partPath := pathJoin(path, fi.DataDir, fmt.Sprintf("part.%d", part.Number)) - filePath := pathJoin(volumeDir, partPath) - if err = checkPathLength(filePath); err != nil { - return err + if !skipAccessChecks(volume) { + // Stat a volume entry. + if err = Access(volumeDir); err != nil { + return convertAccessError(err, errVolumeAccessDenied) } - st, err := Lstat(filePath) - if err != nil { - if osIsNotExist(err) { - if !skipAccessChecks(volume) { - // Stat a volume entry. - if verr := Access(volumeDir); verr != nil { - if osIsNotExist(verr) { - return errVolumeNotFound - } - return verr + } + + filePath := pathJoin(volumeDir, path) + if err = checkPathLength(filePath); err != nil { + return err + } + + var w *os.File + // Create file if not found. 
Not doing O_DIRECT here to avoid the code that does buffer aligned writes. + // AppendFile() is only used by healing code to heal objects written in old format. + w, err = s.openFileSync(filePath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, volumeDir) + if err != nil { + return err + } + defer w.Close() + + n, err := w.Write(buf) + if err != nil { + return err + } + + if n != len(buf) { + return io.ErrShortWrite + } + + return nil +} + +// checkPart is a light check of an existing and size of a part, without doing a bitrot operation +// For any unexpected error, return checkPartUnknown (zero) +func (s *xlStorage) checkPart(volumeDir, path, dataDir string, partNum int, expectedSize int64, skipAccessCheck bool) (resp int) { + partPath := pathJoin(path, dataDir, fmt.Sprintf("part.%d", partNum)) + filePath := pathJoin(volumeDir, partPath) + st, err := Lstat(filePath) + if err != nil { + if osIsNotExist(err) { + if !skipAccessCheck { + // Stat a volume entry. + if verr := Access(volumeDir); verr != nil { + if osIsNotExist(verr) { + resp = checkPartVolumeNotFound } + return resp } } - return osErrToFileErr(err) } - if st.Mode().IsDir() { - return errFileNotFound + if osErrToFileErr(err) == errFileNotFound { + resp = checkPartFileNotFound } - // Check if shard is truncated. - if st.Size() < fi.Erasure.ShardFileSize(part.Size) { - return errFileCorrupt + return resp + } + if st.Mode().IsDir() { + resp = checkPartFileNotFound + return resp + } + // Check if shard is truncated. + if st.Size() < expectedSize { + resp = checkPartFileCorrupt + return resp + } + return checkPartSuccess +} + +// CheckParts check if path has necessary parts available. +func (s *xlStorage) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) (*CheckPartsResp, error) { + volumeDir, err := s.getVolDir(volume) + if err != nil { + return nil, err + } + + err = checkPathLength(pathJoin(volumeDir, path)) + if err != nil { + return nil, err + } + + resp := CheckPartsResp{ + // By default, all results have an unknown status + Results: make([]int, len(fi.Parts)), + } + + for i, part := range fi.Parts { + resp.Results[i], err = xioutil.WithDeadline[int](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) (int, error) { + return s.checkPart(volumeDir, path, fi.DataDir, part.Number, fi.Erasure.ShardFileSize(part.Size), skipAccessChecks(volume)), nil + }) + if err != nil { + return nil, err } } - return nil + return &resp, nil } // deleteFile deletes a file or a directory if its empty unless recursive @@ -2261,10 +2433,10 @@ func (s *xlStorage) deleteFile(basePath, deletePath string, recursive, immediate if basePath == "" || deletePath == "" { return nil } - isObjectDir := HasSuffix(deletePath, SlashSeparator) - basePath = pathutil.Clean(basePath) - deletePath = pathutil.Clean(deletePath) - if !strings.HasPrefix(deletePath, basePath) || deletePath == basePath { + + bp := pathutil.Clean(basePath) // do not override basepath / or deletePath / + dp := pathutil.Clean(deletePath) + if !strings.HasPrefix(dp, bp) || dp == bp { return nil } @@ -2279,7 +2451,7 @@ func (s *xlStorage) deleteFile(basePath, deletePath string, recursive, immediate case isSysErrNotEmpty(err): // if object is a directory, but if its not empty // return FileNotFound to indicate its an empty prefix. 
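CheckParts no longer returns a single error for the whole object: each part is probed with a cheap Lstat and classified with a per-part status (the real code's checkPartSuccess/checkPartFileNotFound/checkPartFileCorrupt/checkPartUnknown constants), so callers can tell a missing shard from a truncated one. A simplified sketch of that classification with made-up constant names:

package main

import (
	"fmt"
	"os"
)

const (
	partUnknown = iota // unexpected error while probing
	partOK
	partMissing
	partCorrupt
)

func checkPart(path string, expectedSize int64) int {
	st, err := os.Lstat(path)
	switch {
	case err != nil && os.IsNotExist(err):
		return partMissing
	case err != nil:
		return partUnknown
	case st.IsDir():
		return partMissing // a directory where a part file should be
	case st.Size() < expectedSize:
		return partCorrupt // shard is truncated
	default:
		return partOK
	}
}

func main() {
	f, err := os.CreateTemp("", "part.1-*")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	f.WriteString("0123456789") // 10 bytes on disk
	f.Close()
	fmt.Println(checkPart(f.Name(), 5) == partOK)              // true: at least as large as expected
	fmt.Println(checkPart(f.Name(), 100) == partCorrupt)       // true: truncated shard
	fmt.Println(checkPart("does-not-exist", 1) == partMissing) // true: file absent
}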
- if isObjectDir { + if HasSuffix(deletePath, SlashSeparator) { return errFileNotFound } // if we have .DS_Store only on macOS @@ -2311,16 +2483,48 @@ func (s *xlStorage) deleteFile(basePath, deletePath string, recursive, immediate } } - deletePath = pathutil.Dir(deletePath) - // Delete parent directory obviously not recursively. Errors for // parent directories shouldn't trickle down. - s.deleteFile(basePath, deletePath, false, false) + s.deleteFile(basePath, pathutil.Dir(pathutil.Clean(deletePath)), false, false) return nil } -// DeleteFile - delete a file at path. +// DeleteBulk - delete many files in bulk to trash. +// this delete does not recursively delete empty +// parents, if you need empty parent delete support +// please use Delete() instead. This API is meant as +// an optimization for Multipart operations. +func (s *xlStorage) DeleteBulk(ctx context.Context, volume string, paths ...string) (err error) { + volumeDir, err := s.getVolDir(volume) + if err != nil { + return err + } + + if !skipAccessChecks(volume) { + // Stat a volume entry. + if err = Access(volumeDir); err != nil { + return convertAccessError(err, errVolumeAccessDenied) + } + } + + for _, fp := range paths { + // Following code is needed so that we retain SlashSeparator suffix if any in + // path argument. + filePath := pathJoin(volumeDir, fp) + if err = checkPathLength(filePath); err != nil { + return err + } + + if err = s.moveToTrash(filePath, false, false); err != nil { + return err + } + } + + return nil +} + +// Delete - delete a file at path. func (s *xlStorage) Delete(ctx context.Context, volume string, path string, deleteOpts DeleteOptions) (err error) { volumeDir, err := s.getVolDir(volume) if err != nil { @@ -2346,21 +2550,11 @@ func (s *xlStorage) Delete(ctx context.Context, volume string, path string, dele } func skipAccessChecks(volume string) (ok bool) { - for _, prefix := range []string{ - minioMetaTmpDeletedBucket, - minioMetaTmpBucket, - minioMetaMultipartBucket, - minioMetaBucket, - } { - if strings.HasPrefix(volume, prefix) { - return true - } - } - return ok + return strings.HasPrefix(volume, minioMetaBucket) } // RenameData - rename source path to destination path atomically, metadata and data directory. -func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string, opts RenameOptions) (sign uint64, err error) { +func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string, opts RenameOptions) (res RenameDataResp, err error) { defer func() { ignoredErrs := []error{ errFileNotFound, @@ -2373,7 +2567,7 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f } if err != nil && !IsErr(err, ignoredErrs...) && !contextCanceled(ctx) { // Only log these errors if context is not yet canceled. - logger.LogOnceIf(ctx, fmt.Errorf("drive:%s, srcVolume: %s, srcPath: %s, dstVolume: %s:, dstPath: %s - error %v", + storageLogOnceIf(ctx, fmt.Errorf("drive:%s, srcVolume: %s, srcPath: %s, dstVolume: %s:, dstPath: %s - error %v", s.drivePath, srcVolume, srcPath, dstVolume, dstPath, @@ -2386,24 +2580,24 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f srcVolumeDir, err := s.getVolDir(srcVolume) if err != nil { - return 0, err + return res, err } dstVolumeDir, err := s.getVolDir(dstVolume) if err != nil { - return 0, err + return res, err } if !skipAccessChecks(srcVolume) { // Stat a volume entry. 
if err = Access(srcVolumeDir); err != nil { - return 0, convertAccessError(err, errVolumeAccessDenied) + return res, convertAccessError(err, errVolumeAccessDenied) } } if !skipAccessChecks(dstVolume) { if err = Access(dstVolumeDir); err != nil { - return 0, convertAccessError(err, errVolumeAccessDenied) + return res, convertAccessError(err, errVolumeAccessDenied) } } @@ -2425,13 +2619,17 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f } if err = checkPathLength(srcFilePath); err != nil { - return 0, err + return res, err } if err = checkPathLength(dstFilePath); err != nil { - return 0, err + return res, err } + s.RLock() + formatLegacy := s.formatLegacy + s.RUnlock() + dstBuf, err := xioutil.ReadFile(dstFilePath) if err != nil { // handle situations when dstFilePath is 'file' @@ -2441,26 +2639,32 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f if isSysErrNotDir(err) && runtime.GOOS != globalWindowsOSName { // NOTE: On windows the error happens at // next line and returns appropriate error. - return 0, errFileAccessDenied + return res, errFileAccessDenied } if !osIsNotExist(err) { - return 0, osErrToFileErr(err) - } - // errFileNotFound comes here. - err = s.renameLegacyMetadata(dstVolumeDir, dstPath) - if err != nil && err != errFileNotFound { - return 0, err + return res, osErrToFileErr(err) } - if err == nil { - dstBuf, err = xioutil.ReadFile(dstFilePath) - if err != nil && !osIsNotExist(err) { - return 0, osErrToFileErr(err) + if formatLegacy { + // errFileNotFound comes here. + err = s.renameLegacyMetadata(dstVolumeDir, dstPath) + if err != nil && err != errFileNotFound { + return res, err + } + if err == nil { + dstBuf, err = xioutil.ReadFile(dstFilePath) + if err != nil && !osIsNotExist(err) { + return res, osErrToFileErr(err) + } } } } + // Preserve all the legacy data, could be slow, but at max there can be 10,000 parts. + currentDataPath := pathJoin(dstVolumeDir, dstPath) + var xlMeta xlMetaV2 var legacyPreserved bool + var legacyEntries []string if len(dstBuf) > 0 { if isXL2V1Format(dstBuf) { if err = xlMeta.Load(dstBuf); err != nil { @@ -2472,20 +2676,17 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f xlMetaLegacy := &xlMetaV1Object{} json := jsoniter.ConfigCompatibleWithStandardLibrary if err := json.Unmarshal(dstBuf, xlMetaLegacy); err != nil { - logger.LogOnceIf(ctx, err, "read-data-unmarshal-"+dstFilePath) + storageLogOnceIf(ctx, err, "read-data-unmarshal-"+dstFilePath) // Data appears corrupt. Drop data. } else { xlMetaLegacy.DataDir = legacyDataDir if err = xlMeta.AddLegacy(xlMetaLegacy); err != nil { - logger.LogOnceIf(ctx, err, "read-data-add-legacy-"+dstFilePath) + storageLogOnceIf(ctx, err, "read-data-add-legacy-"+dstFilePath) } legacyPreserved = true } } } else { - s.RLock() - formatLegacy := s.formatLegacy - s.RUnlock() // It is possible that some drives may not have `xl.meta` file // in such scenarios verify if at least `part.1` files exist // to verify for legacy version. @@ -2494,10 +2695,9 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f // from `xl.json` to `xl.meta`, we can avoid // one extra readdir operation here for all // new deployments. 
- currentDataPath := pathJoin(dstVolumeDir, dstPath) - entries, err := readDirN(currentDataPath, 1) + entries, err := readDir(currentDataPath) if err != nil && err != errFileNotFound { - return 0, osErrToFileErr(err) + return res, osErrToFileErr(err) } for _, entry := range entries { if entry == xlStorageFormatFile || strings.HasSuffix(entry, slashSeparator) { @@ -2505,70 +2705,63 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f } if strings.HasPrefix(entry, "part.") { legacyPreserved = true + legacyEntries = entries break } } } } - legacyDataPath := pathJoin(dstVolumeDir, dstPath, legacyDataDir) - if legacyPreserved { - // Preserve all the legacy data, could be slow, but at max there can be 10,000 parts. - currentDataPath := pathJoin(dstVolumeDir, dstPath) - entries, err := readDir(currentDataPath) - if err != nil { - return 0, osErrToFileErr(err) - } - - // legacy data dir means its old content, honor system umask. - if err = mkdirAll(legacyDataPath, 0o777, dstVolumeDir); err != nil { - // any failed mkdir-calls delete them. - s.deleteFile(dstVolumeDir, legacyDataPath, true, false) - return 0, osErrToFileErr(err) - } - - for _, entry := range entries { - // Skip xl.meta renames further, also ignore any directories such as `legacyDataDir` - if entry == xlStorageFormatFile || strings.HasSuffix(entry, slashSeparator) { - continue + var legacyDataPath string + if formatLegacy { + legacyDataPath = pathJoin(dstVolumeDir, dstPath, legacyDataDir) + if legacyPreserved { + if contextCanceled(ctx) { + return res, ctx.Err() } - if err = Rename(pathJoin(currentDataPath, entry), pathJoin(legacyDataPath, entry)); err != nil { - // Any failed rename calls un-roll previous transaction. - s.deleteFile(dstVolumeDir, legacyDataPath, true, false) + if len(legacyEntries) > 0 { + // legacy data dir means its old content, honor system umask. + if err = mkdirAll(legacyDataPath, 0o777, dstVolumeDir); err != nil { + // any failed mkdir-calls delete them. + s.deleteFile(dstVolumeDir, legacyDataPath, true, false) + return res, osErrToFileErr(err) + } + for _, entry := range legacyEntries { + // Skip xl.meta renames further, also ignore any directories such as `legacyDataDir` + if entry == xlStorageFormatFile || strings.HasSuffix(entry, slashSeparator) { + continue + } - return 0, osErrToFileErr(err) + if err = Rename(pathJoin(currentDataPath, entry), pathJoin(legacyDataPath, entry)); err != nil { + // Any failed rename calls un-roll previous transaction. + s.deleteFile(dstVolumeDir, legacyDataPath, true, false) + + return res, osErrToFileErr(err) + } + } } } } - var oldDstDataPath, reqVID string + // Set skipParent to skip mkdirAll() calls for deeply nested objects + // - if its an overwrite + // - if its a versioned object + // + // This can potentiall reduce syscalls by strings.Split(path, "/") + // times relative to the object name. + skipParent := dstVolumeDir + if len(dstBuf) > 0 { + skipParent = pathutil.Dir(dstFilePath) + } + var reqVID string if fi.VersionID == "" { reqVID = nullVersionID } else { reqVID = fi.VersionID } - // Replace the data of null version or any other existing version-id - _, ver, err := xlMeta.findVersionStr(reqVID) - if err == nil { - dataDir := ver.getDataDir() - if dataDir != "" && (xlMeta.SharedDataDirCountStr(reqVID, dataDir) == 0) { - // Purge the destination path as we are not preserving anything - // versioned object was not requested. 
- oldDstDataPath = pathJoin(dstVolumeDir, dstPath, dataDir) - // if old destination path is same as new destination path - // there is nothing to purge, this is true in case of healing - // avoid setting oldDstDataPath at that point. - if oldDstDataPath == dstDataPath { - oldDstDataPath = "" - } else { - xlMeta.data.remove(reqVID, dataDir) - } - } - } - // Empty fi.VersionID indicates that versioning is either // suspended or disabled on this bucket. RenameData will replace // the 'null' version. We add a free-version to track its tiered @@ -2586,74 +2779,129 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f } // indicates if RenameData() is called by healing. - // healing doesn't preserve the dataDir as 'legacy' - healing := fi.XLV1 && fi.DataDir != legacyDataDir + healing := fi.Healing() + + // Replace the data of null version or any other existing version-id + _, ver, err := xlMeta.findVersionStr(reqVID) + if err == nil { + dataDir := ver.getDataDir() + if dataDir != "" && (xlMeta.SharedDataDirCountStr(reqVID, dataDir) == 0) { + // Purge the destination path as we are not preserving anything + // versioned object was not requested. + res.OldDataDir = dataDir + if healing { + // if old destination path is same as new destination path + // there is nothing to purge, this is true in case of healing + // avoid setting OldDataDir at that point. + res.OldDataDir = "" + } else { + xlMeta.data.remove(reqVID, dataDir) + } + } + } if err = xlMeta.AddVersion(fi); err != nil { if legacyPreserved { // Any failed rename calls un-roll previous transaction. s.deleteFile(dstVolumeDir, legacyDataPath, true, false) } - return 0, err + return res, err } - var sbuf bytes.Buffer - for _, ver := range xlMeta.versions { - sbuf.Write(ver.header.Signature[:]) + if len(xlMeta.versions) <= 10 { + // any number of versions beyond this is excessive + // avoid healing such objects in this manner, let + // it heal during the regular scanner cycle. + dst := []byte{} + for _, ver := range xlMeta.versions { + dst = slices.Grow(dst, 16) + copy(dst[len(dst):], ver.header.VersionID[:]) + } + res.Sign = dst } - sign = xxh3.Hash(sbuf.Bytes()) - dstBuf, err = xlMeta.AppendTo(metaDataPoolGet()) - defer metaDataPoolPut(dstBuf) + newDstBuf, err := xlMeta.AppendTo(metaDataPoolGet()) + defer metaDataPoolPut(newDstBuf) if err != nil { if legacyPreserved { s.deleteFile(dstVolumeDir, legacyDataPath, true, false) } - return 0, errFileCorrupt + return res, errFileCorrupt } - if err = s.WriteAll(ctx, srcVolume, pathJoin(srcPath, xlStorageFormatFile), dstBuf); err != nil { + if contextCanceled(ctx) { + return res, ctx.Err() + } + + if err = s.WriteAll(ctx, srcVolume, pathJoin(srcPath, xlStorageFormatFile), newDstBuf); err != nil { if legacyPreserved { s.deleteFile(dstVolumeDir, legacyDataPath, true, false) } - return 0, osErrToFileErr(err) + return res, osErrToFileErr(err) } diskHealthCheckOK(ctx, err) - if srcDataPath != "" && len(fi.Data) == 0 && fi.Size > 0 { - // renameAll only for objects that have xl.meta not saved inline. - s.moveToTrash(dstDataPath, true, false) + notInline := srcDataPath != "" && len(fi.Data) == 0 && fi.Size > 0 + if notInline { if healing { + // renameAll only for objects that have xl.meta not saved inline. + // this must be done in healing only, otherwise it is expected + // that for fresh PutObject() call dstDataPath can never exist. + // if its an overwrite then the caller deletes the DataDir + // in a separate RPC call. 
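The skipParent hint introduced above lets RenameData avoid re-creating every directory component for overwrites and versioned objects, since the destination's parent tree is already known to exist. A rough sketch of the idea using only the standard library; `mkdirBelow` is a hypothetical stand-in for MinIO's internal mkdirAll, which takes a similar base-directory hint:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// mkdirBelow creates only the path components beneath base, assuming base exists.
func mkdirBelow(path, base string) error {
	rel, err := filepath.Rel(base, path)
	if err != nil || strings.HasPrefix(rel, "..") {
		return os.MkdirAll(path, 0o777) // not under base: create the whole tree
	}
	cur := base
	for _, part := range strings.Split(rel, string(filepath.Separator)) {
		if part == "." || part == "" {
			continue
		}
		cur = filepath.Join(cur, part)
		if err := os.Mkdir(cur, 0o777); err != nil && !os.IsExist(err) {
			return err
		}
	}
	return nil
}

func main() {
	base, err := os.MkdirTemp("", "drive")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(base)
	target := filepath.Join(base, "bucket", "prefix", "object", "uuid")
	if err := mkdirBelow(target, base); err != nil {
		panic(err)
	}
	fmt.Println("created", target)
}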
+ s.moveToTrash(dstDataPath, true, false) + // If we are healing we should purge any legacyDataPath content, // that was previously preserved during PutObject() call // on a versioned bucket. s.moveToTrash(legacyDataPath, true, false) } - if err = renameAll(srcDataPath, dstDataPath, dstVolumeDir); err != nil { + if contextCanceled(ctx) { + return res, ctx.Err() + } + if err = renameAll(srcDataPath, dstDataPath, skipParent); err != nil { if legacyPreserved { // Any failed rename calls un-roll previous transaction. s.deleteFile(dstVolumeDir, legacyDataPath, true, false) } + // if its a partial rename() do not attempt to delete recursively. s.deleteFile(dstVolumeDir, dstDataPath, false, false) - return 0, osErrToFileErr(err) + return res, osErrToFileErr(err) + } + diskHealthCheckOK(ctx, err) + } + + // If we have oldDataDir then we must preserve current xl.meta + // as backup, in-case needing renames(). + if res.OldDataDir != "" { + if contextCanceled(ctx) { + return res, ctx.Err() + } + + // preserve current xl.meta inside the oldDataDir. + if err = s.writeAll(ctx, dstVolume, pathJoin(dstPath, res.OldDataDir, xlStorageFormatFileBackup), dstBuf, true, skipParent); err != nil { + if legacyPreserved { + s.deleteFile(dstVolumeDir, legacyDataPath, true, false) + } + return res, osErrToFileErr(err) } + diskHealthCheckOK(ctx, err) + } + + if contextCanceled(ctx) { + return res, ctx.Err() } // Commit meta-file - if err = renameAll(srcFilePath, dstFilePath, dstVolumeDir); err != nil { + if err = renameAll(srcFilePath, dstFilePath, skipParent); err != nil { if legacyPreserved { // Any failed rename calls un-roll previous transaction. s.deleteFile(dstVolumeDir, legacyDataPath, true, false) } + // if its a partial rename() do not attempt to delete recursively. + // this can be healed since all parts are available. s.deleteFile(dstVolumeDir, dstDataPath, false, false) - return 0, osErrToFileErr(err) - } - - // additionally only purge older data at the end of the transaction of new data-dir - // movement, this is to ensure that previous data references can co-exist for - // any recoverability. - if oldDstDataPath != "" { - s.moveToTrash(oldDstDataPath, true, false) + return res, osErrToFileErr(err) } if srcVolume != minioMetaMultipartBucket { @@ -2664,7 +2912,85 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f } else { s.deleteFile(srcVolumeDir, pathutil.Dir(srcFilePath), true, false) } - return sign, nil + return res, nil +} + +// RenamePart - rename part path to destination path atomically, this is meant to be used +// only with multipart API +func (s *xlStorage) RenamePart(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string, meta []byte, skipParent string) (err error) { + srcVolumeDir, err := s.getVolDir(srcVolume) + if err != nil { + return err + } + dstVolumeDir, err := s.getVolDir(dstVolume) + if err != nil { + return err + } + if !skipAccessChecks(srcVolume) { + // Stat a volume entry. + if err = Access(srcVolumeDir); err != nil { + if osIsNotExist(err) { + return errVolumeNotFound + } else if isSysErrIO(err) { + return errFaultyDisk + } + return err + } + } + if !skipAccessChecks(dstVolume) { + if err = Access(dstVolumeDir); err != nil { + if osIsNotExist(err) { + return errVolumeNotFound + } else if isSysErrIO(err) { + return errFaultyDisk + } + return err + } + } + + srcIsDir := HasSuffix(srcPath, SlashSeparator) + dstIsDir := HasSuffix(dstPath, SlashSeparator) + // either source or destination is a directory return error. 
+ if srcIsDir || dstIsDir { + return errFileAccessDenied + } + + srcFilePath := pathutil.Join(srcVolumeDir, srcPath) + if err = checkPathLength(srcFilePath); err != nil { + return err + } + + dstFilePath := pathutil.Join(dstVolumeDir, dstPath) + if err = checkPathLength(dstFilePath); err != nil { + return err + } + // when skipParent is from rpc. it’s ok for not adding another rpc HandlerID like HandlerRenamePart2 + // For this case, skipParent is empty, destBaseDir is equal to dstVolumeDir, that behavior is the same as the previous one + destBaseDir := pathutil.Join(dstVolumeDir, skipParent) + if err = checkPathLength(destBaseDir); err != nil { + return err + } + + if err = renameAll(srcFilePath, dstFilePath, destBaseDir); err != nil { + if isSysErrNotEmpty(err) || isSysErrNotDir(err) { + return errFileAccessDenied + } + err = osErrToFileErr(err) + if errors.Is(err, errFileNotFound) || errors.Is(err, errFileAccessDenied) { + return errUploadIDNotFound + } + return err + } + + if err = s.WriteAll(ctx, dstVolume, dstPath+".meta", meta); err != nil { + return osErrToFileErr(err) + } + + // Remove parent dir of the source file if empty + parentDir := pathutil.Dir(srcFilePath) + s.deleteFile(srcVolumeDir, parentDir, false, false) + + return nil } // RenameFile - rename source path to destination path atomically. @@ -2701,7 +3027,7 @@ func (s *xlStorage) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolum srcIsDir := HasSuffix(srcPath, SlashSeparator) dstIsDir := HasSuffix(dstPath, SlashSeparator) // Either src and dst have to be directories or files, else return error. - if !(srcIsDir && dstIsDir || !srcIsDir && !dstIsDir) { + if (!srcIsDir || !dstIsDir) && (srcIsDir || dstIsDir) { return errFileAccessDenied } srcFilePath := pathutil.Join(srcVolumeDir, srcPath) @@ -2771,42 +3097,94 @@ func (s *xlStorage) bitrotVerify(ctx context.Context, partPath string, partSize return bitrotVerify(diskHealthReader(ctx, file), fi.Size(), partSize, algo, sum, shardSize) } -func (s *xlStorage) VerifyFile(ctx context.Context, volume, path string, fi FileInfo) (err error) { +func (s *xlStorage) VerifyFile(ctx context.Context, volume, path string, fi FileInfo) (*CheckPartsResp, error) { volumeDir, err := s.getVolDir(volume) if err != nil { - return err + return nil, err } if !skipAccessChecks(volume) { // Stat a volume entry. if err = Access(volumeDir); err != nil { - return convertAccessError(err, errVolumeAccessDenied) + return nil, convertAccessError(err, errVolumeAccessDenied) } } + resp := CheckPartsResp{ + // By default, the result is unknown per part + Results: make([]int, len(fi.Parts)), + } + erasure := fi.Erasure - for _, part := range fi.Parts { + for i, part := range fi.Parts { checksumInfo := erasure.GetChecksumInfo(part.Number) partPath := pathJoin(volumeDir, path, fi.DataDir, fmt.Sprintf("part.%d", part.Number)) - if err := s.bitrotVerify(ctx, partPath, + err := s.bitrotVerify(ctx, partPath, erasure.ShardFileSize(part.Size), checksumInfo.Algorithm, - checksumInfo.Hash, erasure.ShardSize()); err != nil { - if !IsErr(err, []error{ - errFileNotFound, - errVolumeNotFound, - errFileCorrupt, - errFileAccessDenied, - errFileVersionNotFound, - }...) 
{ - logger.GetReqInfo(ctx).AppendTags("disk", s.String()) - logger.LogOnceIf(ctx, err, partPath) - } - return err + checksumInfo.Hash, erasure.ShardSize()) + + resp.Results[i] = convPartErrToInt(err) + + // Only log unknown errors + if resp.Results[i] == checkPartUnknown && err != errFileAccessDenied { + logger.GetReqInfo(ctx).AppendTags("disk", s.String()) + storageLogOnceIf(ctx, err, partPath) } } - return nil + return &resp, nil +} + +func (s *xlStorage) ReadParts(ctx context.Context, volume string, partMetaPaths ...string) ([]*ObjectPartInfo, error) { + volumeDir, err := s.getVolDir(volume) + if err != nil { + return nil, err + } + + parts := make([]*ObjectPartInfo, len(partMetaPaths)) + for idx, partMetaPath := range partMetaPaths { + var partNumber int + fmt.Sscanf(pathutil.Base(partMetaPath), "part.%d.meta", &partNumber) + + if contextCanceled(ctx) { + parts[idx] = &ObjectPartInfo{ + Error: ctx.Err().Error(), + Number: partNumber, + } + continue + } + + if err := Access(pathJoin(volumeDir, pathutil.Dir(partMetaPath), fmt.Sprintf("part.%d", partNumber))); err != nil { + parts[idx] = &ObjectPartInfo{ + Error: err.Error(), + Number: partNumber, + } + continue + } + + data, err := s.readAllData(ctx, volume, volumeDir, pathJoin(volumeDir, partMetaPath)) + if err != nil { + parts[idx] = &ObjectPartInfo{ + Error: err.Error(), + Number: partNumber, + } + continue + } + + pinfo := &ObjectPartInfo{} + if _, err = pinfo.UnmarshalMsg(data); err != nil { + parts[idx] = &ObjectPartInfo{ + Error: err.Error(), + Number: partNumber, + } + continue + } + + parts[idx] = pinfo + } + diskHealthCheckOK(ctx, nil) + return parts, nil } // ReadMultiple will read multiple files and send each back as response. @@ -2827,15 +3205,17 @@ func (s *xlStorage) ReadMultiple(ctx context.Context, req ReadMultipleReq, resp Prefix: req.Prefix, File: f, } + var data []byte var mt time.Time + fullPath := pathJoin(volumeDir, req.Prefix, f) w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout()) if err := w.Run(func() (err error) { if req.MetadataOnly { data, mt, err = s.readMetadataWithDMTime(ctx, fullPath) } else { - data, mt, err = s.readAllData(ctx, req.Bucket, volumeDir, fullPath, true) + data, mt, err = s.readAllDataWithDMTime(ctx, req.Bucket, volumeDir, fullPath) } return err }); err != nil { @@ -2938,7 +3318,7 @@ func (s *xlStorage) CleanAbandonedData(ctx context.Context, volume string, path } baseDir := pathJoin(volumeDir, path+slashSeparator) metaPath := pathutil.Join(baseDir, xlStorageFormatFile) - buf, _, err := s.readAllData(ctx, volume, volumeDir, metaPath, true) + buf, err := s.readAllData(ctx, volume, volumeDir, metaPath) if err != nil { return err } @@ -2982,8 +3362,11 @@ func (s *xlStorage) CleanAbandonedData(ctx context.Context, volume string, path // Do not abort on context errors. for dir := range foundDirs { toRemove := pathJoin(volumeDir, path, dir+SlashSeparator) - err := s.deleteFile(volumeDir, toRemove, true, true) + err = s.deleteFile(volumeDir, toRemove, true, true) diskHealthCheckOK(ctx, err) + if err != nil { + return err + } } // Do the same for inline data @@ -2991,32 +3374,36 @@ func (s *xlStorage) CleanAbandonedData(ctx context.Context, volume string, path if err != nil { return err } + // Clear and repopulate - for k := range foundDirs { - delete(foundDirs, k) - } + clear(foundDirs) + // Populate into map for _, k := range dirs { foundDirs[k] = struct{}{} } + // Delete all directories we expect to be there. 
for _, dir := range wantDirs { delete(foundDirs, dir) } + // Nothing to delete + if len(foundDirs) == 0 { + return nil + } + // Delete excessive inline entries. - if len(foundDirs) > 0 { - // Convert to slice. - dirs = dirs[:0] - for dir := range foundDirs { - dirs = append(dirs, dir) - } - if xl.data.remove(dirs...) { - newBuf, err := xl.AppendTo(metaDataPoolGet()) - if err == nil { - defer metaDataPoolPut(newBuf) - return s.WriteAll(ctx, volume, pathJoin(path, xlStorageFormatFile), buf) - } + // Convert to slice. + dirs = dirs[:0] + for dir := range foundDirs { + dirs = append(dirs, dir) + } + if xl.data.remove(dirs...) { + newBuf, err := xl.AppendTo(metaDataPoolGet()) + if err == nil { + defer metaDataPoolPut(newBuf) + return s.WriteAll(ctx, volume, pathJoin(path, xlStorageFormatFile), buf) } } return nil diff --git a/cmd/xl-storage_noatime_notsupported.go b/cmd/xl-storage_noatime_notsupported.go index ac6718ef6ac57..c9c91d624678f 100644 --- a/cmd/xl-storage_noatime_notsupported.go +++ b/cmd/xl-storage_noatime_notsupported.go @@ -1,5 +1,4 @@ -//go:build windows || darwin || freebsd -// +build windows darwin freebsd +//go:build !unix || darwin || freebsd // Copyright (c) 2015-2021 MinIO, Inc. // diff --git a/cmd/xl-storage_noatime_supported.go b/cmd/xl-storage_noatime_supported.go index eefeef87c6acf..efa75ff0ab864 100644 --- a/cmd/xl-storage_noatime_supported.go +++ b/cmd/xl-storage_noatime_supported.go @@ -1,5 +1,4 @@ -//go:build !windows && !darwin && !freebsd -// +build !windows,!darwin,!freebsd +//go:build unix && !darwin && !freebsd // Copyright (c) 2015-2021 MinIO, Inc. // @@ -22,12 +21,14 @@ package cmd import ( "os" + "syscall" ) var ( // Disallow updating access times - readMode = os.O_RDONLY | 0x40000 // O_NOATIME + // Add non-block to avoid syscall to attempt to set epoll on files. + readMode = os.O_RDONLY | 0x40000 | syscall.O_NONBLOCK // O_NOATIME // Write with data sync only used only for `xl.meta` writes - writeMode = 0x1000 // O_DSYNC + writeMode = 0x1000 | syscall.O_NONBLOCK // O_DSYNC ) diff --git a/cmd/xl-storage_test.go b/cmd/xl-storage_test.go index 1556394f3416e..c27647f249790 100644 --- a/cmd/xl-storage_test.go +++ b/cmd/xl-storage_test.go @@ -19,10 +19,9 @@ package cmd import ( "bytes" - "context" "crypto/rand" - "fmt" "io" + "net/url" "os" slashpath "path" "runtime" @@ -114,19 +113,35 @@ func TestIsValidVolname(t *testing.T) { } } +func newLocalXLStorage(path string) (*xlStorage, error) { + return newLocalXLStorageWithDiskIdx(path, 0) +} + +// Initialize a new storage disk. +func newLocalXLStorageWithDiskIdx(path string, diskIdx int) (*xlStorage, error) { + u := url.URL{Path: path} + return newXLStorage(Endpoint{ + URL: &u, + IsLocal: true, + PoolIdx: 0, + SetIdx: 0, + DiskIdx: diskIdx, + }, true) +} + // creates a temp dir and sets up xlStorage layer. // returns xlStorage layer, temp dir path to be used for the purpose of tests. func newXLStorageTestSetup(tb testing.TB) (*xlStorageDiskIDCheck, string, error) { diskPath := tb.TempDir() // Initialize a new xlStorage layer. 
- storage, err := newLocalXLStorage(diskPath) + storage, err := newLocalXLStorageWithDiskIdx(diskPath, 3) if err != nil { return nil, "", err } // Create a sample format.json file - if err = storage.WriteAll(context.Background(), minioMetaBucket, formatConfigFile, []byte(`{"version":"1","format":"xl","id":"592a41c2-b7cc-4130-b883-c4b5cb15965b","xl":{"version":"3","this":"da017d62-70e3-45f1-8a1a-587707e69ad1","sets":[["e07285a6-8c73-4962-89c6-047fb939f803","33b8d431-482d-4376-b63c-626d229f0a29","cff6513a-4439-4dc1-bcaa-56c9e880c352","da017d62-70e3-45f1-8a1a-587707e69ad1","9c9f21d5-1f15-4737-bce6-835faa0d9626","0a59b346-1424-4fc2-9fa2-a2e80541d0c1","7924a3dc-b69a-4971-9a2e-014966d6aebb","4d2b8dd9-4e48-444b-bdca-c89194b26042"]],"distributionAlgo":"CRCMOD"}}`)); err != nil { + if err = storage.WriteAll(tb.Context(), minioMetaBucket, formatConfigFile, []byte(`{"version":"1","format":"xl","id":"592a41c2-b7cc-4130-b883-c4b5cb15965b","xl":{"version":"3","this":"da017d62-70e3-45f1-8a1a-587707e69ad1","sets":[["e07285a6-8c73-4962-89c6-047fb939f803","33b8d431-482d-4376-b63c-626d229f0a29","cff6513a-4439-4dc1-bcaa-56c9e880c352","da017d62-70e3-45f1-8a1a-587707e69ad1","9c9f21d5-1f15-4737-bce6-835faa0d9626","0a59b346-1424-4fc2-9fa2-a2e80541d0c1","7924a3dc-b69a-4971-9a2e-014966d6aebb","4d2b8dd9-4e48-444b-bdca-c89194b26042"]],"distributionAlgo":"CRCMOD"}}`)); err != nil { return nil, "", err } @@ -141,22 +156,22 @@ func createPermDeniedFile(t *testing.T) (permDeniedDir string) { permDeniedDir = t.TempDir() if err = os.Mkdir(slashpath.Join(permDeniedDir, "mybucket"), 0o775); err != nil { - t.Fatalf(fmt.Sprintf("Unable to create temporary directory %v. %v", slashpath.Join(permDeniedDir, "mybucket"), err)) + t.Fatalf("Unable to create temporary directory %v. %v", slashpath.Join(permDeniedDir, "mybucket"), err) } if err = os.WriteFile(slashpath.Join(permDeniedDir, "mybucket", "myobject"), []byte(""), 0o400); err != nil { - t.Fatalf(fmt.Sprintf("Unable to create file %v. %v", slashpath.Join(permDeniedDir, "mybucket", "myobject"), err)) + t.Fatalf("Unable to create file %v. %v", slashpath.Join(permDeniedDir, "mybucket", "myobject"), err) } if err = os.Chmod(slashpath.Join(permDeniedDir, "mybucket"), 0o400); err != nil { - t.Fatalf(fmt.Sprintf("Unable to change permission to temporary directory %v. %v", slashpath.Join(permDeniedDir, "mybucket"), err)) + t.Fatalf("Unable to change permission to temporary directory %v. %v", slashpath.Join(permDeniedDir, "mybucket"), err) } t.Cleanup(func() { os.Chmod(slashpath.Join(permDeniedDir, "mybucket"), 0o775) }) if err = os.Chmod(permDeniedDir, 0o400); err != nil { - t.Fatalf(fmt.Sprintf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err)) + t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } t.Cleanup(func() { os.Chmod(permDeniedDir, 0o775) @@ -179,7 +194,7 @@ func TestXLStorageGetDiskInfo(t *testing.T) { // Check test cases. for _, testCase := range testCases { - if _, err := getDiskInfo(testCase.diskPath); err != testCase.expectedErr { + if _, _, err := getDiskInfo(testCase.diskPath); err != testCase.expectedErr { t.Fatalf("expected: %s, got: %s", testCase.expectedErr, err) } } @@ -190,7 +205,7 @@ func TestXLStorageIsDirEmpty(t *testing.T) { // Should give false on non-existent directory. 
dir1 := slashpath.Join(tmp, "non-existent-directory") - if isDirEmpty(dir1) { + if isDirEmpty(dir1, true) { t.Error("expected false for non-existent directory, got true") } @@ -201,7 +216,7 @@ func TestXLStorageIsDirEmpty(t *testing.T) { t.Fatal(err) } - if isDirEmpty(dir2) { + if isDirEmpty(dir2, true) { t.Error("expected false for a file, got true") } @@ -212,7 +227,7 @@ func TestXLStorageIsDirEmpty(t *testing.T) { t.Fatal(err) } - if !isDirEmpty(dir3) { + if !isDirEmpty(dir3, true) { t.Error("expected true for empty dir, got false") } } @@ -227,15 +242,15 @@ func TestXLStorageReadVersionLegacy(t *testing.T) { } // Create files for the test cases. - if err = xlStorage.MakeVol(context.Background(), "exists-legacy"); err != nil { + if err = xlStorage.MakeVol(t.Context(), "exists-legacy"); err != nil { t.Fatalf("Unable to create a volume \"exists-legacy\", %s", err) } - if err = xlStorage.AppendFile(context.Background(), "exists-legacy", "as-file/xl.json", []byte(legacyJSON)); err != nil { + if err = xlStorage.AppendFile(t.Context(), "exists-legacy", "as-file/xl.json", []byte(legacyJSON)); err != nil { t.Fatalf("Unable to create a file \"as-file\", %s", err) } - fi, err := xlStorage.ReadVersion(context.Background(), "", "exists-legacy", "as-file", "", ReadOptions{}) + fi, err := xlStorage.ReadVersion(t.Context(), "", "exists-legacy", "as-file", "", ReadOptions{}) if err != nil { t.Fatalf("Unable to read older 'xl.json' content: %s", err) } @@ -254,22 +269,22 @@ func TestXLStorageReadVersion(t *testing.T) { } xlMeta, _ := os.ReadFile("testdata/xl.meta") - fi, _ := getFileInfo(xlMeta, "exists", "as-file", "", false, true) + fi, _ := getFileInfo(xlMeta, "exists", "as-file", "", fileInfoOpts{Data: false}) // Create files for the test cases. - if err = xlStorage.MakeVol(context.Background(), "exists"); err != nil { + if err = xlStorage.MakeVol(t.Context(), "exists"); err != nil { t.Fatalf("Unable to create a volume \"exists\", %s", err) } - if err = xlStorage.AppendFile(context.Background(), "exists", "as-directory/as-file/xl.meta", xlMeta); err != nil { + if err = xlStorage.AppendFile(t.Context(), "exists", "as-directory/as-file/xl.meta", xlMeta); err != nil { t.Fatalf("Unable to create a file \"as-directory/as-file\", %s", err) } - if err = xlStorage.AppendFile(context.Background(), "exists", "as-file/xl.meta", xlMeta); err != nil { + if err = xlStorage.AppendFile(t.Context(), "exists", "as-file/xl.meta", xlMeta); err != nil { t.Fatalf("Unable to create a file \"as-file\", %s", err) } - if err = xlStorage.AppendFile(context.Background(), "exists", "as-file-parent/xl.meta", xlMeta); err != nil { + if err = xlStorage.AppendFile(t.Context(), "exists", "as-file-parent/xl.meta", xlMeta); err != nil { t.Fatalf("Unable to create a file \"as-file-parent\", %s", err) } - if err = xlStorage.MakeVol(context.Background(), "exists/as-file/"+fi.DataDir); err != nil { + if err = xlStorage.MakeVol(t.Context(), "exists/as-file/"+fi.DataDir); err != nil { t.Fatalf("Unable to create a dataDir %s, %s", fi.DataDir, err) } @@ -325,7 +340,7 @@ func TestXLStorageReadVersion(t *testing.T) { // Run through all the test cases and validate for ReadVersion. 
for i, testCase := range testCases { - _, err = xlStorage.ReadVersion(context.Background(), "", testCase.volume, testCase.path, "", ReadOptions{}) + _, err = xlStorage.ReadVersion(t.Context(), "", testCase.volume, testCase.path, "", ReadOptions{}) if err != testCase.err { t.Fatalf("TestXLStorage %d: Expected err \"%s\", got err \"%s\"", i+1, testCase.err, err) } @@ -341,16 +356,16 @@ func TestXLStorageReadAll(t *testing.T) { } // Create files for the test cases. - if err = xlStorage.MakeVol(context.Background(), "exists"); err != nil { + if err = xlStorage.MakeVol(t.Context(), "exists"); err != nil { t.Fatalf("Unable to create a volume \"exists\", %s", err) } - if err = xlStorage.AppendFile(context.Background(), "exists", "as-directory/as-file", []byte("Hello, World")); err != nil { + if err = xlStorage.AppendFile(t.Context(), "exists", "as-directory/as-file", []byte("Hello, World")); err != nil { t.Fatalf("Unable to create a file \"as-directory/as-file\", %s", err) } - if err = xlStorage.AppendFile(context.Background(), "exists", "as-file", []byte("Hello, World")); err != nil { + if err = xlStorage.AppendFile(t.Context(), "exists", "as-file", []byte("Hello, World")); err != nil { t.Fatalf("Unable to create a file \"as-file\", %s", err) } - if err = xlStorage.AppendFile(context.Background(), "exists", "as-file-parent", []byte("Hello, World")); err != nil { + if err = xlStorage.AppendFile(t.Context(), "exists", "as-file-parent", []byte("Hello, World")); err != nil { t.Fatalf("Unable to create a file \"as-file-parent\", %s", err) } @@ -407,7 +422,7 @@ func TestXLStorageReadAll(t *testing.T) { var dataRead []byte // Run through all the test cases and validate for ReadAll. for i, testCase := range testCases { - dataRead, err = xlStorage.ReadAll(context.Background(), testCase.volume, testCase.path) + dataRead, err = xlStorage.ReadAll(t.Context(), testCase.volume, testCase.path) if err != testCase.err { t.Errorf("TestXLStorage %d: Expected err \"%v\", got err \"%v\"", i+1, testCase.err, err) continue @@ -513,7 +528,7 @@ func TestXLStorageMakeVol(t *testing.T) { } for i, testCase := range testCases { - if err := xlStorage.MakeVol(context.Background(), testCase.volName); err != testCase.expectedErr { + if err := xlStorage.MakeVol(t.Context(), testCase.volName); err != testCase.expectedErr { t.Fatalf("TestXLStorage %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } } @@ -545,7 +560,7 @@ func TestXLStorageMakeVol(t *testing.T) { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - if err := xlStorageNew.MakeVol(context.Background(), "test-vol"); err != errDiskAccessDenied { + if err := xlStorageNew.MakeVol(t.Context(), "test-vol"); err != errDiskAccessDenied { t.Fatalf("expected: %s, got: %s", errDiskAccessDenied, err) } } @@ -560,7 +575,7 @@ func TestXLStorageDeleteVol(t *testing.T) { } // Setup test environment. 
- if err = xlStorage.MakeVol(context.Background(), "success-vol"); err != nil { + if err = xlStorage.MakeVol(t.Context(), "success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } @@ -604,7 +619,7 @@ func TestXLStorageDeleteVol(t *testing.T) { } for i, testCase := range testCases { - if err = xlStorage.DeleteVol(context.Background(), testCase.volName, false); err != testCase.expectedErr { + if err = xlStorage.DeleteVol(t.Context(), testCase.volName, false); err != testCase.expectedErr { t.Fatalf("TestXLStorage: %d, expected: %s, got: %s", i+1, testCase.expectedErr, err) } } @@ -646,7 +661,7 @@ func TestXLStorageDeleteVol(t *testing.T) { t.Fatalf("Unable to change permission to temporary directory %v. %v", permDeniedDir, err) } - if err = xlStorageNew.DeleteVol(context.Background(), "mybucket", false); err != errDiskAccessDenied { + if err = xlStorageNew.DeleteVol(t.Context(), "mybucket", false); err != errDiskAccessDenied { t.Fatalf("expected: Permission error, got: %s", err) } } @@ -660,7 +675,7 @@ func TestXLStorageDeleteVol(t *testing.T) { // TestXLStorage for delete on an removed disk. // should fail with disk not found. - err = xlStorageDeletedStorage.DeleteVol(context.Background(), "Del-Vol", false) + err = xlStorageDeletedStorage.DeleteVol(t.Context(), "Del-Vol", false) if err != errDiskNotFound { t.Errorf("Expected: \"Drive not found\", got \"%s\"", err) } @@ -675,7 +690,7 @@ func TestXLStorageStatVol(t *testing.T) { } // Setup test environment. - if err = xlStorage.MakeVol(context.Background(), "success-vol"); err != nil { + if err = xlStorage.MakeVol(t.Context(), "success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } @@ -702,7 +717,7 @@ func TestXLStorageStatVol(t *testing.T) { for i, testCase := range testCases { var volInfo VolInfo - volInfo, err = xlStorage.StatVol(context.Background(), testCase.volName) + volInfo, err = xlStorage.StatVol(t.Context(), testCase.volName) if err != testCase.expectedErr { t.Fatalf("TestXLStorage case : %d, Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } @@ -724,7 +739,7 @@ func TestXLStorageStatVol(t *testing.T) { // TestXLStorage for delete on an removed disk. // should fail with disk not found. - _, err = xlStorageDeletedStorage.StatVol(context.Background(), "Stat vol") + _, err = xlStorageDeletedStorage.StatVol(t.Context(), "Stat vol") if err != errDiskNotFound { t.Errorf("Expected: \"Drive not found\", got \"%s\"", err) } @@ -740,18 +755,18 @@ func TestXLStorageListVols(t *testing.T) { var volInfos []VolInfo // TestXLStorage empty list vols. - if volInfos, err = xlStorage.ListVols(context.Background()); err != nil { + if volInfos, err = xlStorage.ListVols(t.Context()); err != nil { t.Fatalf("expected: , got: %s", err) } else if len(volInfos) != 1 { - t.Fatalf("expected: one entry, got: %s", volInfos) + t.Fatalf("expected: one entry, got: %v", volInfos) } // TestXLStorage non-empty list vols. - if err = xlStorage.MakeVol(context.Background(), "success-vol"); err != nil { + if err = xlStorage.MakeVol(t.Context(), "success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - volInfos, err = xlStorage.ListVols(context.Background()) + volInfos, err = xlStorage.ListVols(t.Context()) if err != nil { t.Fatalf("expected: , got: %s", err) } @@ -772,7 +787,7 @@ func TestXLStorageListVols(t *testing.T) { // removing the path and simulating disk failure os.RemoveAll(path) // should fail with errDiskNotFound. 
- if _, err = xlStorage.ListVols(context.Background()); err != errDiskNotFound { + if _, err = xlStorage.ListVols(t.Context()); err != errDiskNotFound { t.Errorf("Expected to fail with \"%s\", but instead failed with \"%s\"", errDiskNotFound, err) } } @@ -793,13 +808,13 @@ func TestXLStorageListDir(t *testing.T) { // removing the disk, used to recreate disk not found error. os.RemoveAll(diskPath) // Setup test environment. - if err = xlStorage.MakeVol(context.Background(), "success-vol"); err != nil { + if err = xlStorage.MakeVol(t.Context(), "success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - if err = xlStorage.AppendFile(context.Background(), "success-vol", "abc/def/ghi/success-file", []byte("Hello, world")); err != nil { + if err = xlStorage.AppendFile(t.Context(), "success-vol", "abc/def/ghi/success-file", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err = xlStorage.AppendFile(context.Background(), "success-vol", "abc/xyz/ghi/success-file", []byte("Hello, world")); err != nil { + if err = xlStorage.AppendFile(t.Context(), "success-vol", "abc/xyz/ghi/success-file", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } @@ -858,7 +873,7 @@ func TestXLStorageListDir(t *testing.T) { for i, testCase := range testCases { var dirList []string - dirList, err = xlStorage.ListDir(context.Background(), "", testCase.srcVol, testCase.srcPath, -1) + dirList, err = xlStorage.ListDir(t.Context(), "", testCase.srcVol, testCase.srcPath, -1) if err != testCase.expectedErr { t.Errorf("TestXLStorage case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } @@ -890,7 +905,7 @@ func TestXLStorageListDir(t *testing.T) { t.Fatalf("Unable to initialize xlStorage, %s", err) } - if err = xlStorageNew.Delete(context.Background(), "mybucket", "myobject", DeleteOptions{ + if err = xlStorageNew.Delete(t.Context(), "mybucket", "myobject", DeleteOptions{ Recursive: false, Immediate: false, }); err != errFileAccessDenied { @@ -900,7 +915,7 @@ func TestXLStorageListDir(t *testing.T) { // TestXLStorage for delete on an removed disk. // should fail with disk not found. - err = xlStorageDeletedStorage.Delete(context.Background(), "del-vol", "my-file", DeleteOptions{ + err = xlStorageDeletedStorage.Delete(t.Context(), "del-vol", "my-file", DeleteOptions{ Recursive: false, Immediate: false, }) @@ -922,17 +937,17 @@ func TestXLStorageDeleteFile(t *testing.T) { } // Setup test environment. 
- if err = xlStorage.MakeVol(context.Background(), "success-vol"); err != nil { + if err = xlStorage.MakeVol(t.Context(), "success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - if err = xlStorage.AppendFile(context.Background(), "success-vol", "success-file", []byte("Hello, world")); err != nil { + if err = xlStorage.AppendFile(t.Context(), "success-vol", "success-file", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err = xlStorage.MakeVol(context.Background(), "no-permissions"); err != nil { + if err = xlStorage.MakeVol(t.Context(), "no-permissions"); err != nil { t.Fatalf("Unable to create volume, %s", err.Error()) } - if err = xlStorage.AppendFile(context.Background(), "no-permissions", "dir/file", []byte("Hello, world")); err != nil { + if err = xlStorage.AppendFile(t.Context(), "no-permissions", "dir/file", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err.Error()) } // Parent directory must have write permissions, this is read + execute. @@ -986,7 +1001,7 @@ func TestXLStorageDeleteFile(t *testing.T) { } for i, testCase := range testCases { - if err = xlStorage.Delete(context.Background(), testCase.srcVol, testCase.srcPath, DeleteOptions{ + if err = xlStorage.Delete(t.Context(), testCase.srcVol, testCase.srcPath, DeleteOptions{ Recursive: false, Immediate: false, }); err != testCase.expectedErr { @@ -1013,7 +1028,7 @@ func TestXLStorageDeleteFile(t *testing.T) { t.Fatalf("Unable to initialize xlStorage, %s", err) } - if err = xlStorageNew.Delete(context.Background(), "mybucket", "myobject", DeleteOptions{ + if err = xlStorageNew.Delete(t.Context(), "mybucket", "myobject", DeleteOptions{ Recursive: false, Immediate: false, }); err != errFileAccessDenied { @@ -1034,7 +1049,7 @@ func TestXLStorageDeleteFile(t *testing.T) { // TestXLStorage for delete on an removed disk. // should fail with disk not found. - err = xlStorageDeletedStorage.Delete(context.Background(), "del-vol", "my-file", DeleteOptions{ + err = xlStorageDeletedStorage.Delete(t.Context(), "del-vol", "my-file", DeleteOptions{ Recursive: false, Immediate: false, }) @@ -1053,7 +1068,7 @@ func TestXLStorageReadFile(t *testing.T) { volume := "success-vol" // Setup test environment. - if err = xlStorage.MakeVol(context.Background(), volume); err != nil { + if err = xlStorage.MakeVol(t.Context(), volume); err != nil { t.Fatalf("Unable to create volume, %s", err) } @@ -1140,7 +1155,7 @@ func TestXLStorageReadFile(t *testing.T) { v := NewBitrotVerifier(SHA256, getSHA256Sum([]byte("hello, world"))) // Create test files for further reading. for i, appendFile := range appendFiles { - err = xlStorage.AppendFile(context.Background(), volume, appendFile.fileName, []byte("hello, world")) + err = xlStorage.AppendFile(t.Context(), volume, appendFile.fileName, []byte("hello, world")) if err != appendFile.expectedErr { t.Fatalf("Creating file failed: %d %#v, expected: %s, got: %s", i+1, appendFile, appendFile.expectedErr, err) } @@ -1149,18 +1164,18 @@ func TestXLStorageReadFile(t *testing.T) { { buf := make([]byte, 5) // Test for negative offset. - if _, err = xlStorage.ReadFile(context.Background(), volume, "myobject", -1, buf, v); err == nil { + if _, err = xlStorage.ReadFile(t.Context(), volume, "myobject", -1, buf, v); err == nil { t.Fatalf("expected: error, got: ") } } - for l := 0; l < 2; l++ { + for range 2 { // Following block validates all ReadFile test cases. for i, testCase := range testCases { var n int64 // Common read buffer. 
buf := make([]byte, testCase.bufSize) - n, err = xlStorage.ReadFile(context.Background(), testCase.volume, testCase.fileName, testCase.offset, buf, v) + n, err = xlStorage.ReadFile(t.Context(), testCase.volume, testCase.fileName, testCase.offset, buf, v) if err != nil && testCase.expectedErr != nil { // Validate if the type string of the errors are an exact match. if err.Error() != testCase.expectedErr.Error() { @@ -1178,7 +1193,7 @@ func TestXLStorageReadFile(t *testing.T) { expectErrno = uintptr(errno) } } - if !(expectErrno != 0 && resultErrno != 0 && expectErrno == resultErrno) { + if expectErrno == 0 || resultErrno == 0 || expectErrno != resultErrno { t.Errorf("Case: %d %#v, expected: %s, got: %s", i+1, testCase, testCase.expectedErr, err) } } @@ -1236,7 +1251,7 @@ func TestXLStorageReadFile(t *testing.T) { // Common read buffer. buf := make([]byte, 10) - if _, err = xlStoragePermStorage.ReadFile(context.Background(), "mybucket", "myobject", 0, buf, v); err != errFileAccessDenied { + if _, err = xlStoragePermStorage.ReadFile(t.Context(), "mybucket", "myobject", 0, buf, v); err != errFileAccessDenied { t.Errorf("expected: %s, got: %s", errFileAccessDenied, err) } } @@ -1277,14 +1292,14 @@ func TestXLStorageReadFileWithVerify(t *testing.T) { if err != nil { t.Fatalf("Unable to create xlStorage test setup, %s", err) } - if err = xlStorage.MakeVol(context.Background(), volume); err != nil { + if err = xlStorage.MakeVol(t.Context(), volume); err != nil { t.Fatalf("Unable to create volume %s: %v", volume, err) } data := make([]byte, 8*1024) if _, err = io.ReadFull(rand.Reader, data); err != nil { t.Fatalf("Unable to create generate random data: %v", err) } - if err = xlStorage.AppendFile(context.Background(), volume, object, data); err != nil { + if err = xlStorage.AppendFile(t.Context(), volume, object, data); err != nil { t.Fatalf("Unable to create object: %v", err) } @@ -1296,7 +1311,7 @@ func TestXLStorageReadFileWithVerify(t *testing.T) { } buffer := make([]byte, test.length) - n, err := xlStorage.ReadFile(context.Background(), volume, test.file, int64(test.offset), buffer, NewBitrotVerifier(test.algorithm, h.Sum(nil))) + n, err := xlStorage.ReadFile(t.Context(), volume, test.file, int64(test.offset), buffer, NewBitrotVerifier(test.algorithm, h.Sum(nil))) switch { case err == nil && test.expError != nil: @@ -1319,7 +1334,7 @@ func TestXLStorageFormatFileChange(t *testing.T) { t.Fatalf("Unable to create xlStorage test setup, %s", err) } - if err = xlStorage.MakeVol(context.Background(), volume); err != nil { + if err = xlStorage.MakeVol(t.Context(), volume); err != nil { t.Fatalf("MakeVol failed with %s", err) } @@ -1328,7 +1343,7 @@ func TestXLStorageFormatFileChange(t *testing.T) { t.Fatalf("ioutil.WriteFile failed with %s", err) } - err = xlStorage.MakeVol(context.Background(), volume) + err = xlStorage.MakeVol(t.Context(), volume) if err != errVolumeExists { t.Fatalf("MakeVol expected to fail with errDiskNotFound but failed with %s", err) } @@ -1343,7 +1358,7 @@ func TestXLStorageAppendFile(t *testing.T) { } // Setup test environment. 
- if err = xlStorage.MakeVol(context.Background(), "success-vol"); err != nil { + if err = xlStorage.MakeVol(t.Context(), "success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } @@ -1373,7 +1388,7 @@ func TestXLStorageAppendFile(t *testing.T) { } for i, testCase := range testCases { - if err = xlStorage.AppendFile(context.Background(), "success-vol", testCase.fileName, []byte("hello, world")); err != testCase.expectedErr { + if err = xlStorage.AppendFile(t.Context(), "success-vol", testCase.fileName, []byte("hello, world")); err != testCase.expectedErr { t.Errorf("Case: %d, expected: %s, got: %s", i+1, testCase.expectedErr, err) } } @@ -1398,14 +1413,14 @@ func TestXLStorageAppendFile(t *testing.T) { t.Fatalf("Unable to initialize xlStorage, %s", err) } - if err = xlStoragePermStorage.AppendFile(context.Background(), "mybucket", "myobject", []byte("hello, world")); err != errFileAccessDenied { + if err = xlStoragePermStorage.AppendFile(t.Context(), "mybucket", "myobject", []byte("hello, world")); err != errFileAccessDenied { t.Fatalf("expected: errFileAccessDenied error, got: %s", err) } } // TestXLStorage case with invalid volume name. // A valid volume name should be at least of size 3. - err = xlStorage.AppendFile(context.Background(), "bn", "yes", []byte("hello, world")) + err = xlStorage.AppendFile(t.Context(), "bn", "yes", []byte("hello, world")) if err != errVolumeNotFound { t.Fatalf("expected: \"Invalid argument error\", got: \"%s\"", err) } @@ -1420,32 +1435,32 @@ func TestXLStorageRenameFile(t *testing.T) { } // Setup test environment. - if err := xlStorage.MakeVol(context.Background(), "src-vol"); err != nil { + if err := xlStorage.MakeVol(t.Context(), "src-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - if err := xlStorage.MakeVol(context.Background(), "dest-vol"); err != nil { + if err := xlStorage.MakeVol(t.Context(), "dest-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - if err := xlStorage.AppendFile(context.Background(), "src-vol", "file1", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile(t.Context(), "src-vol", "file1", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := xlStorage.AppendFile(context.Background(), "src-vol", "file2", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile(t.Context(), "src-vol", "file2", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := xlStorage.AppendFile(context.Background(), "src-vol", "file3", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile(t.Context(), "src-vol", "file3", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := xlStorage.AppendFile(context.Background(), "src-vol", "file4", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile(t.Context(), "src-vol", "file4", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := xlStorage.AppendFile(context.Background(), "src-vol", "file5", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile(t.Context(), "src-vol", "file5", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := xlStorage.AppendFile(context.Background(), "src-vol", "path/to/file1", []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile(t.Context(), "src-vol", "path/to/file1", []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, 
%s", err) } @@ -1622,7 +1637,7 @@ func TestXLStorageRenameFile(t *testing.T) { } for i, testCase := range testCases { - if err := xlStorage.RenameFile(context.Background(), testCase.srcVol, testCase.srcPath, testCase.destVol, testCase.destPath); err != testCase.expectedErr { + if err := xlStorage.RenameFile(t.Context(), testCase.srcVol, testCase.srcPath, testCase.destVol, testCase.destPath); err != testCase.expectedErr { t.Fatalf("TestXLStorage %d: Expected the error to be : \"%v\", got: \"%v\".", i+1, testCase.expectedErr, err) } } @@ -1635,7 +1650,7 @@ func TestXLStorageDeleteVersion(t *testing.T) { if err != nil { t.Fatalf("Unable to create xlStorage test setup, %s", err) } - ctx := context.Background() + ctx := t.Context() volume := "myvol-vol" object := "my-object" @@ -1728,19 +1743,19 @@ func TestXLStorageStatInfoFile(t *testing.T) { } // Setup test environment. - if err := xlStorage.MakeVol(context.Background(), "success-vol"); err != nil { + if err := xlStorage.MakeVol(t.Context(), "success-vol"); err != nil { t.Fatalf("Unable to create volume, %s", err) } - if err := xlStorage.AppendFile(context.Background(), "success-vol", pathJoin("success-file", xlStorageFormatFile), []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile(t.Context(), "success-vol", pathJoin("success-file", xlStorageFormatFile), []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := xlStorage.AppendFile(context.Background(), "success-vol", pathJoin("path/to/success-file", xlStorageFormatFile), []byte("Hello, world")); err != nil { + if err := xlStorage.AppendFile(t.Context(), "success-vol", pathJoin("path/to/success-file", xlStorageFormatFile), []byte("Hello, world")); err != nil { t.Fatalf("Unable to create file, %s", err) } - if err := xlStorage.MakeVol(context.Background(), "success-vol/path/to/"+xlStorageFormatFile); err != nil { + if err := xlStorage.MakeVol(t.Context(), "success-vol/path/to/"+xlStorageFormatFile); err != nil { t.Fatalf("Unable to create path, %s", err) } @@ -1801,7 +1816,7 @@ func TestXLStorageStatInfoFile(t *testing.T) { } for i, testCase := range testCases { - _, err := xlStorage.StatInfoFile(context.Background(), testCase.srcVol, testCase.srcPath+"/"+xlStorageFormatFile, false) + _, err := xlStorage.StatInfoFile(t.Context(), testCase.srcVol, testCase.srcPath+"/"+xlStorageFormatFile, false) if err != testCase.expectedErr { t.Errorf("TestXLStorage case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err) } @@ -1824,7 +1839,7 @@ func TestXLStorageVerifyFile(t *testing.T) { volName := "testvol" fileName := "testfile" - if err := storage.MakeVol(context.Background(), volName); err != nil { + if err := storage.MakeVol(t.Context(), volName); err != nil { t.Fatal(err) } @@ -1838,29 +1853,29 @@ func TestXLStorageVerifyFile(t *testing.T) { h := algo.New() h.Write(data) hashBytes := h.Sum(nil) - if err := storage.WriteAll(context.Background(), volName, fileName, data); err != nil { + if err := storage.WriteAll(t.Context(), volName, fileName, data); err != nil { t.Fatal(err) } - if err := storage.storage.bitrotVerify(context.Background(), pathJoin(path, volName, fileName), size, algo, hashBytes, 0); err != nil { + if err := storage.storage.bitrotVerify(t.Context(), pathJoin(path, volName, fileName), size, algo, hashBytes, 0); err != nil { t.Fatal(err) } // 2) Whole-file bitrot check on corrupted file - if err := storage.AppendFile(context.Background(), volName, fileName, []byte("a")); err != nil { + if err := 
storage.AppendFile(t.Context(), volName, fileName, []byte("a")); err != nil { t.Fatal(err) } // Check if VerifyFile reports the incorrect file length (the correct length is `size+1`) - if err := storage.storage.bitrotVerify(context.Background(), pathJoin(path, volName, fileName), size, algo, hashBytes, 0); err == nil { + if err := storage.storage.bitrotVerify(t.Context(), pathJoin(path, volName, fileName), size, algo, hashBytes, 0); err == nil { t.Fatal("expected to fail bitrot check") } // Check if bitrot fails - if err := storage.storage.bitrotVerify(context.Background(), pathJoin(path, volName, fileName), size+1, algo, hashBytes, 0); err == nil { + if err := storage.storage.bitrotVerify(t.Context(), pathJoin(path, volName, fileName), size+1, algo, hashBytes, 0); err == nil { t.Fatal("expected to fail bitrot check") } - if err := storage.Delete(context.Background(), volName, fileName, DeleteOptions{ + if err := storage.Delete(t.Context(), volName, fileName, DeleteOptions{ Recursive: false, Immediate: false, }); err != nil { @@ -1888,7 +1903,7 @@ func TestXLStorageVerifyFile(t *testing.T) { t.Fatal(err) } w.(io.Closer).Close() - if err := storage.storage.bitrotVerify(context.Background(), pathJoin(path, volName, fileName), size, algo, nil, shardSize); err != nil { + if err := storage.storage.bitrotVerify(t.Context(), pathJoin(path, volName, fileName), size, algo, nil, shardSize); err != nil { t.Fatal(err) } @@ -1903,10 +1918,10 @@ func TestXLStorageVerifyFile(t *testing.T) { t.Fatal(err) } f.Close() - if err := storage.storage.bitrotVerify(context.Background(), pathJoin(path, volName, fileName), size, algo, nil, shardSize); err == nil { + if err := storage.storage.bitrotVerify(t.Context(), pathJoin(path, volName, fileName), size, algo, nil, shardSize); err == nil { t.Fatal("expected to fail bitrot check") } - if err := storage.storage.bitrotVerify(context.Background(), pathJoin(path, volName, fileName), size+1, algo, nil, shardSize); err == nil { + if err := storage.storage.bitrotVerify(t.Context(), pathJoin(path, volName, fileName), size+1, algo, nil, shardSize); err == nil { t.Fatal("expected to fail bitrot check") } } @@ -1921,8 +1936,8 @@ func TestXLStorageReadMetadata(t *testing.T) { t.Fatal(err) } - disk.MakeVol(context.Background(), volume) - if _, err := disk.readMetadata(context.Background(), pathJoin(tmpDir, volume, object)); err != errFileNameTooLong { + disk.MakeVol(t.Context(), volume) + if _, err := disk.readMetadata(t.Context(), pathJoin(tmpDir, volume, object)); err != errFileNameTooLong { t.Fatalf("Unexpected error from readMetadata - expect %v: got %v", errFileNameTooLong, err) } } diff --git a/cmd/xl-storage_unix_test.go b/cmd/xl-storage_unix_test.go index a164ab29ed11c..cec6990bfd6f8 100644 --- a/cmd/xl-storage_unix_test.go +++ b/cmd/xl-storage_unix_test.go @@ -21,7 +21,6 @@ package cmd import ( - "context" "os" "path" "syscall" @@ -55,7 +54,7 @@ func TestIsValidUmaskVol(t *testing.T) { // Attempt to create a volume to verify the permissions later. // MakeVol creates 0777. - if err = disk.MakeVol(context.Background(), testCase.volName); err != nil { + if err = disk.MakeVol(t.Context(), testCase.volName); err != nil { t.Fatalf("Creating a volume failed with %s expected to pass.", err) } @@ -93,18 +92,18 @@ func TestIsValidUmaskFile(t *testing.T) { // Attempt to create a volume to verify the permissions later. // MakeVol creates directory with 0777 perms. 
- if err = disk.MakeVol(context.Background(), testCase.volName); err != nil { + if err = disk.MakeVol(t.Context(), testCase.volName); err != nil { t.Fatalf("Creating a volume failed with %s expected to pass.", err) } // Attempt to create a file to verify the permissions later. // AppendFile creates file with 0666 perms. - if err = disk.AppendFile(context.Background(), testCase.volName, pathJoin("hello-world.txt", xlStorageFormatFile), []byte("Hello World")); err != nil { + if err = disk.AppendFile(t.Context(), testCase.volName, pathJoin("hello-world.txt", xlStorageFormatFile), []byte("Hello World")); err != nil { t.Fatalf("Create a file `test` failed with %s expected to pass.", err) } // CheckFile - stat the file. - if _, err := disk.StatInfoFile(context.Background(), testCase.volName, "hello-world.txt/"+xlStorageFormatFile, false); err != nil { + if _, err := disk.StatInfoFile(t.Context(), testCase.volName, "hello-world.txt/"+xlStorageFormatFile, false); err != nil { t.Fatalf("Stat failed with %s expected to pass.", err) } } diff --git a/docker-buildx.sh b/docker-buildx.sh index 2ebc98e6f71c9..84acf7a53d761 100755 --- a/docker-buildx.sh +++ b/docker-buildx.sh @@ -1,35 +1,69 @@ #!/bin/bash -sudo sysctl net.ipv6.conf.all.disable_ipv6=0 +set -ex -release=$(git describe --abbrev=0 --tags) +function _init() { + ## All binaries are static make sure to disable CGO. + export CGO_ENABLED=0 + export CRED_DIR="/media/${USER}/minio" -docker buildx build --push --no-cache \ - --build-arg RELEASE="${release}" \ - -t "minio/minio:latest" \ - -t "quay.io/minio/minio:latest" \ - -t "minio/minio:${release}" \ - -t "quay.io/minio/minio:${release}" \ - --platform=linux/arm64,linux/amd64,linux/ppc64le,linux/s390x \ - -f Dockerfile.release . + ## List of architectures and OS to test coss compilation. + SUPPORTED_OSARCH="linux/ppc64le linux/amd64 linux/arm64" -docker buildx prune -f + remote=$(git remote get-url upstream) + if test "$remote" != "git@github.com:minio/minio.git"; then + echo "Script requires that the 'upstream' remote is set to git@github.com:minio/minio.git" + exit 1 + fi -docker buildx build --push --no-cache \ - --build-arg RELEASE="${release}" \ - -t "minio/minio:${release}-cpuv1" \ - -t "quay.io/minio/minio:${release}-cpuv1" \ - --platform=linux/arm64,linux/amd64,linux/ppc64le,linux/s390x \ - -f Dockerfile.release.old_cpu . + git remote update upstream && git checkout master && git rebase upstream/master -docker buildx prune -f + release=$(git describe --abbrev=0 --tags) + export release +} -docker buildx build --push --no-cache \ - --build-arg RELEASE="${release}" \ - -t "minio/minio:${release}.fips" \ - -t "quay.io/minio/minio:${release}.fips" \ - --platform=linux/amd64 -f Dockerfile.release.fips . +function _build() { + local osarch=$1 + IFS=/ read -r -a arr <<<"$osarch" + os="${arr[0]}" + arch="${arr[1]}" + package=$(go list -f '{{.ImportPath}}') + printf -- "--> %15s:%s\n" "${osarch}" "${package}" -docker buildx prune -f + # go build -trimpath to build the binary. + export GOOS=$os + export GOARCH=$arch + export MINIO_RELEASE=RELEASE + LDFLAGS=$(go run buildscripts/gen-ldflags.go) + go build -tags kqueue -trimpath --ldflags "${LDFLAGS}" -o ./minio-${arch}.${release} + minisign -qQSm ./minio-${arch}.${release} -s "$CRED_DIR/minisign.key" <"$CRED_DIR/minisign-passphrase" -sudo sysctl net.ipv6.conf.all.disable_ipv6=0 + sha256sum_str=$(sha256sum <./minio-${arch}.${release}) + rc=$? 
+ if [ "$rc" -ne 0 ]; then + abort "unable to generate sha256sum for ${1}" + fi + echo "${sha256sum_str// -/minio.${release}}" >./minio-${arch}.${release}.sha256sum +} + +function main() { + echo "Testing builds for OS/Arch: ${SUPPORTED_OSARCH}" + for each_osarch in ${SUPPORTED_OSARCH}; do + _build "${each_osarch}" + done + + sudo sysctl net.ipv6.conf.all.disable_ipv6=0 + + docker buildx build --push --no-cache \ + --build-arg RELEASE="${release}" \ + -t "registry.min.dev/community/minio:latest" \ + -t "registry.min.dev/community/minio:${release}" \ + --platform=linux/arm64,linux/amd64,linux/ppc64le \ + -f Dockerfile . + + docker buildx prune -f + + sudo sysctl net.ipv6.conf.all.disable_ipv6=0 +} + +_init && main "$@" diff --git a/dockerscripts/download-static-curl.sh b/dockerscripts/download-static-curl.sh new file mode 100644 index 0000000000000..0f12464c8ce64 --- /dev/null +++ b/dockerscripts/download-static-curl.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +function download_arch_specific_executable { + curl -f -L -s -q \ + https://github.com/moparisthebest/static-curl/releases/latest/download/curl-$1 \ + -o /go/bin/curl || exit 1 + chmod +x /go/bin/curl +} + +case $TARGETARCH in +"arm64") + download_arch_specific_executable aarch64 + ;; +"s390x") + echo "Not downloading static cURL because it does not exist for the $TARGETARCH architecture." + ;; +*) + download_arch_specific_executable "$TARGETARCH" + ;; +esac diff --git a/docs/auditlog/auditlog-echo.go b/docs/auditlog/auditlog-echo.go new file mode 100644 index 0000000000000..1fb8948911199 --- /dev/null +++ b/docs/auditlog/auditlog-echo.go @@ -0,0 +1,62 @@ +//go:build ignore +// +build ignore + +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package main + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "log" + "net/http" +) + +var port int + +func init() { + flag.IntVar(&port, "port", 8080, "Port to listen on") +} + +func mainHandler(w http.ResponseWriter, r *http.Request) { + body, err := io.ReadAll(r.Body) + defer r.Body.Close() + if err != nil { + log.Printf("Error reading request body: %v", err) + w.WriteHeader(http.StatusBadRequest) + return + } + + log.Printf(">>> %s %s\n", r.Method, r.URL.Path) + var out bytes.Buffer + json.Indent(&out, body, "", " ") + log.Printf("%s\n", out.String()) + + w.WriteHeader(http.StatusOK) +} + +func main() { + flag.Parse() + http.HandleFunc("/", mainHandler) + + log.Printf("Listening on :%d\n", port) + log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), nil)) +} diff --git a/docs/auditlog/auditlog-echo.md b/docs/auditlog/auditlog-echo.md new file mode 100644 index 0000000000000..4d0ab3267c148 --- /dev/null +++ b/docs/auditlog/auditlog-echo.md @@ -0,0 +1,17 @@ +# `auditlog-echo`: A tool to view MinIO Audit logs on the console + +1. 
Run the tool with: + +``` +go run docs/auditlog/auditlog-echo.go +``` + +The listen port has a default value (8080), but can be set with the `-port` flag. + +2. Configure audit logging in MinIO with for example: + +``` +mc admin config set myminio audit_webhook enable=on endpoint=http://localhost:8080 +``` + +3. Make any requests to MinIO and see audit logs printed to the tool's console. diff --git a/docs/bigdata/README.md b/docs/bigdata/README.md index d77c5160a7a03..39a7efe5cb470 100644 --- a/docs/bigdata/README.md +++ b/docs/bigdata/README.md @@ -16,7 +16,7 @@ MinIO also supports multi-cluster, multi-site federation similar to AWS regions - [Setup Ambari](https://docs.hortonworks.com/HDPDocuments/Ambari-2.7.1.0/bk_ambari-installation/content/set_up_the_ambari_server.html) which automatically sets up YARN - [Installing Spark](https://docs.hortonworks.com/HDPDocuments/HDP3/HDP-3.0.1/installing-spark/content/installing_spark.html) - Install MinIO Distributed Server using one of the guides below. - - [Deployment based on Kubernetes](https://min.io/docs/minio/kubernetes/upstream/index.html#quickstart-for-kubernetes) + - [Deployment based on Kubernetes](https://docs.min.io/community/minio-object-store/operations/deployments/kubernetes.html) - [Deployment based on MinIO Helm Chart](https://github.com/helm/charts/tree/master/stable/minio) ## **3. Configure Hadoop, Spark, Hive to use MinIO** diff --git a/docs/bucket/lifecycle/DESIGN.md b/docs/bucket/lifecycle/DESIGN.md index ff72a252d8ff2..f534023ac506d 100644 --- a/docs/bucket/lifecycle/DESIGN.md +++ b/docs/bucket/lifecycle/DESIGN.md @@ -51,5 +51,5 @@ Tiering and lifecycle transition are applicable only to erasure/distributed MinI ## Explore Further -- [MinIO | Golang Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html#setbucketlifecycle-ctx-context-context-bucketname-config-lifecycle-configuration-error) +- [MinIO | Golang Client API Reference](https://docs.min.io/community/minio-object-store/developers/go/API.html#SetBucketLifecycle) - [Object Lifecycle Management](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) diff --git a/docs/bucket/lifecycle/README.md b/docs/bucket/lifecycle/README.md index 456686cd82418..6888838f77eb0 100644 --- a/docs/bucket/lifecycle/README.md +++ b/docs/bucket/lifecycle/README.md @@ -4,8 +4,8 @@ Enable object lifecycle configuration on buckets to setup automatic deletion of ## 1. Prerequisites -- Install MinIO - [MinIO Quickstart Guide](https://min.io/docs/minio/linux/index.html#quickstart-for-linux). -- Install `mc` - [mc Quickstart Guide](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart) +- Install MinIO - [MinIO Quickstart Guide](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-on-redhat-linux.html). +- Install `mc` - [mc Quickstart Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart) ## 2. Enable bucket lifecycle configuration @@ -59,7 +59,7 @@ TempUploads | temp/ | ✓ | ✓ | 7 day(s) | ✗ ## 3. Activate ILM versioning features -This will only work with a versioned bucket, take a look at [Bucket Versioning Guide](https://min.io/docs/minio/linux/administration/object-management/object-versioning.html) for more understanding. +This will only work with a versioned bucket, take a look at [Bucket Versioning Guide](https://docs.min.io/community/minio-object-store/administration/object-management/object-versioning.html) for more understanding. 
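A quick way to satisfy this prerequisite is to enable versioning on the bucket with `mc` before adding any of the rules below; `myminio/mydata` is the same placeholder alias and bucket used in the later examples:

```
mc version enable myminio/mydata
```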
### 3.1 Automatic removal of non current objects versions @@ -115,7 +115,7 @@ e.g, To remove noncurrent versions of all objects keeping the most recent 5 nonc This JSON rule is equivalent to the following MinIO Client command: ``` -mc ilm rule add --noncurrent-expire-days 30 --noncurrent-expire-newer 5 myminio/mydata +mc ilm rule add --noncurrent-expire-days 30 --noncurrent-expire-newer 5 --prefix "user-uploads/" myminio/mydata ``` #### 3.2.a Automatic removal of noncurrent versions keeping only most recent ones immediately (MinIO only extension) @@ -178,7 +178,7 @@ When an object has only one version as a delete marker, the latter can be automa { "ID": "Removing all delete markers", "Expiration": { - "DeleteMarker": true + "ExpiredObjectDeleteMarker": true }, "Status": "Enabled" } @@ -228,5 +228,5 @@ Note that transition event notification is a MinIO extension. ## Explore Further -- [MinIO | Golang Client API Reference](https://min.io/docs/minio/linux/developers/go/API.html) +- [MinIO | Golang Client API Reference](https://docs.min.io/community/minio-object-store/developers/go/API.html) - [Object Lifecycle Management](https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) diff --git a/docs/bucket/lifecycle/setup_ilm_transition.sh b/docs/bucket/lifecycle/setup_ilm_transition.sh new file mode 100755 index 0000000000000..975ea81f853f9 --- /dev/null +++ b/docs/bucket/lifecycle/setup_ilm_transition.sh @@ -0,0 +1,90 @@ +#!/usr/bin/env bash + +set -x + +trap 'catch $LINENO' ERR + +# shellcheck disable=SC2120 +catch() { + if [ $# -ne 0 ]; then + echo "error on line $1" + for site in sitea siteb; do + echo "$site server logs =========" + cat "/tmp/${site}_1.log" + echo "===========================" + cat "/tmp/${site}_2.log" + done + fi + + echo "Cleaning up instances of MinIO" + pkill minio + pkill -9 minio + rm -rf /tmp/multisitea + rm -rf /tmp/multisiteb + if [ $# -ne 0 ]; then + exit $# + fi +} + +catch + +export MINIO_CI_CD=1 +export MINIO_BROWSER=off +export MINIO_KMS_AUTO_ENCRYPTION=off +export MINIO_PROMETHEUS_AUTH_TYPE=public +export MINIO_KMS_SECRET_KEY=my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw= +unset MINIO_KMS_KES_CERT_FILE +unset MINIO_KMS_KES_KEY_FILE +unset MINIO_KMS_KES_ENDPOINT +unset MINIO_KMS_KES_KEY_NAME + +if [ ! 
-f ./mc ]; then + wget --quiet -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc && + chmod +x mc +fi + +minio server --address 127.0.0.1:9001 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \ + "http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_1.log 2>&1 & +minio server --address 127.0.0.1:9002 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \ + "http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_2.log 2>&1 & + +minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \ + "http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_1.log 2>&1 & +minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \ + "http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 & + +# Wait to make sure all MinIO instances are up + +export MC_HOST_sitea=http://minioadmin:minioadmin@127.0.0.1:9001 +export MC_HOST_siteb=http://minioadmin:minioadmin@127.0.0.1:9004 + +./mc ready sitea +./mc ready siteb + +./mc mb --ignore-existing sitea/bucket +./mc mb --ignore-existing siteb/bucket + +sleep 10s + +## Add warm tier +./mc ilm tier add minio sitea WARM-TIER --endpoint http://localhost:9004 --access-key minioadmin --secret-key minioadmin --bucket bucket + +## Add ILM rules +./mc ilm add sitea/bucket --transition-days 0 --transition-tier WARM-TIER +./mc ilm rule list sitea/bucket + +./mc cp README.md sitea/bucket/README.md + +until $(./mc stat sitea/bucket/README.md --json | jq -r '.metadata."X-Amz-Storage-Class"' | grep -q WARM-TIER); do + echo "waiting until the object is tiered to run heal" + sleep 1s +done +./mc stat sitea/bucket/README.md + +success=$(./mc admin heal -r sitea/bucket/README.md --json --force | jq -r 'select((.name == "bucket/README.md") and (.after.color == "green")) | .after.color == "green"') +if [ "${success}" != "true" ]; then + echo "Found bug expected transitioned object to report 'green'" + exit 1 +fi + +catch diff --git a/docs/bucket/notifications/README.md b/docs/bucket/notifications/README.md index 15feebff9bd12..062f75fe60dc6 100644 --- a/docs/bucket/notifications/README.md +++ b/docs/bucket/notifications/README.md @@ -30,7 +30,7 @@ Various event types supported by MinIO server are | `s3:BucketCreated` | | `s3:BucketRemoved` | -Use client tools like `mc` to set and listen for event notifications using the [`event` sub-command](https://min.io/docs/minio/linux/reference/minio-mc/mc-event-add.html). MinIO SDK's [`BucketNotification` APIs](https://min.io/docs/minio/linux/developers/go/API.html#setbucketnotification-ctx-context-context-bucketname-string-config-notification-configuration-error) can also be used. The notification message MinIO sends to publish an event is a JSON message with the following [structure](https://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html). +Use client tools like `mc` to set and listen for event notifications using the [`event` sub-command](https://docs.min.io/community/minio-object-store/reference/minio-mc/mc-event-add.html). MinIO SDK's [`BucketNotification` APIs](https://docs.min.io/community/minio-object-store/developers/go/API.html#setbucketnotification-ctx-context-context-bucketname-string-config-notification-configuration-error) can also be used. 
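As an illustrative sketch, assuming a webhook target with ID `1` has already been configured (target configuration is covered later in this guide), events on a bucket can be wired to it with the `event` sub-command; `myminio/images` is a placeholder alias and bucket:

```
mc event add myminio/images arn:minio:sqs::1:webhook --event put,delete
```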
The notification message MinIO sends to publish an event is a JSON message with the following [structure](https://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html). Bucket events can be published to the following targets: @@ -43,8 +43,8 @@ Bucket events can be published to the following targets: ## Prerequisites -- Install and configure MinIO Server from [here](https://min.io/docs/minio/linux/index.html#procedure). -- Install and configure MinIO Client from [here](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart). +- Install and configure MinIO Server from [here](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-on-redhat-linux.html#procedure). +- Install and configure MinIO Client from [here](https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart). ``` $ mc admin config get myminio | grep notify diff --git a/docs/bucket/quota/README.md b/docs/bucket/quota/README.md index 003ad5d4032d4..643a65cc97cc4 100644 --- a/docs/bucket/quota/README.md +++ b/docs/bucket/quota/README.md @@ -6,8 +6,8 @@ Buckets can be configured to have `Hard` quota - it disallows writes to the buck ## Prerequisites -- Install MinIO - [MinIO Quickstart Guide](https://min.io/docs/minio/linux/index.html#procedure). -- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart) +- Install MinIO - [MinIO Quickstart Guide](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-on-redhat-linux.html#procedure). +- [Use `mc` with MinIO Server](https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart) ## Set bucket quota configuration diff --git a/docs/bucket/replication/DESIGN.md b/docs/bucket/replication/DESIGN.md index 8777caff60f2e..2f3d55f0d8bad 100644 --- a/docs/bucket/replication/DESIGN.md +++ b/docs/bucket/replication/DESIGN.md @@ -10,7 +10,7 @@ Replication relies on immutability provided by versioning to sync objects betwee If an object meets replication rules as set in the replication configuration, `X-Amz-Replication-Status` is first set to `PENDING` as the PUT operation completes and replication is queued (unless synchronous replication is in place). After replication is performed, the metadata on the source object version changes to `COMPLETED` or `FAILED` depending on whether replication succeeded. The object version on the target shows `X-Amz-Replication-Status` of `REPLICA` -All replication failures are picked up by the scanner which runs at a one minute frequency, each time scanning upto a sixteenth of the namespace. Object versions marked `PENDING` or `FAILED` are re-queued for replication. +All replication failures are picked up by the scanner which runs at a one minute frequency, each time scanning up to a sixteenth of the namespace. Object versions marked `PENDING` or `FAILED` are re-queued for replication. Replication speed depends on the cluster load, number of objects in the object store as well as storage speed. In addition, any bandwidth limits set via `mc admin bucket remote add` could also contribute to replication speed. The number of workers used for replication defaults to 100. Based on network bandwidth and system load, the number of workers used in replication can be configured using `mc admin config set alias api` to set the `replication_workers`. 
The prometheus metrics exposed by MinIO can be used to plan resource allocation and bandwidth management to optimize replication speed. @@ -156,5 +156,5 @@ If 3 or more targets are participating in active-active replication, the replica ## Explore Further -- [MinIO Bucket Versioning Implementation](https://min.io/docs/minio/linux/administration/object-management/object-versioning.html) -- [MinIO Client Quickstart Guide](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart) +- [MinIO Bucket Versioning Implementation](https://docs.min.io/community/minio-object-store/administration/object-management/object-versioning.html) +- [MinIO Client Quickstart Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart) diff --git a/docs/bucket/replication/README.md b/docs/bucket/replication/README.md index 41bc247cff870..829d98f005cf2 100644 --- a/docs/bucket/replication/README.md +++ b/docs/bucket/replication/README.md @@ -2,13 +2,13 @@ Bucket replication is designed to replicate selected objects in a bucket to a destination bucket. -The contents of this page have been migrated to the new [MinIO Documentation: Bucket Replication](https://min.io/docs/minio/linux/administration/bucket-replication.html) page. The [Bucket Replication](https://min.io/docs/minio/linux/administration/bucket-replication/bucket-replication-requirements.html) page references dedicated tutorials for configuring one-way "Active-Passive" and two-way "Active-Active" bucket replication. +The contents of this page have been migrated to the new [MinIO Documentation: Bucket Replication](https://docs.min.io/community/minio-object-store/administration/bucket-replication.html) page. The [Bucket Replication](https://docs.min.io/community/minio-object-store/administration/bucket-replication/bucket-replication-requirements.html) page references dedicated tutorials for configuring one-way "Active-Passive" and two-way "Active-Active" bucket replication. -To replicate objects in a bucket to a destination bucket on a target site either in the same cluster or a different cluster, start by enabling [versioning](https://min.io/docs/minio/linux/administration/object-management/object-versioning.html) for both source and destination buckets. Finally, the target site and the destination bucket need to be configured on the source MinIO server. +To replicate objects in a bucket to a destination bucket on a target site either in the same cluster or a different cluster, start by enabling [versioning](https://docs.min.io/community/minio-object-store/administration/object-management/object-versioning.html) for both source and destination buckets. Finally, the target site and the destination bucket need to be configured on the source MinIO server. ## Highlights -- Supports source and destination buckets to have the same name unlike AWS S3, addresses variety of usecases such as *Splunk*, *Veeam* site to site DR. +- Supports source and destination buckets to have the same name unlike AWS S3, addresses variety of use-cases such as *Splunk*, *Veeam* site to site DR. - Supports object locking/retention across source and destination buckets natively out of the box, unlike AWS S3. - Simpler implementation than [AWS S3 Bucket Replication Config](https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html) with requirements such as IAM Role, AccessControlTranslation, Metrics and SourceSelectionCriteria are not needed with MinIO. 
- Active-Active replication @@ -155,7 +155,7 @@ The replication configuration generated has the following format and can be expo The replication configuration follows [AWS S3 Spec](https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html). Any objects uploaded to the source bucket that meet replication criteria will now be automatically replicated by the MinIO server to the remote destination bucket. Replication can be disabled at any time by disabling specific rules in the configuration or deleting the replication configuration entirely. -When object locking is used in conjunction with replication, both source and destination buckets needs to have [object locking](https://min.io/docs/minio/linux/administration/object-management/object-retention.html) enabled. Similarly objects encrypted on the server side, will be replicated if destination also supports encryption. +When object locking is used in conjunction with replication, both source and destination buckets needs to have [object locking](https://docs.min.io/community/minio-object-store/administration/object-management/object-retention.html) enabled. Similarly objects encrypted on the server side, will be replicated if destination also supports encryption. Replication status can be seen in the metadata on the source and destination objects. On the source side, the `X-Amz-Replication-Status` changes from `PENDING` to `COMPLETED` or `FAILED` after replication attempt either succeeded or failed respectively. On the destination side, a `X-Amz-Replication-Status` status of `REPLICA` indicates that the object was replicated successfully. Any replication failures are automatically re-attempted during a periodic disk scanner cycle. @@ -277,5 +277,5 @@ MinIO does not support SSE-C encrypted objects on replicated buckets, any applic ## Explore Further - [MinIO Bucket Replication Design](https://github.com/minio/minio/blob/master/docs/bucket/replication/DESIGN.md) -- [MinIO Bucket Versioning Implementation](https://min.io/docs/minio/linux/administration/object-management/object-retention.html) -- [MinIO Client Quickstart Guide](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart) +- [MinIO Bucket Versioning Implementation](https://docs.min.io/community/minio-object-store/administration/object-management/object-retention.html) +- [MinIO Client Quickstart Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart) diff --git a/docs/bucket/replication/delete-replication.sh b/docs/bucket/replication/delete-replication.sh index 2eadf52f70147..c91c70f2ebdd3 100755 --- a/docs/bucket/replication/delete-replication.sh +++ b/docs/bucket/replication/delete-replication.sh @@ -23,6 +23,9 @@ catch() { pkill minio pkill mc rm -rf /tmp/xl/ + if [ $# -ne 0 ]; then + exit $# + fi } catch @@ -52,13 +55,18 @@ export MINIO_ROOT_USER="minioadmin" export MINIO_ROOT_PASSWORD="minioadmin" ./minio server --address ":9001" /tmp/xl/1/{1...4}/ 2>&1 >/tmp/dc1.log & +pid1=$! ./minio server --address ":9002" /tmp/xl/2/{1...4}/ 2>&1 >/tmp/dc2.log & +pid2=$! 
sleep 3 export MC_HOST_myminio1=http://minioadmin:minioadmin@localhost:9001 export MC_HOST_myminio2=http://minioadmin:minioadmin@localhost:9002 +./mc ready myminio1 +./mc ready myminio2 + ./mc mb myminio1/testbucket/ ./mc version enable myminio1/testbucket/ ./mc mb myminio2/testbucket/ @@ -66,6 +74,8 @@ export MC_HOST_myminio2=http://minioadmin:minioadmin@localhost:9002 ./mc replicate add myminio1/testbucket --remote-bucket http://minioadmin:minioadmin@localhost:9002/testbucket/ --priority 1 +# Test replication of delete markers and permanent deletes + ./mc cp README.md myminio1/testbucket/dir/file ./mc cp README.md myminio1/testbucket/dir/file @@ -79,11 +89,11 @@ echo "=== myminio2" versionId="$(./mc ls --json --versions myminio1/testbucket/dir/ | tail -n1 | jq -r .versionId)" -aws configure set aws_access_key_id minioadmin --profile minioadmin -aws configure set aws_secret_access_key minioadmin --profile minioadmin -aws configure set default.region us-east-1 --profile minioadmin +export AWS_ACCESS_KEY_ID=minioadmin +export AWS_SECRET_ACCESS_KEY=minioadmin +export AWS_REGION=us-east-1 -aws s3api --endpoint-url http://localhost:9001 --profile minioadmin delete-object --bucket testbucket --key dir/file --version-id "$versionId" +aws s3api --endpoint-url http://localhost:9001 delete-object --bucket testbucket --key dir/file --version-id "$versionId" ./mc ls -r --versions myminio1/testbucket >/tmp/myminio1.txt ./mc ls -r --versions myminio2/testbucket >/tmp/myminio2.txt @@ -108,5 +118,33 @@ if [ $ret -ne 0 ]; then exit 1 fi +# Test listing of non replicated permanent deletes + +set -x + +./mc mb myminio1/foobucket/ myminio2/foobucket/ --with-versioning +./mc replicate add myminio1/foobucket --remote-bucket http://minioadmin:minioadmin@localhost:9002/foobucket/ --priority 1 +./mc cp README.md myminio1/foobucket/dir/file + +versionId="$(./mc ls --json --versions myminio1/foobucket/dir/ | jq -r .versionId)" + +kill ${pid2} && wait ${pid2} || true + +aws s3api --endpoint-url http://localhost:9001 delete-object --bucket foobucket --key dir/file --version-id "$versionId" + +out="$(./mc ls myminio1/foobucket/dir/)" +if [ "$out" != "" ]; then + echo "BUG: non versioned listing should not show pending/failed replicated delete:" + echo "$out" + exit 1 +fi + +out="$(./mc ls --versions myminio1/foobucket/dir/)" +if [ "$out" != "" ]; then + echo "BUG: versioned listing should not show pending/failed replicated deletes:" + echo "$out" + exit 1 +fi + echo "Success" catch diff --git a/docs/bucket/replication/setup_2site_existing_replication.sh b/docs/bucket/replication/setup_2site_existing_replication.sh index 5a5e026c03908..d7146201c8de9 100755 --- a/docs/bucket/replication/setup_2site_existing_replication.sh +++ b/docs/bucket/replication/setup_2site_existing_replication.sh @@ -24,6 +24,9 @@ catch() { rm -rf /tmp/multisitea rm -rf /tmp/multisiteb rm -rf /tmp/data + if [ $# -ne 0 ]; then + exit $# + fi } catch @@ -56,11 +59,12 @@ minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \ "http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 & -sleep 10s - export MC_HOST_sitea=http://minio:minio123@127.0.0.1:9001 export MC_HOST_siteb=http://minio:minio123@127.0.0.1:9004 +./mc ready sitea +./mc ready siteb + ./mc mb sitea/bucket ## Create 100 files diff --git a/docs/bucket/replication/setup_3site_replication.sh 
b/docs/bucket/replication/setup_3site_replication.sh index 6aa45f5820812..db8fc7147750a 100755 --- a/docs/bucket/replication/setup_3site_replication.sh +++ b/docs/bucket/replication/setup_3site_replication.sh @@ -26,6 +26,9 @@ catch() { rm -rf /tmp/multisitea rm -rf /tmp/multisiteb rm -rf /tmp/multisitec + if [ $# -ne 0 ]; then + exit $# + fi } catch @@ -43,13 +46,8 @@ unset MINIO_KMS_KES_KEY_FILE unset MINIO_KMS_KES_ENDPOINT unset MINIO_KMS_KES_KEY_NAME -( - cd ./docs/debugging/s3-check-md5 - go install -v -) - -wget -q -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc && - chmod +x mc +go install -v github.com/minio/mc@master +cp -a $(go env GOPATH)/bin/mc ./mc if [ ! -f mc.RELEASE.2021-03-12T03-36-59Z ]; then wget -q -O mc.RELEASE.2021-03-12T03-36-59Z https://dl.minio.io/client/mc/release/linux-amd64/archive/mc.RELEASE.2021-03-12T03-36-59Z && @@ -71,12 +69,14 @@ minio server --address 127.0.0.1:9005 "http://127.0.0.1:9005/tmp/multisitec/data minio server --address 127.0.0.1:9006 "http://127.0.0.1:9005/tmp/multisitec/data/disterasure/xl{1...4}" \ "http://127.0.0.1:9006/tmp/multisitec/data/disterasure/xl{5...8}" >/tmp/sitec_2.log 2>&1 & -sleep 30 - export MC_HOST_sitea=http://minio:minio123@127.0.0.1:9001 export MC_HOST_siteb=http://minio:minio123@127.0.0.1:9004 export MC_HOST_sitec=http://minio:minio123@127.0.0.1:9006 +./mc ready sitea +./mc ready siteb +./mc ready sitec + ./mc mb sitea/bucket ./mc version enable sitea/bucket ./mc mb -l sitea/olockbucket @@ -166,7 +166,7 @@ echo "Set default governance retention 30d" ./mc retention set --default governance 30d sitea/olockbucket echo "Copying data to source sitea/bucket" -./mc cp --encrypt "sitea/" --quiet /etc/hosts sitea/bucket +./mc cp --enc-s3 "sitea/" --quiet /etc/hosts sitea/bucket sleep 1 echo "Copying data to source sitea/olockbucket" @@ -174,20 +174,20 @@ echo "Copying data to source sitea/olockbucket" sleep 1 echo "Verifying the metadata difference between source and target" -if diff -pruN <(./mc stat --json sitea/bucket/hosts | jq .) <(./mc stat --json siteb/bucket/hosts | jq .) | grep -q 'COMPLETED\|REPLICA'; then +if diff -pruN <(./mc stat --no-list --json sitea/bucket/hosts | jq .) <(./mc stat --no-list --json siteb/bucket/hosts | jq .) | grep -q 'COMPLETED\|REPLICA'; then echo "verified sitea-> COMPLETED, siteb-> REPLICA" fi -if diff -pruN <(./mc stat --json sitea/bucket/hosts | jq .) <(./mc stat --json sitec/bucket/hosts | jq .) | grep -q 'COMPLETED\|REPLICA'; then +if diff -pruN <(./mc stat --no-list --json sitea/bucket/hosts | jq .) <(./mc stat --no-list --json sitec/bucket/hosts | jq .) | grep -q 'COMPLETED\|REPLICA'; then echo "verified sitea-> COMPLETED, sitec-> REPLICA" fi echo "Verifying the metadata difference between source and target" -if diff -pruN <(./mc stat --json sitea/olockbucket/hosts | jq .) <(./mc stat --json siteb/olockbucket/hosts | jq .) | grep -q 'COMPLETED\|REPLICA'; then +if diff -pruN <(./mc stat --no-list --json sitea/olockbucket/hosts | jq .) <(./mc stat --no-list --json siteb/olockbucket/hosts | jq .) | grep -q 'COMPLETED\|REPLICA'; then echo "verified sitea-> COMPLETED, siteb-> REPLICA" fi -if diff -pruN <(./mc stat --json sitea/olockbucket/hosts | jq .) <(./mc stat --json sitec/olockbucket/hosts | jq .) | grep -q 'COMPLETED\|REPLICA'; then +if diff -pruN <(./mc stat --no-list --json sitea/olockbucket/hosts | jq .) <(./mc stat --no-list --json sitec/olockbucket/hosts | jq .) 
| grep -q 'COMPLETED\|REPLICA'; then echo "verified sitea-> COMPLETED, sitec-> REPLICA" fi @@ -197,25 +197,25 @@ head -c 221227088 200M ./mc.RELEASE.2021-03-12T03-36-59Z cp --config-dir ~/.mc --encrypt "sitea" --quiet 200M "sitea/bucket/200M-enc-v1" ./mc.RELEASE.2021-03-12T03-36-59Z cp --config-dir ~/.mc --quiet 200M "sitea/bucket/200M-v1" -./mc cp --encrypt "sitea" --quiet 200M "sitea/bucket/200M-enc-v2" +./mc cp --enc-s3 "sitea" --quiet 200M "sitea/bucket/200M-enc-v2" ./mc cp --quiet 200M "sitea/bucket/200M-v2" sleep 10 echo "Verifying ETag for all objects" -s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9001/ -bucket bucket -s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9002/ -bucket bucket -s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9003/ -bucket bucket -s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9004/ -bucket bucket -s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9005/ -bucket bucket -s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9006/ -bucket bucket - -s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9001/ -bucket olockbucket -s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9002/ -bucket olockbucket -s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9003/ -bucket olockbucket -s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9004/ -bucket olockbucket -s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9005/ -bucket olockbucket -s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9006/ -bucket olockbucket +./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9001/ -bucket bucket +./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9002/ -bucket bucket +./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9003/ -bucket bucket +./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9004/ -bucket bucket +./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9005/ -bucket bucket +./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9006/ -bucket bucket + +./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9001/ -bucket olockbucket +./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9002/ -bucket olockbucket +./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9003/ -bucket olockbucket +./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9004/ -bucket olockbucket +./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9005/ -bucket olockbucket +./s3-check-md5 -versions -access-key minio -secret-key minio123 -endpoint http://127.0.0.1:9006/ -bucket olockbucket # additional tests for encryption object alignment go install -v github.com/minio/multipart-debug@latest @@ -233,9 +233,9 @@ multipart-debug --endpoint 127.0.0.1:9002 --accesskey minio --secretkey minio123 sleep 10 -./mc stat 
sitea/bucket/new-test-encrypted-object -./mc stat siteb/bucket/new-test-encrypted-object -./mc stat sitec/bucket/new-test-encrypted-object +./mc stat --no-list sitea/bucket/new-test-encrypted-object +./mc stat --no-list siteb/bucket/new-test-encrypted-object +./mc stat --no-list sitec/bucket/new-test-encrypted-object ./mc ls -r sitea/bucket/ ./mc ls -r siteb/bucket/ diff --git a/docs/bucket/replication/setup_ilm_expiry_replication.sh b/docs/bucket/replication/setup_ilm_expiry_replication.sh index 015718bfd056d..b7fca9276bcfa 100755 --- a/docs/bucket/replication/setup_ilm_expiry_replication.sh +++ b/docs/bucket/replication/setup_ilm_expiry_replication.sh @@ -24,6 +24,9 @@ catch() { rm -rf /tmp/multisitec rm -rf /tmp/multisited rm -rf /tmp/data + if [ $# -ne 0 ]; then + exit $# + fi } catch @@ -67,19 +70,25 @@ minio server --address 127.0.0.1:9008 "http://127.0.0.1:9007/tmp/multisited/data "http://127.0.0.1:9008/tmp/multisited/data/disterasure/xl{5...8}" >/tmp/sited_2.log 2>&1 & # Wait to make sure all MinIO instances are up -sleep 20s export MC_HOST_sitea=http://minio:minio123@127.0.0.1:9001 export MC_HOST_siteb=http://minio:minio123@127.0.0.1:9004 export MC_HOST_sitec=http://minio:minio123@127.0.0.1:9006 export MC_HOST_sited=http://minio:minio123@127.0.0.1:9008 +./mc ready sitea +./mc ready siteb +./mc ready sitec +./mc ready sited + ./mc mb sitea/bucket ./mc mb sitec/bucket ## Setup site replication ./mc admin replicate add sitea siteb --replicate-ilm-expiry +sleep 10s + ## Add warm tier ./mc ilm tier add minio sitea WARM-TIER --endpoint http://localhost:9006 --access-key minio --secret-key minio123 --bucket bucket @@ -101,7 +110,8 @@ if [ "$flag2" != "true" ]; then fi ## Check if ILM expiry rules replicated -sleep 20 +sleep 30s + ./mc ilm rule list siteb/bucket count=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules | length') if [ $count -ne 1 ]; then @@ -146,7 +156,8 @@ fi ## Check edit of ILM expiry rule and its replication id=$(./mc ilm rule list sitea/bucket --json | jq '.config.Rules[] | select(.Expiration.Days==3) | .ID' | sed 's/"//g') ./mc ilm edit --id "${id}" --expire-days "100" sitea/bucket -sleep 30 +sleep 30s + count1=$(./mc ilm rule list sitea/bucket --json | jq '.config.Rules[0].Expiration.Days') count2=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[0].Expiration.Days') if [ $count1 -ne 100 ]; then @@ -173,7 +184,8 @@ fi ## Perform individual updates of rules to sites ./mc ilm edit --id "${id}" --expire-days "999" sitea/bucket -sleep 1 +sleep 5s + ./mc ilm edit --id "${id}" --expire-days "888" siteb/bucket # when ilm expiry re-enabled, this should win ## Check re-enabling of ILM expiry rules replication @@ -190,7 +202,7 @@ if [ "$flag" != "true" ]; then fi ## Check if latest updated rules get replicated to all sites post re-enable of ILM expiry rules replication -sleep 30 +sleep 30s count1=$(./mc ilm rule list sitea/bucket --json | jq '.config.Rules[0].Expiration.Days') count2=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[0].Expiration.Days') if [ $count1 -ne 888 ]; then @@ -211,7 +223,8 @@ fi ## Check replication of edit of prefix, tags and status of ILM Expiry Rules ./mc ilm rule edit --id "${id}" --prefix "newprefix" --tags "ntag1=nval1&ntag2=nval2" --disable sitea/bucket -sleep 30 +sleep 30s + nprefix=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[0].Filter.And.Prefix' | sed 's/"//g') ntagName1=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[0].Filter.And.Tags[0].Key' | sed 's/"//g') ntagVal1=$(./mc 
ilm rule list siteb/bucket --json | jq '.config.Rules[0].Filter.And.Tags[0].Value' | sed 's/"//g') @@ -233,7 +246,8 @@ fi ## Check replication of deleted ILM expiry rules ./mc ilm rule remove --id "${id}" sitea/bucket -sleep 30 +sleep 30s + # should error as rule doesn't exist error=$(./mc ilm rule list siteb/bucket --json | jq '.error.cause.message' | sed 's/"//g') if [ "$error" != "The lifecycle configuration does not exist" ]; then @@ -245,7 +259,8 @@ fi # Add rules again as previous tests removed all ./mc ilm add sitea/bucket --transition-days 0 --transition-tier WARM-TIER --transition-days 0 --noncurrent-expire-days 2 --expire-days 3 --prefix "myprefix" --tags "tag1=val1&tag2=val2" ./mc admin replicate add sitea siteb sited -sleep 30 +sleep 30s + # Check site replication info and status for new site sitesCount=$(mc admin replicate info sited --json | jq '.sites | length') if [ ${sitesCount} -ne 3 ]; then @@ -282,7 +297,8 @@ fi id=$(./mc ilm rule list siteb/bucket --json | jq '.config.Rules[] | select(.Expiration.Days==3) | .ID' | sed 's/"//g') # Remove rule from siteb ./mc ilm rule remove --id "${id}" siteb/bucket -sleep 30 # allow to replicate +sleep 30s # allow to replicate + # sitea should still contain the transition portion of rule transitionRuleDays=$(./mc ilm rule list sitea/bucket --json | jq '.config.Rules[0].Transition.Days') expirationRuleDet=$(./mc ilm rule list sitea/bucket --json | jq '.config.Rules[0].Expiration') diff --git a/docs/bucket/replication/sio-error.sh b/docs/bucket/replication/sio-error.sh index f7ebe323e9e67..2807104958175 100755 --- a/docs/bucket/replication/sio-error.sh +++ b/docs/bucket/replication/sio-error.sh @@ -35,6 +35,8 @@ sleep 10 ./mc alias set myminio1 http://localhost:9001 minioadmin minioadmin ./mc alias set myminio2 http://localhost:9101 minioadmin minioadmin +./mc ready myminio1 +./mc ready myminio2 sleep 1 ./mc mb myminio1/testbucket/ --with-lock diff --git a/docs/bucket/replication/test_del_marker_proxying.sh b/docs/bucket/replication/test_del_marker_proxying.sh new file mode 100755 index 0000000000000..8d7521b4cfc95 --- /dev/null +++ b/docs/bucket/replication/test_del_marker_proxying.sh @@ -0,0 +1,81 @@ +#!/usr/bin/env bash + +# shellcheck disable=SC2120 +exit_1() { + cleanup + + for site in sitea siteb; do + echo "$site server logs =========" + cat "/tmp/${site}_1.log" + echo "===========================" + cat "/tmp/${site}_2.log" + done + + exit 1 +} + +cleanup() { + echo -n "Cleaning up instances of MinIO ..." + pkill -9 minio || sudo pkill -9 minio + rm -rf /tmp/sitea + rm -rf /tmp/siteb + echo "done" +} + +cleanup + +export MINIO_CI_CD=1 +export MINIO_BROWSER=off + +make install-race + +# Start MinIO instances +echo -n "Starting MinIO instances ..." 
+minio server --address 127.0.0.1:9001 --console-address ":10000" "http://127.0.0.1:9001/tmp/sitea/data/disterasure/xl{1...4}" \ + "http://127.0.0.1:9002/tmp/sitea/data/disterasure/xl{5...8}" >/tmp/sitea_1.log 2>&1 & +minio server --address 127.0.0.1:9002 "http://127.0.0.1:9001/tmp/sitea/data/disterasure/xl{1...4}" \ + "http://127.0.0.1:9002/tmp/sitea/data/disterasure/xl{5...8}" >/tmp/sitea_2.log 2>&1 & + +minio server --address 127.0.0.1:9003 --console-address ":10001" "http://127.0.0.1:9003/tmp/siteb/data/disterasure/xl{1...4}" \ + "http://127.0.0.1:9004/tmp/siteb/data/disterasure/xl{5...8}" >/tmp/siteb_1.log 2>&1 & +minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/siteb/data/disterasure/xl{1...4}" \ + "http://127.0.0.1:9004/tmp/siteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 & + +echo "done" + +if [ ! -f ./mc ]; then + wget --quiet -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc && + chmod +x mc +fi + +export MC_HOST_sitea=http://minioadmin:minioadmin@127.0.0.1:9001 +export MC_HOST_siteb=http://minioadmin:minioadmin@127.0.0.1:9004 + +./mc ready sitea +./mc ready siteb + +./mc mb sitea/bucket +./mc version enable sitea/bucket +./mc mb siteb/bucket +./mc version enable siteb/bucket + +# Set bucket replication +./mc replicate add sitea/bucket --remote-bucket siteb/bucket + +# Run the test to make sure proxying of DEL marker doesn't happen +loop_count=0 +while true; do + if [ $loop_count -eq 1000 ]; then + break + fi + echo "Hello World" | ./mc pipe sitea/bucket/obj$loop_count + ./mc rm sitea/bucket/obj$loop_count + RESULT=$({ ./mc stat --no-list sitea/bucket/obj$loop_count; } 2>&1) + if [[ ${RESULT} != *"Object does not exist"* ]]; then + echo "BUG: stat should fail. succeeded." + exit_1 + fi + loop_count=$((loop_count + 1)) +done + +cleanup diff --git a/docs/bucket/retention/README.md b/docs/bucket/retention/README.md index aa7e5328ccb9b..c5087b22acd4a 100644 --- a/docs/bucket/retention/README.md +++ b/docs/bucket/retention/README.md @@ -1,4 +1,4 @@ -# Object Lock and Immutablity Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) +# Object Lock and Immutability Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) MinIO server allows WORM for specific objects or by configuring a bucket with default object lock configuration that applies default retention mode and retention duration to all objects. This makes objects in the bucket immutable i.e. delete of the version are not allowed until an expiry specified in the bucket's object lock configuration or object retention. @@ -10,7 +10,7 @@ A default retention period and retention mode can be configured on a bucket to b ### 1. Prerequisites -- Install MinIO - [MinIO Quickstart Guide](https://min.io/docs/minio/linux/index.html#quickstart-for-linux) +- Install MinIO - [MinIO Quickstart Guide](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-on-redhat-linux.html) - Install `awscli` - [Installing AWS Command Line Interface](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) ### 2. 
Set bucket WORM configuration @@ -53,7 +53,7 @@ See ## Explore Further -- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart) -- [Use `aws-cli` with MinIO Server](https://min.io/docs/minio/linux/integrations/aws-cli-with-minio.html) -- [Use `minio-go` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/go/minio-go.html) -- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html) +- [Use `mc` with MinIO Server](https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart) +- [Use `aws-cli` with MinIO Server](https://docs.min.io/community/minio-object-store/integrations/aws-cli-with-minio.html) +- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/community/minio-object-store/developers/go/minio-go.html) +- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html) diff --git a/docs/bucket/versioning/README.md b/docs/bucket/versioning/README.md index cdb226c87dfb7..aeb9aa0018045 100644 --- a/docs/bucket/versioning/README.md +++ b/docs/bucket/versioning/README.md @@ -79,7 +79,7 @@ Duplicate delete markers are not created on MinIO buckets with versioning, if an ### Motivation -**PLEASE READ: This feature is meant for advanced usecases only where the setup is using bucket versioning or with replicated buckets, use this feature to optimize versioning behavior for some specific applications. MinIO experts will evaluate and guide on the benefits for your application, please reach out to us on .** +**PLEASE READ: This feature is meant for advanced use-cases only where the setup is using bucket versioning or with replicated buckets, use this feature to optimize versioning behavior for some specific applications. MinIO experts will evaluate and guide on the benefits for your application, please reach out to us on .** Spark/Hadoop workloads which use Hadoop MR Committer v1/v2 algorithm upload objects to a temporary prefix in a bucket. These objects are 'renamed' to a different prefix on Job commit. Object storage admins are forced to configure separate ILM policies to expire these objects and their versions to reclaim space. 
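The paragraph above refers to the separate ILM policies admins end up configuring to reclaim space from these temporary Spark/Hadoop objects. As a minimal sketch only — the bucket alias, prefix, and retention value here are placeholders for illustration, not values taken from this change — such a cleanup rule can be added with the same `mc ilm rule add` flags shown in the lifecycle README earlier in this patch:

```
# Minimal sketch: expire noncurrent versions under a temporary prefix quickly.
# "myminio/mydata" and "_temporary/" are placeholder values for illustration.
mc ilm rule add --noncurrent-expire-days 1 --prefix "_temporary/" myminio/mydata
```

The `--noncurrent-expire-days` and `--prefix` flags are the ones documented in the lifecycle README above; only the prefix and day count would change per workload.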
@@ -211,7 +211,7 @@ public class IsVersioningEnabled { ## Explore Further -- [Use `minio-java` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/java/minio-java.html) -- [Object Lock and Immutablity Guide](https://min.io/docs/minio/linux/administration/object-management/object-retention.html) -- [MinIO Admin Complete Guide](https://min.io/docs/minio/linux/reference/minio-mc-admin.html) -- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html) +- [Use `minio-java` SDK with MinIO Server](https://docs.min.io/community/minio-object-store/developers/java/minio-java.html) +- [Object Lock and Immutability Guide](https://docs.min.io/community/minio-object-store/administration/object-management/object-retention.html) +- [MinIO Admin Complete Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc-admin.html) +- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html) diff --git a/docs/bucket/versioning/versioning-tests.sh b/docs/bucket/versioning/versioning-tests.sh index 584734b6f93ee..84fbec6f1c497 100755 --- a/docs/bucket/versioning/versioning-tests.sh +++ b/docs/bucket/versioning/versioning-tests.sh @@ -10,16 +10,19 @@ trap 'catch $LINENO' ERR catch() { if [ $# -ne 0 ]; then echo "error on line $1" - echo "$site server logs =========" - cat "/tmp/${site}_1.log" + echo "server logs =========" + cat "/tmp/sitea_1.log" echo "===========================" - cat "/tmp/${site}_2.log" + cat "/tmp/sitea_2.log" fi echo "Cleaning up instances of MinIO" pkill minio pkill -9 minio rm -rf /tmp/multisitea + if [ $# -ne 0 ]; then + exit $# + fi } catch @@ -27,8 +30,6 @@ catch set -e export MINIO_CI_CD=1 export MINIO_BROWSER=off -export MINIO_ROOT_USER="minio" -export MINIO_ROOT_PASSWORD="minio123" export MINIO_KMS_AUTO_ENCRYPTION=off export MINIO_PROMETHEUS_AUTH_TYPE=public export MINIO_KMS_SECRET_KEY=my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw= @@ -42,32 +43,35 @@ if [ ! 
-f ./mc ]; then chmod +x mc fi -minio server --address 127.0.0.1:9001 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \ - "http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_1.log 2>&1 & -minio server --address 127.0.0.1:9002 "http://127.0.0.1:9001/tmp/multisitea/data/disterasure/xl{1...4}" \ - "http://127.0.0.1:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_2.log 2>&1 & +minio server -S /tmp/no-certs --address ":9001" "http://localhost:9001/tmp/multisitea/data/disterasure/xl{1...4}" \ + "http://localhost:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_1.log 2>&1 & + +minio server -S /tmp/no-certs --address ":9002" "http://localhost:9001/tmp/multisitea/data/disterasure/xl{1...4}" \ + "http://localhost:9002/tmp/multisitea/data/disterasure/xl{5...8}" >/tmp/sitea_2.log 2>&1 & -export MC_HOST_sitea=http://minio:minio123@127.0.0.1:9001 +export MC_HOST_sitea=http://minioadmin:minioadmin@localhost:9002 -./mc mb sitea/delissue +./mc ready sitea -./mc version enable sitea/delissue +./mc mb sitea/delissue --insecure -echo hello | ./mc pipe sitea/delissue/hello +./mc version enable sitea/delissue --insecure -./mc version suspend sitea/delissue +echo hello | ./mc pipe sitea/delissue/hello --insecure -./mc rm sitea/delissue/hello +./mc version suspend sitea/delissue --insecure -./mc version enable sitea/delissue +./mc rm sitea/delissue/hello --insecure -echo hello | ./mc pipe sitea/delissue/hello +./mc version enable sitea/delissue --insecure -./mc version suspend sitea/delissue +echo hello | ./mc pipe sitea/delissue/hello --insecure -./mc rm sitea/delissue/hello +./mc version suspend sitea/delissue --insecure -count=$(./mc ls --versions sitea/delissue | wc -l) +./mc rm sitea/delissue/hello --insecure + +count=$(./mc ls --versions sitea/delissue --insecure | wc -l) if [ ${count} -ne 3 ]; then echo "BUG: expected number of versions to be '3' found ${count}" @@ -75,7 +79,21 @@ if [ ${count} -ne 3 ]; then ./mc ls --versions sitea/delissue fi +./mc mb sitea/testbucket + +./mc version enable sitea/testbucket + +./mc put --quiet README.md sitea/testbucket/file +etag1=$(./mc cat sitea/testbucket/file | md5sum --tag | awk {'print $4'}) + +./mc cp --quiet --storage-class "STANDARD" sitea/testbucket/file sitea/testbucket/file +etag2=$(./mc cat sitea/testbucket/file | md5sum --tag | awk {'print $4'}) +if [ $etag1 != $etag2 ]; then + echo "expected $etag1, got $etag2" + exit 1 +fi + echo "SUCCESS:" -./mc ls --versions sitea/delissue +./mc ls --versions sitea/delissue --insecure catch diff --git a/docs/chroot/README.md b/docs/chroot/README.md index c551ceb794bfb..1adb019f904fd 100644 --- a/docs/chroot/README.md +++ b/docs/chroot/README.md @@ -9,12 +9,24 @@ Chroot allows user based namespace isolation on many standard Linux deployments. ## 2. Install MinIO in Chroot +> **Note:** MinIO community edition is now distributed as source code only. Pre-compiled binaries are no longer provided for new releases. 
+ +Build MinIO from source and install it in the chroot directory: + ```sh +# Build MinIO from source +go install github.com/minio/minio@latest + +# Create the bin directory in your chroot mkdir -p /mnt/export/${USER}/bin -wget https://dl.min.io/server/minio/release/linux-amd64/minio -O /mnt/export/${USER}/bin/minio + +# Copy the built binary to the chroot directory +cp $(go env GOPATH)/bin/minio /mnt/export/${USER}/bin/minio chmod +x /mnt/export/${USER}/bin/minio ``` +Alternatively, if you have an existing legacy binary, you can still use it, but note that it will not receive updates. + Bind your `proc` mount to the target chroot directory ``` @@ -39,8 +51,8 @@ Instance is now accessible on the host at port 9000, proceed to access the Web b ## Explore Further -- [MinIO Erasure Code Overview](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) -- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html) -- [Use `aws-cli` with MinIO Server](https://min.io/docs/minio/linux/integrations/aws-cli-with-minio.html) -- [Use `minio-go` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/go/minio-go.html) -- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html) +- [MinIO Erasure Code Overview](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html) +- [Use `mc` with MinIO Server](https://docs.min.io/community/minio-object-store/reference/minio-mc.html) +- [Use `aws-cli` with MinIO Server](https://docs.min.io/community/minio-object-store/integrations/aws-cli-with-minio.html) +- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/community/minio-object-store/developers/go/minio-go.html) +- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html) diff --git a/docs/compression/README.md b/docs/compression/README.md index 8cf9fa4bd7412..cc7bac663f95a 100644 --- a/docs/compression/README.md +++ b/docs/compression/README.md @@ -19,7 +19,7 @@ will increase speed when the content can be compressed. ### 1. Prerequisites -Install MinIO - [MinIO Quickstart Guide](https://min.io/docs/minio/linux/index.html#quickstart-for-linux). +Install MinIO - [MinIO Quickstart Guide](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-on-redhat-linux.html). ### 2. Run MinIO with compression @@ -59,6 +59,14 @@ export MINIO_COMPRESSION_EXTENSIONS=".txt,.log,.csv,.json,.tar,.xml,.bin" export MINIO_COMPRESSION_MIME_TYPES="text/*,application/json,application/xml" ``` +> [!NOTE] +> To enable compression for all content when using environment variables, set either or both of the extensions and MIME types to `*` instead of an empty string: +> ```bash +> export MINIO_COMPRESSION_ENABLE="on" +> export MINIO_COMPRESSION_EXTENSIONS="*" +> export MINIO_COMPRESSION_MIME_TYPES="*" +> ``` + ### 3. Compression + Encryption Combining encryption and compression is not safe in all setups. @@ -123,7 +131,7 @@ the data directory to view the size of the object. 
## Explore Further -- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html) -- [Use `aws-cli` with MinIO Server](https://min.io/docs/minio/linux/integrations/aws-cli-with-minio.html) -- [Use `minio-go` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/go/minio-go.html) -- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html) +- [Use `mc` with MinIO Server](https://docs.min.io/community/minio-object-store/reference/minio-mc.html) +- [Use `aws-cli` with MinIO Server](https://docs.min.io/community/minio-object-store/integrations/aws-cli-with-minio.html) +- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/community/minio-object-store/developers/go/minio-go.html) +- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html) diff --git a/docs/config/README.md b/docs/config/README.md index 5336b1fe5daa5..9434752741b76 100644 --- a/docs/config/README.md +++ b/docs/config/README.md @@ -6,7 +6,7 @@ MinIO stores all its config as part of the server deployment, config is erasure ### Certificate Directory -TLS certificates by default are expected to be stored under ``${HOME}/.minio/certs`` directory. You need to place certificates here to enable `HTTPS` based access. Read more about [How to secure access to MinIO server with TLS](https://min.io/docs/minio/linux/operations/network-encryption.html). +TLS certificates by default are expected to be stored under ``${HOME}/.minio/certs`` directory. You need to place certificates here to enable `HTTPS` based access. Read more about [How to secure access to MinIO server with TLS](https://docs.min.io/community/minio-object-store/operations/network-encryption.html). Following is a sample directory structure for MinIO server with TLS certificates. @@ -132,44 +132,47 @@ KEY: api manage global HTTP API call specific features, such as throttling, authentication types, etc. ARGS: -requests_max (number) set the maximum number of concurrent requests (default: '0') -requests_deadline (duration) set the deadline for API requests waiting to be processed (default: '10s') +requests_max (number) set the maximum number of concurrent requests (default: 'auto') cluster_deadline (duration) set the deadline for cluster readiness check (default: '10s') cors_allow_origin (csv) set comma separated list of origins allowed for CORS requests (default: '*') remote_transport_deadline (duration) set the deadline for API requests on remote transports while proxying between federated instances e.g. "2h" (default: '2h') -list_quorum (string) set the acceptable quorum expected for list operations e.g. "optimal", "reduced", "disk", "strict" (default: 'strict') +list_quorum (string) set the acceptable quorum expected for list operations e.g. "optimal", "reduced", "disk", "strict", "auto" (default: 'strict') replication_priority (string) set replication priority (default: 'auto') +replication_max_workers (number) set the maximum number of replication workers (default: '500') +replication_max_lrg_workers (number) set the maximum number of replication workers MinIO uses to replicate large objects between sites. 
(default: '10') transition_workers (number) set the number of transition workers (default: '100') stale_uploads_expiry (duration) set to expire stale multipart uploads older than this values (default: '24h') stale_uploads_cleanup_interval (duration) set to change intervals when stale multipart uploads are expired (default: '6h') delete_cleanup_interval (duration) set to change intervals when deleted objects are permanently deleted from ".trash" folder (default: '5m') -odirect (boolean) set to enable or disable O_DIRECT for read and writes under special conditions. NOTE: do not disable O_DIRECT without prior testing (default: 'on') +odirect (boolean) set to enable or disable O_DIRECT for writes under special conditions. NOTE: do not disable O_DIRECT without prior testing (default: 'on') root_access (boolean) turn 'off' root credential access for all API calls including s3, admin operations (default: 'on') sync_events (boolean) set to enable synchronous bucket notifications (default: 'off') +object_max_versions (number) set max allowed number of versions per object (default: '9223372036854775807') ``` or environment variables ``` -MINIO_API_REQUESTS_MAX (number) set the maximum number of concurrent requests (default: '0') -MINIO_API_REQUESTS_DEADLINE (duration) set the deadline for API requests waiting to be processed (default: '10s') +MINIO_API_REQUESTS_MAX (number) set the maximum number of concurrent requests (default: 'auto') MINIO_API_CLUSTER_DEADLINE (duration) set the deadline for cluster readiness check (default: '10s') MINIO_API_CORS_ALLOW_ORIGIN (csv) set comma separated list of origins allowed for CORS requests (default: '*') MINIO_API_REMOTE_TRANSPORT_DEADLINE (duration) set the deadline for API requests on remote transports while proxying between federated instances e.g. "2h" (default: '2h') -MINIO_API_LIST_QUORUM (string) set the acceptable quorum expected for list operations e.g. "optimal", "reduced", "disk", "strict" (default: 'strict') +MINIO_API_LIST_QUORUM (string) set the acceptable quorum expected for list operations e.g. "optimal", "reduced", "disk", "strict", "auto" (default: 'strict') MINIO_API_REPLICATION_PRIORITY (string) set replication priority (default: 'auto') +MINIO_API_REPLICATION_MAX_WORKERS (number) set the maximum number of replication workers (default: '500') MINIO_API_TRANSITION_WORKERS (number) set the number of transition workers (default: '100') MINIO_API_STALE_UPLOADS_EXPIRY (duration) set to expire stale multipart uploads older than this values (default: '24h') MINIO_API_STALE_UPLOADS_CLEANUP_INTERVAL (duration) set to change intervals when stale multipart uploads are expired (default: '6h') MINIO_API_DELETE_CLEANUP_INTERVAL (duration) set to change intervals when deleted objects are permanently deleted from ".trash" folder (default: '5m') -MINIO_API_ODIRECT (boolean) set to enable or disable O_DIRECT for read and writes under special conditions. NOTE: do not disable O_DIRECT without prior testing (default: 'on') +MINIO_API_ODIRECT (boolean) set to enable or disable O_DIRECT for writes under special conditions. 
NOTE: do not disable O_DIRECT without prior testing (default: 'on') MINIO_API_ROOT_ACCESS (boolean) turn 'off' root credential access for all API calls including s3, admin operations (default: 'on') MINIO_API_SYNC_EVENTS (boolean) set to enable synchronous bucket notifications (default: 'off') +MINIO_API_OBJECT_MAX_VERSIONS (number) set max allowed number of versions per object (default: '9223372036854775807') ``` #### Notifications -Notification targets supported by MinIO are in the following list. To configure individual targets please refer to more detailed documentation [here](https://min.io/docs/minio/linux/administration/monitoring.html#bucket-notifications). +Notification targets supported by MinIO are in the following list. To configure individual targets please refer to more detailed documentation [here](https://docs.min.io/community/minio-object-store/administration/monitoring.html#bucket-notifications). ``` notify_webhook publish bucket notifications to webhook endpoints @@ -333,5 +336,5 @@ minio server /data ## Explore Further -* [MinIO Quickstart Guide](https://min.io/docs/minio/linux/index.html#quickstart-for-linux) -* [Configure MinIO Server with TLS](https://min.io/docs/minio/linux/operations/network-encryption.html) +* [MinIO Quickstart Guide](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-on-redhat-linux.html) +* [Configure MinIO Server with TLS](https://docs.min.io/community/minio-object-store/operations/network-encryption.html) diff --git a/docs/debugging/README.md b/docs/debugging/README.md index c3745a346b8c8..df01c6e35f01e 100644 --- a/docs/debugging/README.md +++ b/docs/debugging/README.md @@ -2,7 +2,7 @@ ## HTTP Trace -HTTP tracing can be enabled by using [`mc admin trace`](https://github.com/minio/mc/blob/master/docs/minio-admin-complete-guide.md#command-trace---display-minio-server-http-trace) command. +HTTP tracing can be enabled by using [`mc admin trace`](https://docs.min.io/community/minio-object-store/reference/minio-mc-admin/mc-admin-trace.html) command. Example: diff --git a/docs/debugging/build.sh b/docs/debugging/build.sh index 734c31bc0b8d3..204e0ea14c3cc 100755 --- a/docs/debugging/build.sh +++ b/docs/debugging/build.sh @@ -2,5 +2,6 @@ export CGO_ENABLED=0 for dir in docs/debugging/*/; do - go build -C ${dir} -v + bin=$(basename ${dir}) + go build -C ${dir} -o ${PWD}/${bin} done diff --git a/docs/debugging/healing-bin/main.go b/docs/debugging/healing-bin/main.go index ac22fa2ab0c73..e0e4e84f2fa32 100644 --- a/docs/debugging/healing-bin/main.go +++ b/docs/debugging/healing-bin/main.go @@ -56,7 +56,7 @@ FLAGS: cli.ShowAppHelpAndExit(c, 1) // last argument is exit code } - ht := make(map[string]map[string]interface{}) + ht := make(map[string]map[string]any) file := c.Args().Get(0) if strings.HasSuffix(file, ".zip") { var sz int64 @@ -91,7 +91,7 @@ FLAGS: dec := json.NewDecoder(buf) // Use number to preserve integers. dec.UseNumber() - var htr map[string]interface{} + var htr map[string]any if err = dec.Decode(&htr); err != nil { return err } @@ -113,7 +113,7 @@ FLAGS: if _, err = msgp.CopyToJSON(buf, bytes.NewReader(b)); err != nil { return err } - var htr map[string]interface{} + var htr map[string]any dec := json.NewDecoder(buf) // Use number to preserve integers. 
dec.UseNumber() diff --git a/docs/debugging/inspect/decrypt-v1.go b/docs/debugging/inspect/decrypt-v1.go index fc7e6cedd30f1..081f20bd6b734 100644 --- a/docs/debugging/inspect/decrypt-v1.go +++ b/docs/debugging/inspect/decrypt-v1.go @@ -27,7 +27,7 @@ import ( "github.com/secure-io/sio-go" ) -func extractInspectV1(keyHex string, r io.Reader, w io.Writer) error { +func extractInspectV1(keyHex string, r io.Reader, w io.Writer, okMsg string) error { id, err := hex.DecodeString(keyHex[:8]) if err != nil { return err @@ -51,5 +51,8 @@ func extractInspectV1(keyHex string, r io.Reader, w io.Writer) error { nonce := make([]byte, stream.NonceSize()) encr := stream.DecryptReader(r, nonce, nil) _, err = io.Copy(w, encr) + if err == nil { + fmt.Println(okMsg) + } return err } diff --git a/docs/debugging/inspect/decrypt-v2.go b/docs/debugging/inspect/decrypt-v2.go index 17e34bbb94030..13da6c3cf94ef 100644 --- a/docs/debugging/inspect/decrypt-v2.go +++ b/docs/debugging/inspect/decrypt-v2.go @@ -22,22 +22,29 @@ import ( "fmt" "io" "os" + "path/filepath" + "strings" + "unicode/utf8" "github.com/minio/madmin-go/v3/estream" ) -func extractInspectV2(pk []byte, r io.Reader, w io.Writer) error { - privKey, err := bytesToPrivateKey(pk) - if err != nil { - return fmt.Errorf("decoding key returned: %w", err) - } +type keepFileErr struct { + error +} +func extractInspectV2(pks [][]byte, r io.Reader, extractDir string) error { sr, err := estream.NewReader(r) if err != nil { return err } - - sr.SetPrivateKey(privKey) + for _, pk := range pks { + privKey, err := bytesToPrivateKey(pk) + if err != nil { + return fmt.Errorf("decoding key returned: %w", err) + } + sr.SetPrivateKey(privKey) + } sr.ReturnNonDecryptable(true) // Debug corrupted streams. @@ -45,17 +52,18 @@ func extractInspectV2(pk []byte, r io.Reader, w io.Writer) error { sr.SkipEncrypted(true) return sr.DebugStream(os.Stdout) } - + extracted := false for { stream, err := sr.NextStream() if err != nil { if err == io.EOF { + if extracted { + return nil + } return errors.New("no data found on stream") } if errors.Is(err, estream.ErrNoKey) { - if stream.Name == "inspect.zip" { - return errors.New("incorrect private key") - } + fmt.Println("Skipping", stream.Name, "no private key") if err := stream.Skip(); err != nil { return fmt.Errorf("stream skip: %w", err) } @@ -63,15 +71,21 @@ func extractInspectV2(pk []byte, r io.Reader, w io.Writer) error { } return fmt.Errorf("next stream: %w", err) } - if stream.Name == "inspect.zip" { - _, err := io.Copy(w, stream) - if err != nil { - return fmt.Errorf("reading inspect stream: %w", err) - } - return nil + if strings.Contains(stream.Name, "..") || !utf8.ValidString(stream.Name) { + return fmt.Errorf("invalid stream name: %q", stream.Name) } - if err := stream.Skip(); err != nil { - return fmt.Errorf("stream skip: %w", err) + + dst := filepath.Join(extractDir, stream.Name) + os.Mkdir(extractDir, 0o755) + w, err := os.Create(dst) + if err != nil { + return fmt.Errorf("creating output file: %w", err) + } + _, err = io.Copy(w, stream) + if err != nil { + return fmt.Errorf("reading inspect stream: %w", err) } + fmt.Printf("Extracted: %s\n", dst) + extracted = true } } diff --git a/docs/debugging/inspect/export.go b/docs/debugging/inspect/export.go index 56210bdfe66b1..016a412f6d51e 100644 --- a/docs/debugging/inspect/export.go +++ b/docs/debugging/inspect/export.go @@ -289,7 +289,7 @@ func (x xlMetaInlineData) json() ([]byte, error) { const ( xlHeaderVersion = 2 - xlMetaVersion = 1 + xlMetaVersion = 3 ) func 
decodeXLHeaders(buf []byte) (versions int, b []byte, e error) { @@ -352,33 +352,33 @@ func (z *xlMetaV2VersionHeaderV2) UnmarshalMsg(bts []byte) (o []byte, e error) { zb0001, bts, e = msgp.ReadArrayHeaderBytes(bts) if e != nil { e = msgp.WrapError(e) - return + return o, e } if zb0001 != 5 { e = msgp.ArrayError{Wanted: 5, Got: zb0001} - return + return o, e } bts, e = msgp.ReadExactBytes(bts, (z.VersionID)[:]) if e != nil { e = msgp.WrapError(e, "VersionID") - return + return o, e } z.ModTime, bts, e = msgp.ReadInt64Bytes(bts) if e != nil { e = msgp.WrapError(e, "ModTime") - return + return o, e } bts, e = msgp.ReadExactBytes(bts, (z.Signature)[:]) if e != nil { e = msgp.WrapError(e, "Signature") - return + return o, e } { var zb0002 uint8 zb0002, bts, e = msgp.ReadUint8Bytes(bts) if e != nil { e = msgp.WrapError(e, "Type") - return + return o, e } z.Type = zb0002 } @@ -387,12 +387,12 @@ func (z *xlMetaV2VersionHeaderV2) UnmarshalMsg(bts []byte) (o []byte, e error) { zb0003, bts, e = msgp.ReadUint8Bytes(bts) if e != nil { e = msgp.WrapError(e, "Flags") - return + return o, e } z.Flags = zb0003 } o = bts - return + return o, e } func (z xlMetaV2VersionHeaderV2) MarshalJSON() (o []byte, e error) { diff --git a/docs/debugging/inspect/go.mod b/docs/debugging/inspect/go.mod index a999931a1f15c..a9e37e2f482a5 100644 --- a/docs/debugging/inspect/go.mod +++ b/docs/debugging/inspect/go.mod @@ -1,22 +1,25 @@ module github.com/minio/minio/docs/debugging/inspect -go 1.19 +go 1.23.0 + +toolchain go1.24.8 require ( - github.com/klauspost/compress v1.17.4 - github.com/minio/colorjson v1.0.6 - github.com/minio/madmin-go/v3 v3.0.36 + github.com/klauspost/compress v1.17.11 + github.com/klauspost/filepathx v1.1.1 + github.com/minio/colorjson v1.0.8 + github.com/minio/madmin-go/v3 v3.0.88 github.com/secure-io/sio-go v0.3.1 - github.com/tinylib/msgp v1.1.9 + github.com/tinylib/msgp v1.2.5 ) require ( - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/fatih/color v1.16.0 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/fatih/color v1.18.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/minio/pkg/v2 v2.0.6 // indirect - github.com/philhofer/fwd v1.1.2 // indirect - golang.org/x/crypto v0.17.0 // indirect - golang.org/x/sys v0.15.0 // indirect + github.com/minio/pkg/v3 v3.0.28 // indirect + github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect + golang.org/x/crypto v0.35.0 // indirect + golang.org/x/sys v0.30.0 // indirect ) diff --git a/docs/debugging/inspect/go.sum b/docs/debugging/inspect/go.sum index fb7e0db197721..7bdafc1efff3d 100644 --- a/docs/debugging/inspect/go.sum +++ b/docs/debugging/inspect/go.sum @@ -1,36 +1,36 @@ -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= 
-github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/filepathx v1.1.1 h1:201zvAsL1PhZvmXTP+QLer3AavWrO3U1NILWpniHK4w= +github.com/klauspost/filepathx v1.1.1/go.mod h1:XWxdp8rEw4gupPBrxrV5Q57dL/71xj0OgV1gKt2zTfU= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/minio/colorjson v1.0.6 h1:m7TUvpvt0u7FBmVIEQNIa0T4NBQlxrcMBp4wJKsg2Ik= -github.com/minio/colorjson v1.0.6/go.mod h1:LUXwS5ZGNb6Eh9f+t+3uJiowD3XsIWtsvTriUBeqgYs= -github.com/minio/madmin-go/v3 v3.0.36 h1:Ewu/Rt7WVSs9slWW+SZHRc5RPQdYAGIdNZnRr+gyN4k= -github.com/minio/madmin-go/v3 v3.0.36/go.mod h1:4QN2NftLSV7MdlT50dkrenOMmNVHluxTvlqJou3hte8= -github.com/minio/pkg/v2 v2.0.6 h1:n+PpbSMaJK1FfQkP55l1y0wj5Hi9R5w2DtGhxiGdP9I= -github.com/minio/pkg/v2 v2.0.6/go.mod h1:Z9Z/LzhTIxZ6zhPeW658vmLRilRek3zBOqNB9j+lxSY= -github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= -github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= +github.com/minio/colorjson v1.0.8 h1:AS6gEQ1dTRYHmC4xuoodPDRILHP/9Wz5wYUGDQfPLpg= +github.com/minio/colorjson v1.0.8/go.mod h1:wrs39G/4kqNlGjwqHvPlAnXuc2tlPszo6JKdSBCLN8w= +github.com/minio/madmin-go/v3 v3.0.88 h1:6AEPJItB65XVKmrpnuvDEO0bwNDeUwPY2cQUZzbGdT0= +github.com/minio/madmin-go/v3 v3.0.88/go.mod h1:pMLdj9OtN0CANNs5tdm6opvOlDFfj0WhbztboZAjRWE= +github.com/minio/pkg/v3 v3.0.28 h1:8tSuZnJbjc3C3DM2DEh4ZnSWjMZdccd679stk8sPD60= +github.com/minio/pkg/v3 v3.0.28/go.mod h1:mIaN552nu0D2jiSk5BQC8LB25f44ytbOBJCuLtksX7Q= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= github.com/secure-io/sio-go v0.3.1 h1:dNvY9awjabXTYGsTF1PiCySl9Ltofk9GA3VdWlo7rRc= github.com/secure-io/sio-go v0.3.1/go.mod h1:+xbkjDzPjwh4Axd07pRKSNriS9SCiYksWnZqdnfpQxs= -github.com/tinylib/msgp v1.1.9 h1:SHf3yoO2sGA0veCJeCBYLHuttAVFHGm2RHgNodW7wQU= -github.com/tinylib/msgp v1.1.9/go.mod h1:BCXGB54lDD8qUEPmiG0cQQUANC4IUQyB2ItS2UDlO/k= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.35.0 
h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= +golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/docs/debugging/inspect/main.go b/docs/debugging/inspect/main.go index 4b363110cd015..77a6b6f4b2550 100644 --- a/docs/debugging/inspect/main.go +++ b/docs/debugging/inspect/main.go @@ -24,12 +24,15 @@ import ( "crypto/x509" "encoding/json" "encoding/pem" + "errors" "flag" "fmt" "io" "os" "strings" "time" + + "github.com/klauspost/filepathx" ) var ( @@ -48,15 +51,34 @@ func main() { generateKeys() os.Exit(0) } - var privateKey []byte + var privateKeys [][]byte if *keyHex == "" { - if b, err := os.ReadFile(*privKeyPath); err == nil { - privateKey = b - fmt.Println("Using private key from", *privKeyPath) + // Attempt to load private key(s) + n := 1 + var base, ext string + base = *privKeyPath + if idx := strings.LastIndexByte(base, '.'); idx != -1 { + ext = base[idx:] + base = base[:idx] + } + for { + // Automatically read "file.ext", "file-2.ext", "file-3.ext"... 
+ fn := base + ext + if n > 1 { + fn = fmt.Sprintf("%s-%d%s", base, n, ext) + } + + if b, err := os.ReadFile(fn); err == nil { + privateKeys = append(privateKeys, b) + fmt.Println("Added private key from", fn) + } else { + break + } + n++ } // Prompt for decryption key if no --key or --private-key are provided - if len(privateKey) == 0 { + if len(privateKeys) == 0 && !*stdin { reader := bufio.NewReader(os.Stdin) fmt.Print("Enter Decryption Key: ") @@ -67,7 +89,7 @@ func main() { } } - var inputFileName, outputFileName string + var inputs []string // Parse parameters switch { @@ -82,53 +104,72 @@ func main() { fatalErr(err) } fatalErr(json.Unmarshal(got, &input)) - inputFileName = input.File + inputs = []string{input.File} *keyHex = input.Key case len(flag.Args()) == 1: - inputFileName = flag.Args()[0] + var err error + inputs, err = filepathx.Glob(flag.Args()[0]) + fatalErr(err) + if len(inputs) == 0 { + fmt.Println("Usage: No input found") + } default: flag.Usage() fatalIf(true, "Only 1 file can be decrypted") os.Exit(1) } + for _, input := range inputs { + processFile(input, privateKeys) + } +} +func processFile(inputFileName string, privateKeys [][]byte) { // Calculate the output file name + var outputFileName string switch { case strings.HasSuffix(inputFileName, ".enc"): outputFileName = strings.TrimSuffix(inputFileName, ".enc") + ".zip" case strings.HasSuffix(inputFileName, ".zip"): outputFileName = strings.TrimSuffix(inputFileName, ".zip") + ".decrypted.zip" - } - - // Backup any already existing output file - _, err := os.Stat(outputFileName) - if err == nil { - err := os.Rename(outputFileName, outputFileName+"."+time.Now().Format("20060102150405")) - if err != nil { - fatalErr(err) - } + case strings.Contains(inputFileName, ".enc."): + outputFileName = strings.Replace(inputFileName, ".enc.", ".", 1) + ".zip" + default: + outputFileName = inputFileName + ".decrypted" } // Open the input and create the output file input, err := os.Open(inputFileName) fatalErr(err) defer input.Close() - output, err := os.Create(outputFileName) - fatalErr(err) // Decrypt the inspect data switch { case *keyHex != "": - err = extractInspectV1(*keyHex, input, output) - case len(privateKey) != 0: - err = extractInspectV2(privateKey, input, output) + // Backup any already existing output file + _, err := os.Stat(outputFileName) + if err == nil { + err := os.Rename(outputFileName, outputFileName+"."+time.Now().Format("20060102150405")) + if err != nil { + fatalErr(err) + } + } + output, err := os.Create(outputFileName) + fatalErr(err) + msg := fmt.Sprintf("output written to %s", outputFileName) + err = extractInspectV1(*keyHex, input, output, msg) + output.Close() + case len(privateKeys) != 0: + outputFileName := strings.TrimSuffix(outputFileName, ".zip") + err = extractInspectV2(privateKeys, input, outputFileName) } - output.Close() if err != nil { - os.Remove(outputFileName) + + var keep keepFileErr + if !errors.As(err, &keep) { + os.Remove(outputFileName) + } fatalErr(err) } - fmt.Println("output written to", outputFileName) // Export xl.meta to stdout if *export { diff --git a/docs/debugging/pprofgoparser/go.mod b/docs/debugging/pprofgoparser/go.mod index 5ddb69a01588d..1494a2c82912c 100644 --- a/docs/debugging/pprofgoparser/go.mod +++ b/docs/debugging/pprofgoparser/go.mod @@ -1,3 +1,5 @@ module github.com/minio/minio/docs/debugging/pprofgoparser -go 1.19 +go 1.21 + +toolchain go1.24.8 diff --git a/docs/debugging/reorder-disks/go.mod b/docs/debugging/reorder-disks/go.mod index 
928ec84721b23..e22fcd9e48865 100644 --- a/docs/debugging/reorder-disks/go.mod +++ b/docs/debugging/reorder-disks/go.mod @@ -1,5 +1,7 @@ module github.com/minio/minio/docs/debugging/reorder-disks -go 1.19 +go 1.21 -require github.com/minio/pkg/v2 v2.0.6 +toolchain go1.24.8 + +require github.com/minio/pkg/v3 v3.0.1 diff --git a/docs/debugging/reorder-disks/go.sum b/docs/debugging/reorder-disks/go.sum index 2e1e7e24dd75f..0cdd474b0f8e9 100644 --- a/docs/debugging/reorder-disks/go.sum +++ b/docs/debugging/reorder-disks/go.sum @@ -1,2 +1,2 @@ -github.com/minio/pkg/v2 v2.0.6 h1:n+PpbSMaJK1FfQkP55l1y0wj5Hi9R5w2DtGhxiGdP9I= -github.com/minio/pkg/v2 v2.0.6/go.mod h1:Z9Z/LzhTIxZ6zhPeW658vmLRilRek3zBOqNB9j+lxSY= +github.com/minio/pkg/v3 v3.0.1 h1:qts6g9rYjAdeomRdwjnMc1IaQ6KbaJs3dwqBntXziaw= +github.com/minio/pkg/v3 v3.0.1/go.mod h1:53gkSUVHcfYoskOs5YAJ3D99nsd2SKru90rdE9whlXU= diff --git a/docs/debugging/reorder-disks/main.go b/docs/debugging/reorder-disks/main.go index 8e9a7618f5c08..5581ca8330872 100644 --- a/docs/debugging/reorder-disks/main.go +++ b/docs/debugging/reorder-disks/main.go @@ -30,7 +30,7 @@ import ( "strings" "syscall" - "github.com/minio/pkg/v2/ellipses" + "github.com/minio/pkg/v3/ellipses" ) type xl struct { diff --git a/docs/debugging/s3-check-md5/main.go b/docs/debugging/s3-check-md5/main.go index de1f1e302a21d..8670b78008e27 100644 --- a/docs/debugging/s3-check-md5/main.go +++ b/docs/debugging/s3-check-md5/main.go @@ -142,7 +142,7 @@ func main() { if versions { fpath += ":" + obj.VersionID } - return + return fpath } // List all objects from a bucket-name with a matching prefix. diff --git a/docs/debugging/s3-verify/go.mod b/docs/debugging/s3-verify/go.mod index 4febf7570233a..37426446d5c8e 100644 --- a/docs/debugging/s3-verify/go.mod +++ b/docs/debugging/s3-verify/go.mod @@ -1,24 +1,25 @@ module github.com/minio/minio/docs/debugging/s3-verify -go 1.19 +go 1.23.0 -require github.com/minio/minio-go/v7 v7.0.66 +toolchain go1.24.8 + +require github.com/minio/minio-go/v7 v7.0.83 require ( + github.com/davecgh/go-spew v1.1.1 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/google/uuid v1.5.0 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.4 // indirect - github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/go-ini/ini v1.67.0 // indirect + github.com/goccy/go-json v0.10.4 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/cpuid/v2 v2.2.9 // indirect github.com/minio/md5-simd v1.1.2 // indirect - github.com/minio/sha256-simd v1.0.1 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/rs/xid v1.5.0 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect - golang.org/x/crypto v0.17.0 // indirect - golang.org/x/net v0.19.0 // indirect - golang.org/x/sys v0.15.0 // indirect - golang.org/x/text v0.14.0 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/rs/xid v1.6.0 // indirect + golang.org/x/crypto v0.36.0 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/text v0.23.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/docs/debugging/s3-verify/go.sum b/docs/debugging/s3-verify/go.sum index 61f3fb153ad5e..c8926fa12e8bd 100644 --- a/docs/debugging/s3-verify/go.sum +++ 
b/docs/debugging/s3-verify/go.sum @@ -1,51 +1,36 @@ -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4= -github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM= +github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= +github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.66 h1:bnTOXOHjOqv/gcMuiVbN9o2ngRItvqE774dG9nq0Dzw= -github.com/minio/minio-go/v7 v7.0.66/go.mod h1:DHAgmyQEGdW3Cif0UooKOyrT3Vxs82zNdV6tkKhRtbs= -github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= -github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/minio/minio-go/v7 v7.0.83 h1:W4Kokksvlz3OKf3OqIlzDNKd4MERlC2oN8YptwJ0+GA= +github.com/minio/minio-go/v7 v7.0.83/go.mod h1:57YXpvc5l3rjPdhqNrDsvVlY0qPI6UTk1bflAe+9doY= github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/docs/debugging/xattr/go.mod b/docs/debugging/xattr/go.mod index a53fe331d7968..41b13f910e4a1 100644 --- a/docs/debugging/xattr/go.mod +++ b/docs/debugging/xattr/go.mod @@ -1,6 +1,8 @@ module github.com/minio/minio/docs/debugging/xattr -go 1.19 +go 1.21 + +toolchain go1.24.8 require ( github.com/olekukonko/tablewriter 
v0.0.5 diff --git a/docs/debugging/xl-meta/main.go b/docs/debugging/xl-meta/main.go index 39daed482846f..23c88d58038e3 100644 --- a/docs/debugging/xl-meta/main.go +++ b/docs/debugging/xl-meta/main.go @@ -19,6 +19,8 @@ package main import ( "bytes" + "crypto/md5" + "encoding/base64" "encoding/binary" "encoding/hex" "encoding/json" @@ -28,15 +30,20 @@ import ( "log" "os" "path/filepath" + "regexp" + "slices" "sort" + "strconv" "strings" "time" + "unicode/utf8" "github.com/google/uuid" "github.com/klauspost/compress/zip" "github.com/klauspost/filepathx" "github.com/klauspost/reedsolomon" "github.com/minio/cli" + "github.com/minio/highwayhash" "github.com/tinylib/msgp/msgp" ) @@ -62,9 +69,10 @@ FLAGS: {{range .VisibleFlags}}{{.}} {{end}} ` + //nolint:staticcheck + isPart := regexp.MustCompile("[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}/part\\.[0-9]+$") app.HideHelpCommand = true - app.Flags = []cli.Flag{ cli.BoolFlag{ Usage: "print each file as a separate line without formatting", @@ -83,6 +91,10 @@ FLAGS: Usage: "combine inline data", Name: "combine", }, + cli.BoolFlag{ + Usage: "combine inline data across versions when size matches", + Name: "xver", + }, } app.Action = func(c *cli.Context) error { @@ -92,9 +104,13 @@ FLAGS: } // file / version / file filemap := make(map[string]map[string]string) + foundData := make(map[string][]byte) + partDataToVerID := make(map[string][2]string) + var baseName string // versionID -> combineFiles := make(map[string][]string) decode := func(r io.Reader, file string) ([]byte, error) { + file = strings.ReplaceAll(file, ":", "_") b, err := io.ReadAll(r) if err != nil { return nil, err @@ -105,6 +121,7 @@ FLAGS: } filemap[file] = make(map[string]string) buf := bytes.NewBuffer(nil) + v0 := "" var data xlMetaInlineData switch minor { case 0: @@ -137,7 +154,7 @@ FLAGS: b = nbuf } - nVers, v, err := decodeXLHeaders(v) + hdr, v, err := decodeXLHeaders(v) if err != nil { return nil, err } @@ -146,10 +163,11 @@ FLAGS: Header json.RawMessage Metadata json.RawMessage } - versions := make([]version, nVers) - err = decodeVersions(v, nVers, func(idx int, hdr, meta []byte) error { + versions := make([]version, hdr.versions) + headerVer := hdr.headerVer + err = decodeVersions(v, hdr.versions, func(idx int, hdr, meta []byte) error { var header xlMetaV2VersionHeaderV2 - if _, err := header.UnmarshalMsg(hdr); err != nil { + if _, err := header.UnmarshalMsg(hdr, headerVer); err != nil { return err } b, err := header.MarshalJSON() @@ -167,18 +185,44 @@ FLAGS: } type erasureInfo struct { V2Obj *struct { - EcDist []int - EcIndex int - EcM int - EcN int + EcDist []int + EcIndex int + EcM int + EcN int + DDir []byte + PartNums []int + MetaSys struct { + Inline []byte `json:"x-minio-internal-inline-data"` + } } } var ei erasureInfo if err := json.Unmarshal(buf.Bytes(), &ei); err == nil && ei.V2Obj != nil { verID := uuid.UUID(header.VersionID).String() + if verID == "00000000-0000-0000-0000-000000000000" { + // If the version ID is all zeros, use the signature as version ID. 
+ verID = fmt.Sprintf("null/%08x", header.Signature) + v0 = verID + } idx := ei.V2Obj.EcIndex filemap[file][verID] = fmt.Sprintf("%s/shard-%02d-of-%02d", verID, idx, ei.V2Obj.EcN+ei.V2Obj.EcM) filemap[file][verID+".json"] = buf.String() + for _, i := range ei.V2Obj.PartNums { + if len(ei.V2Obj.MetaSys.Inline) != 0 { + break + } + file := file + dataFile := fmt.Sprintf("%s%s/part.%d", strings.TrimSuffix(file, "xl.meta"), uuid.UUID(ei.V2Obj.DDir).String(), i) + if i > 1 { + file = fmt.Sprintf("%s/part.%d", file, i) + filemap[file] = make(map[string]string) + filemap[file][verID] = fmt.Sprintf("%s/part.%d/shard-%02d-of-%02d", verID, i, idx, ei.V2Obj.EcN+ei.V2Obj.EcM) + filemap[file][verID+".json"] = buf.String() + } + partDataToVerID[dataFile] = [2]string{file, verID} + } + } else if err != nil { + fmt.Println("Error:", err) } return nil }) @@ -197,7 +241,7 @@ FLAGS: } if c.Bool("data") { - b, err := data.json() + b, err := data.json(true) if err != nil { return nil, err } @@ -221,24 +265,42 @@ FLAGS: } }, file) } + if baseName == "" { + if strings.HasSuffix(file, "/xl.meta") { + baseName = strings.TrimSuffix(file, "/xl.meta") + if idx := strings.LastIndexByte(baseName, '/'); idx > 0 { + baseName = baseName[idx+1:] + } + } + } err := data.files(func(name string, data []byte) { fn := fmt.Sprintf("%s-%s.data", file, name) if c.Bool("combine") { + if name == "null" { + name = v0 + } + f := filemap[file][name] if f != "" { fn = f + ".data" - os.MkdirAll(filepath.Dir(fn), os.ModePerm) + err = os.MkdirAll(filepath.Dir(fn), os.ModePerm) + if err != nil { + fmt.Println("MkdirAll:", filepath.Dir(fn), err) + } err = os.WriteFile(fn+".json", []byte(filemap[file][name+".json"]), os.ModePerm) combineFiles[name] = append(combineFiles[name], fn) if err != nil { - fmt.Println("ERR:", err) + fmt.Println("WriteFile:", err) + } + err = os.WriteFile(filepath.Dir(fn)+"/filename.txt", []byte(file), os.ModePerm) + if err != nil { + fmt.Println("combine WriteFile:", err) } - _ = os.WriteFile(filepath.Dir(fn)+"/filename.txt", []byte(file), os.ModePerm) } } err = os.WriteFile(fn, data, os.ModePerm) if err != nil { - fmt.Println(err) + fmt.Println("WriteFile:", err) } }) if err != nil { @@ -248,7 +310,7 @@ FLAGS: if ndjson { return buf.Bytes(), nil } - var msi map[string]interface{} + var msi map[string]any dec := json.NewDecoder(buf) // Use number to preserve integers. 
dec.UseNumber() @@ -309,13 +371,16 @@ FLAGS: defer f.Close() r = f } - if strings.HasSuffix(file, ".zip") { - zr, err := zip.NewReader(r.(io.ReaderAt), sz) + if ra, ok := r.(io.ReaderAt); ok && strings.HasSuffix(file, ".zip") { + zr, err := zip.NewReader(ra, sz) if err != nil { return err } for _, file := range zr.File { - if !file.FileInfo().IsDir() && strings.HasSuffix(file.Name, "xl.meta") { + if file.FileInfo().IsDir() { + continue + } + if strings.HasSuffix(file.Name, "xl.meta") { r, err := file.Open() if err != nil { return err @@ -326,13 +391,25 @@ FLAGS: if err != nil { return err } - var tmp map[string]interface{} + var tmp map[string]any if err := json.Unmarshal(b2, &tmp); err == nil { if b3, err := json.Marshal(tmp); err == nil { b2 = b3 } } toPrint = append(toPrint, fmt.Sprintf("\t%s: %s", string(b), string(b2))) + } else if c.Bool("combine") && isPart.MatchString(file.Name) { + // name := isPart.FindString(file.Name) + name := strings.ReplaceAll(file.Name, ":", "_") + r, err := file.Open() + if err != nil { + return err + } + all, err := io.ReadAll(r) + if err != nil { + return err + } + foundData[name] = all } } } else { @@ -357,12 +434,57 @@ FLAGS: } sort.Strings(toPrint) fmt.Printf("{\n%s\n}\n", strings.Join(toPrint, ",\n")) + if c.Bool("combine") { + for partName, data := range foundData { + if verid := partDataToVerID[partName]; verid != [2]string{} { + file := verid[0] + name := verid[1] + f := filemap[file][name] + fn := fmt.Sprintf("%s-%s.data", file, name) + if f != "" { + fn = f + ".data" + err := os.MkdirAll(filepath.Dir(fn), os.ModePerm) + if err != nil { + fmt.Println("MkdirAll:", filepath.Dir(fn), err) + } + err = os.WriteFile(fn+".json", []byte(filemap[file][name+".json"]), os.ModePerm) + combineFiles[name] = append(combineFiles[name], fn) + if err != nil { + fmt.Println("WriteFile:", err) + } + err = os.WriteFile(filepath.Dir(fn)+"/filename.txt", []byte(file), os.ModePerm) + if err != nil { + fmt.Println("combine WriteFile:", err) + } + fmt.Println("Remapped", partName, "to", fn) + } + delete(partDataToVerID, partName) + err := os.WriteFile(fn, data, os.ModePerm) + if err != nil { + fmt.Println("WriteFile:", err) + } + } + } + if len(partDataToVerID) > 0 { + fmt.Println("MISSING PART FILES:") + for k := range partDataToVerID { + fmt.Println(k) + } + fmt.Println("END MISSING PART FILES") + } + } if len(combineFiles) > 0 { - for k, v := range combineFiles { - if err := combine(v, k); err != nil { + if c.Bool("xver") { + if err := combineCrossVer(combineFiles, baseName); err != nil { fmt.Println("ERROR:", err) } + } else { + for k, v := range combineFiles { + if err := combine(v, k+"-"+baseName); err != nil { + fmt.Println("ERROR:", err) + } + } } } @@ -444,21 +566,20 @@ func (x xlMetaInlineData) versionOK() bool { return x[0] > 0 && x[0] <= xlMetaInlineDataVer } -func (x xlMetaInlineData) json() ([]byte, error) { +func (x xlMetaInlineData) json(value bool) ([]byte, error) { if len(x) == 0 { return []byte("{}"), nil } if !x.versionOK() { return nil, errors.New("xlMetaInlineData: unknown version") } - sz, buf, err := msgp.ReadMapHeaderBytes(x.afterVersion()) if err != nil { return nil, err } res := []byte("{") - for i := uint32(0); i < sz; i++ { + for i := range sz { var key, val []byte key, buf, err = msgp.ReadMapKeyZC(buf) if err != nil { @@ -475,7 +596,34 @@ func (x xlMetaInlineData) json() ([]byte, error) { if i > 0 { res = append(res, ',') } - s := fmt.Sprintf(`"%s":%d`, string(key), len(val)) + s := fmt.Sprintf(`"%s": {"bytes": %d`, string(key), len(val)) + 
// Check bitrot... We should only ever have one block... + if len(val) >= 32 { + want := val[:32] + data := val[32:] + const magicHighwayHash256Key = "\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0" + + hh, _ := highwayhash.New([]byte(magicHighwayHash256Key)) + hh.Write(data) + got := hh.Sum(nil) + if bytes.Equal(want, got) { + s += ", \"bitrot_valid\": true" + } else { + s += ", \"bitrot_valid\": false" + } + if value { + if utf8.Valid(data) { + // Encode as JSON string. + b, err := json.Marshal(string(data)) + if err == nil { + s += `, "data_string": ` + string(b) + } + } + // Base64 encode. + s += `, "data_base64": "` + base64.StdEncoding.EncodeToString(data) + `"` + } + s += "}" + } res = append(res, []byte(s)...) } res = append(res, '}') @@ -496,7 +644,7 @@ func (x xlMetaInlineData) files(fn func(name string, data []byte)) error { return err } - for i := uint32(0); i < sz; i++ { + for i := range sz { var key, val []byte key, buf, err = msgp.ReadMapKeyZC(buf) if err != nil { @@ -517,33 +665,38 @@ func (x xlMetaInlineData) files(fn func(name string, data []byte)) error { } const ( - xlHeaderVersion = 2 - xlMetaVersion = 2 + xlHeaderVersion = 3 + xlMetaVersion = 3 ) -func decodeXLHeaders(buf []byte) (versions int, b []byte, err error) { - hdrVer, buf, err := msgp.ReadUintBytes(buf) +type xlHeaders struct { + versions int + headerVer, metaVer uint +} + +func decodeXLHeaders(buf []byte) (x xlHeaders, b []byte, err error) { + x.headerVer, buf, err = msgp.ReadUintBytes(buf) if err != nil { - return 0, buf, err + return x, buf, err } - metaVer, buf, err := msgp.ReadUintBytes(buf) + x.metaVer, buf, err = msgp.ReadUintBytes(buf) if err != nil { - return 0, buf, err + return x, buf, err } - if hdrVer > xlHeaderVersion { - return 0, buf, fmt.Errorf("decodeXLHeaders: Unknown xl header version %d", metaVer) + if x.headerVer > xlHeaderVersion { + return x, buf, fmt.Errorf("decodeXLHeaders: Unknown xl header version %d", x.headerVer) } - if metaVer > xlMetaVersion { - return 0, buf, fmt.Errorf("decodeXLHeaders: Unknown xl meta version %d", metaVer) + if x.metaVer > xlMetaVersion { + return x, buf, fmt.Errorf("decodeXLHeaders: Unknown xl meta version %d", x.metaVer) } - versions, buf, err = msgp.ReadIntBytes(buf) + x.versions, buf, err = msgp.ReadIntBytes(buf) if err != nil { - return 0, buf, err + return x, buf, err } - if versions < 0 { - return 0, buf, fmt.Errorf("decodeXLHeaders: Negative version count %d", versions) + if x.versions < 0 { + return x, buf, fmt.Errorf("decodeXLHeaders: Negative version count %d", x.versions) } - return versions, buf, nil + return x, buf, nil } // decodeVersions will decode a number of versions from a buffer @@ -551,7 +704,7 @@ func decodeXLHeaders(buf []byte) (versions int, b []byte, err error) { // Any non-nil error is returned. 
func decodeVersions(buf []byte, versions int, fn func(idx int, hdr, meta []byte) error) (err error) { var tHdr, tMeta []byte // Zero copy bytes - for i := 0; i < versions; i++ { + for i := range versions { tHdr, buf, err = msgp.ReadBytesZC(buf) if err != nil { return err @@ -573,41 +726,46 @@ type xlMetaV2VersionHeaderV2 struct { Signature [4]byte Type uint8 Flags uint8 + EcN, EcM uint8 // Note that these will be 0/0 for non-v2 objects and older xl.meta } // UnmarshalMsg implements msgp.Unmarshaler -func (z *xlMetaV2VersionHeaderV2) UnmarshalMsg(bts []byte) (o []byte, err error) { +func (z *xlMetaV2VersionHeaderV2) UnmarshalMsg(bts []byte, hdrVer uint) (o []byte, err error) { var zb0001 uint32 zb0001, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { err = msgp.WrapError(err) - return + return o, err } - if zb0001 != 5 { - err = msgp.ArrayError{Wanted: 5, Got: zb0001} - return + want := uint32(5) + if hdrVer > 2 { + want += 2 + } + if zb0001 != want { + err = msgp.ArrayError{Wanted: want, Got: zb0001} + return o, err } bts, err = msgp.ReadExactBytes(bts, (z.VersionID)[:]) if err != nil { err = msgp.WrapError(err, "VersionID") - return + return o, err } z.ModTime, bts, err = msgp.ReadInt64Bytes(bts) if err != nil { err = msgp.WrapError(err, "ModTime") - return + return o, err } bts, err = msgp.ReadExactBytes(bts, (z.Signature)[:]) if err != nil { err = msgp.WrapError(err, "Signature") - return + return o, err } { var zb0002 uint8 zb0002, bts, err = msgp.ReadUint8Bytes(bts) if err != nil { err = msgp.WrapError(err, "Type") - return + return o, err } z.Type = zb0002 } @@ -616,12 +774,33 @@ func (z *xlMetaV2VersionHeaderV2) UnmarshalMsg(bts []byte) (o []byte, err error) zb0003, bts, err = msgp.ReadUint8Bytes(bts) if err != nil { err = msgp.WrapError(err, "Flags") - return + return o, err } z.Flags = zb0003 } + if hdrVer > 2 { + // Version 3 has EcM and EcN + { + var zb0004 uint8 + zb0004, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "EcN") + return o, err + } + z.EcN = zb0004 + } + { + var zb0005 uint8 + zb0005, bts, err = msgp.ReadUint8Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "EcM") + return o, err + } + z.EcM = zb0005 + } + } o = bts - return + return o, err } func (z xlMetaV2VersionHeaderV2) MarshalJSON() (o []byte, err error) { @@ -631,131 +810,191 @@ func (z xlMetaV2VersionHeaderV2) MarshalJSON() (o []byte, err error) { Signature string Type uint8 Flags uint8 + EcM, EcN uint8 // Note that these will be 0/0 for non-v2 objects and older xl.meta }{ VersionID: hex.EncodeToString(z.VersionID[:]), ModTime: time.Unix(0, z.ModTime), Signature: hex.EncodeToString(z.Signature[:]), Type: z.Type, Flags: z.Flags, + EcM: z.EcM, + EcN: z.EcN, } return json.Marshal(tmp) } -func combine(files []string, out string) error { +type mappedData struct { + mapped, filled []byte + size, shards, data, parity int + parityData map[int]map[int][]byte + blockOffset int // Offset in bytes to start of block. + blocks int // 0 = one block. 
+ objSize, partSize int + wantMD5 string +} + +func readAndMap(files []string, partNum, blockNum int) (*mappedData, error) { + var m mappedData sort.Strings(files) - var size, shards, data, parity int - mapped := make([]byte, size) - filled := make([]byte, size) - parityData := make(map[int]map[int][]byte) - fmt.Printf("Attempting to combine version %q.\n", out) + m.parityData = make(map[int]map[int][]byte) for _, file := range files { - b, err := os.ReadFile(file) - if err != nil { - return err - } meta, err := os.ReadFile(file + ".json") if err != nil { - return err + return nil, err } type erasureInfo struct { V2Obj *struct { - EcDist []int - EcIndex int - EcM int - EcN int - Size int + EcDist []int + EcIndex int + EcM int + EcN int + Size int + EcBSize int + PartNums []int + PartSizes []int + MetaUsr struct { + Etag string `json:"etag"` + } } } var ei erasureInfo var idx int if err := json.Unmarshal(meta, &ei); err == nil && ei.V2Obj != nil { - if size == 0 { - size = ei.V2Obj.Size - mapped = make([]byte, size) - filled = make([]byte, size) + if m.objSize == 0 { + m.objSize = ei.V2Obj.Size } - data = ei.V2Obj.EcM - parity = ei.V2Obj.EcN - if shards == 0 { - shards = data + parity + m.data = ei.V2Obj.EcM + m.parity = ei.V2Obj.EcN + if len(ei.V2Obj.PartNums) == 1 && !strings.ContainsRune(ei.V2Obj.MetaUsr.Etag, '-') { + m.wantMD5 = ei.V2Obj.MetaUsr.Etag + } + if m.shards == 0 { + m.shards = m.data + m.parity } idx = ei.V2Obj.EcIndex - 1 - fmt.Println("Read shard", ei.V2Obj.EcIndex, "Data shards", data, "Parity", parity, fmt.Sprintf("(%s)", file)) - if ei.V2Obj.Size != size { - return fmt.Errorf("size mismatch. Meta size: %d", ei.V2Obj.Size) + fmt.Println("Read shard", ei.V2Obj.EcIndex, fmt.Sprintf("(%s)", file)) + if ei.V2Obj.Size != m.objSize { + return nil, fmt.Errorf("size mismatch. Meta size: %d, Prev: %d", ei.V2Obj.Size, m.objSize) + } + for i, s := range ei.V2Obj.PartNums { + if s == partNum { + m.size = ei.V2Obj.PartSizes[i] + m.partSize = ei.V2Obj.PartSizes[i] + break + } } } else { - return err + return nil, err + } + + offset := ei.V2Obj.EcBSize * blockNum + if offset >= m.size { + return nil, fmt.Errorf("block %d out of range. offset %d > size %d", blockNum, offset, m.size) + } + m.blockOffset = offset + m.blocks = (m.size + ei.V2Obj.EcBSize - 1) / ei.V2Obj.EcBSize + if m.blocks > 0 { + m.blocks-- + } + if blockNum < m.blocks { + m.size = ei.V2Obj.EcBSize + } else { + m.size -= offset + } + + b, err := os.ReadFile(file) + if err != nil { + return nil, err } if len(b) < 32 { - return fmt.Errorf("file %s too short", file) + return nil, fmt.Errorf("file %s too short", file) + } + + // Extract block data. + ssz := shardSize(ei.V2Obj.EcBSize, ei.V2Obj.EcM) + b, err = bitrot(b, blockNum*ssz, ssz) + if err != nil { + return nil, err + } + + if m.mapped == nil { + m.mapped = make([]byte, m.size) + m.filled = make([]byte, m.size) } - // Trim hash. Fine for inline data, since only one block. 
- b = b[32:] - set := parityData[data] + set := m.parityData[m.data] if set == nil { set = make(map[int][]byte) } set[idx] = b - parityData[data] = set + m.parityData[m.data] = set // Combine start := len(b) * idx - if start >= len(mapped) { + if start >= len(m.mapped) { continue } - copy(mapped[start:], b) + fmt.Println("Block data size:", m.size, "Shard size", ssz, "Got Shard:", len(b), "Bitrot ok", "Start", start, "End", start+len(b)) + copy(m.mapped[start:], b) for j := range b { - if j+start >= len(filled) { + if j+start >= len(m.filled) { break } - filled[j+start] = 1 + m.filled[j+start] = 1 } } + return &m, nil +} +func combine(files []string, out string) error { + fmt.Printf("Attempting to combine version %q.\n", out) + m, err := readAndMap(files, 1, 0) + if err != nil { + return err + } + if m.blocks > 0 { + // TODO: Support multiple blocks. For now use -xver. + return fmt.Errorf("multiple blocks found, only one block supported. Try with -xver") + } lastValid := 0 missing := 0 - for i := range filled { - if filled[i] == 1 { + for i := range m.filled { + if m.filled[i] == 1 { lastValid = i } else { missing++ } } - if missing > 0 && len(parityData) > 0 { + if missing > 0 && len(m.parityData) > 0 { fmt.Println("Attempting to reconstruct using parity sets:") - for k, v := range parityData { + for k, v := range m.parityData { if missing == 0 { break } - fmt.Println("* Setup: Data shards:", k, "- Parity blocks:", len(v)) - rs, err := reedsolomon.New(k, shards-k) + fmt.Println("* Setup: Data shards:", k, "- Parity blocks:", m.shards-k) + rs, err := reedsolomon.New(k, m.shards-k) if err != nil { return err } - split, err := rs.Split(mapped) + split, err := rs.Split(m.mapped) if err != nil { return err } - splitFilled, err := rs.Split(filled) + splitFilled, err := rs.Split(m.filled) if err != nil { return err } ok := len(splitFilled) for i, sh := range splitFilled { - for _, v := range sh { - if v == 0 { - split[i] = nil - ok-- - break - } + if slices.Contains(sh, 0) { + split[i] = nil + ok-- } } hasParity := 0 for idx, sh := range v { split[idx] = sh - if idx >= k && len(v) > 0 { + if idx >= k && len(sh) > 0 { hasParity++ } } @@ -765,9 +1004,9 @@ func combine(files []string, out string) error { fmt.Println("Could reconstruct completely") for i, data := range split[:k] { start := i * len(data) - copy(mapped[start:], data) + copy(m.mapped[start:], data) } - lastValid = size - 1 + lastValid = m.size - 1 missing = 0 } else { fmt.Println("Could NOT reconstruct:", err) @@ -778,16 +1017,550 @@ func combine(files []string, out string) error { return errors.New("no valid data found") } if missing > 0 { - out += ".truncated" + fmt.Println(missing, "bytes missing. Truncating", len(m.filled)-lastValid-1, "from end.") + out += ".incomplete" } else { + fmt.Println("No bytes missing.") out += ".complete" } - fmt.Println(missing, "bytes missing. 
Truncating", len(filled)-lastValid-1, "from end.") - mapped = mapped[:lastValid+1] - err := os.WriteFile(out, mapped, os.ModePerm) + m.mapped = m.mapped[:lastValid+1] + err = os.WriteFile(out, m.mapped, os.ModePerm) if err != nil { return err } fmt.Println("Wrote output to", out) return nil } + +func combineCrossVer(all map[string][]string, baseName string) error { + names := make([][]string, 0) + /// part, verID, file + files := make([]map[string][]string, 0) + partNums := make(map[int]int) + for k, v := range all { + for _, file := range v { + part := getPartNum(file) + partIdx, ok := partNums[part] + if !ok { + partIdx = len(names) + partNums[part] = partIdx + names = append(names, nil) + files = append(files, make(map[string][]string)) + } + names[partIdx] = append(names[partIdx], k) + files[partIdx][k] = append(files[partIdx][k], file) + } + } + if len(files) == 0 { + return nil + } + for part, partIdx := range partNums { + if len(files[partIdx]) == 0 { + continue + } + var wantMD5 string + exportedSizes := make(map[int]bool) + // block -> data + combineSharedBlocks := make(map[int][]byte) + combineFilledBlocks := make(map[int][]byte) + nextFile: + for key, file := range files[partIdx] { + fmt.Println("Reading base version", file[0], "part", part) + var combined []byte + var missingAll int + var lastValidAll int + + attempt := 0 + for block := 0; ; block++ { + combineFilled := combineFilledBlocks[block] + combineShared := combineSharedBlocks[block] + nextAttempt: + fmt.Printf("Block %d, Base version %q. Part %d. Files %d\n", block+1, key, part, len(file)) + m, err := readAndMap(file, part, block) + if err != nil { + return err + } + if exportedSizes[m.objSize] { + fmt.Println("Skipping version", key, "as it has already been exported.") + continue nextFile + } + addedFiles := 0 + compareFile: + for otherKey, other := range files[partIdx] { + addedFiles++ + if attempt > 0 && len(m.filled) == len(combineFilled) { + fmt.Println("Merging previous global data") + filled := 0 + missing := 0 + for i, v := range combineFilled { + if v == 1 { + m.filled[i] = 1 + m.mapped[i] = combineShared[i] + filled++ + } else { + missing++ + } + } + fmt.Println("Missing", missing, "bytes. Filled", filled, "bytes.") + break + } + if key == otherKey { + continue + } + + otherPart := getPartNum(other[0]) + if part != otherPart { + fmt.Println("part ", part, " != other part", otherPart, other[0]) + continue + } + // fmt.Println("part ", part, "other part", otherPart, other[0]) + fmt.Printf("Reading version %q Part %d.\n", otherKey, otherPart) + // os.Exit(0) + otherM, err := readAndMap(other, part, block) + if err != nil { + fmt.Println(err) + continue + } + if m.objSize != otherM.objSize { + continue + } + + // If data+parity matches, combine. + if m.parity == otherM.parity && m.data == otherM.data { + for k, v := range m.parityData { + if otherM.parityData[k] == nil { + continue + } + for i, data := range v { + if data != nil || otherM.parityData[k][i] == nil { + continue + } + m.parityData[k][i] = otherM.parityData[k][i] + } + } + } + + var ok int + for i, filled := range otherM.filled[:m.size] { + if filled == 1 && m.filled[i] == 1 { + if m.mapped[i] != otherM.mapped[i] { + fmt.Println("Data mismatch at byte", i, "- Disregarding version", otherKey) + continue compareFile + } + ok++ + } + } + + fmt.Printf("Data overlaps (%d bytes). 
Combining with %q.\n", ok, otherKey) + for i := range otherM.filled { + if otherM.filled[i] == 1 { + m.filled[i] = 1 + m.mapped[i] = otherM.mapped[i] + } + } + } + + lastValid := 0 + missing := 0 + for i := range m.filled { + if m.filled[i] == 1 { + lastValid = i + } else { + missing++ + } + } + if missing == 0 && len(m.parityData) == 1 { + k := 0 + var parityData map[int][]byte + for par, pdata := range m.parityData { + k = par + parityData = pdata + } + if k > 0 { + rs, err := reedsolomon.New(k, m.shards-k) + if err != nil { + return err + } + + splitData, err := rs.Split(m.mapped) + if err != nil { + return err + } + // Do separate encode, verify we get the same result. + err = rs.Encode(splitData) + if err != nil { + return err + } + misMatches := 0 + for idx, sh := range parityData { + calculated := splitData[idx] + if !bytes.Equal(calculated, sh) { + off := 0 + for i, v := range sh { + if v != calculated[i] { + off = i + break + } + } + calculated := calculated[off:] + inFile := sh[off:] + extra := "" + if len(calculated) != len(inFile) { + fmt.Println("SIZE MISMATCH", len(calculated), len(inFile)) + } else if len(calculated) > 10 { + calculated = calculated[:10] + inFile = inFile[:10] + extra = "..." + } + a := hex.EncodeToString(calculated) + extra + b := hex.EncodeToString(inFile) + extra + fmt.Println("MISMATCH in parity shard", idx+1, "at offset", off, "calculated:", a, "found:", b) + misMatches++ + } + } + if misMatches == 0 { + fmt.Println(m.shards-k, "erasure code shards verified.") + } + } + } + if missing > 0 && len(m.parityData) > 0 { + fmt.Println("Attempting to reconstruct using parity sets:") + for k, v := range m.parityData { + if missing == 0 { + break + } + fmt.Println("* Setup: Data shards:", k, "- Parity blocks:", m.shards-k) + rs, err := reedsolomon.New(k, m.shards-k) + if err != nil { + return err + } + splitData, err := rs.Split(m.mapped) + if err != nil { + return err + } + splitFilled, err := rs.Split(m.filled) + if err != nil { + return err + } + // Fill padding... 
+ padding := len(splitFilled[0])*k - len(m.filled) + for i := range padding { + arr := splitFilled[k-1] + arr[len(arr)-i-1] = 1 + } + + hasParity := 0 + parityOK := make([]bool, m.shards) + for idx, sh := range v { + splitData[idx] = sh + if idx >= k && len(sh) > 0 { + parityOK[idx] = true + hasParity++ + for i := range splitFilled[idx] { + splitFilled[idx][i] = 1 + } + } + } + + splitDataShards := make([]byte, len(splitFilled[0])) + for _, sh := range splitFilled { + for i, v := range sh { + splitDataShards[i] += v + } + } + var hist [256]int + for _, v := range splitDataShards { + hist[v]++ + } + + for _, v := range hist[m.data-hasParity : m.shards] { + if attempt > 0 { + break + } + if v == 0 { + continue + } + for i, v := range hist[:m.shards] { + if v > 0 { + if i < m.data { + fmt.Println("- Shards:", i, "of", m.data, "Bytes:", v, "Missing: ", v*(m.data-i+hasParity)) + } else { + fmt.Println("+ Shards:", i, "of", m.data, "Bytes:", v, "Recovering: ", v*(m.data-i+hasParity)) + } + } + } + fmt.Println("Attempting to reconstruct with partial shards") + offset := 0 + startOffset := 0 + shardConfig := make([]byte, k) + reconstructAbleConfig := false + shards := make([][]byte, m.shards) + for i := range shards { + shards[i] = make([]byte, 0, len(splitData[0])) + } + for offset < len(splitDataShards) { + newConfig := false + for shardIdx, shard := range splitFilled[:k] { + if shardConfig[shardIdx] != shard[offset] { + newConfig = true + break + } + } + if newConfig { + if offset > startOffset && reconstructAbleConfig { + reconPartial(shards, k, parityOK, splitData, startOffset, offset, rs, shardConfig, splitFilled) + } + // Update to new config and add current + valid := 0 + for shardIdx, shard := range splitFilled[:k] { + shardConfig[shardIdx] = shard[offset] + valid += int(shard[offset]) + if shard[offset] == 0 { + shards[shardIdx] = shards[shardIdx][:0] + } else { + shards[shardIdx] = append(shards[shardIdx][:0], splitData[shardIdx][offset]) + } + } + reconstructAbleConfig = valid >= m.data-hasParity && valid < m.data + startOffset = offset + offset++ + continue + } + for shardIdx, ok := range shardConfig { + if ok != 0 { + shards[shardIdx] = append(shards[shardIdx], splitData[shardIdx][offset]) + } + } + offset++ + } + if offset > startOffset && reconstructAbleConfig { + reconPartial(shards, k, parityOK, splitData, startOffset, offset, rs, shardConfig, splitFilled) + } + + var buf bytes.Buffer + if err := rs.Join(&buf, splitFilled, m.size); err == nil { + m.filled = buf.Bytes() + } + buf = bytes.Buffer{} + if err := rs.Join(&buf, splitData, m.size); err == nil { + m.mapped = buf.Bytes() + } + for i, v := range m.filled { + if v == 0 { + m.mapped[i] = 0 + } + } + break + } + ok := k + for i, sh := range splitFilled { + for j, v := range sh { + if v == 0 { + splitData[i] = nil + if i < k { + fmt.Println("Shard", i, "is missing data from offset", i*len(sh)+j) + ok-- + } + break + } + } + } + + missing = 0 + lastValid = 0 + for i := range m.filled { + if m.filled[i] == 1 { + lastValid = i + } else { + missing++ + } + } + fmt.Printf("Have %d complete remapped data shards and %d complete parity shards (%d bytes missing). 
", ok, hasParity, missing) + + if err := rs.ReconstructData(splitData); err == nil { + fmt.Println("Could reconstruct completely.") + for i, data := range splitData[:k] { + start := i * len(data) + copy(m.mapped[start:], data) + } + lastValid = m.size - 1 + missing = 0 + attempt = 2 + wantMD5 = m.wantMD5 + } else { + fmt.Println("Could NOT reconstruct:", err, " - Need", m.data, "shards.") + if attempt == 0 { + if len(combineShared) == 0 { + combineShared = make([]byte, len(m.mapped)) + combineFilled = make([]byte, len(m.filled)) + } + for i := range m.filled { + if m.filled[i] == 1 && combineFilled[i] == 0 { + combineShared[i] = m.mapped[i] + combineFilled[i] = 1 + } + } + combineFilledBlocks[block] = combineFilled + combineSharedBlocks[block] = combineShared + fmt.Println("Retrying with merged data") + if addedFiles >= len(files[partIdx]) { + attempt++ + goto nextAttempt + } + } + } + } + } + if m.blockOffset != len(combined) { + return fmt.Errorf("Block offset mismatch. Expected %d got %d", m.blockOffset, len(combined)) + } + combined = append(combined, m.mapped[:m.size]...) + missingAll += missing + if lastValid > 0 { + lastValidAll = lastValid + m.blockOffset + } + if m.blocks == block { + if len(combined) != m.partSize { + fmt.Println("Combined size mismatch. Expected", m.partSize, "got", len(combined)) + } + fmt.Println("Reached block", block+1, "of", m.blocks+1, "for", key, "Done.") + break + } + } + if lastValidAll == 0 { + return errors.New("no valid data found") + } + out := fmt.Sprintf("%s-%s.%05d", key, baseName, part) + if len(files) == 1 { + out = fmt.Sprintf("%s-%s", key, baseName) + } + if missingAll > 0 { + out += ".incomplete" + fmt.Println(missingAll, "bytes missing.") + } else { + if wantMD5 != "" { + sum := md5.Sum(combined) + gotMD5 := hex.EncodeToString(sum[:]) + if gotMD5 != wantMD5 { + fmt.Println("MD5 mismatch. Expected", wantMD5, "got", gotMD5) + out += ".mismatch" + } else { + fmt.Println("MD5 verified.") + out = fmt.Sprintf("verified/%s", baseName) + } + } else { + out = fmt.Sprintf("complete/%s.%05d", baseName, part) + fmt.Println("No bytes missing.") + } + } + if missingAll == 0 { + exportedSizes[len(combined)] = true + } + err := os.MkdirAll(filepath.Dir(out), os.ModePerm) + if err != nil { + return err + } + err = os.WriteFile(out, combined, os.ModePerm) + if err != nil { + return err + } + fmt.Println("Wrote output to", out) + } + } + return nil +} + +func reconPartial(shards [][]byte, k int, parityOK []bool, splitData [][]byte, startOffset int, offset int, rs reedsolomon.Encoder, shardConfig []byte, splitFilled [][]byte) { + // Add parity + for i := range shards[k:] { + shards[i+k] = nil + if parityOK[i+k] { + shards[i+k] = splitData[i+k][startOffset:offset] + } + } + // Reconstruct with current config. + if err := rs.ReconstructData(shards); err != nil { + panic(fmt.Sprintln("Internal error, could NOT partially reconstruct:", err)) + } + // Copy reconstructed data back. 
+ verified := 0 + reconstructed := 0 + for shardsIdx, ok := range shardConfig { + if ok == 0 { + copy(splitData[shardsIdx][startOffset:], shards[shardsIdx]) + for i := range shards[shardsIdx] { + if splitFilled[shardsIdx][startOffset+i] == 1 { + fmt.Println("Internal error: Found filled data at", startOffset+i) + } + splitFilled[shardsIdx][startOffset+i] = 1 + } + reconstructed += len(shards[shardsIdx]) + } else { + for i := range shards[shardsIdx] { + if splitFilled[shardsIdx][startOffset+i] == 0 { + fmt.Println("Internal error: Expected filled data at", startOffset+i) + } + if splitData[shardsIdx][startOffset+i] != shards[shardsIdx][i] { + fmt.Println("Internal error: Mismatch at", startOffset+i) + } + verified++ + } + } + } + fmt.Println("Reconstructed", reconstructed, "bytes and verified", verified, "bytes of partial shard with config", string(shardConfig)) +} + +// bitrot returns a shard beginning at startOffset after doing bitrot checks. +func bitrot(val []byte, startOffset, shardSize int) ([]byte, error) { + var res []byte + for len(val) >= 32 { + want := val[:32] + data := val[32:] + if len(data) > shardSize { + data = data[:shardSize] + } + + const magicHighwayHash256Key = "\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0" + + hh, _ := highwayhash.New([]byte(magicHighwayHash256Key)) + hh.Write(data) + if !bytes.Equal(want, hh.Sum(nil)) { + return res, fmt.Errorf("bitrot detected") + } + res = append(res, data...) + val = val[32+len(data):] + if len(res) > startOffset { + return res[startOffset:], nil + } + } + return res, fmt.Errorf("bitrot: data too short to get block. len(res)=%d, startOffset=%d", len(res), startOffset) +} + +// shardSize returns the shard size for a given block size and data blocks. +func shardSize(blockSize, dataBlocks int) (sz int) { + if dataBlocks == 0 { + // do nothing on invalid input + return sz + } + // Make denominator positive + if dataBlocks < 0 { + blockSize = -blockSize + dataBlocks = -dataBlocks + } + sz = blockSize / dataBlocks + if blockSize > 0 && blockSize%dataBlocks != 0 { + sz++ + } + return sz +} + +//nolint:staticcheck +var rePartNum = regexp.MustCompile("/part\\.([0-9]+)/") + +func getPartNum(s string) int { + if m := rePartNum.FindStringSubmatch(s); len(m) > 1 { + n, _ := strconv.Atoi(m[1]) + return n + } + return 1 +} diff --git a/docs/distributed/CONFIG.md b/docs/distributed/CONFIG.md index 0417070b34136..bb029dda2d0ef 100644 --- a/docs/distributed/CONFIG.md +++ b/docs/distributed/CONFIG.md @@ -15,34 +15,57 @@ minio server --config config.yaml Lets you start MinIO server with all inputs to start MinIO server provided via this configuration file, once the configuration file is provided all other pre-existing values on disk for configuration are overridden by the new values set in this configuration file. Following is an example YAML configuration structure. 
-``` -version: v1 -address: ':9000' -rootUser: 'minioadmin' -rootPassword: 'pBU94AGAY85e' -console-address: ':9001' -certs-dir: '/home/user/.minio/certs/' +```yaml +version: v2 +address: ":9000" +rootUser: "minioadmin" +rootPassword: "minioadmin" +console-address: ":9001" +certs-dir: "/home/user/.minio/certs/" pools: # Specify the nodes and drives with pools - - - - 'https://server-example-pool1:9000/mnt/disk{1...4}/' - - 'https://server{1...2}-pool1:9000/mnt/disk{1...4}/' - - 'https://server3-pool1:9000/mnt/disk{1...4}/' - - 'https://server4-pool1:9000/mnt/disk{1...4}/' - - - - 'https://server-example-pool2:9000/mnt/disk{1...4}/' - - 'https://server{1...2}-pool2:9000/mnt/disk{1...4}/' - - 'https://server3-pool2:9000/mnt/disk{1...4}/' - - 'https://server4-pool2:9000/mnt/disk{1...4}/' - -... + - args: + - "https://server-example-pool1:9000/mnt/disk{1...4}/" + - "https://server{1...2}-pool1:9000/mnt/disk{1...4}/" + - "https://server3-pool1:9000/mnt/disk{1...4}/" + - "https://server4-pool1:9000/mnt/disk{1...4}/" + - args: + - "https://server-example-pool2:9000/mnt/disk{1...4}/" + - "https://server{1...2}-pool2:9000/mnt/disk{1...4}/" + - "https://server3-pool2:9000/mnt/disk{1...4}/" + - "https://server4-pool2:9000/mnt/disk{1...4}/" + # more args options: ftp: # settings for MinIO to act as an ftp server - address: ':8021' - passive-port-range: '30000-40000' + address: ":8021" + passive-port-range: "30000-40000" sftp: # settings for MinIO to act as an sftp server - address: ':8022' - ssh-private-key: '/home/user/.ssh/id_rsa' + address: ":8022" + ssh-private-key: "/home/user/.ssh/id_rsa" +``` + +If you are using the config `v1` YAML you should migrate your `pools:` field values to the following format + +`v1` format +```yaml +pools: # Specify the nodes and drives with pools + - + - "https://server-example-pool1:9000/mnt/disk{1...4}/" + - "https://server{1...2}-pool1:9000/mnt/disk{1...4}/" + - "https://server3-pool1:9000/mnt/disk{1...4}/" + - "https://server4-pool1:9000/mnt/disk{1...4}/" +``` + +to `v2` format + +```yaml +pools: + - args: + - "https://server-example-pool1:9000/mnt/disk{1...4}/" + - "https://server{1...2}-pool1:9000/mnt/disk{1...4}/" + - "https://server3-pool1:9000/mnt/disk{1...4}/" + - "https://server4-pool1:9000/mnt/disk{1...4}/" + set-drive-count: 4 # Advanced option, must be used under guidance from MinIO team. ``` ### Things to know @@ -51,7 +74,7 @@ options: - Each pool expects a minimum of 2 nodes per pool, and unique non-repeating hosts for each argument. - Each pool expects each host in this pool has the same number of drives specified as any other host. - Mixing `local-path` and `distributed-path` is not allowed, doing so would cause MinIO to refuse starting the server. -- Ellipses notation (e.g. `{1...10}`) or bracket notations are fully allowed (e.g. `{a,c,f}`) to have multiple entries in one line. +- Ellipses and bracket notation (e.g. `{1...10}`) are allowed. > NOTE: MinIO environmental variables still take precedence over the `config.yaml` file, however `config.yaml` is preferred over MinIO internal config KV settings via `mc admin config set alias/ `. @@ -65,3 +88,4 @@ In subsequent releases we are planning to extend this to provide things like and decommissioning to provide a functionality that smaller deployments care about. +- Fully allow bracket notation (e.g. `{a,c,f}`) to have multiple entries on one line. 
\ No newline at end of file diff --git a/docs/distributed/DESIGN.md b/docs/distributed/DESIGN.md index abe861842407c..4c663d4b9bf8d 100644 --- a/docs/distributed/DESIGN.md +++ b/docs/distributed/DESIGN.md @@ -41,7 +41,7 @@ Expansion of ellipses and choice of erasure sets based on this expansion is an a - Erasure coding used by MinIO is [Reed-Solomon](https://github.com/klauspost/reedsolomon) erasure coding scheme, which has a total shard maximum of 256 i.e 128 data and 128 parity. MinIO design goes beyond this limitation by doing some practical architecture choices. -- Erasure set is a single erasure coding unit within a MinIO deployment. An object is sharded within an erasure set. Erasure set size is automatically calculated based on the number of drives. MinIO supports unlimited number of drives but each erasure set can be upto 16 drives and a minimum of 2 drives. +- Erasure set is a single erasure coding unit within a MinIO deployment. An object is sharded within an erasure set. Erasure set size is automatically calculated based on the number of drives. MinIO supports unlimited number of drives but each erasure set can be up to 16 drives and a minimum of 2 drives. - We limited the number of drives to 16 for erasure set because, erasure code shards more than 16 can become chatty and do not have any performance advantages. Additionally since 16 drive erasure set gives you tolerance of 8 drives per object by default which is plenty in any practical scenario. diff --git a/docs/distributed/README.md b/docs/distributed/README.md index 9fe7e9edb0ff5..0ebf6cfcb1733 100644 --- a/docs/distributed/README.md +++ b/docs/distributed/README.md @@ -8,7 +8,7 @@ MinIO in distributed mode can help you setup a highly-available storage system w ### Data protection -Distributed MinIO provides protection against multiple node/drive failures and [bit rot](https://github.com/minio/minio/blob/master/docs/erasure/README.md#what-is-bit-rot-protection) using [erasure code](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html). As the minimum drives required for distributed MinIO is 2 (same as minimum drives required for erasure coding), erasure code automatically kicks in as you launch distributed MinIO. +Distributed MinIO provides protection against multiple node/drive failures and [bit rot](https://github.com/minio/minio/blob/master/docs/erasure/README.md#what-is-bit-rot-protection) using [erasure code](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html). As the minimum drives required for distributed MinIO is 2 (same as minimum drives required for erasure coding), erasure code automatically kicks in as you launch distributed MinIO. If one or more drives are offline at the start of a PutObject or NewMultipartUpload operation the object will have additional data protection bits added automatically to provide additional safety for these objects. @@ -38,11 +38,11 @@ Install MinIO either on Kubernetes or Distributed Linux. Install MinIO on Kubernetes: -- [MinIO Quickstart Guide for Kubernetes](https://min.io/docs/minio/kubernetes/upstream/index.html#quickstart-for-kubernetes). -- [Deploy a Tenant from the MinIO Operator](https://min.io/docs/minio/kubernetes/upstream/operations/install-deploy-manage/deploy-minio-tenant.html) +- [MinIO Quickstart Guide for Kubernetes](https://docs.min.io/community/minio-object-store/operations/deployments/kubernetes.html). 
+- [Deploy a Tenant from the MinIO Operator](https://docs.min.io/community/minio-object-store/operations/deployments/k8s-deploy-minio-tenant-on-kubernetes.html) Install Distributed MinIO on Linux: -- [Deploy Distributed MinIO on Linux](https://min.io/docs/minio/linux/operations/install-deploy-manage/deploy-minio-multi-node-multi-drive.html#deploy-distributed-minio) +- [Deploy Distributed MinIO on Linux](https://docs.min.io/community/minio-object-store/operations/install-deploy-manage/deploy-minio-multi-node-multi-drive.html#deploy-distributed-minio) ### 2. Run distributed MinIO @@ -98,12 +98,12 @@ Now the server has expanded total storage by _(newly_added_servers\*m)_ more dri ## 3. Test your setup -To test this setup, access the MinIO server via browser or [`mc`](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart). +To test this setup, access the MinIO server via browser or [`mc`](https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart). ## Explore Further -- [MinIO Erasure Code QuickStart Guide](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) -- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html) -- [Use `aws-cli` with MinIO Server](https://min.io/docs/minio/linux/integrations/aws-cli-with-minio.html) -- [Use `minio-go` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/go/minio-go.html) -- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html) +- [MinIO Erasure Code QuickStart Guide](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html) +- [Use `mc` with MinIO Server](https://docs.min.io/community/minio-object-store/reference/minio-mc.html) +- [Use `aws-cli` with MinIO Server](https://docs.min.io/community/minio-object-store/integrations/aws-cli-with-minio.html) +- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/community/minio-object-store/developers/go/minio-go.html) +- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html) diff --git a/docs/distributed/decom-compressed-sse-s3.sh b/docs/distributed/decom-compressed-sse-s3.sh index 5f0bede1d974a..f8aba098616bb 100755 --- a/docs/distributed/decom-compressed-sse-s3.sh +++ b/docs/distributed/decom-compressed-sse-s3.sh @@ -24,7 +24,7 @@ export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9000/" (minio server http://localhost:9000/tmp/xl/{1...10}/disk{0...1} 2>&1 >/dev/null) & pid=$! -sleep 30 +./mc ready myminio ./mc admin user add myminio/ minio123 minio123 ./mc admin user add myminio/ minio12345 minio12345 @@ -59,6 +59,7 @@ pid_1=$! pid_2=$! sleep 30 +./mc ready myminio expanded_user_count=$(./mc admin user list myminio/ | wc -l) expanded_policy_count=$(./mc admin policy list myminio/ | wc -l) @@ -100,10 +101,10 @@ sleep 5 (minio server --address ":9001" http://localhost:9001/tmp/xl/{11...30}/disk{0...3} 2>&1 >/tmp/removed.log) & pid=$! 
-sleep 30 - export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9001/" +./mc ready myminio + decom_user_count=$(./mc admin user list myminio/ | wc -l) decom_policy_count=$(./mc admin policy list myminio/ | wc -l) @@ -147,12 +148,6 @@ if [ $ret -ne 0 ]; then exit 1 fi -( - cd ./docs/debugging/s3-check-md5 - go install -v -) - -s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://127.0.0.1:9001/ -bucket bucket2 -s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://127.0.0.1:9001/ -bucket versioned +./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://127.0.0.1:9001/ -bucket versioned kill $pid diff --git a/docs/distributed/decom-encrypted-kes.sh b/docs/distributed/decom-encrypted-kes.sh new file mode 100755 index 0000000000000..836a22b5bd624 --- /dev/null +++ b/docs/distributed/decom-encrypted-kes.sh @@ -0,0 +1,245 @@ +#!/bin/bash + +if [ -n "$TEST_DEBUG" ]; then + set -x +fi + +pkill minio +pkill kes +rm -rf /tmp/xl + +if [ ! -f ./mc ]; then + wget --quiet -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc && + chmod +x mc +fi + +if [ ! -f ./kes ]; then + wget --quiet -O kes https://github.com/minio/kes/releases/latest/download/kes-linux-amd64 && + chmod +x kes +fi + +if ! openssl version &>/dev/null; then + apt install openssl || sudo apt install openssl +fi + +# Start KES Server +(./kes server --dev 2>&1 >kes-server.log) & +kes_pid=$! +sleep 5s +API_KEY=$(grep "API Key" /dev/null 1>public.crt) + +export CI=true +export MINIO_KMS_KES_ENDPOINT=https://127.0.0.1:7373 +export MINIO_KMS_KES_API_KEY="${API_KEY}" +export MINIO_KMS_KES_KEY_NAME=minio-default-key +export MINIO_KMS_KES_CAPATH=public.crt +export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9000/" + +(minio server http://localhost:9000/tmp/xl/{1...10}/disk{0...1} 2>&1 >/dev/null) & +pid=$! + +./mc ready myminio + +./mc admin user add myminio/ minio123 minio123 +./mc admin user add myminio/ minio12345 minio12345 + +./mc admin policy create myminio/ rw ./docs/distributed/rw.json +./mc admin policy create myminio/ lake ./docs/distributed/rw.json + +./mc admin policy attach myminio/ rw --user=minio123 +./mc admin policy attach myminio/ lake --user=minio12345 + +./mc mb -l myminio/versioned +./mc mb -l myminio/versioned-1 + +./mc encrypt set sse-s3 myminio/versioned +./mc encrypt set sse-kms minio-default-key myminio/versioned-1 + +./mc mirror internal myminio/versioned/ --quiet >/dev/null +./mc mirror internal myminio/versioned-1/ --quiet >/dev/null + +## Soft delete (creates delete markers) +./mc rm -r --force myminio/versioned >/dev/null +./mc rm -r --force myminio/versioned-1 >/dev/null + +## mirror again to create another set of versions on top +./mc mirror internal myminio/versioned/ --quiet >/dev/null +./mc mirror internal myminio/versioned-1/ --quiet >/dev/null + +expected_checksum=$(./mc cat internal/dsync/drwmutex.go | md5sum) + +user_count=$(./mc admin user list myminio/ | wc -l) +policy_count=$(./mc admin policy list myminio/ | wc -l) + +kill $pid + +(minio server http://localhost:9000/tmp/xl/{1...10}/disk{0...1} http://localhost:9001/tmp/xl/{11...30}/disk{0...3} 2>&1 >/tmp/expanded_1.log) & +pid_1=$! + +(minio server --address ":9001" http://localhost:9000/tmp/xl/{1...10}/disk{0...1} http://localhost:9001/tmp/xl/{11...30}/disk{0...3} 2>&1 >/tmp/expanded_2.log) & +pid_2=$!
+ +./mc ready myminio + +expanded_user_count=$(./mc admin user list myminio/ | wc -l) +expanded_policy_count=$(./mc admin policy list myminio/ | wc -l) + +if [ "$user_count" -ne "$expanded_user_count" ]; then + echo "BUG: original user count differs from expanded setup" + exit 1 +fi + +if [ "$policy_count" -ne "$expanded_policy_count" ]; then + echo "BUG: original policy count differs from expanded setup" + exit 1 +fi + +./mc version info myminio/versioned | grep -q "versioning is enabled" +ret=$? +if [ $ret -ne 0 ]; then + echo "expected versioning enabled after expansion" + exit 1 +fi + +./mc encrypt info myminio/versioned | grep -q "Auto encryption 'sse-s3' is enabled" +ret=$? +if [ $ret -ne 0 ]; then + echo "expected encryption enabled after expansion" + exit 1 +fi + +./mc version info myminio/versioned-1 | grep -q "versioning is enabled" +ret=$? +if [ $ret -ne 0 ]; then + echo "expected versioning enabled after expansion" + exit 1 +fi + +./mc encrypt info myminio/versioned-1 | grep -q "Auto encryption 'sse-kms' is enabled" +ret=$? +if [ $ret -ne 0 ]; then + echo "expected encryption enabled after expansion" + exit 1 +fi + +./mc mirror cmd myminio/versioned/ --quiet >/dev/null +./mc mirror cmd myminio/versioned-1/ --quiet >/dev/null + +./mc ls -r myminio/versioned/ >expanded_ns.txt +./mc ls -r --versions myminio/versioned/ >expanded_ns_versions.txt +./mc ls -r myminio/versioned-1/ >expanded_ns_1.txt +./mc ls -r --versions myminio/versioned-1/ >expanded_ns_versions_1.txt + +./mc admin decom start myminio/ http://localhost:9000/tmp/xl/{1...10}/disk{0...1} + +until $(./mc admin decom status myminio/ | grep -q Complete); do + echo "waiting for decom to finish..." + sleep 1s +done + +kill $pid_1 +kill $pid_2 + +sleep 5s + +(minio server --address ":9001" http://localhost:9001/tmp/xl/{11...30}/disk{0...3} 2>&1 >/tmp/removed.log) & +pid=$! + +sleep 30s + +export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9001/" + +./mc ready myminio + +decom_user_count=$(./mc admin user list myminio/ | wc -l) +decom_policy_count=$(./mc admin policy list myminio/ | wc -l) + +if [ "$user_count" -ne "$decom_user_count" ]; then + echo "BUG: original user count differs after decommission" + exit 1 +fi + +if [ "$policy_count" -ne "$decom_policy_count" ]; then + echo "BUG: original policy count differs after decommission" + exit 1 +fi + +./mc version info myminio/versioned | grep -q "versioning is enabled" +ret=$? +if [ $ret -ne 0 ]; then + echo "BUG: expected versioning enabled after decommission" + exit 1 +fi + +./mc encrypt info myminio/versioned | grep -q "Auto encryption 'sse-s3' is enabled" +ret=$? +if [ $ret -ne 0 ]; then + echo "BUG: expected encryption enabled after expansion" + exit 1 +fi + +./mc version info myminio/versioned-1 | grep -q "versioning is enabled" +ret=$? +if [ $ret -ne 0 ]; then + echo "BUG: expected versioning enabled after decommission" + exit 1 +fi + +./mc encrypt info myminio/versioned-1 | grep -q "Auto encryption 'sse-kms' is enabled" +ret=$? 
+if [ $ret -ne 0 ]; then + echo "BUG: expected encryption enabled after expansion" + exit 1 +fi + +got_checksum=$(./mc cat myminio/versioned/dsync/drwmutex.go | md5sum) +if [ "${expected_checksum}" != "${got_checksum}" ]; then + echo "BUG: decommission failed on encrypted objects: expected ${expected_checksum} got ${got_checksum}" + exit 1 +fi + +got_checksum_1=$(./mc cat myminio/versioned-1/dsync/drwmutex.go | md5sum) +if [ "${expected_checksum}" != "${got_checksum_1}" ]; then + echo "BUG: decommission failed on encrypted objects: expected ${expected_checksum} got ${got_checksum_1}" + exit 1 +fi + +./mc ls -r myminio/versioned >decommissioned_ns.txt +./mc ls -r --versions myminio/versioned >decommissioned_ns_versions.txt +./mc ls -r myminio/versioned-1 >decommissioned_ns_1.txt +./mc ls -r --versions myminio/versioned-1 >decommissioned_ns_versions_1.txt + +out=$(diff -qpruN expanded_ns.txt decommissioned_ns.txt) +ret=$? +if [ $ret -ne 0 ]; then + echo "BUG: expected no missing entries after decommission: $out" + exit 1 +fi + +out=$(diff -qpruN expanded_ns_versions.txt decommissioned_ns_versions.txt) +ret=$? +if [ $ret -ne 0 ]; then + echo "BUG: expected no missing entries after decommission: $out" + exit 1 +fi + +out1=$(diff -qpruN expanded_ns_1.txt decommissioned_ns_1.txt) +ret=$? +if [ $ret -ne 0 ]; then + echo "BUG: expected no missing entries after decommission: $out1" + exit 1 +fi + +out1=$(diff -qpruN expanded_ns_versions_1.txt decommissioned_ns_versions_1.txt) +ret=$? +if [ $ret -ne 0 ]; then + echo "BUG: expected no missing entries after decommission: $out1" + exit 1 +fi + +./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://127.0.0.1:9001/ -bucket versioned +./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://127.0.0.1:9001/ -bucket versioned-1 + +kill $pid +kill $kes_pid diff --git a/docs/distributed/decom-encrypted-sse-s3.sh b/docs/distributed/decom-encrypted-sse-s3.sh index c28413280a5b9..651803488020a 100755 --- a/docs/distributed/decom-encrypted-sse-s3.sh +++ b/docs/distributed/decom-encrypted-sse-s3.sh @@ -19,7 +19,7 @@ export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9000/" (minio server http://localhost:9000/tmp/xl/{1...10}/disk{0...1} 2>&1 >/dev/null) & pid=$! -sleep 30 +./mc ready myminio ./mc admin user add myminio/ minio123 minio123 ./mc admin user add myminio/ minio12345 minio12345 @@ -55,7 +55,7 @@ pid_1=$! (minio server --address ":9001" http://localhost:9000/tmp/xl/{1...10}/disk{0...1} http://localhost:9001/tmp/xl/{11...30}/disk{0...3} 2>&1 >/tmp/expanded_2.log) & pid_2=$! -sleep 30 +./mc ready myminio expanded_user_count=$(./mc admin user list myminio/ | wc -l) expanded_policy_count=$(./mc admin policy list myminio/ | wc -l) @@ -104,10 +104,10 @@ sleep 5 (minio server --address ":9001" http://localhost:9001/tmp/xl/{11...30}/disk{0...3} 2>&1 >/tmp/removed.log) & pid=$! 
-sleep 30 - export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9001/" +./mc ready myminio + decom_user_count=$(./mc admin user list myminio/ | wc -l) decom_policy_count=$(./mc admin policy list myminio/ | wc -l) @@ -158,12 +158,6 @@ if [ $ret -ne 0 ]; then exit 1 fi -( - cd ./docs/debugging/s3-check-md5 - go install -v -) - -s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://127.0.0.1:9001/ -bucket bucket2 -s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://127.0.0.1:9001/ -bucket versioned +./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://127.0.0.1:9001/ -bucket versioned kill $pid diff --git a/docs/distributed/decom-encrypted.sh b/docs/distributed/decom-encrypted.sh index 2eb2e1aa74d45..6d47537b7138e 100755 --- a/docs/distributed/decom-encrypted.sh +++ b/docs/distributed/decom-encrypted.sh @@ -19,10 +19,10 @@ export MINIO_KMS_SECRET_KEY=my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl (minio server http://localhost:9000/tmp/xl/{1...10}/disk{0...1} 2>&1 >/dev/null) & pid=$! -sleep 30 - export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9000/" +./mc ready myminio + ./mc admin user add myminio/ minio123 minio123 ./mc admin user add myminio/ minio12345 minio12345 @@ -55,7 +55,7 @@ pid_1=$! (minio server --address ":9001" http://localhost:9000/tmp/xl/{1...10}/disk{0...1} http://localhost:9001/tmp/xl/{11...30}/disk{0...3} 2>&1 >/tmp/expanded_2.log) & pid_2=$! -sleep 30 +./mc ready myminio expanded_user_count=$(./mc admin user list myminio/ | wc -l) expanded_policy_count=$(./mc admin policy list myminio/ | wc -l) @@ -97,10 +97,10 @@ sleep 5 (minio server --address ":9001" http://localhost:9001/tmp/xl/{11...30}/disk{0...3} 2>&1 >/tmp/removed.log) & pid=$! -sleep 30 - export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9001/" +./mc ready myminio + decom_user_count=$(./mc admin user list myminio/ | wc -l) decom_policy_count=$(./mc admin policy list myminio/ | wc -l) @@ -144,12 +144,6 @@ if [ "${expected_checksum}" != "${got_checksum}" ]; then exit 1 fi -( - cd ./docs/debugging/s3-check-md5 - go install -v -) - -s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://127.0.0.1:9001/ -bucket bucket2 -s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://127.0.0.1:9001/ -bucket versioned +./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://127.0.0.1:9001/ -bucket versioned kill $pid diff --git a/docs/distributed/decom.sh b/docs/distributed/decom.sh index 80c2b4e4170db..18d9ca4fbbb8d 100755 --- a/docs/distributed/decom.sh +++ b/docs/distributed/decom.sh @@ -19,10 +19,10 @@ export MINIO_SCANNER_SPEED=fastest (minio server http://localhost:9000/tmp/xl/{1...10}/disk{0...1} 2>&1 >/tmp/decom.log) & pid=$! -sleep 30 - export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9000/" +./mc ready myminio + ./mc admin user add myminio/ minio123 minio123 ./mc admin user add myminio/ minio12345 minio12345 @@ -49,10 +49,11 @@ policy_count=$(./mc admin policy list myminio/ | wc -l) ## create a warm tier instance (minio server /tmp/xltier/{1...4}/disk{0...1} --address :9002 2>&1 >/dev/null) & -sleep 30 export MC_HOST_mytier="http://minioadmin:minioadmin@localhost:9002/" +./mc ready myminio + ./mc mb -l myminio/bucket2 ./mc mb -l mytier/tiered @@ -77,7 +78,7 @@ pid_1=$! 
(minio server --address ":9001" http://localhost:9000/tmp/xl/{1...10}/disk{0...1} http://localhost:9001/tmp/xl/{11...30}/disk{0...3} 2>&1 >/tmp/expanded_2.log) & pid_2=$! -sleep 30 +./mc ready myminio expanded_user_count=$(./mc admin user list myminio/ | wc -l) expanded_policy_count=$(./mc admin policy list myminio/ | wc -l) @@ -124,10 +125,11 @@ sleep 5 (minio server --address ":9001" http://localhost:9001/tmp/xl/{11...30}/disk{0...3} 2>&1 >/tmp/removed.log) & pid=$! -sleep 30 - +sleep 5 export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9001/" +./mc ready myminio + decom_user_count=$(./mc admin user list myminio/ | wc -l) decom_policy_count=$(./mc admin policy list myminio/ | wc -l) @@ -210,11 +212,6 @@ if [ "${expected_checksum}" != "${got_checksum}" ]; then exit 1 fi -( - cd ./docs/debugging/s3-check-md5 - go install -v -) - s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://127.0.0.1:9001/ -bucket bucket2 s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://127.0.0.1:9001/ -bucket versioned diff --git a/docs/distributed/distributed-from-config-file.sh b/docs/distributed/distributed-from-config-file.sh index 68e68ae0c2afd..cea1717298315 100755 --- a/docs/distributed/distributed-from-config-file.sh +++ b/docs/distributed/distributed-from-config-file.sh @@ -22,6 +22,12 @@ export MINIO_CI_CD=1 if [ ! -f ./mc ]; then os="$(uname -s)" arch="$(uname -m)" + case "${arch}" in + "x86_64") + arch="amd64" + ;; + esac + wget -O mc https://dl.minio.io/client/mc/release/${os,,}-${arch,,}/mc && chmod +x mc fi @@ -58,8 +64,6 @@ site3_pid=$! minio server --config /tmp/minio.configfile.4 >/tmp/minio4_1.log 2>&1 & site4_pid=$! -sleep 30 - export MC_HOST_minio1=http://minr0otUS2r:pBU94AGAY85e@localhost:9001 export MC_HOST_minio3=http://minr0otUS2r:pBU94AGAY85e@localhost:9003 diff --git a/docs/distributed/iam-import-with-missing-entities.sh b/docs/distributed/iam-import-with-missing-entities.sh new file mode 100755 index 0000000000000..f6cc6ed4c4485 --- /dev/null +++ b/docs/distributed/iam-import-with-missing-entities.sh @@ -0,0 +1,115 @@ +#!/bin/bash + +if [ -n "$TEST_DEBUG" ]; then + set -x +fi + +pkill minio +docker rm -f $(docker ps -aq) +rm -rf /tmp/ldap{1..4} +rm -rf /tmp/ldap1{1..4} + +if [ ! 
-f ./mc ]; then + wget --quiet -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc && + chmod +x mc +fi + +mc -v + +# Start LDAP server +echo "Copying docs/distributed/samples/bootstrap-complete.ldif => minio-iam-testing/ldap/50-bootstrap.ldif" +cp docs/distributed/samples/bootstrap-complete.ldif minio-iam-testing/ldap/50-bootstrap.ldif || exit 1 +cd ./minio-iam-testing +make docker-images +make docker-run +cd - + +export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:22000" +export MC_HOST_myminio1="http://minioadmin:minioadmin@localhost:24000" + +# Start MinIO instance +export CI=true +(minio server --address :22000 --console-address :10000 http://localhost:22000/tmp/ldap{1...4} 2>&1 >/dev/null) & +sleep 30 +./mc ready myminio + +./mc idp ldap add myminio server_addr=localhost:389 server_insecure=on \ + lookup_bind_dn=cn=admin,dc=min,dc=io lookup_bind_password=admin \ + user_dn_search_base_dn=dc=min,dc=io user_dn_search_filter="(uid=%s)" \ + group_search_base_dn=ou=swengg,dc=min,dc=io group_search_filter="(&(objectclass=groupOfNames)(member=%d))" + +./mc admin service restart myminio --json +./mc ready myminio +./mc admin cluster iam import myminio docs/distributed/samples/myminio-iam-info.zip +sleep 10 + +# Verify the list of users and service accounts from the import +./mc admin user list myminio +USER_COUNT=$(./mc admin user list myminio | wc -l) +if [ "${USER_COUNT}" -ne 2 ]; then + echo "BUG: Expected no of users: 2 Found: ${USER_COUNT}" + exit 1 +fi +./mc admin user svcacct list myminio "uid=bobfisher,ou=people,ou=hwengg,dc=min,dc=io" --json +SVCACCT_COUNT_1=$(./mc admin user svcacct list myminio "uid=bobfisher,ou=people,ou=hwengg,dc=min,dc=io" --json | jq '.accessKey' | wc -l) +if [ "${SVCACCT_COUNT_1}" -ne 2 ]; then + echo "BUG: Expected svcacct count for 'uid=bobfisher,ou=people,ou=hwengg,dc=min,dc=io': 2. Found: ${SVCACCT_COUNT_1}" + exit 1 +fi +./mc admin user svcacct list myminio "uid=dillon,ou=people,ou=swengg,dc=min,dc=io" --json +SVCACCT_COUNT_2=$(./mc admin user svcacct list myminio "uid=dillon,ou=people,ou=swengg,dc=min,dc=io" --json | jq '.accessKey' | wc -l) +if [ "${SVCACCT_COUNT_2}" -ne 2 ]; then + echo "BUG: Expected svcacct count for 'uid=dillon,ou=people,ou=swengg,dc=min,dc=io': 2. 
Found: ${SVCACCT_COUNT_2}" + exit 1 +fi + +# Kill MinIO and LDAP to start afresh with missing groups/DN +pkill minio +docker rm -f $(docker ps -aq) +rm -rf /tmp/ldap{1..4} + +# Deploy the LDAP config witg missing groups/DN +echo "Copying docs/distributed/samples/bootstrap-partial.ldif => minio-iam-testing/ldap/50-bootstrap.ldif" +cp docs/distributed/samples/bootstrap-partial.ldif minio-iam-testing/ldap/50-bootstrap.ldif || exit 1 +cd ./minio-iam-testing +make docker-images +make docker-run +cd - + +(minio server --address ":24000" --console-address :10000 http://localhost:24000/tmp/ldap1{1...4} 2>&1 >/dev/null) & +sleep 30 +./mc ready myminio1 + +./mc idp ldap add myminio1 server_addr=localhost:389 server_insecure=on \ + lookup_bind_dn=cn=admin,dc=min,dc=io lookup_bind_password=admin \ + user_dn_search_base_dn=dc=min,dc=io user_dn_search_filter="(uid=%s)" \ + group_search_base_dn=ou=hwengg,dc=min,dc=io group_search_filter="(&(objectclass=groupOfNames)(member=%d))" + +./mc admin service restart myminio1 --json +./mc ready myminio1 +./mc admin cluster iam import myminio1 docs/distributed/samples/myminio-iam-info.zip +sleep 10 + +# Verify the list of users and service accounts from the import +./mc admin user list myminio1 +USER_COUNT=$(./mc admin user list myminio1 | wc -l) +if [ "${USER_COUNT}" -ne 1 ]; then + echo "BUG: Expected no of users: 1 Found: ${USER_COUNT}" + exit 1 +fi +./mc admin user svcacct list myminio1 "uid=bobfisher,ou=people,ou=hwengg,dc=min,dc=io" --json +SVCACCT_COUNT_1=$(./mc admin user svcacct list myminio1 "uid=bobfisher,ou=people,ou=hwengg,dc=min,dc=io" --json | jq '.accessKey' | wc -l) +if [ "${SVCACCT_COUNT_1}" -ne 2 ]; then + echo "BUG: Expected svcacct count for 'uid=bobfisher,ou=people,ou=hwengg,dc=min,dc=io': 2. Found: ${SVCACCT_COUNT_1}" + exit 1 +fi +./mc admin user svcacct list myminio1 "uid=dillon,ou=people,ou=swengg,dc=min,dc=io" --json +SVCACCT_COUNT_2=$(./mc admin user svcacct list myminio1 "uid=dillon,ou=people,ou=swengg,dc=min,dc=io" --json | jq '.accessKey' | wc -l) +if [ "${SVCACCT_COUNT_2}" -ne 0 ]; then + echo "BUG: Expected svcacct count for 'uid=dillon,ou=people,ou=swengg,dc=min,dc=io': 0. Found: ${SVCACCT_COUNT_2}" + exit 1 +fi + +# Finally kill running processes +pkill minio +docker rm -f $(docker ps -aq) diff --git a/docs/distributed/iam-import-with-openid.sh b/docs/distributed/iam-import-with-openid.sh new file mode 100755 index 0000000000000..ca703aa5e3cab --- /dev/null +++ b/docs/distributed/iam-import-with-openid.sh @@ -0,0 +1,82 @@ +#!/bin/bash + +if [ -n "$TEST_DEBUG" ]; then + set -x +fi + +pkill minio +docker rm -f $(docker ps -aq) +rm -rf /tmp/openid{1..4} + +export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:22000" +# The service account used below is already present in iam configuration getting imported +export MC_HOST_myminio1="http://dillon-service-2:dillon-service-2@localhost:22000" + +# Start MinIO instance +export CI=true + +if [ ! 
-f ./mc ]; then + wget --quiet -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc && + chmod +x mc +fi + +mc -v + +# Start openid server +( + cd ./minio-iam-testing + make docker-images + make docker-run + cd - +) + +(minio server --address :22000 --console-address :10000 http://localhost:22000/tmp/openid{1...4} 2>&1 >/tmp/server.log) & +./mc ready myminio +./mc mb myminio/test-bucket +./mc cp /etc/hosts myminio/test-bucket + +./mc idp openid add myminio \ + config_url="http://localhost:5556/dex/.well-known/openid-configuration" \ + client_id="minio-client-app" \ + client_secret="minio-client-app-secret" \ + scopes="openid,groups,email,profile" \ + redirect_uri="http://127.0.0.1:10000/oauth_callback" \ + display_name="Login via dex1" \ + role_policy="consoleAdmin" + +./mc admin service restart myminio --json +./mc ready myminio +./mc admin cluster iam import myminio docs/distributed/samples/myminio-iam-info-openid.zip + +# Verify if buckets / objects accessible using service account +echo "Verifying buckets and objects access for the imported service account" + +./mc ls myminio1/ --json +BKT_COUNT=$(./mc ls myminio1/ --json | jq '.key' | wc -l) +if [ "${BKT_COUNT}" -ne 1 ]; then + echo "BUG: Expected no of bucket: 1, Found: ${BKT_COUNT}" + exit 1 +fi + +BKT_NAME=$(./mc ls myminio1/ --json | jq '.key' | sed 's/"//g' | sed 's\/\\g') +if [[ ${BKT_NAME} != "test-bucket" ]]; then + echo "BUG: Expected bucket: test-bucket, Found: ${BKT_NAME}" + exit 1 +fi + +./mc ls myminio1/test-bucket +OBJ_COUNT=$(./mc ls myminio1/test-bucket --json | jq '.key' | wc -l) +if [ "${OBJ_COUNT}" -ne 1 ]; then + echo "BUG: Expected no of objects: 1, Found: ${OBJ_COUNT}" + exit 1 +fi + +OBJ_NAME=$(./mc ls myminio1/test-bucket --json | jq '.key' | sed 's/"//g') +if [[ ${OBJ_NAME} != "hosts" ]]; then + echo "BUG: Expected object: hosts, Found: ${BKT_NAME}" + exit 1 +fi + +# Finally kill running processes +pkill minio +docker rm -f $(docker ps -aq) diff --git a/docs/distributed/samples/bootstrap-complete.ldif b/docs/distributed/samples/bootstrap-complete.ldif new file mode 100644 index 0000000000000..6f4f457109979 --- /dev/null +++ b/docs/distributed/samples/bootstrap-complete.ldif @@ -0,0 +1,123 @@ +# Create hardware engg org unit +dn: ou=hwengg,dc=min,dc=io +objectClass: organizationalUnit +ou: hwengg + +# Create people sub-org +dn: ou=people,ou=hwengg,dc=min,dc=io +objectClass: organizationalUnit +ou: people + +# Create Alice, Bob and Cody in hwengg +dn: uid=alice1,ou=people,ou=hwengg,dc=min,dc=io +objectClass: inetOrgPerson +cn: Alice Smith +sn: Smith +uid: alice1 +mail: alice@example.io +userPassword: {SSHA}Yeh2/IV/q/HjG2yzN3YdE9CAF3EJFCLu + +dn: uid=bobfisher,ou=people,ou=hwengg,dc=min,dc=io +objectClass: inetOrgPerson +cn: Robert Fisher +sn: Fisher +uid: bobfisher +mail: bob@example.io +userPassword: {SSHA}LktfbhK5oXSdDWCNzauJ9JA+Poxinl3y + +dn: uid=cody3,ou=people,ou=hwengg,dc=min,dc=io +objectClass: inetOrgPerson +cn: Cody Thomas +sn: Thomas +uid: cody3 +mail: cody@example.io +userPassword: {SSHA}H8B0gaOd4bRklK3fXj9ltHvJXWQFXW5Q + +# Create groups ou for hwengg +dn: ou=groups,ou=hwengg,dc=min,dc=io +objectclass: organizationalUnit +ou: groups +description: groups branch + +# Create project groups + +dn: cn=projectx,ou=groups,ou=hwengg,dc=min,dc=io +objectclass: groupofnames +cn: projectx +description: Project X group members +member: uid=alice1,ou=people,ou=hwengg,dc=min,dc=io +member: uid=bobfisher,ou=people,ou=hwengg,dc=min,dc=io + +dn: cn=projecty,ou=groups,ou=hwengg,dc=min,dc=io +objectclass: 
groupofnames +cn: projecty +description: Project Y group members +member: uid=bobfisher,ou=people,ou=hwengg,dc=min,dc=io +member: uid=cody3,ou=people,ou=hwengg,dc=min,dc=io + +# Create software engg org unit +dn: ou=swengg,dc=min,dc=io +objectClass: organizationalUnit +ou: swengg + +# Create people sub-org +dn: ou=people,ou=swengg,dc=min,dc=io +objectClass: organizationalUnit +ou: people + +# Create Dillon, Elizabeth and Fahim in swengg +dn: uid=dillon,ou=people,ou=swengg,dc=min,dc=io +objectClass: inetOrgPerson +cn: Dillon Harper +sn: Harper +uid: dillon +mail: dillon@example.io +userPassword: {SSHA}UH+LmoEhWWW6s9rjgdpqHPI0qCMouY8+ + +dn: uid=liza,ou=people,ou=swengg,dc=min,dc=io +objectClass: inetOrgPerson +cn: Elizabeth Jones +sn: Jones +uid: liza +mail: ejones@example.io +userPassword: {SSHA}feVkKkafHtsu2Io7n0tQP4Cnh8/Oy1PK + +dn: uid=fahim,ou=people,ou=swengg,dc=min,dc=io +objectClass: inetOrgPerson +cn: Fahim Ahmed +sn: Ahmed +uid: fahim +mail: fahmed@example.io +userPassword: {SSHA}lRNH+PHooRaruiEb+CBEA21EZLMkAmcc + +# Add a user with special chars. The password = example here. +dn: uid=Пользователь,OU=people,OU=swengg,DC=min,DC=io +objectClass: inetOrgPerson +cn: Special Charsman +sn: Charsman +uid: Пользователь +mail: scharsman@example.io +userPassword: {SSHA}XQSZqLPvYgm30wR7pk67a1GW+q+DDvSj + +# Creates groups ou for swengg +dn: ou=groups,ou=swengg,dc=min,dc=io +objectclass: organizationalUnit +ou: groups +description: groups branch + +# Create project groups + +dn: cn=projecta,ou=groups,ou=swengg,dc=min,dc=io +objectclass: groupofnames +cn: projecta +description: Project A group members +member: uid=dillon,ou=people,ou=swengg,dc=min,dc=io + +dn: cn=projectb,ou=groups,ou=swengg,dc=min,dc=io +objectclass: groupofnames +cn: projectb +description: Project B group members +member: uid=dillon,ou=people,ou=swengg,dc=min,dc=io +member: uid=liza,ou=people,ou=swengg,dc=min,dc=io +member: uid=fahim,ou=people,ou=swengg,dc=min,dc=io +member: uid=Пользователь,OU=people,OU=swengg,DC=min,DC=io diff --git a/docs/distributed/samples/bootstrap-partial.ldif b/docs/distributed/samples/bootstrap-partial.ldif new file mode 100644 index 0000000000000..02cbb83213fa6 --- /dev/null +++ b/docs/distributed/samples/bootstrap-partial.ldif @@ -0,0 +1,56 @@ +# Create hardware engg org unit +dn: ou=hwengg,dc=min,dc=io +objectClass: organizationalUnit +ou: hwengg + +# Create people sub-org +dn: ou=people,ou=hwengg,dc=min,dc=io +objectClass: organizationalUnit +ou: people + +# Create Alice, Bob and Cody in hwengg +dn: uid=alice1,ou=people,ou=hwengg,dc=min,dc=io +objectClass: inetOrgPerson +cn: Alice Smith +sn: Smith +uid: alice1 +mail: alice@example.io +userPassword: {SSHA}Yeh2/IV/q/HjG2yzN3YdE9CAF3EJFCLu + +dn: uid=bobfisher,ou=people,ou=hwengg,dc=min,dc=io +objectClass: inetOrgPerson +cn: Robert Fisher +sn: Fisher +uid: bobfisher +mail: bob@example.io +userPassword: {SSHA}LktfbhK5oXSdDWCNzauJ9JA+Poxinl3y + +dn: uid=cody3,ou=people,ou=hwengg,dc=min,dc=io +objectClass: inetOrgPerson +cn: Cody Thomas +sn: Thomas +uid: cody3 +mail: cody@example.io +userPassword: {SSHA}H8B0gaOd4bRklK3fXj9ltHvJXWQFXW5Q + +# Create groups ou for hwengg +dn: ou=groups,ou=hwengg,dc=min,dc=io +objectclass: organizationalUnit +ou: groups +description: groups branch + +# Create project groups + +dn: cn=projectx,ou=groups,ou=hwengg,dc=min,dc=io +objectclass: groupofnames +cn: projectx +description: Project X group members +member: uid=alice1,ou=people,ou=hwengg,dc=min,dc=io +member: uid=bobfisher,ou=people,ou=hwengg,dc=min,dc=io + +dn: 
cn=projecty,ou=groups,ou=hwengg,dc=min,dc=io +objectclass: groupofnames +cn: projecty +description: Project Y group members +member: uid=bobfisher,ou=people,ou=hwengg,dc=min,dc=io +member: uid=cody3,ou=people,ou=hwengg,dc=min,dc=io diff --git a/docs/distributed/samples/myminio-iam-info-openid.zip b/docs/distributed/samples/myminio-iam-info-openid.zip new file mode 100644 index 0000000000000..aec4ca70e63c6 Binary files /dev/null and b/docs/distributed/samples/myminio-iam-info-openid.zip differ diff --git a/docs/distributed/samples/myminio-iam-info.zip b/docs/distributed/samples/myminio-iam-info.zip new file mode 100644 index 0000000000000..cd1d7ec1de38b Binary files /dev/null and b/docs/distributed/samples/myminio-iam-info.zip differ diff --git a/docs/docker/README.md b/docs/docker/README.md index 0934675793d52..ccc9031f5fafd 100644 --- a/docs/docker/README.md +++ b/docs/docker/README.md @@ -8,7 +8,7 @@ Docker installed on your machine. Download the relevant installer from [here](ht ## Run Standalone MinIO on Docker -*Note*: Standalone MinIO is intended for early development and evaluation. For production clusters, deploy a [Distributed](https://min.io/docs/minio/container/operations/install-deploy-manage/deploy-minio-single-node-multi-drive.html) MinIO deployment. +*Note*: Standalone MinIO is intended for early development and evaluation. For production clusters, deploy a [Distributed](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-as-a-container.html) MinIO deployment. MinIO needs a persistent volume to store configuration and application data. For testing purposes, you can launch MinIO by simply passing a directory (`/data` in the example below). This directory gets created in the container filesystem at the time of container start. But all the data is lost after container exits. @@ -57,7 +57,7 @@ docker run \ We recommend kubernetes based deployment for production level deployment . -See the [Kubernetes documentation](https://min.io/docs/minio/kubernetes/upstream/index.html) for more information. +See the [Kubernetes documentation](https://docs.min.io/community/minio-object-store/operations/deployments/kubernetes.html) for more information. ## MinIO Docker Tips @@ -211,5 +211,5 @@ docker stats ## Explore Further -* [Distributed MinIO Quickstart Guide](https://min.io/docs/minio/container/operations/install-deploy-manage/deploy-minio-single-node-multi-drive.html) -* [MinIO Erasure Code QuickStart Guide](https://min.io/docs/minio/container/operations/concepts/erasure-coding.html) +* [MinIO in a Container Installation Guide](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-as-a-container.html) +* [MinIO Erasure Code QuickStart Guide](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html) diff --git a/docs/erasure/README.md b/docs/erasure/README.md index cbee49ac1e0a8..48b3cb6bbfe1b 100644 --- a/docs/erasure/README.md +++ b/docs/erasure/README.md @@ -26,7 +26,7 @@ MinIO's erasure coded backend uses high speed [HighwayHash](https://github.com/m MinIO divides the drives you provide into erasure-coding sets of *2 to 16* drives. Therefore, the number of drives you present must be a multiple of one of these numbers. Each object is written to a single erasure-coding set. -Minio uses the largest possible EC set size which divides into the number of drives given. 
For example, *18 drives* are configured as *2 sets of 9 drives*, and *24 drives* are configured as *2 sets of 12 drives*. This is true for scenarios when running MinIO as a standalone erasure coded deployment. In [distributed setup however node (affinity) based](https://min.io/docs/minio/linux/operations/install-deploy-manage/deploy-minio-multi-node-multi-drive.html) erasure stripe sizes are chosen. +Minio uses the largest possible EC set size which divides into the number of drives given. For example, *18 drives* are configured as *2 sets of 9 drives*, and *24 drives* are configured as *2 sets of 12 drives*. This is true for scenarios when running MinIO as a standalone erasure coded deployment. In [distributed setup however node (affinity) based](https://docs.min.io/community/minio-object-store/operations/install-deploy-manage/deploy-minio-multi-node-multi-drive.html) erasure stripe sizes are chosen. The drives should all be of approximately the same size. @@ -34,7 +34,7 @@ The drives should all be of approximately the same size. ### 1. Prerequisites -Install MinIO - [MinIO Quickstart Guide](https://min.io/docs/minio/linux/index.html#quickstart-for-linux) +Install MinIO - [MinIO Quickstart Guide](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-on-redhat-linux.html) ### 2. Run MinIO Server with Erasure Code diff --git a/docs/erasure/storage-class/README.md b/docs/erasure/storage-class/README.md index 3842d4a01749f..15190a65aaeb6 100644 --- a/docs/erasure/storage-class/README.md +++ b/docs/erasure/storage-class/README.md @@ -2,7 +2,7 @@ MinIO server supports storage class in erasure coding mode. This allows configurable data and parity drives per object. -This page is intended as a summary of MinIO Erasure Coding. For a more complete explanation, see . +This page is intended as a summary of MinIO Erasure Coding. For a more complete explanation, see . ## Overview @@ -53,7 +53,7 @@ The default value for the `STANDARD` storage class depends on the number of volu | 6-7 | EC:3 | | 8 or more | EC:4 | -For more complete documentation on Erasure Set sizing, see the [MinIO Documentation on Erasure Sets](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html#erasure-sets). +For more complete documentation on Erasure Set sizing, see the [MinIO Documentation on Erasure Sets](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html#erasure-sets). ### Allowed values for REDUCED_REDUNDANCY storage class diff --git a/docs/extensions/s3zip/README.md b/docs/extensions/s3zip/README.md index 88eb765e1f932..7db6bdec95f6d 100644 --- a/docs/extensions/s3zip/README.md +++ b/docs/extensions/s3zip/README.md @@ -39,3 +39,7 @@ All properties except the file size are tied to the zip file. This means that mo - `ListObjectsV2` - If the ZIP file directory isn't located within the last 100MB the file will not be parsed. - A maximum of 100M inside a single zip is allowed. However, a reasonable limit of 100,000 files inside a single ZIP archive is recommended for best performance and memory usage trade-off. 
+ +## Content-Type + +The Content-Type of the response will be determined by the extension and the following: https://pkg.go.dev/mime#TypeByExtension \ No newline at end of file diff --git a/docs/federation/lookup/README.md b/docs/federation/lookup/README.md index 5ab514fab3fb1..b2cebd63b5302 100644 --- a/docs/federation/lookup/README.md +++ b/docs/federation/lookup/README.md @@ -6,7 +6,7 @@ This document explains how to configure MinIO with `Bucket lookup from DNS` styl ### 1. Prerequisites -Install MinIO - [MinIO Quickstart Guide](https://min.io/docs/minio/linux/index.html#quickstart-for-linux). +Install MinIO - [MinIO Quickstart Guide](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-on-redhat-linux.html). ### 2. Run MinIO in federated mode @@ -76,11 +76,11 @@ it is randomized which cluster might provision the bucket. ### 3. Test your setup -To test this setup, access the MinIO server via browser or [`mc`](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart). You’ll see the uploaded files are accessible from the all the MinIO endpoints. +To test this setup, access the MinIO server via browser or [`mc`](https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart). You’ll see the uploaded files are accessible from all the MinIO endpoints. ## Explore Further -- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html) -- [Use `aws-cli` with MinIO Server](https://min.io/docs/minio/linux/integrations/aws-cli-with-minio.html) -- [Use `minio-go` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/go/minio-go.html) -- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html) +- [Use `mc` with MinIO Server](https://docs.min.io/community/minio-object-store/reference/minio-mc.html) +- [Use `aws-cli` with MinIO Server](https://docs.min.io/community/minio-object-store/integrations/aws-cli-with-minio.html) +- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/community/minio-object-store/developers/go/minio-go.html) +- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html) diff --git a/docs/ftp/README.md b/docs/ftp/README.md index de7b07eec16ee..1fb6e5795fbc9 100644 --- a/docs/ftp/README.md +++ b/docs/ftp/README.md @@ -167,3 +167,91 @@ Unlike SFTP server, FTP server is insecure by default. To operate under TLS mode > certs from the server certificate chain, this is mainly to add simplicity of setup. However if you wish to terminate > TLS certificates via a different domain for your FTP servers you may choose the above command line options. + +### Custom Algorithms (SFTP) + +Custom algorithms can be specified via command line parameters. +Algorithms are comma separated. +Note that valid values do not in all cases represent default values. + +`--sftp=pub-key-algos=...` specifies the supported client public key +authentication algorithms. Note that this doesn't include certificate types +since those use the underlying algorithm. This list is sent to the client if +it supports the server-sig-algs extension. Order is irrelevant. + +Valid values: +``` +ssh-ed25519 +sk-ssh-ed25519@openssh.com +sk-ecdsa-sha2-nistp256@openssh.com +ecdsa-sha2-nistp256 +ecdsa-sha2-nistp384 +ecdsa-sha2-nistp521 +rsa-sha2-256 +rsa-sha2-512 +ssh-rsa +ssh-dss +``` + +`--sftp=kex-algos=...` specifies the supported key-exchange algorithms in preference order.
+ +Valid values: + +``` +curve25519-sha256 +curve25519-sha256@libssh.org +ecdh-sha2-nistp256 +ecdh-sha2-nistp384 +ecdh-sha2-nistp521 +diffie-hellman-group14-sha256 +diffie-hellman-group16-sha512 +diffie-hellman-group14-sha1 +diffie-hellman-group1-sha1 +``` + +`--sftp=cipher-algos=...` specifies the allowed cipher algorithms. +If unspecified then a sensible default is used. + +Valid values: +``` +aes128-ctr +aes192-ctr +aes256-ctr +aes128-gcm@openssh.com +aes256-gcm@openssh.com +chacha20-poly1305@openssh.com +arcfour256 +arcfour128 +arcfour +aes128-cbc +3des-cbc +``` + +`--sftp=mac-algos=...` specifies a default set of MAC algorithms in preference order. +This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed because they have +reached the end of their useful life. + +Valid values: + +``` +hmac-sha2-256-etm@openssh.com +hmac-sha2-512-etm@openssh.com +hmac-sha2-256 +hmac-sha2-512 +hmac-sha1 +hmac-sha1-96 +``` + +### Certificate-based authentication + +`--sftp=trusted-user-ca-key=...` specifies a file containing the public key of a certificate authority that is trusted +to sign user certificates for authentication. + +Implementation is identical to the "TrustedUserCAKeys" setting in the OpenSSH server, with the exception that only one CA +key can be defined. + +If a certificate is presented for authentication and its signing CA key is in this file, then it may be +used for authentication for any user listed in the certificate's principals list. + +Note that certificates that lack a list of principals will not be permitted for authentication using trusted-user-ca-key. +For more details on certificates, see the CERTIFICATES section in ssh-keygen(1). diff --git a/docs/hotfixes.md b/docs/hotfixes.md deleted file mode 100644 index 96f4508fb58ec..0000000000000 --- a/docs/hotfixes.md +++ /dev/null @@ -1,137 +0,0 @@ -# Introduction - -This document outlines how to make hotfix binaries and containers for MinIO?. The main focus in this article is about how to backport patches to a specific branch and finally building binaries/containers. - -## Pre-pre requisite - -- A working knowledge of MinIO codebase and its various components. -- A working knowledge of AWS S3 API behaviors and corner cases. - -## Pre-requisite for backporting any fixes - -Fixes that are allowed a backport must satisfy any of the following criteria's: - -- A fix must not be a feature, for example. - -``` -commit faf013ec84051b92ae0f420a658b8d35bb7bb000 -Author: Klaus Post -Date: Thu Nov 18 12:15:22 2021 -0800 - - Improve performance on multiple versions (#13573) -``` - -- A fix must be a valid fix that was reproduced and seen in a customer environment, for example. - -``` -commit 886262e58af77ebc7c836ef587c08544e9a0c271 -Author: Harshavardhana -Date: Wed Nov 17 15:49:12 2021 -0800 - - heal legacy objects when versioning is enabled after upgrade (#13671) -``` - -- A security fix must be backported if a customer is affected by it, we have a mechanism in SUBNET to send out notifications to affected customers in such situations, this is a mandatory requirement. - -``` -commit 99bf4d0c429f04dbd013ba98840d07b759ae1702 (tag: RELEASE.2019-06-15T23-07-18Z) -Author: Harshavardhana -Date: Sat Jun 15 11:27:17 2019 -0700 - - [security] Match ${aws:username} exactly instead of prefix match (#7791) - - This PR fixes a security issue where an IAM user based - on his policy is granted more privileges than restricted - by the users IAM policy.
- - This is due to an issue of prefix based Matcher() function - which was incorrectly matching prefix based on resource - prefixes instead of exact match. -``` - -- There is always a possibility of a fix that is new, it is advised that the developer must make sure that the fix is sent upstream, reviewed and merged to the master branch. - -## Creating a hotfix branch - -Customers in MinIO are allowed LTS on any release they choose to standardize. Production setups seldom change and require maintenance. Hotfix branches are such maintenance branches that allow customers to operate a production cluster without drastic changes to their deployment. - -## Backporting a fix - -Developer is advised to clone the MinIO source and checkout the MinIO release tag customer is currently on. - -``` -λ git checkout RELEASE.2021-04-22T15-44-28Z -``` - -Create a branch and proceed to push the branch **upstream** -> (upstream here points to git@github.com:minio/minio.git) - -``` -λ git branch -m RELEASE.2021-04-22T15-44-28Z.hotfix -λ git push -u upstream RELEASE.2021-04-22T15-44-28Z.hotfix -``` - -Pick the relevant commit-id say for example commit-id from the master branch - -``` -commit 4f3317effea38c203c358af9cb5ce3c0e4173976 -Author: Klaus Post -Date: Mon Nov 8 08:41:27 2021 -0800 - - Close stream on panic (#13605) - - Always close streamHTTPResponse on panic on main thread to avoid - write/flush after response handler has returned. -``` - -``` -λ git cherry-pick 4f3317effea38c203c358af9cb5ce3c0e4173976 -``` - -*A self contained **patch** usually applies fine on the hotfix branch during backports as long it is self contained. There are situations however this may lead to conflicts and the patch will not cleanly apply. Conflicts might be trivial which can be resolved easily, when conflicts seem to be non-trivial or touches the part of the code-base the developer is not confident - to get additional clarity reach out to #hack on MinIOHQ slack channel. Hasty changes must be avoided, minor fixes and logs may be added to hotfix branches but this should not be followed as practice.* - -Once the **patch** is successfully applied, developer must run tests to validate the fix that was backported by running following tests, locally. - -Unit tests - -``` -λ make test -``` - -Verify different type of MinIO deployments work - -``` -λ make verify -``` - -Verify if healing and replacing a drive works - -``` -λ make verify-healing -``` - -At this point in time the backport is ready to be submitted as a pull request to the relevant branch. A pull request is recommended to ensure [mint](http://github.com/minio/mint) tests are validated. Pull request also ensures code-reviews for the backports in case of any unforeseen regressions. 
- -### Building a hotfix binary and container - -To add a hotfix tag to the binary version and embed the relevant `commit-id` following build helpers are available - -#### Builds the hotfix binary and uploads to https;//dl.min.io - -``` -λ CRED_DIR=/media/builder/minio make hotfix-push -``` - -#### Builds the hotfix container and pushes to docker.io/minio/minio - -``` -λ CRED_DIR=/media/builder/minio make docker-hotfix-push -``` - -#### Builds the hotfix container and pushes to registry.min.dev//minio - -``` -λ REPO="registry.min.dev/" CRED_DIR=/media/builder/minio make docker-hotfix-push -``` - -Once this has been provided to the customer relevant binary will be uploaded from our *release server* securely, directly to diff --git a/docs/iam/identity-manager-plugin.go b/docs/iam/identity-manager-plugin.go index cd8d33bc8aa9e..05d472617393c 100644 --- a/docs/iam/identity-manager-plugin.go +++ b/docs/iam/identity-manager-plugin.go @@ -81,6 +81,6 @@ func mainHandler(w http.ResponseWriter, r *http.Request) { func main() { http.HandleFunc("/", mainHandler) - log.Print("Listing on :8081") + log.Print("Listening on :8081") log.Fatal(http.ListenAndServe(":8081", nil)) } diff --git a/docs/iam/policies/deny-non-sse-kms-objects.json b/docs/iam/policies/deny-non-sse-kms-objects.json new file mode 100644 index 0000000000000..003d03a786e58 --- /dev/null +++ b/docs/iam/policies/deny-non-sse-kms-objects.json @@ -0,0 +1,17 @@ +{ + "Version":"2012-10-17", + "Id":"PutObjectPolicy", + "Statement":[{ + "Sid":"DenyObjectsThatAreNotSSEKMS", + "Effect":"Deny", + "Principal":"*", + "Action":"s3:PutObject", + "Resource":"arn:aws:s3:::multi-key-poc/*", + "Condition":{ + "Null":{ + "s3:x-amz-server-side-encryption-aws-kms-key-id":"true" + } + } + } + ] +} diff --git a/docs/iam/policies/deny-objects-with-invalid-sse-kms-key-id.json b/docs/iam/policies/deny-objects-with-invalid-sse-kms-key-id.json new file mode 100644 index 0000000000000..e872c78d893ee --- /dev/null +++ b/docs/iam/policies/deny-objects-with-invalid-sse-kms-key-id.json @@ -0,0 +1,17 @@ +{ + "Version":"2012-10-17", + "Id":"PutObjectPolicy1", + "Statement":[{ + "Sid":"DenyObjectsWithInvalidSSEKMS", + "Effect":"Deny", + "Principal":"*", + "Action":"s3:PutObject", + "Resource":"arn:aws:s3:::multi-key-poc/*", + "Condition":{ + "StringNotEquals":{ + "s3:x-amz-server-side-encryption-aws-kms-key-id":"minio-default-key" + } + } + } + ] +} diff --git a/docs/iam/policies/pbac-tests.sh b/docs/iam/policies/pbac-tests.sh new file mode 100755 index 0000000000000..607abc3ebbfc0 --- /dev/null +++ b/docs/iam/policies/pbac-tests.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +if [ -n "$TEST_DEBUG" ]; then + set -x +fi + +pkill minio +pkill kes +rm -rf /tmp/xl + +go install -v github.com/minio/mc@master +cp -a $(go env GOPATH)/bin/mc ./mc + +if [ ! -f ./kes ]; then + wget --quiet -O kes https://github.com/minio/kes/releases/latest/download/kes-linux-amd64 && + chmod +x kes +fi + +if ! openssl version &>/dev/null; then + apt install openssl || sudo apt install openssl +fi + +# Start KES Server +(./kes server --dev 2>&1 >kes-server.log) & +kes_pid=$! +sleep 5s +API_KEY=$(grep "API Key" /dev/null 1>public.crt) + +export CI=true +export MINIO_KMS_KES_ENDPOINT=https://127.0.0.1:7373 +export MINIO_KMS_KES_API_KEY="${API_KEY}" +export MINIO_KMS_KES_KEY_NAME=minio-default-key +export MINIO_KMS_KES_CAPATH=public.crt +export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9000/" + +(minio server http://localhost:9000/tmp/xl/{1...10}/disk{0...1} 2>&1 >/dev/null) & +pid=$!
+ +mc ready myminio + +mc admin user add myminio/ minio123 minio123 + +mc admin policy create myminio/ deny-non-sse-kms-pol ./docs/iam/policies/deny-non-sse-kms-objects.json +mc admin policy create myminio/ deny-invalid-sse-kms-pol ./docs/iam/policies/deny-objects-with-invalid-sse-kms-key-id.json + +mc admin policy attach myminio deny-non-sse-kms-pol --user minio123 +mc admin policy attach myminio deny-invalid-sse-kms-pol --user minio123 +mc admin policy attach myminio consoleAdmin --user minio123 + +mc mb -l myminio/test-bucket +mc mb -l myminio/multi-key-poc + +export MC_HOST_myminio1="http://minio123:minio123@localhost:9000/" + +mc cp /etc/issue myminio1/test-bucket +ret=$? +if [ $ret -ne 0 ]; then + echo "BUG: PutObject to bucket: test-bucket should succeed. Failed" + exit 1 +fi + +mc cp /etc/issue myminio1/multi-key-poc | grep -q "Insufficient permissions to access this path" +ret=$? +if [ $ret -eq 0 ]; then + echo "BUG: PutObject to bucket: multi-key-poc without sse-kms should fail. Succeeded" + exit 1 +fi + +mc cp /etc/hosts myminio1/multi-key-poc/hosts --enc-kms "myminio1/multi-key-poc/hosts=minio-default-key" +ret=$? +if [ $ret -ne 0 ]; then + echo "BUG: PutObject to bucket: multi-key-poc with valid sse-kms should succeed. Failed" + exit 1 +fi + +mc cp /etc/issue myminio1/multi-key-poc/issue --enc-kms "myminio1/multi-key-poc/issue=minio-default-key-xxx" | grep "Insufficient permissions to access this path" +ret=$? +if [ $ret -eq 0 ]; then + echo "BUG: PutObject to bucket: multi-key-poc with invalid sse-kms should fail. Succeeded" + exit 1 +fi + +kill $pid +kill $kes_pid diff --git a/docs/integrations/veeam/README.md b/docs/integrations/veeam/README.md index 830130e86400d..c8c7556d1ed9f 100644 --- a/docs/integrations/veeam/README.md +++ b/docs/integrations/veeam/README.md @@ -5,10 +5,10 @@ When using Veeam Backup and Replication, you can use S3 compatible object storag ## Prerequisites - One or both of Veeam Backup and Replication with support for S3 compatible object store (e.g. 9.5.4) and Veeam Backup for Office365 (VBO) -- MinIO object storage set up per -- Veeam requires TLS connections to the object storage. This can be configured per +- MinIO object storage set up per +- Veeam requires TLS connections to the object storage. This can be configured per - The S3 bucket, Access Key and Secret Key have to be created before and outside of Veeam. -- Configure the minio client for the Veeam MinIO endpoint - +- Configure the minio client for the Veeam MinIO endpoint - ## Setting up an S3 compatible object store for Veeam Backup and Replication @@ -26,7 +26,7 @@ mc mb myminio/veeambackup mc mb -l myminio/veeambackup ``` -> Object locking requires erasure coding enabled on the minio server. For more information see . +> Object locking requires erasure coding enabled on the minio server. For more information see . ### Add MinIO as an object store for Veeam diff --git a/docs/kms/README.md b/docs/kms/README.md index bdd124962e902..86d9d8c0556d6 100644 --- a/docs/kms/README.md +++ b/docs/kms/README.md @@ -1,10 +1,10 @@ # KMS Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) -MinIO uses a key-management-system (KMS) to support SSE-S3. 
If a client requests SSE-S3, or auto-encryption is enabled, the MinIO server encrypts each object with a unique object key which is protected by a master key managed by the KMS. ## Quick Start -MinIO supports multiple KMS implementations via our [KES](https://github.com/minio/kes#kes) project. We run a KES instance at `https://play.min.io:7373` for you to experiment and quickly get started. To run MinIO with a KMS just fetch the root identity, set the following environment variables and then start your MinIO server. If you haven't installed MinIO, yet, then follow the MinIO [install instructions](https://min.io/docs/minio/linux/index.html#quickstart-for-linux) first. +MinIO supports multiple KMS implementations via our [KES](https://github.com/minio/kes#kes) project. We run a KES instance at `https://play.min.io:7373` for you to experiment and quickly get started. To run MinIO with a KMS just fetch the root identity, set the following environment variables and then start your MinIO server. If you haven't installed MinIO, yet, then follow the MinIO [install instructions](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-on-redhat-linux.html) first. ### 1. Fetch the root identity @@ -67,9 +67,9 @@ The MinIO-KES configuration is always the same - regardless of the underlying KM ### Further references -- [Run MinIO with TLS / HTTPS](https://min.io/docs/minio/linux/operations/network-encryption.html) +- [Run MinIO with TLS / HTTPS](https://docs.min.io/community/minio-object-store/operations/network-encryption.html) - [Tweak the KES server configuration](https://github.com/minio/kes/wiki/Configuration) -- [Run a load balancer infront of KES](https://github.com/minio/kes/wiki/TLS-Proxy) +- [Run a load balancer in front of KES](https://github.com/minio/kes/wiki/TLS-Proxy) - [Understand the KES server concepts](https://github.com/minio/kes/wiki/Concepts) ## Auto Encryption @@ -137,7 +137,7 @@ Certificates are no secrets and sent in plaintext as part of the TLS handshake. ## Explore Further -- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html) -- [Use `aws-cli` with MinIO Server](https://min.io/docs/minio/linux/integrations/aws-cli-with-minio.html) -- [Use `minio-go` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/go/minio-go.html) -- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html) +- [Use `mc` with MinIO Server](https://docs.min.io/community/minio-object-store/reference/minio-mc.html) +- [Use `aws-cli` with MinIO Server](https://docs.min.io/community/minio-object-store/integrations/aws-cli-with-minio.html) +- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/community/minio-object-store/developers/go/minio-go.html) +- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html) diff --git a/docs/lambda/README.md b/docs/lambda/README.md index 4c9187cba7143..7e571e850a479 100644 --- a/docs/lambda/README.md +++ b/docs/lambda/README.md @@ -4,7 +4,7 @@ MinIO's Object Lambda implementation allows for transforming your data to serve MinIO's Object Lambda, enables application developers to process data retrieved from MinIO before returning it to an application. You can register a Lambda Function target on MinIO, once successfully registered it can be used to transform the data for application GET requests on demand. 
-This document focuses on showing a working example on how to use Object Lambda with MinIO, you must have [MinIO deployed in your environment](https://min.io/docs/minio/linux/operations/installation.html) before you can start using external lambda functions. You also must install Python version 3.8 or later for the lambda handlers to work.
+This document focuses on showing a working example of how to use Object Lambda with MinIO; you must have [MinIO deployed in your environment](https://docs.min.io/community/minio-object-store/operations/installation.html) before you can start using external lambda functions. You also must install Python version 3.8 or later for the lambda handlers to work.

## Example Lambda handler

@@ -68,7 +68,7 @@ The field of `getObjectContext` means the input and output details for connectio

- `outputToken` – A token added to the response headers when the Lambda function returns the transformed object. This is used by MinIO to verify the incoming response validity.

-Lets start the lamdba handler.
+Let's start the lambda handler.

```
python lambda_handler.py
```

@@ -134,7 +134,7 @@ mc cp testobject myminio/functionbucket/

## Invoke Lambda transformation via PresignedGET

-Following example shows how you can use [`minio-go` PresignedGetObject](https://min.io/docs/minio/linux/developers/go/API.html#presignedgetobject-ctx-context-context-bucketname-objectname-string-expiry-time-duration-reqparams-url-values-url-url-error)
+The following example shows how you can use [`minio-go` PresignedGetObject](https://docs.min.io/community/minio-object-store/developers/go/API.html#presignedgetobject-ctx-context-context-bucketname-objectname-string-expiry-time-duration-reqparams-url-values-url-url-error)

```go
package main
diff --git a/docs/logging/README.md b/docs/logging/README.md
index 32d6d02c649d4..7abc62a1e14ce 100644
--- a/docs/logging/README.md
+++ b/docs/logging/README.md
@@ -17,7 +17,7 @@ Console target is on always and cannot be disabled.

HTTP target logs to a generic HTTP endpoint in JSON format and is not enabled by default. To enable HTTP target logging you would have to update your MinIO server configuration using `mc admin config set` command.

-Assuming `mc` is already [configured](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart)
+Assuming `mc` is already [configured](https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart)

```
mc admin config get myminio/ logger_webhook
@@ -42,7 +42,7 @@ minio server /mnt/data

## Audit Targets

-Assuming `mc` is already [configured](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart)
+Assuming `mc` is already [configured](https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart)

### Audit HTTP Target

@@ -74,7 +74,7 @@ Setting this environment variable automatically enables audit logging to the HTT

NOTE:

- `timeToFirstByte` and `timeToResponse` will be expressed in Nanoseconds.
-- Additionally in the case of the erasure coded setup `tags.objectErasureMap` provides per object details about
+- Additionally in the case of the erasure coded setup `tags.objectLocation` provides per object details about
  - Pool number the object operation was performed on.
  - Set number the object operation was performed on.
  - The list of drives participating in this operation belong to the set.
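To make the audit target setup above concrete, here is a minimal sketch of enabling an HTTP audit target with `mc admin config set`; the target name `target1` and the endpoint URL are placeholders for your own log collector, and the alias `myminio` is assumed to be configured already.

```sh
# Illustrative sketch only -- the target name and endpoint are placeholders.
mc admin config set myminio/ audit_webhook:target1 endpoint="http://localhost:8080/minio/logs/audit"
# Restart the server so the new audit target takes effect.
mc admin service restart myminio/
```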
@@ -82,8 +82,9 @@ NOTE: ```json { "version": "1", - "deploymentid": "51bcc7b9-a447-4251-a940-d9d0aab9af69", - "time": "2021-10-08T00:46:36.801714978Z", + "deploymentid": "90e81272-45d9-4fe8-9c45-c9a7322bf4b5", + "time": "2024-05-09T07:38:10.449688982Z", + "event": "", "trigger": "incoming", "api": { "name": "PutObject", @@ -91,51 +92,52 @@ NOTE: "object": "hosts", "status": "OK", "statusCode": 200, - "rx": 380, - "tx": 476, - "timeToResponse": "257694819ns" + "rx": 401, + "tx": 0, + "timeToResponse": "13309747ns", + "timeToResponseInNS": "13309747" }, "remotehost": "127.0.0.1", - "requestID": "16ABE7A785E7AC2C", - "userAgent": "MinIO (linux; amd64) minio-go/v7.0.15 mc/DEVELOPMENT.2021-10-06T23-39-34Z", + "requestID": "17CDC1F4D7E69123", + "userAgent": "MinIO (linux; amd64) minio-go/v7.0.70 mc/RELEASE.2024-04-30T17-44-48Z", + "requestPath": "/testbucket/hosts", + "requestHost": "localhost:9000", "requestHeader": { - "Authorization": "AWS4-HMAC-SHA256 Credential=minio/20211008/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length,Signature=4c60a59e5eb3b0a68693c7fee9dbb5a8a509e0717668669194d37bf182fde031", - "Content-Length": "380", + "Accept-Encoding": "zstd,gzip", + "Authorization": "AWS4-HMAC-SHA256 Credential=minioadmin/20240509/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length,Signature=d4d6862e6cc61011a61fa801da71048ece4f32a0562cad6bb88bdda50d7fcb95", + "Content-Length": "401", "Content-Type": "application/octet-stream", - "User-Agent": "MinIO (linux; amd64) minio-go/v7.0.15 mc/DEVELOPMENT.2021-10-06T23-39-34Z", + "User-Agent": "MinIO (linux; amd64) minio-go/v7.0.70 mc/RELEASE.2024-04-30T17-44-48Z", "X-Amz-Content-Sha256": "STREAMING-AWS4-HMAC-SHA256-PAYLOAD", - "X-Amz-Date": "20211008T004636Z", - "X-Amz-Decoded-Content-Length": "207", - "X-Amz-Server-Side-Encryption": "aws:kms" + "X-Amz-Date": "20240509T073810Z", + "X-Amz-Decoded-Content-Length": "228" }, "responseHeader": { "Accept-Ranges": "bytes", "Content-Length": "0", - "ETag": "4939450d1beec11e10a91ee7700bb593", + "ETag": "9fe7a344ef4227d3e53751e9d88ce41e", "Server": "MinIO", "Strict-Transport-Security": "max-age=31536000; includeSubDomains", "Vary": "Origin,Accept-Encoding", - "X-Amz-Request-Id": "16ABE7A785E7AC2C", - "X-Amz-Server-Side-Encryption": "aws:kms", - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": "arn:aws:kms:my-minio-key", + "X-Amz-Id-2": "dd9025bab4ad464b049177c95eb6ebf374d3b3fd1af9251148b658df7ac2e3e8", + "X-Amz-Request-Id": "17CDC1F4D7E69123", "X-Content-Type-Options": "nosniff", - "X-Xss-Protection": "1; mode=block", - "x-amz-version-id": "ac4639f6-c544-4f3f-af1e-b4c0736f67f9" + "X-Xss-Protection": "1; mode=block" }, "tags": { - "objectErasureMap": { - "hosts": { - "poolId": 1, - "setId": 1, - "drives": [ - "/mnt/data1", - "/mnt/data2", - "/mnt/data3", - "/mnt/data4" - ] - } + "objectLocation": { + "name": "hosts", + "poolId": 1, + "setId": 1, + "drives": [ + "/mnt/data1", + "/mnt/data2", + "/mnt/data3", + "/mnt/data4" + ] } - } + }, + "accessKey": "minioadmin" } ``` @@ -176,7 +178,7 @@ On another terminal assuming you have `kafkacat` installed ``` kafkacat -b localhost:29092 -t auditlog -C 
-{"version":"1","deploymentid":"8a1d8091-b874-45df-b9ea-e044eede6ace","time":"2021-07-13T02:00:47.020547414Z","trigger":"incoming","api":{"name":"ListBuckets","status":"OK","statusCode":200,"timeToFirstByte":"261795ns","timeToResponse":"312490ns"},"remotehost":"127.0.0.1","requestID":"16913736591C237F","userAgent":"MinIO (linux; amd64) minio-go/v7.0.11 mc/DEVELOPMENT.2021-07-09T02-22-26Z","requestHeader":{"Authorization":"AWS4-HMAC-SHA256 Credential=minio/20210713/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=7fe65c5467e05ca21de64094688da43f96f34fec82e8955612827079f4600527","User-Agent":"MinIO (linux; amd64) minio-go/v7.0.11 mc/DEVELOPMENT.2021-07-09T02-22-26Z","X-Amz-Content-Sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","X-Amz-Date":"20210713T020047Z"},"responseHeader":{"Accept-Ranges":"bytes","Content-Length":"547","Content-Security-Policy":"block-all-mixed-content","Content-Type":"application/xml","Server":"MinIO","Vary":"Origin,Accept-Encoding","X-Amz-Request-Id":"16913736591C237F","X-Xss-Protection":"1; mode=block"}} +{"version":"1","deploymentid":"90e81272-45d9-4fe8-9c45-c9a7322bf4b5","time":"2024-05-09T07:38:10.449688982Z","event":"","trigger":"incoming","api":{"name":"PutObject","bucket":"testbucket","object":"hosts","status":"OK","statusCode":200,"rx":401,"tx":0,"timeToResponse":"13309747ns","timeToResponseInNS":"13309747"},"remotehost":"127.0.0.1","requestID":"17CDC1F4D7E69123","userAgent":"MinIO (linux; amd64) minio-go/v7.0.70 mc/RELEASE.2024-04-30T17-44-48Z","requestPath":"/testbucket/hosts","requestHost":"localhost:9000","requestHeader":{"Accept-Encoding":"zstd,gzip","Authorization":"AWS4-HMAC-SHA256 Credential=minioadmin/20240509/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length,Signature=d4d6862e6cc61011a61fa801da71048ece4f32a0562cad6bb88bdda50d7fcb95","Content-Length":"401","Content-Type":"application/octet-stream","User-Agent":"MinIO (linux; amd64) minio-go/v7.0.70 mc/RELEASE.2024-04-30T17-44-48Z","X-Amz-Content-Sha256":"STREAMING-AWS4-HMAC-SHA256-PAYLOAD","X-Amz-Date":"20240509T073810Z","X-Amz-Decoded-Content-Length":"228"},"responseHeader":{"Accept-Ranges":"bytes","Content-Length":"0","ETag":"9fe7a344ef4227d3e53751e9d88ce41e","Server":"MinIO","Strict-Transport-Security":"max-age=31536000; includeSubDomains","Vary":"Origin,Accept-Encoding","X-Amz-Id-2":"dd9025bab4ad464b049177c95eb6ebf374d3b3fd1af9251148b658df7ac2e3e8","X-Amz-Request-Id":"17CDC1F4D7E69123","X-Content-Type-Options":"nosniff","X-Xss-Protection":"1; mode=block"},"tags":{"objectLocation":{"name":"hosts","poolId":1,"setId":1,"drives":["/mnt/data1","/mnt/data2","/mnt/data3","/mnt/data4"]}},"accessKey":"minioadmin"} ``` MinIO also honors environment variable for Kafka target Audit logging as shown below, this setting will override the endpoint settings in the MinIO server config. @@ -215,12 +217,12 @@ Setting this environment variable automatically enables audit logging to the Kaf NOTE: - `timeToFirstByte` and `timeToResponse` will be expressed in Nanoseconds. -- Additionally in the case of the erasure coded setup `tags.objectErasureMap` provides per object details about +- Additionally in the case of the erasure coded setup `tags.objectLocation` provides per object details about - Pool number the object operation was performed on. - Set number the object operation was performed on. - The list of drives participating in this operation belong to the set. 
## Explore Further

-- [MinIO Quickstart Guide](https://min.io/docs/minio/linux/index.html#quickstart-for-linux)
-- [Configure MinIO Server with TLS](https://min.io/docs/minio/linux/operations/network-encryption.html)
+- [MinIO Quickstart Guide](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-on-redhat-linux.html)
+- [Configure MinIO Server with TLS](https://docs.min.io/community/minio-object-store/operations/network-encryption.html)
diff --git a/docs/metrics/prometheus/README.md b/docs/metrics/prometheus/README.md
index f4b45d13addb0..3cfb94e41c6e6 100644
--- a/docs/metrics/prometheus/README.md
+++ b/docs/metrics/prometheus/README.md
@@ -9,7 +9,7 @@ This document explains how to setup Prometheus and configure it to scrape data f

## Prerequisites

-To get started with MinIO, refer [MinIO QuickStart Document](https://min.io/docs/minio/linux/index.html#quickstart-for-linux).
+To get started with MinIO, refer to the [MinIO QuickStart Document](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-on-redhat-linux.html).
Follow below steps to get started with MinIO monitoring using Prometheus.

### 1. Download Prometheus

@@ -49,7 +49,7 @@ minio server ~/test

> If MinIO is configured to expose metrics without authentication, you don't need to use `mc` to generate prometheus config. You can skip reading further and move to 3.2 section.

-The Prometheus endpoint in MinIO requires authentication by default. Prometheus supports a bearer token approach to authenticate prometheus scrape requests, override the default Prometheus config with the one generated using mc. To generate a Prometheus config for an alias, use [mc](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart) as follows `mc admin prometheus generate [METRIC-TYPE]`. The valid values for METRIC-TYPE are `cluster`, `node`, `bucket` and `resource` and if not mentioned, it defaults to `cluster`.
+The Prometheus endpoint in MinIO requires authentication by default. Prometheus supports a bearer token approach to authenticate Prometheus scrape requests; override the default Prometheus config with the one generated using `mc`. To generate a Prometheus config for an alias, use [mc](https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart) as follows: `mc admin prometheus generate [METRIC-TYPE]`. The valid values for METRIC-TYPE are `cluster`, `node`, `bucket` and `resource`; if not mentioned, it defaults to `cluster`.

The command will generate the `scrape_configs` section of the prometheus.yml as follows:

@@ -128,6 +128,8 @@ scrape_configs:

##### Node (optional)

Optionally you can also collect per node metrics. This needs to be done on a per server instance.
+The scrape configuration should list all the servers under `targets` so that graphing systems like
+Grafana can visualize the metrics for all the nodes.

```yaml
scrape_configs:
@@ -135,7 +137,7 @@ scrape_configs:
  metrics_path: /minio/v2/metrics/node
  scheme: http
  static_configs:
-    - targets: ['localhost:9000']
+    - targets: ['server1:9000','server2:9000','server3:9000','server4:9000']
```

##### Resource (optional)
diff --git a/docs/metrics/prometheus/alerts.md b/docs/metrics/prometheus/alerts.md
index 8ab9f623141fb..06794d36dd5c5 100644
--- a/docs/metrics/prometheus/alerts.md
+++ b/docs/metrics/prometheus/alerts.md
@@ -108,7 +108,7 @@ To verify the above sample alert follow below steps
      },
      "commonAnnotations": {
        "description": "MinIO instance 127.0.0.1:9000 of job minio-job has lost quorum on pool 0 on set 0 for more than 5 minutes.",
-        "summary": "Instance 127.0.0.1:9000 has lot quorum on pool 0 on set 0"
+        "summary": "Instance 127.0.0.1:9000 has lost quorum on pool 0 on set 0"
      },
      "externalURL": "http://fedora-minio:9093",
      "version": "4",
diff --git a/docs/metrics/prometheus/grafana/README.md b/docs/metrics/prometheus/grafana/README.md
index efb55e7567b56..0b307f2a69531 100644
--- a/docs/metrics/prometheus/grafana/README.md
+++ b/docs/metrics/prometheus/grafana/README.md
@@ -15,12 +15,20 @@ Refer to the dashboard [json file here](https://raw.githubusercontent.com/minio/

![Grafana](https://raw.githubusercontent.com/minio/minio/master/docs/metrics/prometheus/grafana/grafana-minio.png)

-Replication metrics can be viewed in the Grafana dashboard using [json file here](https://raw.githubusercontent.com/minio/minio/master/docs/metrics/prometheus/grafana/replication/minio-replication.json)
+Node level Replication metrics can be viewed in the Grafana dashboard using [json file here](https://raw.githubusercontent.com/minio/minio/master/docs/metrics/prometheus/grafana/replication/minio-replication-node.json)

-![Grafana](https://raw.githubusercontent.com/minio/minio/master/docs/metrics/prometheus/grafana/replication/grafana-replication.png)
+![Grafana](https://raw.githubusercontent.com/minio/minio/master/docs/metrics/prometheus/grafana/replication/grafana-replication-node.png)

-Bucket metrics can be viewed in the Grafana dashboard using [json file here](https://raw.githubusercontent.com/minio/minio/master/docs/metrics/prometheus/grafana/bubcket/minio-bucket.json)
+Cluster level Replication metrics can be viewed in the Grafana dashboard using [json file here](https://raw.githubusercontent.com/minio/minio/master/docs/metrics/prometheus/grafana/replication/minio-replication-cluster.json)
+
+![Grafana](https://raw.githubusercontent.com/minio/minio/master/docs/metrics/prometheus/grafana/replication/grafana-replication-cluster.png)
+
+Bucket metrics can be viewed in the Grafana dashboard using [json file here](https://raw.githubusercontent.com/minio/minio/master/docs/metrics/prometheus/grafana/bucket/minio-bucket.json)

![Grafana](https://raw.githubusercontent.com/minio/minio/master/docs/metrics/prometheus/grafana/bucket/grafana-bucket.png)

+Node metrics can be viewed in the Grafana dashboard using [json file here](https://raw.githubusercontent.com/minio/minio/master/docs/metrics/prometheus/grafana/node/minio-node.json)
+
+![Grafana](https://raw.githubusercontent.com/minio/minio/master/docs/metrics/prometheus/grafana/node/grafana-node.png)
+
Note: All these dashboards are provided as an example and need basis they should be customized as well as new graphs should be added.
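Tying the Prometheus and Grafana pieces together, a minimal sketch of emitting the authenticated scrape configuration that these dashboards expect, assuming an `mc` alias named `myminio`; review the generated `scrape_configs` output (including its bearer token) before merging it into your `prometheus.yml`.

```sh
# Illustrative sketch only -- "myminio" is an assumed mc alias.
mc admin prometheus generate myminio cluster
mc admin prometheus generate myminio node
mc admin prometheus generate myminio bucket
mc admin prometheus generate myminio resource
```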
diff --git a/docs/metrics/prometheus/grafana/bucket/grafana-bucket.png b/docs/metrics/prometheus/grafana/bucket/grafana-bucket.png index da218d2acdbe8..55c2570796b7a 100644 Binary files a/docs/metrics/prometheus/grafana/bucket/grafana-bucket.png and b/docs/metrics/prometheus/grafana/bucket/grafana-bucket.png differ diff --git a/docs/metrics/prometheus/grafana/bucket/minio-bucket.json b/docs/metrics/prometheus/grafana/bucket/minio-bucket.json index a5a0f97eb3884..5ce8579007c41 100644 --- a/docs/metrics/prometheus/grafana/bucket/minio-bucket.json +++ b/docs/metrics/prometheus/grafana/bucket/minio-bucket.json @@ -1,59 +1,4 @@ { - "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__elements": {}, - "__requires": [ - { - "type": "panel", - "id": "bargauge", - "name": "Bar gauge", - "version": "" - }, - { - "type": "panel", - "id": "gauge", - "name": "Gauge", - "version": "" - }, - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "10.0.2" - }, - { - "type": "panel", - "id": "graph", - "name": "Graph (old)", - "version": "" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - }, - { - "type": "panel", - "id": "stat", - "name": "Stat", - "version": "" - }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], "annotations": { "list": [ { @@ -75,7 +20,7 @@ "fiscalYearStartMonth": 0, "gnetId": 15306, "graphTooltip": 0, - "id": null, + "id": 296, "links": [ { "icon": "external link", @@ -120,11 +65,12 @@ "y": 0 }, "id": 52, - "links": [], "options": { "displayMode": "basic", + "maxVizHeight": 300, "minVizHeight": 10, "minVizWidth": 0, + "namePlacement": "auto", "orientation": "horizontal", "reduceOptions": { "calcs": [ @@ -134,10 +80,11 @@ "values": false }, "showUnfilled": false, + "sizing": "auto", "text": {}, "valueMode": "color" }, - "pluginVersion": "10.0.2", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -146,7 +93,7 @@ }, "editorMode": "code", "exemplar": true, - "expr": "sum by (bucket,range) (minio_bucket_objects_size_distribution{job=\"$scrape_jobs\"})", + "expr": "sum by (bucket,range) (minio_bucket_objects_size_distribution{job=~\"$scrape_jobs\"})", "format": "time_series", "instant": false, "interval": "", @@ -190,11 +137,12 @@ "y": 0 }, "id": 53, - "links": [], "options": { "displayMode": "basic", + "maxVizHeight": 300, "minVizHeight": 10, "minVizWidth": 0, + "namePlacement": "auto", "orientation": "horizontal", "reduceOptions": { "calcs": [ @@ -204,10 +152,11 @@ "values": false }, "showUnfilled": false, + "sizing": "auto", "text": {}, "valueMode": "color" }, - "pluginVersion": "10.0.2", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -216,7 +165,7 @@ }, "editorMode": "code", "exemplar": true, - "expr": "sum by (bucket,range) (minio_bucket_objects_version_distribution{job=\"$scrape_jobs\"})", + "expr": "sum by (bucket,range) (minio_bucket_objects_version_distribution{job=~\"$scrape_jobs\"})", "format": "time_series", "instant": false, "interval": "", @@ -260,11 +209,12 @@ "y": 6 }, "id": 59, - "links": [], "options": { "displayMode": "basic", + "maxVizHeight": 300, "minVizHeight": 10, "minVizWidth": 0, + "namePlacement": "auto", "orientation": "horizontal", "reduceOptions": { "calcs": [ @@ -274,10 +224,11 @@ "values": false }, "showUnfilled": false, + "sizing": "auto", "text": {}, "valueMode": "color" }, - 
"pluginVersion": "10.0.2", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -286,7 +237,7 @@ }, "editorMode": "code", "exemplar": true, - "expr": "sum by (bucket,le,api) (minio_bucket_requests_ttfb_seconds_distribution{job=\"$scrape_jobs\"})", + "expr": "sum by (bucket,le,api) (minio_bucket_requests_ttfb_seconds_distribution{job=~\"$scrape_jobs\"})", "format": "time_series", "instant": false, "interval": "", @@ -300,51 +251,116 @@ "type": "bargauge" }, { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 12, "y": 6 }, - "hiddenSeries": false, "id": 60, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -352,91 +368,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket,api) (increase(minio_bucket_requests_4xx_errors_total{job=\"$scrape_jobs\"}[$__rate_interval]))", + "expr": "sum by (bucket,api) (increase(minio_bucket_requests_4xx_errors_total{job=~\"$scrape_jobs\"}[$__rate_interval]))", "interval": "1m", "intervalFactor": 2, "legendFormat": "{{bucket,api}}", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "S3 API Request 4xx Error Rate", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - 
"logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 18, "y": 6 }, - "hiddenSeries": false, "id": 61, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -444,91 +496,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket,api) (increase(minio_bucket_requests_inflight_total{job=\"$scrape_jobs\"}[$__rate_interval]))", + "expr": "sum by (bucket,api) (minio_bucket_requests_inflight_total{job=~\"$scrape_jobs\"})", "interval": "1m", "intervalFactor": 2, "legendFormat": "{{bucket,api}}", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], - "title": "Inflight Requests Rate", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "title": "Inflight Requests", + "type": "timeseries" }, { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + 
"color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 0, "y": 12 }, - "hiddenSeries": false, "id": 62, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -536,91 +624,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket,api) (increase(minio_bucket_requests_total{job=\"$scrape_jobs\"}[$__rate_interval]))", + "expr": "sum by (bucket,api) (increase(minio_bucket_requests_total{job=~\"$scrape_jobs\"}[$__rate_interval]))", "interval": "1m", "intervalFactor": 2, "legendFormat": "{{bucket,api}}", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Total Requests Rate", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + 
"lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 6, "y": 12 }, - "hiddenSeries": false, "id": 63, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -628,91 +752,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (rate(minio_bucket_traffic_sent_bytes{job=\"$scrape_jobs\"}[$__rate_interval]))", + "expr": "sum by (bucket) (rate(minio_bucket_traffic_sent_bytes{job=~\"$scrape_jobs\"}[$__rate_interval]))", "interval": "1m", "intervalFactor": 2, "legendFormat": "Data Sent [{{bucket}}]", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Total Data Sent Rate", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 
null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 12, "y": 12 }, - "hiddenSeries": false, "id": 64, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -720,91 +880,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (rate(minio_bucket_usage_total_bytes{job=\"$scrape_jobs\"}[$__rate_interval]))", + "expr": "sum by (bucket) (rate(minio_bucket_usage_total_bytes{job=~\"$scrape_jobs\"}[$__rate_interval]))", "interval": "1m", "intervalFactor": 2, "legendFormat": "Usage [{{bucket}}]", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Usage Rate", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + 
"fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 18, "y": 12 }, - "hiddenSeries": false, "id": 65, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -812,183 +1008,255 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (increase(minio_bucket_usage_object_total{job=\"$scrape_jobs\"}[$__rate_interval]))", + "expr": "sum by (bucket) (minio_bucket_usage_object_total{job=~\"$scrape_jobs\"})", "interval": "1m", "intervalFactor": 2, "legendFormat": "{{bucket}}", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], - "title": "Objects Increase Rate", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "title": "Objects", + "type": "timeseries" }, { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 0, - "y": 18 - }, - "hiddenSeries": false, - "id": 66, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { 
+ "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 18 + }, + "id": 66, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (increase(minio_bucket_usage_version_total{job=\"$scrape_jobs\"}[$__rate_interval]))", + "expr": "sum by (bucket) (minio_bucket_usage_version_total{job=~\"$scrape_jobs\"})", "interval": "1m", "intervalFactor": 2, "legendFormat": "{{bucket}}", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], - "title": "Versions Increase Rate", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "title": "Versions", + "type": "timeseries" }, { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 6, "y": 18 }, - "hiddenSeries": false, "id": 67, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], 
+ "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -996,87 +1264,97 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (increase(minio_bucket_usage_deletemarker_total{job=\"$scrape_jobs\"}[$__rate_interval]))", + "expr": "sum by (bucket) (minio_bucket_usage_deletemarker_total{job=~\"$scrape_jobs\"})", "interval": "1m", "intervalFactor": 2, "legendFormat": "{{bucket}}", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], - "title": "Delete Markers Increase Rate", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "title": "Delete Markers", + "type": "timeseries" }, { - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "Prometheus" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, "gridPos": { "h": 6, "w": 6, "x": 12, "y": 18 }, - "hiddenSeries": false, "id": 68, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -1084,92 +1362,126 @@ "uid": "Prometheus" }, "exemplar": true, - "expr": "minio_usage_last_activity_nano_seconds{job=\"$scrape_jobs\"}", + "expr": "minio_usage_last_activity_nano_seconds{job=~\"$scrape_jobs\"}", "interval": "1m", "legendFormat": "{{server}}", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Time Elapsed Since Last Scan (nanos)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "none", - "logBase": 1, - "min": "0", - "show": 
true - }, - { - "$$hashKey": "object:213", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 18, "y": 18 }, - "hiddenSeries": false, "id": 69, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -1177,91 +1489,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (rate(minio_bucket_traffic_received_bytes{job=\"$scrape_jobs\"}[$__rate_interval]))", + "expr": "sum by (bucket) (rate(minio_bucket_traffic_received_bytes{job=~\"$scrape_jobs\"}[$__rate_interval]))", "interval": "1m", "intervalFactor": 2, "legendFormat": "Data Received [{{bucket}}]", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Total Data Received Rate", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { 
"type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 0, "y": 24 }, - "hiddenSeries": false, "id": 54, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -1269,91 +1617,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (minio_bucket_replication_received_bytes{job=\"$scrape_jobs\"})", + "expr": "sum by (bucket) (minio_bucket_replication_received_bytes{job=~\"$scrape_jobs\"})", "interval": "1m", "intervalFactor": 2, "legendFormat": "Data Received [{{bucket}}]", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Replication Data Received", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": 
"none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 6, "y": 24 }, - "hiddenSeries": false, "id": 55, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -1361,91 +1745,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (minio_bucket_replication_sent_bytes{job=\"$scrape_jobs\"})", + "expr": "sum by (bucket) (minio_bucket_replication_sent_bytes{job=~\"$scrape_jobs\"})", "interval": "1m", "intervalFactor": 2, "legendFormat": "Replication Data Sent [{{bucket}}]", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Replication Data Sent", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + 
"thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 12, "y": 24 }, - "hiddenSeries": false, "id": 56, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -1453,91 +1873,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (minio_bucket_replication_total_failed_bytes{job=\"$scrape_jobs\"})", + "expr": "sum by (bucket) (minio_bucket_replication_total_failed_bytes{job=~\"$scrape_jobs\"})", "interval": "1m", "intervalFactor": 2, "legendFormat": "Replication Failed [{{bucket}}]", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Replication Data Failed", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + 
"matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 18, "y": 24 }, - "hiddenSeries": false, "id": 57, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -1545,91 +2001,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (minio_bucket_replication_total_failed_count{job=\"$scrape_jobs\"})", + "expr": "sum by (bucket) (minio_bucket_replication_total_failed_count{job=~\"$scrape_jobs\"})", "interval": "1m", "intervalFactor": 2, "legendFormat": "Replication Failed Objects [{{bucket}}]", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Replication Failed Objects", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 0, "y": 30 }, - "hiddenSeries": false, "id": 70, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - 
"show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -1637,91 +2129,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (minio_bucket_replication_received_count{job=\"$scrape_jobs\"})", + "expr": "sum by (bucket) (minio_bucket_replication_received_count{job=~\"$scrape_jobs\"})", "interval": "1m", "intervalFactor": 2, "legendFormat": "Replicated In Objects [{{bucket}}]", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Replicated In Objects", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 6, "y": 30 }, - "hiddenSeries": false, "id": 71, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], 
+ "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -1729,91 +2257,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (minio_bucket_replication_sent_count{job=\"$scrape_jobs\"})", + "expr": "sum by (bucket) (minio_bucket_replication_sent_count{job=~\"$scrape_jobs\"})", "interval": "1m", "intervalFactor": 2, "legendFormat": "Replicated Out Objects [{{bucket}}]", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Replicated Out Objects", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 12, "y": 30 }, - "hiddenSeries": false, "id": 72, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -1821,91 +2385,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (rate(minio_bucket_replication_last_hour_failed_bytes{job=\"$scrape_jobs\"}[$__rate_interval]))", + 
"expr": "sum by (bucket) (rate(minio_bucket_replication_last_hour_failed_bytes{job=~\"$scrape_jobs\"}[$__rate_interval]))", "interval": "1m", "intervalFactor": 2, "legendFormat": "Last Hour Failed Size [{{bucket}}]", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Last Hour Failed Size", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 18, "y": 30 }, - "hiddenSeries": false, "id": 73, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -1913,91 +2513,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (minio_bucket_replication_last_hour_failed_count{job=\"$scrape_jobs\"})", + "expr": "sum by (bucket) (minio_bucket_replication_last_hour_failed_count{job=~\"$scrape_jobs\"})", "interval": "1m", "intervalFactor": 2, "legendFormat": "Last Hour Failed Objects [{{bucket}}]", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Last Hour Failed Objects", - "tooltip": { - "shared": true, - "sort": 2, - 
"value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 0, "y": 36 }, - "hiddenSeries": false, "id": 74, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2005,91 +2641,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (rate(minio_bucket_replication_last_minute_failed_bytes{job=\"$scrape_jobs\"}[$__rate_interval]))", + "expr": "sum by (bucket) (rate(minio_bucket_replication_last_minute_failed_bytes{job=~\"$scrape_jobs\"}[$__rate_interval]))", "interval": "1m", "intervalFactor": 2, "legendFormat": "Last Minute Failed Size [{{bucket}}]", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Last Minute Failed Size", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - 
"yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 6, "y": 36 }, - "hiddenSeries": false, "id": 75, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2097,44 +2769,15 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (minio_bucket_replication_last_minute_failed_count{job=\"$scrape_jobs\"})", + "expr": "sum by (bucket) (minio_bucket_replication_last_minute_failed_count{job=~\"$scrape_jobs\"})", "interval": "1m", "intervalFactor": 2, "legendFormat": "Last Minute Failed Objects [{{bucket}}]", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Last Minute Failed Objects", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { "datasource": { @@ -2167,11 +2810,12 @@ "y": 36 }, "id": 58, - "links": [], "options": { "displayMode": "basic", + "maxVizHeight": 300, "minVizHeight": 10, "minVizWidth": 0, + "namePlacement": "auto", "orientation": "horizontal", "reduceOptions": { "calcs": [ @@ -2181,10 +2825,11 @@ "values": false 
}, "showUnfilled": false, + "sizing": "auto", "text": {}, "valueMode": "color" }, - "pluginVersion": "10.0.2", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2193,7 +2838,7 @@ }, "editorMode": "code", "exemplar": true, - "expr": "sum by (bucket,range,operation) (minio_bucket_replication_latency_ms{job=\"$scrape_jobs\"})", + "expr": "sum by (bucket,range,operation) (minio_bucket_replication_latency_ms{job=~\"$scrape_jobs\"})", "format": "time_series", "instant": false, "interval": "", @@ -2207,51 +2852,116 @@ "type": "bargauge" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 0, "y": 42 }, - "hiddenSeries": false, "id": 76, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2259,91 +2969,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (minio_bucket_replication_proxied_head_requests_total{job=\"$scrape_jobs\"})", + "expr": "sum by (bucket) (minio_bucket_replication_proxied_head_requests_total{job=~\"$scrape_jobs\"})", "interval": "1m", "intervalFactor": 2, "legendFormat": "{{bucket}}", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Total Proxied Head Requests", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - 
"show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 6, "y": 42 }, - "hiddenSeries": false, "id": 77, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2351,91 +3097,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "minio_bucket_replication_proxied_head_requests_failures{job=\"$scrape_jobs\"}", + "expr": "minio_bucket_replication_proxied_head_requests_failures{job=~\"$scrape_jobs\"}", "interval": "1m", "intervalFactor": 2, "legendFormat": "{{instance}}", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Total Failed Proxied Head Requests", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - 
"fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 12, "y": 42 }, - "hiddenSeries": false, "id": 78, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2443,91 +3225,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (minio_bucket_replication_proxied_put_tagging_requests_total{job=\"$scrape_jobs\"})", + "expr": "sum by (bucket) (minio_bucket_replication_proxied_put_tagging_requests_total{job=~\"$scrape_jobs\"})", "interval": "1m", "intervalFactor": 2, "legendFormat": "{{bucket}}", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Total Proxied Put Tag Requests", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + 
"thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 6, "w": 6, "x": 18, "y": 42 }, - "hiddenSeries": false, "id": 79, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2535,91 +3353,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "minio_bucket_replication_proxied_put_tagging_requests_failures{job=\"$scrape_jobs\"}", + "expr": "minio_bucket_replication_proxied_put_tagging_requests_failures{job=~\"$scrape_jobs\"}", "interval": "1m", "intervalFactor": 2, "legendFormat": "{{instance}}", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Total Failed Proxied Put Tag Requests", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": 
"none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 0, "y": 48 }, - "hiddenSeries": false, "id": 80, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2627,91 +3481,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (minio_bucket_replication_proxied_get_tagging_requests_total{job=\"$scrape_jobs\"})", + "expr": "sum by (bucket) (minio_bucket_replication_proxied_get_tagging_requests_total{job=~\"$scrape_jobs\"})", "interval": "1m", "intervalFactor": 2, "legendFormat": "{{bucket}}", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Total Proxied Get Tag Requests", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + 
"id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 6, "y": 48 }, - "hiddenSeries": false, "id": 81, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2719,91 +3609,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "minio_bucket_replication_proxied_get_tagging_requests_failures{job=\"$scrape_jobs\"}", + "expr": "minio_bucket_replication_proxied_get_tagging_requests_failures{job=~\"$scrape_jobs\"}", "interval": "1m", "intervalFactor": 2, "legendFormat": "{{instance}}", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Total Failed Proxied Get Tag Requests", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 12, "y": 48 }, - "hiddenSeries": false, "id": 82, - 
"legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2811,91 +3737,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "sum by (bucket) (minio_bucket_replication_proxied_delete_tagging_requests_total{job=\"$scrape_jobs\"})", + "expr": "sum by (bucket) (minio_bucket_replication_proxied_delete_tagging_requests_total{job=~\"$scrape_jobs\"})", "interval": "1m", "intervalFactor": 2, "legendFormat": "{{bucket}}", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Total Proxied Delete Tag Requests", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 6, "x": 18, "y": 48 }, - "hiddenSeries": false, "id": 83, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", 
- "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2903,91 +3865,127 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "minio_bucket_replication_proxied_delete_tagging_requests_failures{job=\"$scrape_jobs\"}", + "expr": "minio_bucket_replication_proxied_delete_tagging_requests_failures{job=~\"$scrape_jobs\"}", "interval": "1m", "intervalFactor": 2, "legendFormat": "{{instance}}", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Total Failed Proxied Delete Tag Requests", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 12, "x": 0, "y": 54 }, - "hiddenSeries": false, "id": 84, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2995,91 +3993,127 @@ "uid": "${DS_PROMETHEUS}" }, 
"exemplar": true, - "expr": "sum by (bucket) (minio_bucket_replication_proxied_get_requests_total{job=\"$scrape_jobs\"})", + "expr": "sum by (bucket) (minio_bucket_replication_proxied_get_requests_total{job=~\"$scrape_jobs\"})", "interval": "1m", "intervalFactor": 2, "legendFormat": "{{bucket}}", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Total Proxied Get Requests", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "fill": 1, - "fillGradient": 0, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, "gridPos": { "h": 6, "w": 12, "x": 12, "y": 54 }, - "hiddenSeries": false, "id": 85, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -3087,74 +4121,59 @@ "uid": "${DS_PROMETHEUS}" }, "exemplar": true, - "expr": "minio_bucket_replication_proxied_get_requests_failures{job=\"$scrape_jobs\"}", + "expr": "minio_bucket_replication_proxied_get_requests_failures{job=~\"$scrape_jobs\"}", "interval": "1m", "intervalFactor": 2, "legendFormat": "{{instance}}", "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Total Failed Proxied Get Requests", - 
"tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" } ], "refresh": "", - "schemaVersion": 38, - "style": "dark", + "schemaVersion": 39, "tags": [ "minio" ], "templating": { "list": [ { - "current": {}, + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "label": "Data source", + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, "definition": "label_values(job)", - "hide": 0, "includeAll": false, - "multi": false, "name": "scrape_jobs", "options": [], "query": { + "qryType": 1, "query": "label_values(job)", - "refId": "StandardVariableQuery" + "refId": "PrometheusVariableQueryEditor-VariableQuery" }, "refresh": 1, "regex": "", - "skipUrlSync": false, - "sort": 0, "type": "query" } ] @@ -3190,6 +4209,6 @@ "timezone": "", "title": "MinIO Bucket Dashboard", "uid": "TgmJnqnnk2", - "version": 3, + "version": 1, "weekStart": "" -} +} \ No newline at end of file diff --git a/docs/metrics/prometheus/grafana/grafana-minio.png b/docs/metrics/prometheus/grafana/grafana-minio.png index d1c56cef31783..6e48f6ef7e7df 100644 Binary files a/docs/metrics/prometheus/grafana/grafana-minio.png and b/docs/metrics/prometheus/grafana/grafana-minio.png differ diff --git a/docs/metrics/prometheus/grafana/minio-dashboard.json b/docs/metrics/prometheus/grafana/minio-dashboard.json index 3efc1afab76ba..8976eb7b31d01 100644 --- a/docs/metrics/prometheus/grafana/minio-dashboard.json +++ b/docs/metrics/prometheus/grafana/minio-dashboard.json @@ -1,65 +1,4 @@ { - "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__elements": {}, - "__requires": [ - { - "type": "panel", - "id": "bargauge", - "name": "Bar gauge", - "version": "" - }, - { - "type": "panel", - "id": "gauge", - "name": "Gauge", - "version": "" - }, - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "10.3.1" - }, - { - "type": "panel", - "id": "graph", - "name": "Graph (old)", - "version": "" - }, - { - "type": "panel", - "id": "piechart", - "name": "Pie chart", - "version": "" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - }, - { - "type": "panel", - "id": "stat", - "name": "Stat", - "version": "" - }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], "annotations": { "list": [ { @@ -87,7 +26,7 @@ "fiscalYearStartMonth": 0, "gnetId": 13502, "graphTooltip": 0, - "id": null, + "id": 292, "links": [ { "icon": "external link", @@ -129,8 +68,7 @@ } ] }, - "unit": "dtdurations", - "unitScale": true + "unit": "dtdurations" }, "overrides": [] }, @@ -141,7 +79,6 @@ "y": 0 }, "id": 1, - "links": [], "maxDataPoints": 100, "options": { "colorMode": "value", @@ -160,7 +97,7 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "10.3.1", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ 
-210,8 +147,7 @@ } ] }, - "unit": "bytes", - "unitScale": true + "unit": "bytes" }, "overrides": [] }, @@ -222,7 +158,6 @@ "y": 0 }, "id": 65, - "links": [], "maxDataPoints": 100, "options": { "colorMode": "value", @@ -241,7 +176,7 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "10.3.1", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -293,8 +228,7 @@ "type": "special" } ], - "unit": "bytes", - "unitScale": true + "unit": "bytes" }, "overrides": [ { @@ -337,7 +271,6 @@ }, "id": 50, "interval": "1m", - "links": [], "maxDataPoints": 100, "options": { "displayLabels": [], @@ -451,8 +384,7 @@ } ] }, - "unit": "bytes", - "unitScale": true + "unit": "bytes" }, "overrides": [ { @@ -553,8 +485,7 @@ "value": null } ] - }, - "unitScale": true + } }, "overrides": [] }, @@ -565,7 +496,6 @@ "y": 0 }, "id": 52, - "links": [], "options": { "displayMode": "lcd", "maxVizHeight": 300, @@ -584,7 +514,7 @@ "sizing": "auto", "valueMode": "color" }, - "pluginVersion": "10.3.1", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -666,8 +596,7 @@ "value": 80 } ] - }, - "unitScale": true + } }, "overrides": [] }, @@ -678,7 +607,6 @@ "y": 0 }, "id": 61, - "links": [], "maxDataPoints": 100, "options": { "legend": { @@ -744,8 +672,7 @@ } ] }, - "unit": "bytes", - "unitScale": true + "unit": "bytes" }, "overrides": [] }, @@ -756,7 +683,6 @@ "y": 3 }, "id": 64, - "links": [], "maxDataPoints": 100, "options": { "colorMode": "value", @@ -775,7 +701,7 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "10.3.1", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -854,8 +780,7 @@ "value": 80 } ] - }, - "unitScale": true + } }, "overrides": [] }, @@ -866,7 +791,6 @@ "y": 3 }, "id": 62, - "links": [], "maxDataPoints": 100, "options": { "legend": { @@ -921,8 +845,7 @@ } ] }, - "unit": "bool_on_off", - "unitScale": true + "unit": "bool_on_off" }, "overrides": [] }, @@ -949,7 +872,7 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "10.3.1", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -970,7 +893,6 @@ } ], "title": "Cluster Health Status", - "transformations": [], "type": "stat" }, { @@ -990,8 +912,7 @@ "value": null } ] - }, - "unitScale": true + } }, "overrides": [] }, @@ -1002,7 +923,6 @@ "y": 6 }, "id": 78, - "links": [], "maxDataPoints": 100, "options": { "minVizHeight": 75, @@ -1019,7 +939,7 @@ "showThresholdMarkers": true, "sizing": "auto" }, - "pluginVersion": "10.3.1", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -1094,8 +1014,7 @@ } ] }, - "unit": "short", - "unitScale": true + "unit": "short" }, "overrides": [] }, @@ -1106,7 +1025,6 @@ "y": 6 }, "id": 66, - "links": [], "maxDataPoints": 100, "options": { "colorMode": "value", @@ -1125,7 +1043,7 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "10.3.1", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -1202,8 +1120,7 @@ } ] }, - "unit": "binBps", - "unitScale": true + "unit": "binBps" }, "overrides": [] }, @@ -1300,8 +1217,7 @@ } ] }, - "unit": "binBps", - "unitScale": true + "unit": "binBps" }, "overrides": [] }, @@ -1374,8 +1290,7 @@ } ] }, - "unit": "short", - "unitScale": true + "unit": "short" }, "overrides": [] }, @@ -1386,7 +1301,6 @@ "y": 8 }, "id": 53, - "links": [], "maxDataPoints": 100, "options": { "colorMode": "value", @@ -1405,7 +1319,7 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "10.3.1", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -1463,8 +1377,7 @@ } ] }, - "unit": 
"short", - "unitScale": true + "unit": "short" }, "overrides": [] }, @@ -1475,7 +1388,6 @@ "y": 9 }, "id": 44, - "links": [], "maxDataPoints": 100, "options": { "colorMode": "value", @@ -1494,7 +1406,7 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "10.3.1", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -1536,8 +1448,7 @@ } ] }, - "unit": "ns", - "unitScale": true + "unit": "ns" }, "overrides": [] }, @@ -1548,7 +1459,6 @@ "y": 10 }, "id": 80, - "links": [], "maxDataPoints": 100, "options": { "colorMode": "value", @@ -1567,7 +1477,7 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "10.3.1", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -1610,8 +1520,7 @@ } ] }, - "unit": "ns", - "unitScale": true + "unit": "ns" }, "overrides": [] }, @@ -1622,7 +1531,6 @@ "y": 10 }, "id": 81, - "links": [], "maxDataPoints": 100, "options": { "colorMode": "value", @@ -1641,7 +1549,7 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "10.3.1", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -1664,57 +1572,116 @@ "type": "stat" }, { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { - "unitScale": true + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 6, "w": 9, "x": 0, "y": 12 }, - "hiddenSeries": false, "id": 60, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } }, - "percentage": false, - "pluginVersion": "10.3.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -1729,90 +1696,120 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "S3 API Request Rate", - "tooltip": { - "shared": true, - 
"sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { - "unitScale": true + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 6, "w": 7, "x": 9, "y": 12 }, - "hiddenSeries": false, "id": 88, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } }, - "percentage": false, - "pluginVersion": "10.3.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -1827,96 +1824,126 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "S3 API Request Error Rate (4xx)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { - "unitScale": true - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": 
{ - "h": 6, - "w": 8, - "x": 16, - "y": 12 - }, - "hiddenSeries": false, - "id": 86, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.3.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 8, + "x": 16, + "y": 12 + }, + "id": 86, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, "exemplar": true, "expr": "sum by (server,api) (increase(minio_s3_requests_5xx_errors_total{job=~\"$scrape_jobs\"}[$__rate_interval]))", "interval": "1m", @@ -1925,37 +1952,8 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "S3 API Request Error Rate (5xx)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { "datasource": { @@ -1976,8 +1974,7 @@ "value": null } ] - }, - "unitScale": true + } }, "overrides": [] }, @@ -2006,7 +2003,7 @@ "sizing": "auto", "valueMode": "color" }, - "pluginVersion": "10.3.1", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2121,8 +2118,7 @@ } ] }, - "unit": "bytes", - "unitScale": true + "unit": "bytes" }, "overrides": [] }, @@ -2151,7 +2147,7 @@ "sizing": "auto", "valueMode": "color" }, - "pluginVersion": "10.3.1", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2173,54 +2169,85 @@ "type": "bargauge" }, { - "aliasColors": {}, - "bars": true, - 
"dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { - "unitScale": true + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" }, "overrides": [] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 0, "y": 26 }, - "hiddenSeries": false, "id": 73, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } }, - "percentage": false, - "pluginVersion": "10.3.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2247,37 +2274,8 @@ "refId": "B" } ], - "thresholds": [], - "timeRegions": [], "title": "Read, Write I/O", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:381", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:382", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { "datasource": { @@ -2291,7 +2289,8 @@ "mode": "percentage", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "orange", @@ -2303,8 +2302,7 @@ } ] }, - "unit": "s", - "unitScale": true + "unit": "s" }, "overrides": [] }, @@ -2330,7 +2328,7 @@ "showThresholdMarkers": true, "sizing": "auto" }, - "pluginVersion": "10.3.1", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2397,7 +2395,8 @@ "mode": "absolute", "steps": [ { - "color": "green" + "color": "green", + "value": null }, { "color": "red", @@ -2405,8 +2404,7 @@ } ] }, - "unit": "bytes", - "unitScale": true + "unit": "bytes" }, "overrides": [] }, @@ -2417,7 +2415,6 @@ "y": 33 }, "id": 17, - "links": [], "options": { "legend": { "calcs": [], @@ -2465,13 +2462,6 @@ "type": "timeseries" }, { - "aliasColors": { - "available 10.13.1.25:9000": "green", - "used 10.13.1.25:9000": "blue" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" @@ -2479,103 +2469,129 @@ "description": "", "fieldConfig": { "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": 
"text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, "links": [], - "unit": "bytes", - "unitScale": true - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 33 - }, - "hiddenSeries": false, - "id": 8, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "links": [], - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.3.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] }, - "exemplar": true, - "expr": "minio_node_file_descriptor_open_total{job=~\"$scrape_jobs\"}", - "interval": "", - "legendFormat": "Open FDs [{{server}}]", - "refId": "B" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "File Descriptors", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "available 10.13.1.25:9000" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "used 10.13.1.25:9000" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "blue", + "mode": "fixed" + } + } + ] + } + ] }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 33 }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "bytes", - "logBase": 1, - "min": "0", - "show": true + "id": 8, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ { - "$$hashKey": "object:213", - "format": "none", - "logBase": 1, - "min": "0", - "show": true + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "minio_node_file_descriptor_open_total{job=~\"$scrape_jobs\"}", + "interval": "", + "legendFormat": "Open FDs [{{server}}]", + "refId": "B" } ], - "yaxis": { - "align": false - } + "title": "File Descriptors", + "type": "timeseries" }, { - "aliasColors": { - "Offline 10.13.1.25:9000": "dark-red", - "Total 10.13.1.25:9000": "blue" - }, - "bars": true, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" @@ -2583,47 +2599,113 @@ "description": "Number of online drives per MinIO Server", "fieldConfig": { "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + 
"axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, "links": [], - "unitScale": true + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Offline 10.13.1.25:9000" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Total 10.13.1.25:9000" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "blue", + "mode": "fixed" + } + } + ] + } + ] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 0, "y": 40 }, - "hiddenSeries": false, "id": 11, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "total": false, - "values": false - }, - "lines": false, - "linewidth": 1, - "links": [], - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } }, - "percentage": false, - "pluginVersion": "10.3.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2652,89 +2734,90 @@ "refId": "B" } ], - "thresholds": [], - "timeRegions": [], "title": "Syscalls", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:185", - "decimals": 0, - "format": "short", - "logBase": 1, - "min": "0", - "show": true - }, - { - "$$hashKey": "object:186", - "format": "short", - "logBase": 1, - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { - "unitScale": true + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + 
"steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" }, "overrides": [] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 12, "y": 40 }, - "hiddenSeries": false, "id": 95, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } }, - "percentage": false, - "pluginVersion": "10.3.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2748,185 +2831,187 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Scanned Objects", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - }, - { - "$$hashKey": "object:213", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { - "unitScale": true - }, - "overrides": [] - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 7, - "w": 12, - "x": 0, - "y": 47 - }, - "hiddenSeries": false, - "id": 75, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.3.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" + "color": { + "mode": "palette-classic" }, - "exemplar": true, - "expr": "rate(minio_node_scanner_versions_scanned{job=~\"$scrape_jobs\"}[$__rate_interval])", - "interval": "1m", - "legendFormat": "[{{server}}]", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Scanned Versions", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": 
"none" + }, + "overrides": [] }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 47 }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "none", - "logBase": 1, - "min": "0", - "show": true + "id": 75, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ { - "$$hashKey": "object:213", - "format": "none", - "logBase": 1, - "min": "0", - "show": true + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "rate(minio_node_scanner_versions_scanned{job=~\"$scrape_jobs\"}[$__rate_interval])", + "interval": "1m", + "legendFormat": "[{{server}}]", + "refId": "A" } ], - "yaxis": { - "align": false - } + "title": "Scanned Versions", + "type": "timeseries" }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { - "unitScale": true + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" }, "overrides": [] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 7, "w": 12, "x": 12, "y": 47 }, - "hiddenSeries": false, "id": 96, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } }, - "percentage": false, - "pluginVersion": "10.3.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -2940,39 +3025,8 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Scanned Directories", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - }, - { - "$$hashKey": "object:213", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { "datasource": { @@ -2997,12 +3051,12 @@ "mode": "percentage", "steps": [ { - "color": "green" + "color": "green", + "value": null } ] }, - "unit": "dtdurations", - "unitScale": 
true + "unit": "dtdurations" }, "overrides": [] }, @@ -3013,7 +3067,6 @@ "y": 54 }, "id": 89, - "links": [], "maxDataPoints": 100, "options": { "colorMode": "value", @@ -3032,7 +3085,7 @@ "textMode": "auto", "wideLayout": true }, - "pluginVersion": "10.3.1", + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -3055,57 +3108,116 @@ "type": "stat" }, { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { - "unitScale": true + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 4, "w": 4, "x": 4, "y": 54 }, - "hiddenSeries": false, "id": 91, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } }, - "percentage": false, - "pluginVersion": "10.3.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -3120,88 +3232,90 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "KMS Request 4xx Error Rate", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { - "unit": "bool_on_off", - "unitScale": true + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + 
"axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bool_on_off" }, "overrides": [] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 4, "w": 4, "x": 8, "y": 54 }, - "hiddenSeries": false, "id": 90, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } }, - "percentage": false, - "pluginVersion": "10.3.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -3215,188 +3329,217 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "KMS Online(1)/Offline(0)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "bool_on_off", - "logBase": 1, - "min": "0", - "show": true - }, - { - "$$hashKey": "object:213", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { - "unitScale": true + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" }, "overrides": [] }, - "fill": 1, - "fillGradient": 0, "gridPos": { - "h": 7, + "h": 4, "w": 12, "x": 12, "y": 54 }, - "hiddenSeries": false, "id": 98, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [], + 
"displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } }, - "percentage": false, - "pluginVersion": "10.3.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, - "exemplar": true, - "expr": "rate(minio_node_scanner_bucket_scans_finished{job=~\"$scrape_jobs\"}[$__rate_interval])", - "interval": "1m", - "legendFormat": "[{{server}}]", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Bucket Scans Finished", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - }, - { - "$$hashKey": "object:213", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "unitScale": true + "exemplar": true, + "expr": "rate(minio_node_scanner_bucket_scans_finished{job=~\"$scrape_jobs\"}[$__rate_interval])", + "interval": "1m", + "legendFormat": "[{{server}}]", + "refId": "A" + } + ], + "title": "Bucket Scans Finished", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 5, "w": 6, "x": 0, "y": 58 }, - "hiddenSeries": false, "id": 92, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + 
} }, - "percentage": false, - "pluginVersion": "10.3.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -3411,90 +3554,120 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "KMS Request 5xx Error Rate", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { - "unitScale": true + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] }, - "fill": 1, - "fillGradient": 0, "gridPos": { "h": 5, "w": 6, "x": 6, "y": 58 }, - "hiddenSeries": false, "id": 93, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } }, - "percentage": false, - "pluginVersion": "10.3.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -3509,87 +3682,90 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "KMS Request Success Rate ", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - 
"show": false - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" }, { - "aliasColors": {}, - "bars": false, - "dashLength": 10, - "dashes": false, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" }, "fieldConfig": { "defaults": { - "unitScale": true + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" }, "overrides": [] }, - "fill": 1, - "fillGradient": 0, "gridPos": { - "h": 9, + "h": 5, "w": 12, "x": 12, "y": 61 }, - "hiddenSeries": false, "id": 97, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", "options": { - "alertThreshold": true + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } }, - "percentage": false, - "pluginVersion": "10.3.1", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, + "pluginVersion": "10.4.0", "targets": [ { "datasource": { @@ -3603,39 +3779,8 @@ "refId": "A" } ], - "thresholds": [], - "timeRegions": [], "title": "Bucket Scans Started", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - }, - { - "$$hashKey": "object:213", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false - } + "type": "timeseries" } ], "refresh": "", @@ -3646,7 +3791,25 @@ "templating": { "list": [ { - "current": {}, + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "label": "Data source", + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" @@ -3700,6 +3863,6 @@ "timezone": "", "title": "MinIO Dashboard", "uid": "TgmJnqnnk", - "version": 54, + "version": 1, "weekStart": "" } \ No newline at end of file diff --git a/docs/metrics/prometheus/grafana/node/grafana-node.png b/docs/metrics/prometheus/grafana/node/grafana-node.png new file mode 100644 index 0000000000000..4b12d5417e3d1 Binary files /dev/null and b/docs/metrics/prometheus/grafana/node/grafana-node.png differ diff --git a/docs/metrics/prometheus/grafana/node/minio-node.json b/docs/metrics/prometheus/grafana/node/minio-node.json new file mode 100644 index 
0000000000000..783190dd8740d --- /dev/null +++ b/docs/metrics/prometheus/grafana/node/minio-node.json @@ -0,0 +1,955 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "MinIO Nodes Grafana Dashboard - https://min.io/", + "editable": true, + "fiscalYearStartMonth": 0, + "gnetId": 15306, + "graphTooltip": 0, + "id": 267, + "links": [ + { + "icon": "external link", + "includeVars": true, + "keepTime": true, + "tags": [ + "minio" + ], + "type": "dashboards" + } + ], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 6, + "x": 0, + "y": 0 + }, + "id": 21, + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": {}, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "max(minio_node_drive_total{job=~\"$scrape_jobs\",server=\"$server\"})", + "format": "table", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": "", + "metric": "process_start_time_seconds", + "refId": "A", + "step": 60 + } + ], + "title": "Total Drives", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 6, + "x": 6, + "y": 0 + }, + "id": 22, + "maxDataPoints": 100, + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto" + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "max(minio_node_drive_online_total{job=~\"$scrape_jobs\",server=\"$server\"})", + "format": "time_series", + "hide": false, + "instant": true, + "interval": "", + "intervalFactor": 1, + "legendFormat": ".", + "metric": "process_start_time_seconds", + "range": false, + "refId": "A", + "step": 60 + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "max(minio_node_drive_offline_total{job=~\"$scrape_jobs\",server=\"$server\"})", + "format": "time_series", + "hide": false, + "instant": true, + "legendFormat": ".", + "range": false, + "refId": "B" + } + ], + "title": "Total Online/Offline Drives", 
+ "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 6, + "x": 12, + "y": 0 + }, + "id": 23, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "minio_node_drive_total_bytes{job=~\"$scrape_jobs\",server=\"$server\"}", + "interval": "", + "legendFormat": "Total [{{drive}}]", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "minio_node_drive_used_bytes{job=~\"$scrape_jobs\",server=\"$server\"}", + "interval": "", + "legendFormat": "Used [{{drive}}]", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "minio_node_drive_free_bytes{job=~\"$scrape_jobs\",server=\"$server\"}", + "interval": "", + "legendFormat": "Free [{{drive}}]", + "refId": "C" + } + ], + "title": "Drive Usage", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 6, + "x": 18, + "y": 0 + }, + "id": 24, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": 
"${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "minio_node_drive_free_inodes{job=~\"$scrape_jobs\",server=\"$server\"}", + "interval": "", + "legendFormat": "[{{drive}}]", + "refId": "B" + } + ], + "title": "Free Inodes", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 6, + "x": 0, + "y": 5 + }, + "id": 25, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "minio_node_drive_latency_us{job=~\"$scrape_jobs\",server=\"$server\"}", + "interval": "", + "legendFormat": "[{{drive}}:{{api}}]", + "refId": "B" + } + ], + "title": "Drive Latency (micro sec)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 6, + "x": 6, + "y": 5 + }, + "id": 26, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "minio_node_drive_errors_availability{job=~\"$scrape_jobs\",server=\"$server\"}", + "interval": "", + "legendFormat": "[{{drive}}]", + "refId": "B" + } + ], + "title": "Drive Errors", + "type": "timeseries" + }, + { + 
"datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "available 10.13.1.25:9000" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "used 10.13.1.25:9000" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "blue", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 5, + "w": 6, + "x": 12, + "y": 5 + }, + "id": 27, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "minio_node_drive_errors_timeout{job=~\"$scrape_jobs\",server=\"$server\"}", + "interval": "", + "legendFormat": "[{{drive}}]", + "refId": "B" + } + ], + "title": "Drive Timeout Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 6, + "x": 18, + "y": 5 + }, + "id": 28, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "minio_node_drive_io_waiting{job=~\"$scrape_jobs\",server=\"$server\"}", + "interval": "", + "legendFormat": 
"[{{drive}}]", + "refId": "B" + } + ], + "title": "IO Operations Waiting", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 39, + "tags": [ + "minio" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "label": "Data source", + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(job)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "scrape_jobs", + "options": [], + "query": { + "query": "label_values(job)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(minio_node_drive_total,server)", + "hide": 0, + "includeAll": false, + "label": "Server", + "multi": false, + "name": "server", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(minio_node_drive_total,server)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "MinIO Node Dashboard", + "uid": "TgmJnnqn2k", + "version": 1, + "weekStart": "" +} \ No newline at end of file diff --git a/docs/metrics/prometheus/grafana/replication/grafana-replication-cluster.png b/docs/metrics/prometheus/grafana/replication/grafana-replication-cluster.png new file mode 100644 index 0000000000000..0899664c15702 Binary files /dev/null and b/docs/metrics/prometheus/grafana/replication/grafana-replication-cluster.png differ diff --git a/docs/metrics/prometheus/grafana/replication/grafana-replication-node.png b/docs/metrics/prometheus/grafana/replication/grafana-replication-node.png new file mode 100644 index 0000000000000..2758631b1c329 Binary files /dev/null and b/docs/metrics/prometheus/grafana/replication/grafana-replication-node.png differ diff --git a/docs/metrics/prometheus/grafana/replication/grafana-replication.png b/docs/metrics/prometheus/grafana/replication/grafana-replication.png deleted file mode 100644 index 4145b9be01e4c..0000000000000 Binary files a/docs/metrics/prometheus/grafana/replication/grafana-replication.png and /dev/null differ diff --git a/docs/metrics/prometheus/grafana/replication/minio-replication-cluster.json b/docs/metrics/prometheus/grafana/replication/minio-replication-cluster.json new file mode 100644 index 0000000000000..7e34467dad331 --- /dev/null +++ b/docs/metrics/prometheus/grafana/replication/minio-replication-cluster.json @@ -0,0 +1,2952 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "MinIO Grafana Dashboard - https://min.io/", + "editable": true, + "fiscalYearStartMonth": 0, + "gnetId": 15306, + "graphTooltip": 0, + "id": 285, + "links": [ + { + "icon": 
"external link", + "includeVars": true, + "keepTime": true, + "tags": [ + "minio" + ], + "type": "dashboards" + } + ], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 0 + }, + "id": 57, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (server) (minio_cluster_replication_received_bytes{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{server}}", + "refId": "A" + } + ], + "title": "Received Data", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 0 + 
}, + "id": 58, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (server) (minio_cluster_replication_received_count{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{server}}", + "refId": "A" + } + ], + "title": "Received Objects", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 0 + }, + "id": 59, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (endpoint,server) (minio_cluster_replication_sent_bytes{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{endpoint}},{{server}}", + "refId": "A" + } + ], + "title": "Sent Data", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + 
}, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 18, + "y": 0 + }, + "id": 60, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (endpoint,server) (minio_cluster_replication_sent_count{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{endpoint}},{{server}}", + "refId": "A" + } + ], + "title": "Sent Objects", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 6 + }, + "id": 61, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (server) (minio_cluster_replication_total_failed_bytes{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{server}}", + "refId": "A" + } + ], + "title": "Failed Data", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": 
false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 6 + }, + "id": 62, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (server) (minio_cluster_replication_total_failed_count{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{server}}", + "refId": "A" + } + ], + "title": "Failed Objects", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 6 + }, + "id": 63, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (endpoint,server) (minio_cluster_replication_total_failed_bytes{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{endpoint}},{{server}}", + "refId": "A" + } + ], + "title": "Total Failed Data", + "type": "timeseries" + }, + { + 
"datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 18, + "y": 6 + }, + "id": 64, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (endpoint,server) (minio_cluster_replication_total_failed_count{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{endpoint}},{{server}}", + "refId": "A" + } + ], + "title": "Total Failed Objects", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 12 + }, + "id": 65, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + 
"showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (endpoint,server) (minio_cluster_replication_last_hour_failed_bytes{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{endpoint}},{{server}}", + "refId": "A" + } + ], + "title": "Last Hour Failed Data", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 12 + }, + "id": 66, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (endpoint,server) (minio_cluster_replication_last_hour_failed_count{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{endpoint}},{{server}}", + "refId": "A" + } + ], + "title": "Last Hour Failed Objects", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + 
"options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 12 + }, + "id": 67, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (endpoint,server) (minio_cluster_replication_last_minute_failed_bytes{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{endpoint}},{{server}}", + "refId": "A" + } + ], + "title": "Last Minute Failed Data", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 18, + "y": 12 + }, + "id": 68, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (endpoint,server) (minio_cluster_replication_last_minute_failed_count{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{endpoint}},{{server}}", + "refId": "A" + } + ], + "title": "Last Minute Failed Objects", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + 
"insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 18 + }, + "id": 69, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "minio_cluster_replication_proxied_head_requests_total{job=\"$scrape_jobs\"}", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Total Proxied Head Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 18 + }, + "id": 70, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "minio_cluster_replication_proxied_head_requests_failures{job=\"$scrape_jobs\"}", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "range": true, + 
"refId": "A" + } + ], + "title": "Total Failed Proxied Head Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 18 + }, + "id": 71, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "minio_cluster_replication_proxied_put_tagging_requests_total{job=\"$scrape_jobs\"}", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Total Proxied Put Tag Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + 
"w": 6, + "x": 18, + "y": 18 + }, + "id": 72, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "minio_cluster_replication_proxied_put_tagging_requests_failures{job=\"$scrape_jobs\"}", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Total Failed Proxied Put Tag Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 24 + }, + "id": 73, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "minio_cluster_replication_proxied_get_tagging_requests_total{job=\"$scrape_jobs\"}", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Total Proxied Get Tag Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + 
"mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 24 + }, + "id": 74, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "minio_cluster_replication_proxied_get_tagging_requests_failures{job=\"$scrape_jobs\"}", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Total Failed Proxied Get Tag Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 24 + }, + "id": 75, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "minio_cluster_replication_proxied_delete_tagging_requests_total{job=\"$scrape_jobs\"}", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Total Proxied Delete Tag Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, 
+ "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 18, + "y": 24 + }, + "id": 76, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "minio_cluster_replication_proxied_delete_tagging_requests_failures{job=\"$scrape_jobs\"}", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Total Failed Proxied Delete Tag Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 30 + }, + "id": 77, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + 
"datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "minio_cluster_replication_proxied_get_requests_total{job=\"$scrape_jobs\"}", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Total Proxied Get Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 30 + }, + "id": 78, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": true, + "expr": "minio_cluster_replication_proxied_get_requests_failures{job=\"$scrape_jobs\"}", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Total Failed Proxied Get Requests", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 39, + "tags": [ + "minio" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "label": "Data source", + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(job)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "scrape_jobs", + "options": [], + "query": { + "query": "label_values(job)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "MinIO Cluster Replication Dashboard", 
+ "uid": "TgmJnnqnk3", + "version": 1, + "weekStart": "" +} \ No newline at end of file diff --git a/docs/metrics/prometheus/grafana/replication/minio-replication-node.json b/docs/metrics/prometheus/grafana/replication/minio-replication-node.json new file mode 100644 index 0000000000000..6e0c29b4eb961 --- /dev/null +++ b/docs/metrics/prometheus/grafana/replication/minio-replication-node.json @@ -0,0 +1,2397 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "MinIO Grafana Dashboard - https://min.io/", + "editable": true, + "fiscalYearStartMonth": 0, + "gnetId": 15306, + "graphTooltip": 0, + "id": 283, + "links": [ + { + "icon": "external link", + "includeVars": true, + "keepTime": true, + "tags": [ + "minio" + ], + "type": "dashboards" + } + ], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 0 + }, + "id": 55, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (server) (minio_node_replication_average_active_workers{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{server}}", + "refId": "A" + } + ], + "title": "Avg. 
Active Workers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 0 + }, + "id": 56, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (server, endpoint) (minio_node_replication_average_link_latency_ms{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{server,endpoint}}", + "refId": "A" + } + ], + "title": "Avg. 
Link Latency (millis)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 0 + }, + "id": 57, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (server) (minio_node_replication_average_queued_bytes{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{server}}", + "refId": "A" + } + ], + "title": "Avg. 
Queued Size", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 18, + "y": 0 + }, + "id": 58, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (server) (minio_node_replication_average_queued_count{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{server}}", + "refId": "A" + } + ], + "title": "Avg. 
Queued Objects", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 6 + }, + "id": 59, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (server) (minio_node_replication_average_transfer_rate{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{server}}", + "refId": "A" + } + ], + "title": "Avg. 
Transfer Rate (bytes/s)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 6 + }, + "id": 60, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (server) (minio_node_replication_current_active_workers{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{server}}", + "refId": "A" + } + ], + "title": "Active Workers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 6 + }, + "id": 61, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + 
"placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (server,endpoint) (minio_node_replication_current_link_latency_ms{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{server}}", + "refId": "A" + } + ], + "title": "Current Link Latency (millis)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Replication Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Replication Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 18, + "y": 6 + }, + "id": 62, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (server) (minio_node_replication_current_transfer_rate{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{server}}", + "refId": "A" + } + ], + "title": "Current Transfer Rate (bytes/s)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": 
"byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 12 + }, + "id": 63, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (server) (minio_node_replication_last_minute_queued_bytes{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{server}}", + "refId": "A" + } + ], + "title": "Last Minute Queued", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 12 + }, + "id": 64, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (server) (minio_node_replication_last_minute_queued_count{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{bucket}}", + "refId": "A" + } + ], + "title": "Last Minute Queued Objects", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 
1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "S3 Errors" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "S3 Requests" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "light-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 12 + }, + "id": 65, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (server,endpoint) (minio_node_replication_link_downtime_duration_seconds{job=\"$scrape_jobs\"})", + "interval": "1m", + "intervalFactor": 2, + "legendFormat": "{{server,endpoint}}", + "refId": "A" + } + ], + "title": "Link Downtime Duration (sec)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 18, + "y": 12 + }, + "id": 66, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (server,endpoint) (minio_node_replication_link_offline_duration_seconds{job=\"$scrape_jobs\"})", + "interval": "1m", + "legendFormat": "{{server,endpoint}}", + "refId": "A" + } + ], + "title": "Link Offline Duration (sec)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + 
"legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 18 + }, + "id": 67, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "minio_node_replication_max_active_workers{job=\"$scrape_jobs\"}", + "interval": "1m", + "legendFormat": "{{server}}", + "refId": "A" + } + ], + "title": "Max Active Workers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 18 + }, + "id": 68, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (server,endpoibt) (minio_node_replication_max_link_latency_ms{job=\"$scrape_jobs\"})", + "interval": "1m", + "legendFormat": "{{server,endpoint}}", + "refId": "A" + } + ], + "title": "Max Link Latency (millis)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + 
"mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 18 + }, + "id": 70, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "minio_node_replication_max_queued_bytes{job=\"$scrape_jobs\"}", + "interval": "1m", + "legendFormat": "{{server}}", + "refId": "A" + } + ], + "title": "Max Queued Size", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 18, + "y": 18 + }, + "id": 71, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "minio_node_replication_max_queued_count{job=\"$scrape_jobs\"}", + "interval": "1m", + "legendFormat": "{{server}}", + "refId": "A" + } + ], + "title": "Max Queued Objects", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 24 + }, + "id": 72, + "options": { + "legend": { + "calcs": [], + 
"displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "minio_node_replication_max_transfer_rate{job=\"$scrape_jobs\"}", + "interval": "1m", + "legendFormat": "{{server}}", + "refId": "A" + } + ], + "title": "Max Transfer Rate (per sec)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 24 + }, + "id": 73, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "minio_node_replication_recent_backlog_count{job=\"$scrape_jobs\"}", + "interval": "1m", + "legendFormat": "{{server}}", + "refId": "A" + } + ], + "title": "Backlog (last 5 mins)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 24 + }, + "id": 74, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "sum by (server,endpoint) 
(minio_node_replication_link_online{job=\"$scrape_jobs\"})", + "interval": "1m", + "legendFormat": "{{endpoint}}", + "refId": "A" + } + ], + "title": "Link Online/Offline", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 18, + "y": 24 + }, + "id": 75, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "10.4.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "exemplar": true, + "expr": "minio_node_replication_link_offline_duration_seconds{job=\"$scrape_jobs\"}", + "interval": "1m", + "legendFormat": "{{endpoint}}", + "refId": "A" + } + ], + "title": "Replication Link Offline Duration (sec)", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 39, + "tags": [ + "minio" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "label": "Data source", + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "current": {}, + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "definition": "label_values(job)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "scrape_jobs", + "options": [], + "query": { + "query": "label_values(job)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "MinIO Node Replication Dashboard", + "uid": "gmTJnqnnk3", + "version": 1, + "weekStart": "" +} \ No newline at end of file diff --git a/docs/metrics/prometheus/grafana/replication/minio-replication.json b/docs/metrics/prometheus/grafana/replication/minio-replication.json deleted file mode 100644 index b830bd4812d2c..0000000000000 --- a/docs/metrics/prometheus/grafana/replication/minio-replication.json +++ /dev/null @@ -1,2802 +0,0 @@ -{ - "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__elements": {}, - "__requires": [ - { - 
"type": "panel", - "id": "bargauge", - "name": "Bar gauge", - "version": "" - }, - { - "type": "panel", - "id": "gauge", - "name": "Gauge", - "version": "" - }, - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "10.0.2" - }, - { - "type": "panel", - "id": "graph", - "name": "Graph (old)", - "version": "" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - }, - { - "type": "panel", - "id": "stat", - "name": "Stat", - "version": "" - }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "datasource", - "uid": "grafana" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "description": "MinIO Grafana Dashboard - https://min.io/", - "editable": true, - "fiscalYearStartMonth": 0, - "gnetId": 15306, - "graphTooltip": 0, - "id": null, - "links": [ - { - "icon": "external link", - "includeVars": true, - "keepTime": true, - "tags": [ - "minio" - ], - "type": "dashboards" - } - ], - "liveNow": false, - "panels": [ - { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 0, - "y": 0 - }, - "hiddenSeries": false, - "id": 55, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (server) (minio_cluster_replication_average_active_workers{job=\"$scrape_jobs\"})", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{server}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Avg. 
Active Workers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 6, - "y": 0 - }, - "hiddenSeries": false, - "id": 56, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (server, endpoint) (minio_cluster_replication_average_link_latency_ms{job=\"$scrape_jobs\"})", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{server,endpoint}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Avg. Link Latency (millis)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 12, - "y": 0 - }, - "hiddenSeries": false, - "id": 57, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (server) (minio_cluster_replication_average_queued_bytes{job=\"$scrape_jobs\"})", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{server}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Avg. 
Queued Size", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 18, - "y": 0 - }, - "hiddenSeries": false, - "id": 58, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (server) (minio_cluster_replication_average_queued_count{job=\"$scrape_jobs\"})", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{server}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Avg. Queued Objects", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 0, - "y": 6 - }, - "hiddenSeries": false, - "id": 59, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (server) (minio_cluster_replication_average_transfer_rate{job=\"$scrape_jobs\"})", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{server}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Avg. 
Transfer Rate (bytes/s)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 6, - "y": 6 - }, - "hiddenSeries": false, - "id": 60, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (server) (minio_cluster_replication_current_active_workers{job=\"$scrape_jobs\"})", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{server}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Active Workers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 12, - "y": 6 - }, - "hiddenSeries": false, - "id": 61, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (server,endpoint) (minio_cluster_replication_current_link_latency_ms{job=\"$scrape_jobs\"})", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{server}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Current Link Latency (millis)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": 
"short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 18, - "y": 6 - }, - "hiddenSeries": false, - "id": 62, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (server) (minio_cluster_replication_current_transfer_rate{job=\"$scrape_jobs\"})", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{server}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Current Transfer Rate (bytes/s)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 0, - "y": 12 - }, - "hiddenSeries": false, - "id": 63, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (server) (minio_cluster_replication_last_minute_queued_bytes{job=\"$scrape_jobs\"})", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{server}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Last Minute Queued", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "bytes", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - 
"gridPos": { - "h": 6, - "w": 6, - "x": 6, - "y": 12 - }, - "hiddenSeries": false, - "id": 64, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (server) (minio_cluster_replication_last_minute_queued_count{job=\"$scrape_jobs\"})", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{bucket}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Last Minute Queued Objects", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "S3 Errors": "light-red", - "S3 Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 12, - "y": 12 - }, - "hiddenSeries": false, - "id": 65, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "sum by (server,endpoint) (minio_cluster_replication_link_downtime_duration_seconds{job=\"$scrape_jobs\"})", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{server,endpoint}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Link Downtime Duration (sec)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "none", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "Prometheus" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 18, - "y": 12 - }, - "hiddenSeries": false, - "id": 66, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - 
"points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "Prometheus" - }, - "exemplar": true, - "expr": "sum by (server,endpoint) (minio_cluster_replication_link_offline_duration_seconds{job=\"$scrape_jobs\"})", - "interval": "1m", - "legendFormat": "{{server,endpoint}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Link Offline Duration (sec)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - }, - { - "$$hashKey": "object:213", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "Prometheus" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 0, - "y": 18 - }, - "hiddenSeries": false, - "id": 67, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "Prometheus" - }, - "exemplar": true, - "expr": "minio_cluster_replication_max_active_workers{job=\"$scrape_jobs\"}", - "interval": "1m", - "legendFormat": "{{server}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Max Active Workers", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - }, - { - "$$hashKey": "object:213", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "Prometheus" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 6, - "y": 18 - }, - "hiddenSeries": false, - "id": 68, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "Prometheus" - }, - "exemplar": true, - "expr": "sum by (server,endpoibt) (minio_cluster_replication_max_link_latency_ms{job=\"$scrape_jobs\"})", - "interval": "1m", - "legendFormat": "{{server,endpoint}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Max Link Latency (millis)", - "tooltip": { - "shared": true, - "sort": 
0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - }, - { - "$$hashKey": "object:213", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "Prometheus" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 12, - "y": 18 - }, - "hiddenSeries": false, - "id": 70, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "Prometheus" - }, - "exemplar": true, - "expr": "minio_cluster_replication_max_queued_bytes{job=\"$scrape_jobs\"}", - "interval": "1m", - "legendFormat": "{{server}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Max Queued Size", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "bytes", - "logBase": 1, - "min": "0", - "show": true - }, - { - "$$hashKey": "object:213", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "Prometheus" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 18, - "y": 18 - }, - "hiddenSeries": false, - "id": 71, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "Prometheus" - }, - "exemplar": true, - "expr": "minio_cluster_replication_max_queued_count{job=\"$scrape_jobs\"}", - "interval": "1m", - "legendFormat": "{{server}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Max Queued Objects", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - }, - { - "$$hashKey": "object:213", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "Prometheus" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 0, - "y": 24 - }, - 
"hiddenSeries": false, - "id": 72, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "Prometheus" - }, - "exemplar": true, - "expr": "minio_cluster_replication_max_transfer_rate{job=\"$scrape_jobs\"}", - "interval": "1m", - "legendFormat": "{{server}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Max Transfer Rate (per sec)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "bytes", - "logBase": 1, - "min": "0", - "show": true - }, - { - "$$hashKey": "object:213", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "Prometheus" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 6, - "y": 24 - }, - "hiddenSeries": false, - "id": 73, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "Prometheus" - }, - "exemplar": true, - "expr": "minio_cluster_replication_recent_backlog_count{job=\"$scrape_jobs\"}", - "interval": "1m", - "legendFormat": "{{server}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Backlog (last 5 mins)", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - }, - { - "$$hashKey": "object:213", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "Prometheus" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 12, - "y": 24 - }, - "hiddenSeries": false, - "id": 74, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "Prometheus" - }, - "exemplar": true, - 
"expr": "sum by (server,endpoint) (minio_cluster_replication_link_online{job=\"$scrape_jobs\"})", - "interval": "1m", - "legendFormat": "{{server,endpoint}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Link Online/Offline", - "tooltip": { - "shared": true, - "sort": 0, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:212", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - }, - { - "$$hashKey": "object:213", - "format": "none", - "logBase": 1, - "min": "0", - "show": true - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 18, - "y": 24 - }, - "hiddenSeries": false, - "id": 75, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_cluster_replication_proxied_head_requests_total{job=\"$scrape_jobs\"}", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Total Proxied Head Requests", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 0, - "y": 30 - }, - "hiddenSeries": false, - "id": 76, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_cluster_replication_proxied_head_requests_failures{job=\"$scrape_jobs\"}", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Total Failed Proxied Head Requests", - "tooltip": { - "shared": true, - "sort": 2, - 
"value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 6, - "y": 30 - }, - "hiddenSeries": false, - "id": 77, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_cluster_replication_proxied_put_tagging_requests_total{job=\"$scrape_jobs\"}", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Total Proxied Put Tag Requests", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 12, - "y": 30 - }, - "hiddenSeries": false, - "id": 78, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_cluster_replication_proxied_put_tagging_requests_failures{job=\"$scrape_jobs\"}", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Total Failed Proxied Put Tag Requests", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], 
- "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 18, - "y": 30 - }, - "hiddenSeries": false, - "id": 79, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_cluster_replication_proxied_get_tagging_requests_total{job=\"$scrape_jobs\"}", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Total Proxied Get Tag Requests", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 0, - "y": 36 - }, - "hiddenSeries": false, - "id": 80, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_cluster_replication_proxied_get_tagging_requests_failures{job=\"$scrape_jobs\"}", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Total Failed Proxied Get Tag Requests", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - 
"gridPos": { - "h": 6, - "w": 6, - "x": 6, - "y": 36 - }, - "hiddenSeries": false, - "id": 81, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_cluster_replication_proxied_delete_tagging_requests_total{job=\"$scrape_jobs\"}", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Total Proxied Delete Tag Requests", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 12, - "y": 36 - }, - "hiddenSeries": false, - "id": 82, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_cluster_replication_proxied_delete_tagging_requests_failures{job=\"$scrape_jobs\"}", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Total Failed Proxied Delete Tag Requests", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 6, - "x": 18, - "y": 36 - }, - "hiddenSeries": false, - "id": 83, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - 
"options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_cluster_replication_proxied_get_requests_total{job=\"$scrape_jobs\"}", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Total Proxied Get Requests", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - }, - { - "aliasColors": { - "Replication Errors": "light-red", - "Replication Requests": "light-green" - }, - "bars": false, - "dashLength": 10, - "dashes": false, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fill": 1, - "fillGradient": 0, - "gridPos": { - "h": 6, - "w": 12, - "x": 0, - "y": 42 - }, - "hiddenSeries": false, - "id": 84, - "legend": { - "avg": false, - "current": false, - "max": false, - "min": false, - "show": true, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 1, - "nullPointMode": "null", - "options": { - "alertThreshold": true - }, - "percentage": false, - "pluginVersion": "10.0.2", - "pointradius": 2, - "points": false, - "renderer": "flot", - "seriesOverrides": [], - "spaceLength": 10, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "exemplar": true, - "expr": "minio_cluster_replication_proxied_get_requests_failures{job=\"$scrape_jobs\"}", - "interval": "1m", - "intervalFactor": 2, - "legendFormat": "{{instance}}", - "refId": "A" - } - ], - "thresholds": [], - "timeRegions": [], - "title": "Total Failed Proxied Get Requests", - "tooltip": { - "shared": true, - "sort": 2, - "value_type": "individual" - }, - "type": "graph", - "xaxis": { - "mode": "time", - "show": true, - "values": [] - }, - "yaxes": [ - { - "$$hashKey": "object:331", - "format": "short", - "logBase": 1, - "show": true - }, - { - "$$hashKey": "object:332", - "format": "short", - "logBase": 1, - "show": false - } - ], - "yaxis": { - "align": false - } - } - ], - "refresh": "", - "schemaVersion": 38, - "style": "dark", - "tags": [ - "minio" - ], - "templating": { - "list": [ - { - "current": {}, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "definition": "label_values(job)", - "hide": 0, - "includeAll": false, - "multi": false, - "name": "scrape_jobs", - "options": [], - "query": { - "query": "label_values(job)", - "refId": "StandardVariableQuery" - }, - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "type": "query" - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "10s", - "30s", - "1m", - "5m", - "15m", - "30m", - "1h", - "2h", - "1d" - ], - "time_options": [ - "5m", - "15m", - "1h", - "6h", - "12h", - "24h", - "2d", - "7d", - "30d" - ] - }, - "timezone": "", - "title": "MinIO Replication Dashboard", - "uid": "TgmJnqnnk3", - "version": 3, 
- "weekStart": "" -} \ No newline at end of file diff --git a/docs/metrics/prometheus/list.md b/docs/metrics/prometheus/list.md index 74edba6a9bb9d..7050006482335 100644 --- a/docs/metrics/prometheus/list.md +++ b/docs/metrics/prometheus/list.md @@ -34,7 +34,6 @@ For deployments behind a load balancer, use the load balancer hostname instead o | `minio_cluster_usage_total_bytes` | Total cluster usage in bytes | | `minio_cluster_usage_version_total` | Total number of versions (includes delete marker) in a cluster | | `minio_cluster_usage_deletemarker_total` | Total number of delete markers in a cluster | -| `minio_cluster_usage_total_bytes` | Total cluster usage in bytes | | `minio_cluster_bucket_total` | Total number of buckets in the cluster | ## Cluster Drive Metrics @@ -79,34 +78,15 @@ For deployments behind a load balancer, use the load balancer hostname instead o ## Cluster Replication Metrics -Metrics marked as ``Site Replication Only`` only populate on deployments with [Site Replication](https://min.io/docs/minio/linux/operations/install-deploy-manage/multi-site-replication.html) configurations. -For deployments with [bucket](https://min.io/docs/minio/linux/administration/bucket-replication.html) or [batch](https://min.io/docs/minio/linux/administration/batch-framework.html#replicate) configurations, these metrics populate instead under the [Bucket Metrics](#bucket-metrics) endpoint. +Metrics marked as ``Site Replication Only`` only populate on deployments with [Site Replication](https://docs.min.io/community/minio-object-store/operations/install-deploy-manage/multi-site-replication.html) configurations. +For deployments with [bucket](https://docs.min.io/community/minio-object-store/administration/bucket-replication.html) or [batch](https://docs.min.io/community/minio-object-store/administration/batch-framework.html#replicate) configurations, these metrics populate instead under the [Bucket Metrics](#bucket-metrics) endpoint. | Name | Description |:-----------------------------------------------------------|:---------------------------------------------------------------------------------------------------------| -| `minio_cluster_replication_current_active_workers` | Total number of active replication workers | -| `minio_cluster_replication_average_active_workers` | Average number of active replication workers | -| `minio_cluster_replication_max_active_workers` | Maximum number of active replication workers seen since server start | -| `minio_cluster_replication_link_online` | Reports whether the replication link is online (1) or offline (0). 
| -| `minio_cluster_replication_link_offline_duration_seconds` | Total duration of replication link being offline in seconds since last offline event | -| `minio_cluster_replication_link_downtime_duration_seconds` | Total downtime of replication link in seconds since server start | -| `minio_cluster_replication_average_link_latency_ms` | Average replication link latency in milliseconds | -| `minio_cluster_replication_max_link_latency_ms` | Maximum replication link latency in milliseconds seen since server start | -| `minio_cluster_replication_current_link_latency_ms` | Current replication link latency in milliseconds | -| `minio_cluster_replication_current_transfer_rate` | Current replication transfer rate in bytes/sec | -| `minio_cluster_replication_average_transfer_rate` | Average replication transfer rate in bytes/sec | -| `minio_cluster_replication_max_transfer_rate` | Maximum replication transfer rate in bytes/sec seen since server start | -| `minio_cluster_replication_last_minute_queued_count` | Total number of objects queued for replication in the last full minute | -| `minio_cluster_replication_last_minute_queued_bytes` | Total number of bytes queued for replication in the last full minute | -| `minio_cluster_replication_average_queued_count` | Average number of objects queued for replication since server start | -| `minio_cluster_replication_average_queued_bytes` | Average number of bytes queued for replication since server start | -| `minio_cluster_replication_max_queued_bytes` | Maximum number of bytes queued for replication seen since server start | -| `minio_cluster_replication_max_queued_count` | Maximum number of objects queued for replication seen since server start | -| `minio_cluster_replication_recent_backlog_count` | Total number of objects seen in replication backlog in the last 5 minutes | -| `minio_cluster_replication_last_minute_failed_bytes` | Total number of bytes failed at least once to replicate in the last full minute. | -| `minio_cluster_replication_last_minute_failed_count` | Total number of objects which failed replication in the last full minute. | | `minio_cluster_replication_last_hour_failed_bytes` | (_Site Replication Only_) Total number of bytes failed at least once to replicate in the last full hour. | | `minio_cluster_replication_last_hour_failed_count` | (_Site Replication Only_) Total number of objects which failed replication in the last full hour. | +| `minio_cluster_replication_last_minute_failed_bytes` | Total number of bytes failed at least once to replicate in the last full minute. | +| `minio_cluster_replication_last_minute_failed_count` | Total number of objects which failed replication in the last full minute. | | `minio_cluster_replication_total_failed_bytes` | (_Site Replication Only_) Total number of bytes failed at least once to replicate since server start. | | `minio_cluster_replication_total_failed_count` | (_Site Replication Only_) Total number of objects which failed replication since server start. | | `minio_cluster_replication_received_bytes` | (_Site Replication Only_) Total number of bytes replicated to this cluster from another source cluster. 
| @@ -126,6 +106,33 @@ For deployments with [bucket](https://min.io/docs/minio/linux/administration/buc | `minio_cluster_replication_proxied_put_tagging_requests_failures` | (_Site Replication Only_)Number of failures proxying PUT tagging requests to replication target | +## Node Replication Metrics + +Metrics marked as ``Site Replication Only`` only populate on deployments with [Site Replication](https://docs.min.io/community/minio-object-store/operations/install-deploy-manage/multi-site-replication.html) configurations. +For deployments with [bucket](https://docs.min.io/community/minio-object-store/administration/bucket-replication.html) or [batch](https://docs.min.io/community/minio-object-store/administration/batch-framework.html#replicate) configurations, these metrics populate instead under the [Bucket Metrics](#bucket-metrics) endpoint. + +| Name | Description +|:-----------------------------------------------------------|:---------------------------------------------------------------------------------------------------------| +| `minio_node_replication_current_active_workers` | Total number of active replication workers | +| `minio_node_replication_average_active_workers` | Average number of active replication workers | +| `minio_node_replication_max_active_workers` | Maximum number of active replication workers seen since server start | +| `minio_node_replication_link_online` | Reports whether the replication link is online (1) or offline (0). | +| `minio_node_replication_link_offline_duration_seconds` | Total duration of replication link being offline in seconds since last offline event | +| `minio_node_replication_link_downtime_duration_seconds` | Total downtime of replication link in seconds since server start | +| `minio_node_replication_average_link_latency_ms` | Average replication link latency in milliseconds | +| `minio_node_replication_max_link_latency_ms` | Maximum replication link latency in milliseconds seen since server start | +| `minio_node_replication_current_link_latency_ms` | Current replication link latency in milliseconds | +| `minio_node_replication_current_transfer_rate` | Current replication transfer rate in bytes/sec | +| `minio_node_replication_average_transfer_rate` | Average replication transfer rate in bytes/sec | +| `minio_node_replication_max_transfer_rate` | Maximum replication transfer rate in bytes/sec seen since server start | +| `minio_node_replication_last_minute_queued_count` | Total number of objects queued for replication in the last full minute | +| `minio_node_replication_last_minute_queued_bytes` | Total number of bytes queued for replication in the last full minute | +| `minio_node_replication_average_queued_count` | Average number of objects queued for replication since server start | +| `minio_node_replication_average_queued_bytes` | Average number of bytes queued for replication since server start | +| `minio_node_replication_max_queued_bytes` | Maximum number of bytes queued for replication seen since server start | +| `minio_node_replication_max_queued_count` | Maximum number of objects queued for replication seen since server start | +| `minio_node_replication_recent_backlog_count` | Total number of objects seen in replication backlog in the last 5 minutes | + ## Healing Metrics | Name | Description | @@ -149,9 +156,9 @@ For deployments with [bucket](https://min.io/docs/minio/linux/administration/buc | Name | Description | 
|:-----------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------| -| `minio_notify_current_send_in_progress` | Number of concurrent async Send calls active to all targets (deprecated, please use 'minio_notify_target_current_send_in_progress' instead) | -| `minio_notify_events_errors_total` | Events that were failed to be sent to the targets (deprecated, please use 'minio_notify_target_failed_events' instead) | -| `minio_notify_events_sent_total` | Total number of events sent to the targets (deprecated, please use 'minio_notify_target_total_events' instead) | +| `minio_notify_current_send_in_progress` | Number of concurrent async Send calls active to all targets (deprecated, please use `minio_notify_target_current_send_in_progress` instead) | +| `minio_notify_events_errors_total` | Events that were failed to be sent to the targets (deprecated, please use `minio_notify_target_failed_events` instead) | +| `minio_notify_events_sent_total` | Total number of events sent to the targets (deprecated, please use `minio_notify_target_total_events` instead) | | `minio_notify_events_skipped_total` | Events that were skipped to be sent to the targets due to the in-memory queue being full | | `minio_notify_target_current_send_in_progress` | Number of concurrent async Send calls active to the target | | `minio_notify_target_queue_length` | Number of events currently staged in the queue_dir configured for the target. | @@ -186,19 +193,20 @@ For deployments with [bucket](https://min.io/docs/minio/linux/administration/buc ## Drive Metrics -| Name | Description | -|:---------------------------------------|:------------------------------------------------------------------------------------| -| `minio_node_drive_free_bytes` | Total storage available on a drive. | -| `minio_node_drive_free_inodes` | Total free inodes. | -| `minio_node_drive_latency_us` | Average last minute latency in µs for drive API storage operations. | -| `minio_node_drive_offline_total` | Total drives offline in this node. | -| `minio_node_drive_online_total` | Total drives online in this node. | -| `minio_node_drive_total` | Total drives in this node. | -| `minio_node_drive_total_bytes` | Total storage on a drive. | -| `minio_node_drive_used_bytes` | Total storage used on a drive. | -| `minio_node_drive_errors_timeout` | Total number of drive timeout errors since server start | -| `minio_node_drive_errors_availability` | Total number of drive I/O errors, permission denied and timeouts since server start | -| `minio_node_drive_io_waiting` | Total number I/O operations waiting on drive | +| Name | Description | +|:---------------------------------------|:--------------------------------------------------------------------| +| `minio_node_drive_free_bytes` | Total storage available on a drive. | +| `minio_node_drive_free_inodes` | Total free inodes. | +| `minio_node_drive_latency_us` | Average last minute latency in µs for drive API storage operations. | +| `minio_node_drive_offline_total` | Total drives offline in this node. | +| `minio_node_drive_online_total` | Total drives online in this node. | +| `minio_node_drive_total` | Total drives in this node. | +| `minio_node_drive_total_bytes` | Total storage on a drive. | +| `minio_node_drive_used_bytes` | Total storage used on a drive. 
| +| `minio_node_drive_errors_timeout` | Total number of drive timeout errors since server start | +| `minio_node_drive_errors_ioerror` | Total number of drive I/O errors since server start | +| `minio_node_drive_errors_availability` | Total number of drive I/O errors, timeouts since server start | +| `minio_node_drive_io_waiting` | Total number I/O operations waiting on drive | ## Identity and Access Management (IAM) Metrics @@ -245,7 +253,7 @@ For deployments with [bucket](https://min.io/docs/minio/linux/administration/buc | `minio_node_io_read_bytes` | Total bytes read by the process from the underlying storage system, /proc/[pid]/io read_bytes. | | `minio_node_io_wchar_bytes` | Total bytes written by the process to the underlying storage system including page cache, /proc/[pid]/io wchar. | | `minio_node_io_write_bytes` | Total bytes written by the process to the underlying storage system, /proc/[pid]/io write_bytes. | -| `minio_node_process_cpu_total_seconds` | Total user and system CPU time spent in seconds. | +| `minio_node_process_cpu_total_seconds` | Total user and system CPU time spent in seconds by the process. | | `minio_node_process_resident_memory_bytes` | Resident memory size in bytes. | | `minio_node_process_virtual_memory_bytes` | Virtual memory size in bytes. | | `minio_node_process_starttime_seconds` | Start time for MinIO process per node, time in seconds since Unix epoc. | @@ -288,8 +296,8 @@ For deployments behind a load balancer, use the load balancer hostname instead o ## Replication Metrics -These metrics only populate on deployments with [Bucket Replication](https://min.io/docs/minio/linux/administration/bucket-replication.html) or [Batch Replication](https://min.io/docs/minio/linux/administration/batch-framework.html) configurations. -For deployments with [Site Replication](https://min.io/docs/minio/linux/operations/install-deploy-manage/multi-site-replication.html) configured, select metrics populate under the [Cluster Metrics](#cluster-metrics) endpoint. +These metrics only populate on deployments with [Bucket Replication](https://docs.min.io/community/minio-object-store/administration/bucket-replication.html) or [Batch Replication](https://docs.min.io/community/minio-object-store/administration/batch-framework.html) configurations. +For deployments with [Site Replication](https://docs.min.io/community/minio-object-store/operations/install-deploy-manage/multi-site-replication.html) configured, select metrics populate under the [Cluster Metrics](#cluster-metrics) endpoint. | Name | Description | |:----------------------------------------------------|:---------------------------------------------------------------------------------| diff --git a/docs/minio-limits.md b/docs/minio-limits.md index 47f27bd815d9f..e7295d195f907 100644 --- a/docs/minio-limits.md +++ b/docs/minio-limits.md @@ -36,20 +36,20 @@ For optimal production setup MinIO recommends Linux kernel version 4.x and later > NOTE: While MinIO does not implement an upper boundary on buckets, your cluster's hardware has natural limits that depend on the workload and its scaling patterns. We strongly recommend [MinIO SUBNET](https://min.io/pricing) for architecture and sizing guidance for your production use case. -## List of Amazon S3 API's not supported on MinIO +## List of Amazon S3 APIs not supported on MinIO We found the following APIs to be redundant or less useful outside of AWS S3. 
If you have a different view on any of the APIs we missed, please consider opening a [GitHub issue](https://github.com/minio/minio/issues) with relevant details on why MinIO must implement them. -### List of Amazon S3 Bucket API's not supported on MinIO +### List of Amazon S3 Bucket APIs not supported on MinIO -- BucketACL (Use [bucket policies](https://min.io/docs/minio/linux/administration/identity-access-management/policy-based-access-control.html) instead) +- BucketACL (Use [bucket policies](https://docs.min.io/community/minio-object-store/administration/identity-access-management/policy-based-access-control.html) instead) - BucketCORS (CORS enabled by default on all buckets for all HTTP verbs, you can optionally restrict the CORS domains) - BucketWebsite (Use [`caddy`](https://github.com/caddyserver/caddy) or [`nginx`](https://www.nginx.com/resources/wiki/)) -- BucketAnalytics, BucketMetrics, BucketLogging (Use [bucket notification](https://min.io/docs/minio/linux/administration/monitoring/bucket-notifications.html) APIs) +- BucketAnalytics, BucketMetrics, BucketLogging (Use [bucket notification](https://docs.min.io/community/minio-object-store/administration/monitoring/bucket-notifications.html) APIs) -### List of Amazon S3 Object API's not supported on MinIO +### List of Amazon S3 Object APIs not supported on MinIO -- ObjectACL (Use [bucket policies](https://min.io/docs/minio/linux/administration/identity-access-management/policy-based-access-control.html) instead) +- ObjectACL (Use [bucket policies](https://docs.min.io/community/minio-object-store/administration/identity-access-management/policy-based-access-control.html) instead) ## Object name restrictions on MinIO diff --git a/docs/multi-tenancy/README.md b/docs/multi-tenancy/README.md index 9af1e15d8bb0b..055535e7ab050 100644 --- a/docs/multi-tenancy/README.md +++ b/docs/multi-tenancy/README.md @@ -64,4 +64,4 @@ minio server --address :9003 http://192.168.10.1{1...4}/data/tenant3 ## Cloud Scale Deployment -A container orchestration platform (e.g. Kubernetes) is recommended for large-scale, multi-tenant MinIO deployments. See the [MinIO Deployment Quickstart Guide](https://min.io/docs/minio/container/index.html#quickstart-for-linux) to get started with MinIO on orchestration platforms. +A container orchestration platform (e.g. Kubernetes) is recommended for large-scale, multi-tenant MinIO deployments. See the [MinIO Deployment Quickstart Guide](https://docs.min.io/community/minio-object-store/operations/deployments/kubernetes.html) to get started with MinIO on orchestration platforms. diff --git a/docs/multi-user/README.md b/docs/multi-user/README.md index b36e1386d239a..3dbd6ce1438d2 100644 --- a/docs/multi-user/README.md +++ b/docs/multi-user/README.md @@ -8,13 +8,13 @@ In this document we will explain in detail on how to configure multiple users. ### 1. Prerequisites -- Install mc - [MinIO Client Quickstart Guide](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart) -- Install MinIO - [MinIO Quickstart Guide](https://min.io/docs/minio/linux/index.html#quickstart-for-linux) +- Install mc - [MinIO Client Quickstart Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart) +- Install MinIO - [MinIO Quickstart Guide](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-on-redhat-linux.html) - Configure etcd - [Etcd V3 Quickstart Guide](https://github.com/minio/minio/blob/master/docs/sts/etcd.md) ### 2. 
Create a new user with canned policy -Use [`mc admin policy`](https://min.io/docs/minio/linux/reference/minio-mc-admin/mc-admin-policy.html) to create canned policies. Server provides a default set of canned policies namely `writeonly`, `readonly` and `readwrite` *(these policies apply to all resources on the server)*. These can be overridden by custom policies using `mc admin policy` command. +Use [`mc admin policy`](https://docs.min.io/community/minio-object-store/reference/minio-mc-admin/mc-admin-policy.html) to create canned policies. Server provides a default set of canned policies namely `writeonly`, `readonly` and `readwrite` *(these policies apply to all resources on the server)*. These can be overridden by custom policies using `mc admin policy` command. Create new canned policy file `getonly.json`. This policy enables users to download all objects under `my-bucketname`. @@ -272,7 +272,7 @@ Following example shows LDAP users full programmatic access to a LDAP user-speci ## Explore Further -- [MinIO Client Complete Guide](https://min.io/docs/minio/linux/reference/minio-mc.html) -- [MinIO STS Quickstart Guide](https://min.io/docs/minio/linux/developers/security-token-service.html) -- [MinIO Admin Complete Guide](https://min.io/docs/minio/linux/reference/minio-mc-admin.html) -- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html) +- [MinIO Client Complete Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc.html) +- [MinIO STS Quickstart Guide](https://docs.min.io/community/minio-object-store/developers/security-token-service.html) +- [MinIO Admin Complete Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc-admin.html) +- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html) diff --git a/docs/multi-user/admin/README.md b/docs/multi-user/admin/README.md index ae9084e4a1490..ff43cd7b1efde 100644 --- a/docs/multi-user/admin/README.md +++ b/docs/multi-user/admin/README.md @@ -8,12 +8,12 @@ In this document we will explain in detail on how to configure admin users. ### 1. Prerequisites -- Install mc - [MinIO Client Quickstart Guide](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart) -- Install MinIO - [MinIO Quickstart Guide](https://min.io/docs/minio/linux/index.html#quickstart-for-linux) +- Install mc - [MinIO Client Quickstart Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart) +- Install MinIO - [MinIO Quickstart Guide](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-on-redhat-linux.html) ### 2. Create a new admin user with CreateUser, DeleteUser and ConfigUpdate permissions -Use [`mc admin policy`](https://min.io/docs/minio/linux/reference/minio-mc-admin/mc-admin-policy.html#command-mc.admin.policy) to create custom admin policies. +Use [`mc admin policy`](https://docs.min.io/community/minio-object-store/reference/minio-mc-admin/mc-admin-policy.html#command-mc.admin.policy) to create custom admin policies. Create new canned policy file `adminManageUser.json`. This policy enables admin user to manage other users. @@ -162,11 +162,11 @@ mc admin policy attach myminio-admin1 user1policy --user=user1 ### 5. Using an external IDP for admin users Admin users can also be externally managed by an IDP by configuring admin policy with -special permissions listed above. 
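As a compact recap of the create-and-attach flow these two guides walk through, here is a minimal sketch. It is illustrative only: the alias (`myminio`), user name, and secret are placeholders, and the policy JSON is an assumption since the guides' actual `adminManageUser.json` contents are outside this hunk; verify the admin action names against your MinIO release before relying on it.

```sh
# Illustrative canned-policy workflow with mc (all names are placeholders).
cat > adminManageUser.json <<'EOF'
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["admin:CreateUser", "admin:DeleteUser", "admin:ConfigUpdate"]
    }
  ]
}
EOF

mc admin policy create myminio userManager adminManageUser.json   # register the policy
mc admin user add myminio admin1 admin1secret123                  # create a user
mc admin policy attach myminio userManager --user=admin1          # attach the policy
```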
Follow [MinIO STS Quickstart Guide](https://min.io/docs/minio/linux/developers/security-token-service.html) to manage users with an IDP. +special permissions listed above. Follow [MinIO STS Quickstart Guide](https://docs.min.io/community/minio-object-store/developers/security-token-service.html) to manage users with an IDP. ## Explore Further -- [MinIO Client Complete Guide](https://min.io/docs/minio/linux/reference/minio-mc.html) -- [MinIO STS Quickstart Guide](https://min.io/docs/minio/linux/developers/security-token-service.html) -- [MinIO Admin Complete Guide](https://min.io/docs/minio/linux/reference/minio-mc-admin.html) -- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html) +- [MinIO Client Complete Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc.html) +- [MinIO STS Quickstart Guide](https://docs.min.io/community/minio-object-store/developers/security-token-service.html) +- [MinIO Admin Complete Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc-admin.html) +- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html) diff --git a/docs/orchestration/README.md b/docs/orchestration/README.md index 6b6d5cf3cbff9..327f83b9d98a4 100644 --- a/docs/orchestration/README.md +++ b/docs/orchestration/README.md @@ -4,7 +4,7 @@ MinIO is a cloud-native application designed to scale in a sustainable manner in | Orchestration platforms | |:---------------------------------------------------------------------------------------------------| -| [`Kubernetes`](https://min.io/docs/minio/kubernetes/upstream/index.html#quickstart-for-kubernetes) | +| [`Kubernetes`](https://docs.min.io/community/minio-object-store/operations/deployments/kubernetes.html) | ## Why is MinIO cloud-native? diff --git a/docs/orchestration/docker-compose/README.md b/docs/orchestration/docker-compose/README.md index 220db2cc51630..2e1965141bdba 100644 --- a/docs/orchestration/docker-compose/README.md +++ b/docs/orchestration/docker-compose/README.md @@ -11,7 +11,7 @@ With Compose, you use a Compose file to configure MinIO services. Then, using a ## 2. Run Distributed MinIO on Docker Compose -To deploy Distributed MinIO on Docker Compose, please download [docker-compose.yaml](https://github.com/minio/minio/blob/master/docs/orchestration/docker-compose/docker-compose.yaml?raw=true) and [nginx.conf](https://github.com/minio/minio/blob/master/docs/orchestration/docker-compose/nginx.conf?raw=true) to your current working directory. Note that Docker Compose pulls the MinIO Docker image, so there is no need to explicitly download MinIO binary. Then run one of the below commands +To deploy Distributed MinIO on Docker Compose, please download [docker-compose.yaml](https://github.com/minio/minio/blob/master/docs/orchestration/docker-compose/docker-compose.yaml?raw=true) and [nginx.conf](https://github.com/minio/minio/blob/master/docs/orchestration/docker-compose/nginx.conf?raw=true) to your current working directory. Note that Docker Compose pulls the MinIO Docker image, so there is no need to build MinIO from source when using Docker. For non-Docker deployments, MinIO community edition is now source-only and can be installed via `go install github.com/minio/minio@latest`. Then run one of the below commands ### GNU/Linux and macOS @@ -50,10 +50,10 @@ Distributed instances are now accessible on the host using the Minio CLI on port * Update the command section in each service. 
* Add a new MinIO server instance to the upstream directive in the Nginx configuration file. - Read more about distributed MinIO [here](https://min.io/docs/minio/container/operations/install-deploy-manage/deploy-minio-single-node-multi-drive.html). + Read more about distributed MinIO [here](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-as-a-container.html). ### Explore Further * [Overview of Docker Compose](https://docs.docker.com/compose/overview/) -* [MinIO Docker Quickstart Guide](https://min.io/docs/minio/container/index.html#quickstart-for-containers) -* [MinIO Erasure Code QuickStart Guide](https://min.io/docs/minio/container/operations/concepts/erasure-coding.html) +* [MinIO Docker Quickstart Guide](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-as-a-container.html) +* [MinIO Erasure Code QuickStart Guide](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html) diff --git a/docs/orchestration/docker-compose/docker-compose.yaml b/docs/orchestration/docker-compose/docker-compose.yaml index 97f6a904c648a..cfd7924a03656 100644 --- a/docs/orchestration/docker-compose/docker-compose.yaml +++ b/docs/orchestration/docker-compose/docker-compose.yaml @@ -2,7 +2,7 @@ version: '3.7' # Settings and configurations that are common for all containers x-minio-common: &minio-common - image: quay.io/minio/minio:RELEASE.2024-03-07T00-43-48Z + image: quay.io/minio/minio:RELEASE.2025-09-06T17-38-46Z command: server --console-address ":9001" http://minio{1...4}/data{1...2} expose: - "9000" diff --git a/docs/orchestration/kubernetes/README.md b/docs/orchestration/kubernetes/README.md index 7a46418e63e43..3ea3fb27afdca 100644 --- a/docs/orchestration/kubernetes/README.md +++ b/docs/orchestration/kubernetes/README.md @@ -16,6 +16,6 @@ MinIO server exposes un-authenticated liveness endpoints so Kubernetes can nativ ## Explore Further -- [MinIO Erasure Code QuickStart Guide](https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html) +- [MinIO Erasure Code QuickStart Guide](https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html) - [Kubernetes Documentation](https://kubernetes.io/docs/home/) - [Helm package manager for kubernetes](https://helm.sh/) diff --git a/docs/resiliency/docker-compose.yaml b/docs/resiliency/docker-compose.yaml new file mode 100644 index 0000000000000..842d766a144d2 --- /dev/null +++ b/docs/resiliency/docker-compose.yaml @@ -0,0 +1,125 @@ +# Settings and configurations that are common for all containers +x-minio-common: &minio-common + build: + context: ../../. + dockerfile: Dockerfile + command: server --console-address ":9001" http://minio{1...4}/data{1...8} + expose: + - "9000" + - "9001" + environment: + MINIO_CI_CD: 1 + healthcheck: + test: ["CMD", "mc", "ready", "local"] + interval: 5s + timeout: 5s + retries: 5 + +# starts 4 docker containers running minio server instances. +# using nginx reverse proxy, load balancing, you can access +# it through port 9000. 
+services: + minio1: + <<: *minio-common + hostname: minio1 + volumes: + - data1-1:/data1 + - data1-2:/data2 + - data1-3:/data3 + - data1-4:/data4 + - data1-5:/data5 + - data1-6:/data6 + - data1-7:/data7 + - data1-8:/data8 + + minio2: + <<: *minio-common + hostname: minio2 + volumes: + - data2-1:/data1 + - data2-2:/data2 + - data2-3:/data3 + - data2-4:/data4 + - data2-5:/data5 + - data2-6:/data6 + - data2-7:/data7 + - data2-8:/data8 + + minio3: + <<: *minio-common + hostname: minio3 + volumes: + - data3-1:/data1 + - data3-2:/data2 + - data3-3:/data3 + - data3-4:/data4 + - data3-5:/data5 + - data3-6:/data6 + - data3-7:/data7 + - data3-8:/data8 + + minio4: + <<: *minio-common + hostname: minio4 + volumes: + - data4-1:/data1 + - data4-2:/data2 + - data4-3:/data3 + - data4-4:/data4 + - data4-5:/data5 + - data4-6:/data6 + - data4-7:/data7 + - data4-8:/data8 + + nginx: + image: nginx:1.19.2-alpine + hostname: nginx + volumes: + - ./nginx.conf:/etc/nginx/nginx.conf:ro + ports: + - "9000:9000" + - "9001:9001" + depends_on: + - minio1 + - minio2 + - minio3 + - minio4 + +## By default this config uses default local driver, +## For custom volumes replace with volume driver configuration. +volumes: + data1-1: + data1-2: + data1-3: + data1-4: + data1-5: + data1-6: + data1-7: + data1-8: + + data2-1: + data2-2: + data2-3: + data2-4: + data2-5: + data2-6: + data2-7: + data2-8: + + data3-1: + data3-2: + data3-3: + data3-4: + data3-5: + data3-6: + data3-7: + data3-8: + + data4-1: + data4-2: + data4-3: + data4-4: + data4-5: + data4-6: + data4-7: + data4-8: diff --git a/docs/resiliency/nginx.conf b/docs/resiliency/nginx.conf new file mode 100644 index 0000000000000..cca82f6fe6f6d --- /dev/null +++ b/docs/resiliency/nginx.conf @@ -0,0 +1,106 @@ +user nginx; +worker_processes auto; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + +events { + worker_connections 4096; +} + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + sendfile on; + keepalive_timeout 65; + + # include /etc/nginx/conf.d/*.conf; + + upstream minio { + server minio1:9000; + server minio2:9000; + server minio3:9000; + server minio4:9000; + } + + upstream console { + ip_hash; + server minio1:9001; + server minio2:9001; + server minio3:9001; + server minio4:9001; + } + + server { + listen 9000; + listen [::]:9000; + server_name localhost; + + # To allow special characters in headers + ignore_invalid_headers off; + # Allow any size file to be uploaded. + # Set to a value such as 1000m; to restrict file size to a specific value + client_max_body_size 0; + # To disable buffering + proxy_buffering off; + proxy_request_buffering off; + + location / { + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + proxy_connect_timeout 300; + # Default is HTTP/1, keepalive is only enabled in HTTP/1.1 + proxy_http_version 1.1; + proxy_set_header Connection ""; + chunked_transfer_encoding off; + + proxy_pass http://minio; + } + } + + server { + listen 9001; + listen [::]:9001; + server_name localhost; + + # To allow special characters in headers + ignore_invalid_headers off; + # Allow any size file to be uploaded. 
+ # Set to a value such as 1000m; to restrict file size to a specific value + client_max_body_size 0; + # To disable buffering + proxy_buffering off; + proxy_request_buffering off; + + location / { + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-NginX-Proxy true; + + # This is necessary to pass the correct IP to be hashed + real_ip_header X-Real-IP; + + proxy_connect_timeout 300; + + # To support websocket + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + + chunked_transfer_encoding off; + + proxy_pass http://console; + } + } +} diff --git a/docs/resiliency/resiliency-initial-script.sh b/docs/resiliency/resiliency-initial-script.sh new file mode 100755 index 0000000000000..fcdca6fffa1fa --- /dev/null +++ b/docs/resiliency/resiliency-initial-script.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash +# This script will run inside ubuntu-pod that is located at default namespace in the cluster +# This script will not and should not be executed in the self hosted runner + +echo "script failed" >resiliency-initial.log # assume initial state + +echo "sleep to wait for MinIO Server to be ready prior mc commands" +# https://github.com/minio/mc/issues/3599 + +MINIO_SERVER_URL="http://127.0.0.1:9000" +ALIAS_NAME=myminio +BUCKET="test-bucket" +SRC_DIR="/tmp/data" +INLINED_DIR="/tmp/inlined" +DEST_DIR="/tmp/dest" + +TIMEOUT=10 +while true; do + if [[ ${TIMEOUT} -le 0 ]]; then + echo retry: timeout while running: mc alias set + exit 1 + fi + eval ./mc alias set "${ALIAS_NAME}" "${MINIO_SERVER_URL}" minioadmin minioadmin && break + TIMEOUT=$((TIMEOUT - 1)) + sleep 1 +done + +./mc ready "${ALIAS_NAME}" + +./mc mb "${ALIAS_NAME}"/"${BUCKET}" +rm -rf "${SRC_DIR}" "${INLINED_DIR}" "${DEST_DIR}" && mkdir -p "${SRC_DIR}" "${INLINED_DIR}" "${DEST_DIR}" +for idx in {1..10}; do + # generate random nr of blocks + COUNT=$((RANDOM % 100 + 100)) + # generate random content + dd if=/dev/urandom bs=50K count="${COUNT}" of="${SRC_DIR}"/file"$idx" +done + +# create small object that will be inlined into xl.meta +dd if=/dev/urandom bs=50K count=1 of="${INLINED_DIR}"/inlined + +if ./mc cp --quiet --recursive "${SRC_DIR}/" "${ALIAS_NAME}"/"${BUCKET}"/initial-data/; then + if ./mc cp --quiet --recursive "${INLINED_DIR}/" "${ALIAS_NAME}"/"${BUCKET}"/inlined-data/; then + echo "script passed" >resiliency-initial.log + fi +fi diff --git a/docs/resiliency/resiliency-tests.sh b/docs/resiliency/resiliency-tests.sh new file mode 100755 index 0000000000000..12b093d35de8f --- /dev/null +++ b/docs/resiliency/resiliency-tests.sh @@ -0,0 +1,433 @@ +#!/usr/bin/env bash + +TESTS_RUN_STATUS=1 + +function cleanup() { + echo "Cleaning up MinIO deployment" + docker compose -f "${DOCKER_COMPOSE_FILE}" down --volumes + for container in $(docker ps -q); do + echo Removing docker $container + docker rm -f $container >/dev/null 2>&1 + docker wait $container + done +} + +function cleanup_and_prune() { + cleanup + docker system prune --volumes --force + docker image prune --all --force +} + +function verify_resiliency() { + docs/resiliency/resiliency-verify-script.sh + RESULT=$(grep "script passed" /dev/null 2>&1 + STATUS=$? 
+ if [ $STATUS -eq 0 ]; then + DATA_DRIVE=1 + fi + + if [ $DATA_DRIVE -eq -1 ]; then + # Check for existence of file in erasure set 2 + docker exec resiliency-minio1-1 /bin/sh -c "stat /data5/test-bucket/$DIR/$FILE/xl.meta" >/dev/null 2>&1 + STATUS=$? + if [ $STATUS -eq 0 ]; then + DATA_DRIVE=5 + fi + fi + echo $DATA_DRIVE +} + +function test_resiliency_healing_missing_xl_metas() { + echo + echo -e "${GREEN}Running test_resiliency_healing_missing_xl_metas ...${NC}" + + DIR="initial-data" + FILE="file1" + DATA_DRIVE=$(find_erasure_set_for_file $FILE $DIR) + STATUS=$? + if [ $STATUS -ne 0 ]; then + echo -e "${RED}Could not find erasure set for file: ${FILE}${NC}" + echo -e "${RED}"${FUNCNAME[0]}" Failed${NC}" + TESTS_RUN_STATUS=$((TESTS_RUN_STATUS & 0)) + return 1 + fi + + # Remove single xl.meta -- status still green + OUTPUT=$(docker exec resiliency-minio1-1 /bin/sh -c "rm /data$((DATA_DRIVE))/test-bucket/initial-data/$FILE/xl.meta") + WANT='{ "before": { "color": "green", "missing": 1, "corrupted": 0 }, "after": { "color": "green", "missing": 0, "corrupted": 0 }, "args": {"file": "'${FILE}'", "dir": "'${DIR}'"} }' + verify_resiliency_healing "${FUNCNAME[0]}" "${WANT}" + + # Remove two xl.meta's -- status becomes yellow + OUTPUT=$(docker exec resiliency-minio1-1 /bin/sh -c "rm /data$((DATA_DRIVE))/test-bucket/initial-data/$FILE/xl.meta") + OUTPUT=$(docker exec resiliency-minio2-1 /bin/sh -c "rm /data$((DATA_DRIVE + 1))/test-bucket/initial-data/$FILE/xl.meta") + WANT='{ "before": { "color": "yellow", "missing": 2, "corrupted": 0 }, "after": { "color": "green", "missing": 0, "corrupted": 0 }, "args": {"file": "'${FILE}'", "dir": "'${DIR}'"} }' + verify_resiliency_healing "${FUNCNAME[0]}" "${WANT}" + + # Remove three xl.meta's -- status becomes red (3 missing) + OUTPUT=$(docker exec resiliency-minio1-1 /bin/sh -c "rm /data$((DATA_DRIVE))/test-bucket/initial-data/$FILE/xl.meta") + OUTPUT=$(docker exec resiliency-minio2-1 /bin/sh -c "rm /data$((DATA_DRIVE + 1))/test-bucket/initial-data/$FILE/xl.meta") + OUTPUT=$(docker exec resiliency-minio3-1 /bin/sh -c "rm /data$((DATA_DRIVE + 2))/test-bucket/initial-data/$FILE/xl.meta") + WANT='{ "before": { "color": "red", "missing": 3, "corrupted": 0 }, "after": { "color": "green", "missing": 0, "corrupted": 0 }, "args": {"file": "'${FILE}'", "dir": "'${DIR}'"} }' + verify_resiliency_healing "${FUNCNAME[0]}" "${WANT}" + + # Remove four xl.meta's -- status becomes red (4 missing) + OUTPUT=$(docker exec resiliency-minio1-1 /bin/sh -c "rm /data$((DATA_DRIVE))/test-bucket/initial-data/$FILE/xl.meta") + OUTPUT=$(docker exec resiliency-minio2-1 /bin/sh -c "rm /data$((DATA_DRIVE + 1))/test-bucket/initial-data/$FILE/xl.meta") + OUTPUT=$(docker exec resiliency-minio3-1 /bin/sh -c "rm /data$((DATA_DRIVE + 2))/test-bucket/initial-data/$FILE/xl.meta") + OUTPUT=$(docker exec resiliency-minio4-1 /bin/sh -c "rm /data$((DATA_DRIVE + 3))/test-bucket/initial-data/$FILE/xl.meta") + WANT='{ "before": { "color": "red", "missing": 4, "corrupted": 0 }, "after": { "color": "green", "missing": 0, "corrupted": 0 }, "args": {"file": "'${FILE}'", "dir": "'${DIR}'"} }' + verify_resiliency_healing "${FUNCNAME[0]}" "${WANT}" +} + +function test_resiliency_healing_truncated_parts() { + echo + echo -e "${GREEN}Running test_resiliency_healing_truncated_parts ...${NC}" + + DIR="initial-data" + FILE="file2" + DATA_DRIVE=$(find_erasure_set_for_file $FILE $DIR) + STATUS=$? 
+ if [ $STATUS -ne 0 ]; then
+ echo -e "${RED}Could not find erasure set for file: ${FILE}${NC}"
+ echo -e "${RED}"${FUNCNAME[0]}" Failed${NC}"
+ TESTS_RUN_STATUS=$((TESTS_RUN_STATUS & 0))
+ return 1
+ fi
+
+ # Truncate single part -- status still green
+ OUTPUT=$(docker exec resiliency-minio1-1 /bin/sh -c "truncate --size=10K /data$((DATA_DRIVE))/test-bucket/initial-data/$FILE/*/part.1")
+ WANT='{ "before": { "color": "green", "missing": 0, "corrupted": 1 }, "after": { "color": "green", "missing": 0, "corrupted": 0 }, "args": {"file": "'${FILE}'", "dir": "'${DIR}'"} }'
+ verify_resiliency_healing "${FUNCNAME[0]}" "${WANT}"
+
+ # Truncate two parts -- status becomes yellow (2 corrupted)
+ OUTPUT=$(docker exec resiliency-minio2-1 /bin/sh -c "truncate --size=10K /data{$((DATA_DRIVE))..$((DATA_DRIVE + 1))}/test-bucket/initial-data/$FILE/*/part.1")
+ WANT='{ "before": { "color": "yellow", "missing": 0, "corrupted": 2 }, "after": { "color": "green", "missing": 0, "corrupted": 0 }, "args": {"file": "'${FILE}'", "dir": "'${DIR}'"} }'
+ verify_resiliency_healing "${FUNCNAME[0]}" "${WANT}"
+
+ # Truncate three parts -- status becomes red (3 corrupted)
+ OUTPUT=$(docker exec resiliency-minio3-1 /bin/sh -c "truncate --size=10K /data{$((DATA_DRIVE))..$((DATA_DRIVE + 2))}/test-bucket/initial-data/$FILE/*/part.1")
+ WANT='{ "before": { "color": "red", "missing": 0, "corrupted": 3 }, "after": { "color": "green", "missing": 0, "corrupted": 0 }, "args": {"file": "'${FILE}'", "dir": "'${DIR}'"} }'
+ verify_resiliency_healing "${FUNCNAME[0]}" "${WANT}"
+
+ # Truncate four parts -- status becomes red (4 corrupted)
+ OUTPUT=$(docker exec resiliency-minio4-1 /bin/sh -c "truncate --size=10K /data{$((DATA_DRIVE))..$((DATA_DRIVE + 3))}/test-bucket/initial-data/$FILE/*/part.1")
+ WANT='{ "before": { "color": "red", "missing": 0, "corrupted": 4 }, "after": { "color": "green", "missing": 0, "corrupted": 0 }, "args": {"file": "'${FILE}'", "dir": "'${DIR}'"} }'
+ verify_resiliency_healing "${FUNCNAME[0]}" "${WANT}"
+}
+
+function induce_bitrot() {
+ local NODE=$1
+ local DIR=$2
+ local FILE=$3
+ # Figure out the UUID of the directory where the `part.*` files are stored
+ UUID=$(docker exec resiliency-minio$NODE-1 /bin/sh -c "ls -l $DIR/test-bucket/initial-data/$FILE/*/part.1")
+ UUID=$(echo $UUID | cut -d " " -f 9 | cut -d "/" -f 6)
+
+ # Determine head and tail size of file where we will introduce bitrot
+ FILE_SIZE=$(docker exec resiliency-minio$NODE-1 /bin/sh -c "stat --printf="%s" $DIR/test-bucket/initial-data/$FILE/$UUID/part.1")
+ TAIL_SIZE=$((FILE_SIZE - 32 * 2))
+
+ # Extract head and tail of file
+ $(docker exec resiliency-minio$NODE-1 /bin/sh -c "cat $DIR/test-bucket/initial-data/$FILE/$UUID/part.1 | head --bytes 32 > /tmp/head")
+ $(docker exec resiliency-minio$NODE-1 /bin/sh -c "cat $DIR/test-bucket/initial-data/$FILE/$UUID/part.1 | tail --bytes $TAIL_SIZE > /tmp/tail")
+
+ # Corrupt the part by writing head twice followed by tail
+ $(docker exec resiliency-minio$NODE-1 /bin/sh -c "cat /tmp/head /tmp/head /tmp/tail > $DIR/test-bucket/initial-data/$FILE/$UUID/part.1")
+}
+
+function test_resiliency_healing_induced_bitrot() {
+ echo
+ echo -e "${GREEN}Running test_resiliency_healing_induced_bitrot ...${NC}"
+
+ DIR="initial-data"
+ FILE="file3"
+ DATA_DRIVE=$(find_erasure_set_for_file $FILE $DIR)
+ STATUS=$?
+ if [ $STATUS -ne 0 ]; then
+ echo -e "${RED}Could not find erasure set for file: ${FILE}${NC}"
+ echo -e "${RED}"${FUNCNAME[0]}" Failed${NC}"
+ TESTS_RUN_STATUS=$((TESTS_RUN_STATUS & 0))
+ return 1
+ fi
+
+ # Induce bitrot in single part -- status still green
+ induce_bitrot "2" "/data"$((DATA_DRIVE + 1)) $FILE
+ WANT='{ "before": { "color": "green", "missing": 0, "corrupted": 1 }, "after": { "color": "green", "missing": 0, "corrupted": 0 }, "args": {"file": "'${FILE}'", "dir": "'${DIR}'", "deep": true} }'
+ verify_resiliency_healing "${FUNCNAME[0]}" "${WANT}"
+
+ # Induce bitrot in two parts -- status becomes yellow (2 corrupted)
+ induce_bitrot "2" "/data"$((DATA_DRIVE)) $FILE
+ induce_bitrot "1" "/data"$((DATA_DRIVE + 1)) $FILE
+ WANT='{ "before": { "color": "yellow", "missing": 0, "corrupted": 2 }, "after": { "color": "green", "missing": 0, "corrupted": 0 }, "args": {"file": "'${FILE}'", "dir": "'${DIR}'", "deep": true} }'
+ verify_resiliency_healing "${FUNCNAME[0]}" "${WANT}"
+
+ # Induce bitrot in three parts -- status becomes red (3 corrupted)
+ induce_bitrot "3" "/data"$((DATA_DRIVE)) $FILE
+ induce_bitrot "2" "/data"$((DATA_DRIVE + 1)) $FILE
+ induce_bitrot "1" "/data"$((DATA_DRIVE + 2)) $FILE
+ WANT='{ "before": { "color": "red", "missing": 0, "corrupted": 3 }, "after": { "color": "green", "missing": 0, "corrupted": 0 }, "args": {"file": "'${FILE}'", "dir": "'${DIR}'", "deep": true} }'
+ verify_resiliency_healing "${FUNCNAME[0]}" "${WANT}"
+
+ # Induce bitrot in four parts -- status becomes red (4 corrupted)
+ induce_bitrot "4" "/data"$((DATA_DRIVE)) $FILE
+ induce_bitrot "3" "/data"$((DATA_DRIVE + 1)) $FILE
+ induce_bitrot "2" "/data"$((DATA_DRIVE + 2)) $FILE
+ induce_bitrot "1" "/data"$((DATA_DRIVE + 3)) $FILE
+ WANT='{ "before": { "color": "red", "missing": 0, "corrupted": 4 }, "after": { "color": "green", "missing": 0, "corrupted": 0 }, "args": {"file": "'${FILE}'", "dir": "'${DIR}'", "deep": true} }'
+ verify_resiliency_healing "${FUNCNAME[0]}" "${WANT}"
+}
+
+function induce_bitrot_for_xlmeta() {
+ local NODE=$1
+ local DIR=$2
+ local FILE=$3
+
+ # Determine head and tail size of file where we will introduce bitrot
+ FILE_SIZE=$(docker exec resiliency-minio$NODE-1 /bin/sh -c "stat --printf="%s" $DIR/test-bucket/inlined-data/$FILE/xl.meta")
+ HEAD_SIZE=$((FILE_SIZE - 32 * 2))
+
+ # Extract head and tail of file
+ $(docker exec resiliency-minio$NODE-1 /bin/sh -c "cat $DIR/test-bucket/inlined-data/$FILE/xl.meta | head --bytes $HEAD_SIZE > /head")
+ $(docker exec resiliency-minio$NODE-1 /bin/sh -c "cat $DIR/test-bucket/inlined-data/$FILE/xl.meta | tail --bytes 32 > /tail")
+
+ # Corrupt xl.meta by writing head followed by tail twice
+ $(docker exec resiliency-minio$NODE-1 /bin/sh -c "cat /head /tail /tail > $DIR/test-bucket/inlined-data/$FILE/xl.meta")
+}
+
+function test_resiliency_healing_inlined_metadata() {
+ echo
+ echo -e "${GREEN}Running test_resiliency_healing_inlined_metadata ...${NC}"
+
+ DIR="inlined-data"
+ FILE="inlined"
+ DATA_DRIVE=$(find_erasure_set_for_file $FILE $DIR)
+ STATUS=$?
+ if [ $STATUS -ne 0 ]; then + echo -e "${RED}Could not find erasure set for file: ${FILE}${NC}" + echo -e "${RED}"${FUNCNAME[0]}" Failed${NC}" + TESTS_RUN_STATUS=$((TESTS_RUN_STATUS & 0)) + return 1 + fi + + # Induce bitrot in single inlined xl.meta -- status still green + induce_bitrot_for_xlmeta "2" "/data"$((DATA_DRIVE + 1)) $FILE + WANT='{ "before": { "color": "green", "missing": 0, "corrupted": 1 }, "after": { "color": "green", "missing": 0, "corrupted": 0 }, "args": {"file": "'${FILE}'", "dir": "'${DIR}'"} }' + verify_resiliency_healing "${FUNCNAME[0]}" "${WANT}" + + # Induce bitrot in two inlined xl.meta's -- status becomes yellow (2 corrupted) + induce_bitrot_for_xlmeta "3" "/data"$((DATA_DRIVE + 1)) $FILE + induce_bitrot_for_xlmeta "3" "/data"$((DATA_DRIVE + 2)) $FILE + WANT='{ "before": { "color": "yellow", "missing": 0, "corrupted": 2 }, "after": { "color": "green", "missing": 0, "corrupted": 0 }, "args": {"file": "'${FILE}'", "dir": "'${DIR}'"} }' + verify_resiliency_healing "${FUNCNAME[0]}" "${WANT}" + + # Induce bitrot in three inlined xl.meta's -- status becomes red (3 corrupted) + induce_bitrot_for_xlmeta "4" "/data"$((DATA_DRIVE + 1)) $FILE + induce_bitrot_for_xlmeta "4" "/data"$((DATA_DRIVE + 2)) $FILE + induce_bitrot_for_xlmeta "4" "/data"$((DATA_DRIVE + 3)) $FILE + WANT='{ "before": { "color": "red", "missing": 0, "corrupted": 3 }, "after": { "color": "green", "missing": 0, "corrupted": 0 }, "args": {"file": "'${FILE}'", "dir": "'${DIR}'"} }' + verify_resiliency_healing "${FUNCNAME[0]}" "${WANT}" + + # Induce bitrot in four inlined xl.meta's -- status becomes red (4 corrupted) + induce_bitrot_for_xlmeta "1" "/data"$((DATA_DRIVE)) $FILE + induce_bitrot_for_xlmeta "1" "/data"$((DATA_DRIVE + 1)) $FILE + induce_bitrot_for_xlmeta "1" "/data"$((DATA_DRIVE + 2)) $FILE + induce_bitrot_for_xlmeta "1" "/data"$((DATA_DRIVE + 3)) $FILE + WANT='{ "before": { "color": "red", "missing": 0, "corrupted": 4 }, "after": { "color": "green", "missing": 0, "corrupted": 0 }, "args": {"file": "'${FILE}'", "dir": "'${DIR}'"} }' + verify_resiliency_healing "${FUNCNAME[0]}" "${WANT}" +} + +function main() { + if [ ! -f ./mc ]; then + wget -q https://dl.minio.io/client/mc/release/linux-amd64/mc && chmod +x ./mc + fi + + export MC_HOST_myminio=http://minioadmin:minioadmin@localhost:9000 + + cleanup_and_prune + + # Run resiliency tests against MinIO + docker compose -f "${DOCKER_COMPOSE_FILE}" up -d + + # Initial setup + docs/resiliency/resiliency-initial-script.sh + RESULT=$(grep "script passed" resiliency-verify-failure.log # assume initial state + +ALIAS_NAME=myminio +BUCKET="test-bucket" +DEST_DIR="/tmp/dest" + +OUT=$(./mc cp --quiet --recursive "${ALIAS_NAME}"/"${BUCKET}"/initial-data/ "${DEST_DIR}"/) +RET=${?} +if [ ${RET} -ne 0 ]; then + # It is a success scenario as get objects should fail + echo "GET objects failed as expected" + echo "script passed" >resiliency-verify-failure.log + exit 0 +else + echo "GET objects expected to fail, but succeeded: ${OUT}" +fi diff --git a/docs/resiliency/resiliency-verify-healing-script.sh b/docs/resiliency/resiliency-verify-healing-script.sh new file mode 100755 index 0000000000000..8127d03d1c79e --- /dev/null +++ b/docs/resiliency/resiliency-verify-healing-script.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +echo "script failed" >resiliency-verify-healing.log # assume initial state + +# Extract arguments from json object ... 
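+# $1 is the expected-result object passed in by the resiliency tests (the WANT
+# strings in resiliency-tests.sh), e.g. with illustrative values:
+#   { "before": { "color": "yellow", "missing": 2, "corrupted": 0 },
+#     "after":  { "color": "green",  "missing": 0, "corrupted": 0 },
+#     "args":   { "file": "file1", "dir": "initial-data", "deep": true } }
+# .args selects the object to heal ("deep" appears only for bitrot cases and
+# turns on --scan=deep); .args is stripped before comparing the remainder
+# against the `mc admin heal` output below.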
+FILE=$(echo $1 | jq -r '.args.file') +DIR=$(echo $1 | jq -r '.args.dir') +DEEP=$(echo $1 | jq -r '.args.deep') +WANT=$(echo $1 | jq 'del(.args)') # ... and remove args from wanted result + +ALIAS_NAME=myminio +BUCKET="test-bucket" +JQUERY='select(.name=="'"${BUCKET}"'/'"${DIR}"'/'"${FILE}"'") | {"before":{"color": .before.color, "missing": .before.missing, "corrupted": .before.corrupted},"after":{"color": .after.color, "missing": .after.missing, "corrupted": .after.corrupted}}' +if [ "$DEEP" = "true" ]; then + SCAN_DEEP="--scan=deep" +fi + +GOT=$(./mc admin heal --json ${SCAN_DEEP} ${ALIAS_NAME}/${BUCKET}/${DIR}/${FILE}) +GOT=$(echo $GOT | jq "${JQUERY}") + +if [ "$(echo "$GOT" | jq -S .)" = "$(echo "$WANT" | jq -S .)" ]; then + echo "script passed" >resiliency-verify-healing.log +else + echo "Error during healing:" + echo "----GOT: "$GOT + echo "---WANT: "$WANT +fi diff --git a/docs/resiliency/resiliency-verify-script.sh b/docs/resiliency/resiliency-verify-script.sh new file mode 100755 index 0000000000000..50220b0a203af --- /dev/null +++ b/docs/resiliency/resiliency-verify-script.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +echo "script failed" >resiliency-verify.log # assume initial state + +ALIAS_NAME=myminio +BUCKET="test-bucket" +SRC_DIR="/tmp/data" +DEST_DIR="/tmp/dest" + +./mc admin config set "$ALIAS_NAME" api requests_max=400 + +OBJ_COUNT_AFTER_STOP=$(./mc ls "${ALIAS_NAME}"/"${BUCKET}"/initial-data/ | wc -l) +# Count should match the initial count of 10 +if [ "${OBJ_COUNT_AFTER_STOP}" -ne 10 ]; then + echo "Expected 10 objects; received ${OBJ_COUNT_AFTER_STOP}" + exit 1 +fi + +./mc ready "${ALIAS_NAME}" --json + +OUT=$(./mc cp --quiet "${SRC_DIR}"/* "${ALIAS_NAME}"/"${BUCKET}"/new-data/) +RET=${?} +if [ ${RET} -ne 0 ]; then + echo "Error copying objects to new prefix: ${OUT}" + exit 1 +fi + +OBJ_COUNT_AFTER_COPY=$(./mc ls "${ALIAS_NAME}"/"${BUCKET}"/new-data/ | wc -l) +if [ "${OBJ_COUNT_AFTER_COPY}" -ne "${OBJ_COUNT_AFTER_STOP}" ]; then + echo "Expected ${OBJ_COUNT_AFTER_STOP} objects; received ${OBJ_COUNT_AFTER_COPY}" + exit 1 +fi + +OUT=$(./mc cp --quiet --recursive "${ALIAS_NAME}"/"${BUCKET}"/new-data/ "${DEST_DIR}"/) +RET=${?} +if [ ${RET} -ne 0 ]; then + echo "Get objects failed: ${OUT}" + exit 1 +fi + +# Check if check sums match for source and destination directories +CHECK_SUM_SRC=$(sha384sum <(sha384sum "${SRC_DIR}"/* | cut -d " " -f 1 | sort) | cut -d " " -f 1) +CHECK_SUM_DEST=$(sha384sum <(sha384sum "${DEST_DIR}"/* | cut -d " " -f 1 | sort) | cut -d " " -f 1) +if [ "${CHECK_SUM_SRC}" != "${CHECK_SUM_DEST}" ]; then + echo "Checksum verification of source files and destination files failed" + exit 1 +fi + +echo "script passed" >resiliency-verify.log diff --git a/docs/security/README.md b/docs/security/README.md index 69e7d3f9359a8..9d56e4b81bb28 100644 --- a/docs/security/README.md +++ b/docs/security/README.md @@ -11,7 +11,7 @@ MinIO supports two different types of server-side encryption ([SSE](#sse)): #### Secret Keys -The MinIO server uses an unique, randomly generated secret key per object also known as, Object Encryption Key ([OEK](#oek)). Neither the client-provided SSE-C key nor the KMS-managed key is directly used to en/decrypt an object. Instead, the OEK is stored as part of the object metadata next to the object in an encrypted form. To en/decrypt the OEK another secret key is needed also known as, Key Encryption Key ([KEK](#kek)). +The MinIO server uses a unique, randomly generated secret key per object also known as, Object Encryption Key ([OEK](#oek)). 
Neither the client-provided SSE-C key nor the KMS-managed key is directly used to en/decrypt an object. Instead, the OEK is stored as part of the object metadata next to the object in an encrypted form. To en/decrypt the OEK another secret key is needed also known as, Key Encryption Key ([KEK](#kek)). The MinIO server runs a key-derivation algorithm to generate the KEK using a pseudo-random function ([PRF](#prf)): `KEK := PRF(EK, IV, context_values)` where: @@ -28,7 +28,7 @@ To summarize for any encrypted object there exists (at least) three different ke #### Content Encryption -The MinIO server uses an authenticated encryption scheme ([AEAD](#aead)) to en/decrypt and authenticate the object content. The AEAD is combined with some state to build a *Secure Channel*. A *Secure Channel* is a cryptographic construction that ensures confidentiality and integrity of the processed data. In particular the *Secure Channel* splits the plaintext content into fixed size chunks and en/decrypts each chunk separately using an unique key-nonce combination. +The MinIO server uses an authenticated encryption scheme ([AEAD](#aead)) to en/decrypt and authenticate the object content. The AEAD is combined with some state to build a *Secure Channel*. A *Secure Channel* is a cryptographic construction that ensures confidentiality and integrity of the processed data. In particular the *Secure Channel* splits the plaintext content into fixed size chunks and en/decrypts each chunk separately using a unique key-nonce combination. ##### Figure 1 - Secure Channel construction @@ -42,7 +42,7 @@ plaintext := chunk_0 || chunk_1 || chunk_2 ciphertext := sealed_chunk_0 || sealed_chunk_1 || sealed_chunk_2 || ... ``` -In case of a S3 multi-part operation each part is en/decrypted with the scheme shown in Figure 1. However, for each part an unique secret key is derived from the OEK and the part number using a PRF. So in case of multi-part not the OEK but the output of `PRF(OEK, part_id)` is used as secret key. +In case of a S3 multi-part operation each part is en/decrypted with the scheme shown in Figure 1. However, for each part a unique secret key is derived from the OEK and the part number using a PRF. So in case of multi-part not the OEK but the output of `PRF(OEK, part_id)` is used as secret key. #### Cryptographic Primitives diff --git a/docs/select/README.md b/docs/select/README.md index ee5333d469689..7aff355380009 100644 --- a/docs/select/README.md +++ b/docs/select/README.md @@ -12,7 +12,7 @@ You can use the Select API to query objects with following features: Type inference and automatic conversion of values is performed based on the context when the value is un-typed (such as when reading CSV data). If present, the CAST function overrides automatic conversion. -The [mc sql](https://min.io/docs/minio/linux/reference/minio-mc/mc-sql.html) command can be used for executing queries using the command line. +The [mc sql](https://docs.min.io/community/minio-object-store/reference/minio-mc/mc-sql.html) command can be used for executing queries using the command line. (*) Parquet is disabled on the MinIO server by default. See below how to enable it. @@ -27,7 +27,7 @@ To enable Parquet set the environment variable `MINIO_API_SELECT_PARQUET=on`. ### 1. Prerequisites -- Install MinIO Server from [here](https://min.io/docs/minio/linux/index.html#procedure). +- Install MinIO Server from [here](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-on-redhat-linux.html#procedure). 
- Familiarity with AWS S3 API. - Familiarity with Python and installing dependencies. @@ -113,11 +113,11 @@ For a more detailed SELECT SQL reference, please see [here](https://docs.aws.ama ## 5. Explore Further -- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html) -- [Use `mc sql` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc/mc-sql.html#command-mc.sql) -- [Use `minio-go` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/go/minio-go.html) -- [Use `aws-cli` with MinIO Server](https://min.io/docs/minio/linux/integrations/aws-cli-with-minio.html) -- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html) +- [Use `mc` with MinIO Server](https://docs.min.io/community/minio-object-store/reference/minio-mc.html) +- [Use `mc sql` with MinIO Server](https://docs.min.io/community/minio-object-store/reference/minio-mc/mc-sql.html#command-mc.sql) +- [Use `minio-go` SDK with MinIO Server](https://docs.min.io/community/minio-object-store/developers/go/minio-go.html) +- [Use `aws-cli` with MinIO Server](https://docs.min.io/community/minio-object-store/integrations/aws-cli-with-minio.html) +- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html) ## 6. Implementation Status diff --git a/docs/site-replication/README.md b/docs/site-replication/README.md index 15024af30b1ef..8edcc09deedf3 100644 --- a/docs/site-replication/README.md +++ b/docs/site-replication/README.md @@ -25,7 +25,7 @@ The following Bucket features will **not be replicated**, is designed to differ - **Removing a site** is not allowed from a set of replicated sites once configured. - All sites must be using the **same** external IDP(s) if any. -- For [SSE-S3 or SSE-KMS encryption via KMS](https://min.io/docs/minio/linux/operations/server-side-encryption.html "MinIO KMS Guide"), all sites **must** have access to a central KMS deployment. This can be achieved via a central KES server or multiple KES servers (say one per site) connected via a central KMS (Vault) server. +- For [SSE-S3 or SSE-KMS encryption via KMS](https://docs.min.io/community/minio-object-store/operations/server-side-encryption.html "MinIO KMS Guide"), all sites **must** have access to a central KMS deployment. This can be achieved via a central KES server or multiple KES servers (say one per site) connected via a central KMS (Vault) server. ## Configuring Site Replication diff --git a/docs/site-replication/run-multi-site-ldap.sh b/docs/site-replication/run-multi-site-ldap.sh index 5bb108288e13b..351ca92a39101 100755 --- a/docs/site-replication/run-multi-site-ldap.sh +++ b/docs/site-replication/run-multi-site-ldap.sh @@ -59,12 +59,14 @@ site2_pid=$! minio server --config-dir /tmp/minio-ldap --address ":9003" /tmp/minio-ldap-idp3/{1...4} >/tmp/minio3_1.log 2>&1 & site3_pid=$! -sleep 10 - export MC_HOST_minio1=http://minio:minio123@localhost:9001 export MC_HOST_minio2=http://minio:minio123@localhost:9002 export MC_HOST_minio3=http://minio:minio123@localhost:9003 +./mc ready minio1 +./mc ready minio2 +./mc ready minio3 + ./mc admin replicate add minio1 minio2 minio3 ./mc idp ldap policy attach minio1 consoleAdmin --user="uid=dillon,ou=people,ou=swengg,dc=min,dc=io" @@ -205,13 +207,13 @@ expected_checksum=$(cat ./lrgfile | md5sum) ./mc mb minio1/bucket2 sleep 5 -./mc stat minio2/newbucket +./mc stat --no-list minio2/newbucket if [ $? -ne 0 ]; then echo "expecting bucket to be present. exiting.." 
exit_1 fi -./mc stat minio3/newbucket +./mc stat --no-list minio3/newbucket if [ $? -ne 0 ]; then echo "expecting bucket to be present. exiting.." exit_1 @@ -220,20 +222,20 @@ fi ./mc cp README.md minio2/newbucket/ sleep 5 -./mc stat minio1/newbucket/README.md +./mc stat --no-list minio1/newbucket/README.md if [ $? -ne 0 ]; then echo "expecting object to be present. exiting.." exit_1 fi -./mc stat minio3/newbucket/README.md +./mc stat --no-list minio3/newbucket/README.md if [ $? -ne 0 ]; then echo "expecting object to be present. exiting.." exit_1 fi sleep 10 -./mc stat minio3/newbucket/lrgfile +./mc stat --no-list minio3/newbucket/lrgfile if [ $? -ne 0 ]; then echo "expected object to be present, exiting.." exit_1 @@ -252,13 +254,13 @@ if [ $? -ne 0 ]; then fi sleep 5 -./mc stat minio1/newbucket/lrgfile +./mc stat --no-list minio1/newbucket/lrgfile if [ $? -eq 0 ]; then echo "expected object to be deleted permanently after replication, exiting.." exit_1 fi -vID=$(./mc stat minio2/newbucket/README.md --json | jq .versionID) +vID=$(./mc stat --no-list minio2/newbucket/README.md --json | jq .versionID) if [ $? -ne 0 ]; then echo "expecting object to be present. exiting.." exit_1 @@ -277,7 +279,7 @@ if [ $? -ne 0 ]; then fi sleep 5 -replStatus_minio2=$(./mc stat minio2/newbucket/README.md --json | jq -r .replicationStatus) +replStatus_minio2=$(./mc stat --no-list minio2/newbucket/README.md --json | jq -r .replicationStatus) if [ $? -ne 0 ]; then echo "expecting object to be present. exiting.." exit_1 @@ -291,13 +293,13 @@ fi ./mc rm minio3/newbucket/README.md sleep 5 -./mc stat minio2/newbucket/README.md +./mc stat --no-list minio2/newbucket/README.md if [ $? -eq 0 ]; then echo "expected file to be deleted, exiting.." exit_1 fi -./mc stat minio1/newbucket/README.md +./mc stat --no-list minio1/newbucket/README.md if [ $? -eq 0 ]; then echo "expected file to be deleted, exiting.." exit_1 diff --git a/docs/site-replication/run-multi-site-minio-idp.sh b/docs/site-replication/run-multi-site-minio-idp.sh index 7ea34001c5a32..48df4f241142e 100755 --- a/docs/site-replication/run-multi-site-minio-idp.sh +++ b/docs/site-replication/run-multi-site-minio-idp.sh @@ -59,8 +59,6 @@ site3_pid1=$! minio server --config-dir /tmp/minio-internal --address ":9030" http://localhost:9003/tmp/minio-internal-idp3/{1...4} http://localhost:9030/tmp/minio-internal-idp3/{5...8} >/tmp/minio3_2.log 2>&1 & site3_pid2=$! -sleep 10 - export MC_HOST_minio1=http://minio:minio123@localhost:9001 export MC_HOST_minio2=http://minio:minio123@localhost:9002 export MC_HOST_minio3=http://minio:minio123@localhost:9003 @@ -69,6 +67,13 @@ export MC_HOST_minio10=http://minio:minio123@localhost:9010 export MC_HOST_minio20=http://minio:minio123@localhost:9020 export MC_HOST_minio30=http://minio:minio123@localhost:9030 +./mc ready minio1 +./mc ready minio2 +./mc ready minio3 +./mc ready minio10 +./mc ready minio20 +./mc ready minio30 + ./mc admin replicate add minio1 minio2 site_enabled=$(./mc admin replicate info minio1) @@ -218,19 +223,19 @@ expected_checksum=$(cat ./lrgfile | md5sum) ./mc cp ./lrgfile minio1/newbucket sleep 5 -./mc stat minio2/newbucket +./mc stat --no-list minio2/newbucket if [ $? -ne 0 ]; then echo "expecting bucket to be present. exiting.." exit_1 fi -./mc stat minio3/newbucket +./mc stat --no-list minio3/newbucket if [ $? -ne 0 ]; then echo "expecting bucket to be present. exiting.." 
exit_1 fi -err_minio2=$(./mc stat minio2/newbucket/xxx --json | jq -r .error.cause.message) +err_minio2=$(./mc stat --no-list minio2/newbucket/xxx --json | jq -r .error.cause.message) if [ $? -ne 0 ]; then echo "expecting object to be missing. exiting.." exit_1 @@ -244,20 +249,20 @@ fi ./mc cp README.md minio2/newbucket/ sleep 5 -./mc stat minio1/newbucket/README.md +./mc stat --no-list minio1/newbucket/README.md if [ $? -ne 0 ]; then echo "expecting object to be present. exiting.." exit_1 fi -./mc stat minio3/newbucket/README.md +./mc stat --no-list minio3/newbucket/README.md if [ $? -ne 0 ]; then echo "expecting object to be present. exiting.." exit_1 fi sleep 10 -./mc stat minio3/newbucket/lrgfile +./mc stat --no-list minio3/newbucket/lrgfile if [ $? -ne 0 ]; then echo "expected object to be present, exiting.." exit_1 @@ -277,13 +282,13 @@ if [ $? -ne 0 ]; then fi sleep 5 -./mc stat minio1/newbucket/lrgfile +./mc stat --no-list minio1/newbucket/lrgfile if [ $? -eq 0 ]; then echo "expected object to be deleted permanently after replication, exiting.." exit_1 fi -vID=$(./mc stat minio2/newbucket/README.md --json | jq .versionID) +vID=$(./mc stat --no-list minio2/newbucket/README.md --json | jq .versionID) if [ $? -ne 0 ]; then echo "expecting object to be present. exiting.." exit_1 @@ -306,7 +311,7 @@ if [ $? -ne 0 ]; then fi sleep 5 -replStatus_minio2=$(./mc stat minio2/newbucket/README.md --json | jq -r .replicationStatus) +replStatus_minio2=$(./mc stat --no-list minio2/newbucket/README.md --json | jq -r .replicationStatus) if [ $? -ne 0 ]; then echo "expecting object to be present. exiting.." exit_1 @@ -320,13 +325,13 @@ fi ./mc rm minio3/newbucket/README.md sleep 5 -./mc stat minio2/newbucket/README.md +./mc stat --no-list minio2/newbucket/README.md if [ $? -eq 0 ]; then echo "expected file to be deleted, exiting.." exit_1 fi -./mc stat minio1/newbucket/README.md +./mc stat --no-list minio1/newbucket/README.md if [ $? -eq 0 ]; then echo "expected file to be deleted, exiting.." exit_1 @@ -335,6 +340,8 @@ fi ./mc mb --with-lock minio3/newbucket-olock sleep 5 +set -x + enabled_minio2=$(./mc stat --json minio2/newbucket-olock | jq -r .ObjectLock.enabled) if [ $? -ne 0 ]; then echo "expected bucket to be mirrored with object-lock but not present, exiting..." @@ -357,6 +364,8 @@ if [ "${enabled_minio1}" != "Enabled" ]; then exit_1 fi +set +x + # "Test if most recent tag update is replicated" ./mc tag set minio2/newbucket "key=val1" if [ $? -ne 0 ]; then diff --git a/docs/site-replication/run-multi-site-oidc.sh b/docs/site-replication/run-multi-site-oidc.sh index 2add4695b6839..d71a86a4a93cb 100755 --- a/docs/site-replication/run-multi-site-oidc.sh +++ b/docs/site-replication/run-multi-site-oidc.sh @@ -57,12 +57,14 @@ if [ ! -f ./mc ]; then chmod +x mc fi -sleep 10 - export MC_HOST_minio1=http://minio:minio123@localhost:9001 export MC_HOST_minio2=http://minio:minio123@localhost:9002 export MC_HOST_minio3=http://minio:minio123@localhost:9003 +./mc ready minio1 +./mc ready minio2 +./mc ready minio3 + ./mc admin replicate add minio1 minio2 minio3 ./mc admin policy create minio1 projecta ./docs/site-replication/rw.json @@ -174,13 +176,13 @@ expected_checksum=$(cat ./lrgfile | md5sum) ./mc cp ./lrgfile minio1/newbucket sleep 5 -./mc stat minio2/newbucket +./mc stat --no-list minio2/newbucket if [ $? -ne 0 ]; then echo "expecting bucket to be present. exiting.." exit_1 fi -./mc stat minio3/newbucket +./mc stat --no-list minio3/newbucket if [ $? -ne 0 ]; then echo "expecting bucket to be present. 
exiting.." exit_1 @@ -189,13 +191,13 @@ fi ./mc cp README.md minio2/newbucket/ sleep 5 -./mc stat minio1/newbucket/README.md +./mc stat --no-list minio1/newbucket/README.md if [ $? -ne 0 ]; then echo "expecting object to be present. exiting.." exit_1 fi -./mc stat minio3/newbucket/README.md +./mc stat --no-list minio3/newbucket/README.md if [ $? -ne 0 ]; then echo "expecting object to be present. exiting.." exit_1 @@ -204,20 +206,20 @@ fi ./mc rm minio3/newbucket/README.md sleep 5 -./mc stat minio2/newbucket/README.md +./mc stat --no-list minio2/newbucket/README.md if [ $? -eq 0 ]; then echo "expected file to be deleted, exiting.." exit_1 fi -./mc stat minio1/newbucket/README.md +./mc stat --no-list minio1/newbucket/README.md if [ $? -eq 0 ]; then echo "expected file to be deleted, exiting.." exit_1 fi sleep 10 -./mc stat minio3/newbucket/lrgfile +./mc stat --no-list minio3/newbucket/lrgfile if [ $? -ne 0 ]; then echo "expected object to be present, exiting.." exit_1 @@ -236,7 +238,7 @@ if [ $? -ne 0 ]; then fi sleep 5 -./mc stat minio1/newbucket/lrgfile +./mc stat --no-list minio1/newbucket/lrgfile if [ $? -eq 0 ]; then echo "expected object to be deleted permanently after replication, exiting.." exit_1 diff --git a/docs/site-replication/run-replication-with-checksum-header.sh b/docs/site-replication/run-replication-with-checksum-header.sh new file mode 100755 index 0000000000000..f7bf81a2222d7 --- /dev/null +++ b/docs/site-replication/run-replication-with-checksum-header.sh @@ -0,0 +1,262 @@ +#!/usr/bin/env bash + +# shellcheck disable=SC2120 +exit_1() { + cleanup + + echo "minio1 ============" + cat /tmp/minio1_1.log + echo "minio2 ============" + cat /tmp/minio2_1.log + + exit 1 +} + +cleanup() { + echo -n "Cleaning up instances of MinIO ..." + pkill -9 minio || sudo pkill -9 minio + rm -rf /tmp/minio{1,2} + echo "done" +} + +# Function to convert number to corresponding alphabet +num_to_alpha() { + local num=$1 + # ASCII value of 'a' is 97, so we add (num - 1) to 97 to get the corresponding alphabet + local ascii_value=$((96 + num)) + # Convert the ASCII value to the character using printf + printf "\\$(printf '%03o' "$ascii_value")" +} + +cleanup + +export MINIO_CI_CD=1 +export MINIO_BROWSER=off +export MINIO_ROOT_USER="minio" +export MINIO_ROOT_PASSWORD="minio123" + +# Download AWS CLI +echo -n "Download and install AWS CLI" +rm -rf /usr/local/aws-cli || sudo rm -rf /usr/local/aws-cli +curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" +unzip -qq awscliv2.zip +./aws/install || sudo ./aws/install +echo "done" + +# Add credentials to ~/.aws/credentials +if ! [ -d ~/.aws ]; then + mkdir -p ~/.aws +fi +cat >~/.aws/credentials </tmp/minio1_1.log 2>&1 & +CI=on MINIO_KMS_SECRET_KEY=minio-default-key:IyqsU3kMFloCNup4BsZtf/rmfHVcTgznO2F25CkEH1g= MINIO_ROOT_USER=minio MINIO_ROOT_PASSWORD=minio123 minio server --certs-dir /tmp/certs --address ":9002" --console-address ":11000" /tmp/minio2/{1...4}/disk{1...4} /tmp/minio2/{5...8}/disk{1...4} >/tmp/minio2_1.log 2>&1 & +echo "done" + +if [ ! -f ./mc ]; then + echo -n "Downloading MinIO client ..." + wget -O mc https://dl.min.io/client/mc/release/linux-amd64/mc && + chmod +x mc + echo "done" +fi + +export MC_HOST_minio1=https://minio:minio123@localhost:9001 +export MC_HOST_minio2=https://minio:minio123@localhost:9002 + +./mc ready minio1 --insecure +./mc ready minio2 --insecure + +# Prepare data for tests +echo -n "Preparing test data ..." 
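+# obj is a small single-part upload, while mpartobj is shredded to 500M so its
+# upload goes through the multipart path; this is intended to exercise the
+# checksum header for both single-part and multipart PUTs (see the openssl
+# digest and upload steps below).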
+mkdir -p /tmp/data +echo "Hello World" >/tmp/data/obj +touch /tmp/data/mpartobj +shred -s 500M /tmp/data/mpartobj +echo "done" + +# Add replication site +./mc admin replicate add minio1 minio2 --insecure +# sleep for replication to complete +sleep 30 + +# Create bucket in source cluster +echo "Create bucket in source MinIO instance" +./mc mb minio1/test-bucket --insecure + +# Load objects to source site with checksum header +echo "Loading objects to source MinIO instance" +OBJ_CHKSUM=$(openssl dgst -sha256 -binary fileparts.json +jq fileparts.json +jq /tmp/minio1_1.log 2>&1 & +CI=on MINIO_KMS_SECRET_KEY=minio-default-key:IyqsU3kMFloCNup4BsZtf/rmfHVcTgznO2F25CkEH1g= MINIO_ROOT_USER=minio MINIO_ROOT_PASSWORD=minio123 minio server --certs-dir /tmp/certs --address ":9002" --console-address ":11000" /tmp/minio2/{1...4}/disk{1...4} /tmp/minio2/{5...8}/disk{1...4} >/tmp/minio2_1.log 2>&1 & +echo "done" + +if [ ! -f ./mc ]; then + echo -n "Downloading MinIO client ..." + wget -O mc https://dl.min.io/client/mc/release/linux-amd64/mc && + chmod +x mc + echo "done" +fi + +export MC_HOST_minio1=https://minio:minio123@localhost:9001 +export MC_HOST_minio2=https://minio:minio123@localhost:9002 + +./mc ready minio1 --insecure +./mc ready minio2 --insecure + +# Prepare data for tests +echo -n "Preparing test data ..." +mkdir -p /tmp/data +echo "Hello from encrypted world" >/tmp/data/encrypted +touch /tmp/data/mpartobj +shred -s 500M /tmp/data/mpartobj +touch /tmp/data/defpartsize +shred -s 500M /tmp/data/defpartsize +touch /tmp/data/custpartsize +shred -s 500M /tmp/data/custpartsize +echo "done" + +# Add replication site +./mc admin replicate add minio1 minio2 --insecure +# sleep for replication to complete +sleep 30 + +# Create bucket in source cluster +echo "Create bucket in source MinIO instance" +./mc mb minio1/test-bucket --insecure + +# Enable SSE KMS for the bucket +./mc encrypt set sse-kms minio-default-key minio1/test-bucket --insecure + +# Load objects to source site +echo "Loading objects to source MinIO instance" +./mc cp /tmp/data/encrypted minio1/test-bucket --insecure +./mc cp /tmp/data/mpartobj minio1/test-bucket/mpartobj --enc-c "minio1/test-bucket/mpartobj=${TEST_MINIO_ENC_KEY}" --insecure +./mc cp /tmp/data/defpartsize minio1/test-bucket --insecure +./mc put /tmp/data/custpartsize minio1/test-bucket --insecure --part-size 50MiB +sleep 120 + +# List the objects from source site +echo "Objects from source instance" +./mc ls minio1/test-bucket --insecure +count1=$(./mc ls minio1/test-bucket/encrypted --insecure | wc -l) +if [ "${count1}" -ne 1 ]; then + echo "BUG: object minio1/test-bucket/encrypted not found" + exit_1 +fi +count2=$(./mc ls minio1/test-bucket/mpartobj --insecure | wc -l) +if [ "${count2}" -ne 1 ]; then + echo "BUG: object minio1/test-bucket/mpartobj not found" + exit_1 +fi +count3=$(./mc ls minio1/test-bucket/defpartsize --insecure | wc -l) +if [ "${count3}" -ne 1 ]; then + echo "BUG: object minio1/test-bucket/defpartsize not found" + exit_1 +fi +count4=$(./mc ls minio1/test-bucket/custpartsize --insecure | wc -l) +if [ "${count4}" -ne 1 ]; then + echo "BUG: object minio1/test-bucket/custpartsize not found" + exit_1 +fi + +# List the objects from replicated site +echo "Objects from replicated instance" +./mc ls minio2/test-bucket --insecure +repcount1=$(./mc ls minio2/test-bucket/encrypted --insecure | wc -l) +if [ "${repcount1}" -ne 1 ]; then + echo "BUG: object test-bucket/encrypted not replicated" + exit_1 +fi +repcount2=$(./mc ls minio2/test-bucket/mpartobj --insecure 
| wc -l) +if [ "${repcount2}" -ne 1 ]; then + echo "BUG: object test-bucket/mpartobj not replicated" + exit_1 +fi +repcount3=$(./mc ls minio2/test-bucket/defpartsize --insecure | wc -l) +if [ "${repcount3}" -ne 1 ]; then + echo "BUG: object test-bucket/defpartsize not replicated" + exit_1 +fi +repcount4=$(./mc ls minio2/test-bucket/custpartsize --insecure | wc -l) +if [ "${repcount4}" -ne 1 ]; then + echo "BUG: object test-bucket/custpartsize not replicated" + exit_1 +fi + +# Stat the objects from source site +echo "Stat minio1/test-bucket/encrypted" +./mc stat --no-list minio1/test-bucket/encrypted --insecure --json +stat_out1=$(./mc stat --no-list minio1/test-bucket/encrypted --insecure --json) +src_obj1_algo=$(echo "${stat_out1}" | jq '.metadata."X-Amz-Server-Side-Encryption"') +src_obj1_keyid=$(echo "${stat_out1}" | jq '.metadata."X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"') +echo "Stat minio1/test-bucket/defpartsize" +./mc stat --no-list minio1/test-bucket/defpartsize --insecure --json +stat_out2=$(./mc stat --no-list minio1/test-bucket/defpartsize --insecure --json) +src_obj2_algo=$(echo "${stat_out2}" | jq '.metadata."X-Amz-Server-Side-Encryption"') +src_obj2_keyid=$(echo "${stat_out2}" | jq '.metadata."X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"') +echo "Stat minio1/test-bucket/custpartsize" +./mc stat --no-list minio1/test-bucket/custpartsize --insecure --json +stat_out3=$(./mc stat --no-list minio1/test-bucket/custpartsize --insecure --json) +src_obj3_algo=$(echo "${stat_out3}" | jq '.metadata."X-Amz-Server-Side-Encryption"') +src_obj3_keyid=$(echo "${stat_out3}" | jq '.metadata."X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"') +echo "Stat minio1/test-bucket/mpartobj" +./mc stat --no-list minio1/test-bucket/mpartobj --enc-c "minio1/test-bucket/mpartobj=${TEST_MINIO_ENC_KEY}" --insecure --json +stat_out4=$(./mc stat --no-list minio1/test-bucket/mpartobj --enc-c "minio1/test-bucket/mpartobj=${TEST_MINIO_ENC_KEY}" --insecure --json) +src_obj4_etag=$(echo "${stat_out4}" | jq '.etag') +src_obj4_size=$(echo "${stat_out4}" | jq '.size') +src_obj4_md5=$(echo "${stat_out4}" | jq '.metadata."X-Amz-Server-Side-Encryption-Customer-Key-Md5"') + +# Stat the objects from replicated site +echo "Stat minio2/test-bucket/encrypted" +./mc stat --no-list minio2/test-bucket/encrypted --insecure --json +stat_out1_rep=$(./mc stat --no-list minio2/test-bucket/encrypted --insecure --json) +rep_obj1_algo=$(echo "${stat_out1_rep}" | jq '.metadata."X-Amz-Server-Side-Encryption"') +rep_obj1_keyid=$(echo "${stat_out1_rep}" | jq '.metadata."X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"') +echo "Stat minio2/test-bucket/defpartsize" +./mc stat --no-list minio2/test-bucket/defpartsize --insecure --json +stat_out2_rep=$(./mc stat --no-list minio2/test-bucket/defpartsize --insecure --json) +rep_obj2_algo=$(echo "${stat_out2_rep}" | jq '.metadata."X-Amz-Server-Side-Encryption"') +rep_obj2_keyid=$(echo "${stat_out2_rep}" | jq '.metadata."X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"') +echo "Stat minio2/test-bucket/custpartsize" +./mc stat --no-list minio2/test-bucket/custpartsize --insecure --json +stat_out3_rep=$(./mc stat --no-list minio2/test-bucket/custpartsize --insecure --json) +rep_obj3_algo=$(echo "${stat_out3_rep}" | jq '.metadata."X-Amz-Server-Side-Encryption"') +rep_obj3_keyid=$(echo "${stat_out3_rep}" | jq '.metadata."X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"') +echo "Stat minio2/test-bucket/mpartobj" +./mc stat --no-list minio2/test-bucket/mpartobj --enc-c 
"minio2/test-bucket/mpartobj=${TEST_MINIO_ENC_KEY}" --insecure --json +stat_out4_rep=$(./mc stat --no-list minio2/test-bucket/mpartobj --enc-c "minio2/test-bucket/mpartobj=${TEST_MINIO_ENC_KEY}" --insecure --json) +rep_obj4_etag=$(echo "${stat_out4}" | jq '.etag') +rep_obj4_size=$(echo "${stat_out4}" | jq '.size') +rep_obj4_md5=$(echo "${stat_out4}" | jq '.metadata."X-Amz-Server-Side-Encryption-Customer-Key-Md5"') + +# Check the algo and keyId of replicated objects +if [ "${rep_obj1_algo}" != "${src_obj1_algo}" ]; then + echo "BUG: Algorithm: '${rep_obj1_algo}' of replicated object: 'minio2/test-bucket/encrypted' doesn't match with source value: '${src_obj1_algo}'" + exit_1 +fi +if [ "${rep_obj1_keyid}" != "${src_obj1_keyid}" ]; then + echo "BUG: KeyId: '${rep_obj1_keyid}' of replicated object: 'minio2/test-bucket/encrypted' doesn't match with source value: '${src_obj1_keyid}'" + exit_1 +fi +if [ "${rep_obj2_algo}" != "${src_obj2_algo}" ]; then + echo "BUG: Algorithm: '${rep_obj2_algo}' of replicated object: 'minio2/test-bucket/defpartsize' doesn't match with source value: '${src_obj2_algo}'" + exit_1 +fi +if [ "${rep_obj2_keyid}" != "${src_obj2_keyid}" ]; then + echo "BUG: KeyId: '${rep_obj2_keyid}' of replicated object: 'minio2/test-bucket/defpartsize' doesn't match with source value: '${src_obj2_keyid}'" + exit_1 +fi +if [ "${rep_obj3_algo}" != "${src_obj3_algo}" ]; then + echo "BUG: Algorithm: '${rep_obj3_algo}' of replicated object: 'minio2/test-bucket/custpartsize' doesn't match with source value: '${src_obj3_algo}'" + exit_1 +fi +if [ "${rep_obj3_keyid}" != "${src_obj3_keyid}" ]; then + echo "BUG: KeyId: '${rep_obj3_keyid}' of replicated object: 'minio2/test-bucket/custpartsize' doesn't match with source value: '${src_obj3_keyid}'" + exit_1 +fi + +# Check the etag, size and md5 of replicated SSEC object +if [ "${rep_obj4_etag}" != "${src_obj4_etag}" ]; then + echo "BUG: Etag: '${rep_obj4_etag}' of replicated object: 'minio2/test-bucket/mpartobj' doesn't match with source value: '${src_obj4_etag}'" + exit_1 +fi +if [ "${rep_obj4_size}" != "${src_obj4_size}" ]; then + echo "BUG: Size: '${rep_obj4_size}' of replicated object: 'minio2/test-bucket/mpartobj' doesn't match with source value: '${src_obj4_size}'" + exit_1 +fi +if [ "${src_obj4_md5}" != "${rep_obj4_md5}" ]; then + echo "BUG: MD5 checksum of object 'minio2/test-bucket/mpartobj' doesn't match with source. Expected: '${src_obj4_md5}', Found: '${rep_obj4_md5}'" + exit_1 +fi + +# Check content of replicated objects +./mc cat minio2/test-bucket/encrypted --insecure +./mc cat minio2/test-bucket/mpartobj --enc-c "minio2/test-bucket/mpartobj=${TEST_MINIO_ENC_KEY}" --insecure >/dev/null || exit_1 +./mc cat minio2/test-bucket/defpartsize --insecure >/dev/null || exit_1 +./mc cat minio2/test-bucket/custpartsize --insecure >/dev/null || exit_1 + +echo -n "Starting MinIO instances with different kms key ..." 
+CI=on MINIO_KMS_SECRET_KEY=minio3-default-key:IyqsU3kMFloCNup4BsZtf/rmfHVcTgznO2F25CkEH1g= MINIO_ROOT_USER=minio MINIO_ROOT_PASSWORD=minio123 minio server --certs-dir /tmp/certs --address ":9003" --console-address ":10000" /tmp/minio3/disk{1...4} >/tmp/minio3_1.log 2>&1 & +CI=on MINIO_KMS_SECRET_KEY=minio4-default-key:IyqsU3kMFloCNup4BsZtf/rmfHVcTgznO2F25CkEH1g= MINIO_ROOT_USER=minio MINIO_ROOT_PASSWORD=minio123 minio server --certs-dir /tmp/certs --address ":9004" --console-address ":11000" /tmp/minio4/disk{1...4} >/tmp/minio4_1.log 2>&1 & +echo "done" + +export MC_HOST_minio3=https://minio:minio123@localhost:9003 +export MC_HOST_minio4=https://minio:minio123@localhost:9004 + +./mc ready minio3 --insecure +./mc ready minio4 --insecure + +./mc admin replicate add minio3 minio4 --insecure +./mc mb minio3/bucket --insecure +./mc cp --insecure --enc-kms minio3/bucket=minio3-default-key /tmp/data/encrypted minio3/bucket/x +sleep 10 +st=$(./mc stat --json --no-list --insecure minio3/bucket/x | jq -r .replicationStatus) +if [ "${st}" != "FAILED" ]; then + echo "BUG: Replication succeeded when kms key is different" + exit_1 +fi + +cleanup diff --git a/docs/site-replication/run-ssec-object-replication-with-compression.sh b/docs/site-replication/run-ssec-object-replication-with-compression.sh new file mode 100755 index 0000000000000..4f55f7a1b1077 --- /dev/null +++ b/docs/site-replication/run-ssec-object-replication-with-compression.sh @@ -0,0 +1,196 @@ +#!/usr/bin/env bash + +# shellcheck disable=SC2120 +exit_1() { + cleanup + + echo "minio1 ============" + cat /tmp/minio1_1.log + echo "minio2 ============" + cat /tmp/minio2_1.log + + exit 1 +} + +cleanup() { + echo -n "Cleaning up instances of MinIO ..." + pkill minio || sudo pkill minio + pkill -9 minio || sudo pkill -9 minio + rm -rf /tmp/minio{1,2} + echo "done" +} + +cleanup + +export MINIO_CI_CD=1 +export MINIO_BROWSER=off +export MINIO_ROOT_USER="minio" +export MINIO_ROOT_PASSWORD="minio123" +TEST_MINIO_ENC_KEY="MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDA" + +# Create certificates for TLS enabled MinIO +echo -n "Setup certs for MinIO instances ..." +wget -O certgen https://github.com/minio/certgen/releases/latest/download/certgen-linux-amd64 && chmod +x certgen +./certgen --host localhost +mkdir -p /tmp/certs +mv public.crt /tmp/certs || sudo mv public.crt /tmp/certs +mv private.key /tmp/certs || sudo mv private.key /tmp/certs +echo "done" + +# Start MinIO instances +echo -n "Starting MinIO instances ..." +minio server --certs-dir /tmp/certs --address ":9001" --console-address ":10000" /tmp/minio1/{1...4}/disk{1...4} /tmp/minio1/{5...8}/disk{1...4} >/tmp/minio1_1.log 2>&1 & +minio server --certs-dir /tmp/certs --address ":9002" --console-address ":11000" /tmp/minio2/{1...4}/disk{1...4} /tmp/minio2/{5...8}/disk{1...4} >/tmp/minio2_1.log 2>&1 & +echo "done" + +if [ ! -f ./mc ]; then + echo -n "Downloading MinIO client ..." + wget -O mc https://dl.min.io/client/mc/release/linux-amd64/mc && + chmod +x mc + echo "done" +fi + +export MC_HOST_minio1=https://minio:minio123@localhost:9001 +export MC_HOST_minio2=https://minio:minio123@localhost:9002 + +./mc ready minio1 --insecure +./mc ready minio2 --insecure + +# Prepare data for tests +echo -n "Preparing test data ..." 
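+# plainfile replicates unencrypted, encrypted and defpartsize are uploaded with
+# SSE-C keys, and mpartobj.txt carries a .txt extension so it matches the
+# compression filter (extensions=".txt") configured below; that combination is
+# what the currently disabled SSE-C-with-compression check targets.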
+mkdir -p /tmp/data +echo "Hello world" >/tmp/data/plainfile +echo "Hello from encrypted world" >/tmp/data/encrypted +touch /tmp/data/defpartsize +shred -s 500M /tmp/data/defpartsize +touch /tmp/data/mpartobj.txt +shred -s 500M /tmp/data/mpartobj.txt +echo "done" + +# Enable compression for site minio1 +./mc admin config set minio1 compression enable=on extensions=".txt" --insecure +./mc admin config set minio1 compression allow_encryption=off --insecure + +# Create bucket in source cluster +echo "Create bucket in source MinIO instance" +./mc mb minio1/test-bucket --insecure + +# Load objects to source site +echo "Loading objects to source MinIO instance" +./mc cp /tmp/data/plainfile minio1/test-bucket --insecure +./mc cp /tmp/data/encrypted minio1/test-bucket/encrypted --enc-c "minio1/test-bucket/encrypted=${TEST_MINIO_ENC_KEY}" --insecure +./mc cp /tmp/data/defpartsize minio1/test-bucket/defpartsize --enc-c "minio1/test-bucket/defpartsize=${TEST_MINIO_ENC_KEY}" --insecure + +# Below should fail as compression and SSEC used at the same time +# DISABLED: We must check the response header to see if compression was actually applied +#RESULT=$({ ./mc put /tmp/data/mpartobj.txt minio1/test-bucket/mpartobj.txt --enc-c "minio1/test-bucket/mpartobj.txt=${TEST_MINIO_ENC_KEY}" --insecure; } 2>&1) +#if [[ ${RESULT} != *"Server side encryption specified with SSE-C with compression not allowed"* ]]; then +# echo "BUG: Loading an SSE-C object to site with compression should fail. Succeeded though." +# exit_1 +#fi + +# Add replication site +./mc admin replicate add minio1 minio2 --insecure +# sleep for replication to complete +sleep 30 + +# List the objects from source site +echo "Objects from source instance" +./mc ls minio1/test-bucket --insecure +count1=$(./mc ls minio1/test-bucket/plainfile --insecure | wc -l) +if [ "${count1}" -ne 1 ]; then + echo "BUG: object minio1/test-bucket/plainfile not found" + exit_1 +fi +count2=$(./mc ls minio1/test-bucket/encrypted --insecure | wc -l) +if [ "${count2}" -ne 1 ]; then + echo "BUG: object minio1/test-bucket/encrypted not found" + exit_1 +fi +count3=$(./mc ls minio1/test-bucket/defpartsize --insecure | wc -l) +if [ "${count3}" -ne 1 ]; then + echo "BUG: object minio1/test-bucket/defpartsize not found" + exit_1 +fi +sleep 120 + +# List the objects from replicated site +echo "Objects from replicated instance" +./mc ls minio2/test-bucket --insecure +repcount1=$(./mc ls minio2/test-bucket/plainfile --insecure | wc -l) +if [ "${repcount1}" -ne 1 ]; then + echo "BUG: object test-bucket/plainfile not replicated" + exit_1 +fi +repcount2=$(./mc ls minio2/test-bucket/encrypted --insecure | wc -l) +if [ "${repcount2}" -ne 1 ]; then + echo "BUG: object test-bucket/encrypted not replicated" + exit_1 +fi +repcount3=$(./mc ls minio2/test-bucket/defpartsize --insecure | wc -l) +if [ "${repcount3}" -ne 1 ]; then + echo "BUG: object test-bucket/defpartsize not replicated" + exit_1 +fi + +# Stat the SSEC objects from source site +echo "Stat minio1/test-bucket/encrypted" +./mc stat --no-list minio1/test-bucket/encrypted --enc-c "minio1/test-bucket/encrypted=${TEST_MINIO_ENC_KEY}" --insecure --json +stat_out1=$(./mc stat --no-list minio1/test-bucket/encrypted --enc-c "minio1/test-bucket/encrypted=${TEST_MINIO_ENC_KEY}" --insecure --json) +src_obj1_etag=$(echo "${stat_out1}" | jq '.etag') +src_obj1_size=$(echo "${stat_out1}" | jq '.size') +src_obj1_md5=$(echo "${stat_out1}" | jq '.metadata."X-Amz-Server-Side-Encryption-Customer-Key-Md5"') +echo "Stat 
minio1/test-bucket/defpartsize" +./mc stat --no-list minio1/test-bucket/defpartsize --enc-c "minio1/test-bucket/defpartsize=${TEST_MINIO_ENC_KEY}" --insecure --json +stat_out2=$(./mc stat --no-list minio1/test-bucket/defpartsize --enc-c "minio1/test-bucket/defpartsize=${TEST_MINIO_ENC_KEY}" --insecure --json) +src_obj2_etag=$(echo "${stat_out2}" | jq '.etag') +src_obj2_size=$(echo "${stat_out2}" | jq '.size') +src_obj2_md5=$(echo "${stat_out2}" | jq '.metadata."X-Amz-Server-Side-Encryption-Customer-Key-Md5"') + +# Stat the SSEC objects from replicated site +echo "Stat minio2/test-bucket/encrypted" +./mc stat --no-list minio2/test-bucket/encrypted --enc-c "minio2/test-bucket/encrypted=${TEST_MINIO_ENC_KEY}" --insecure --json +stat_out1_rep=$(./mc stat --no-list minio2/test-bucket/encrypted --enc-c "minio2/test-bucket/encrypted=${TEST_MINIO_ENC_KEY}" --insecure --json) +rep_obj1_etag=$(echo "${stat_out1_rep}" | jq '.etag') +rep_obj1_size=$(echo "${stat_out1_rep}" | jq '.size') +rep_obj1_md5=$(echo "${stat_out1_rep}" | jq '.metadata."X-Amz-Server-Side-Encryption-Customer-Key-Md5"') +echo "Stat minio2/test-bucket/defpartsize" +./mc stat --no-list minio2/test-bucket/defpartsize --enc-c "minio2/test-bucket/defpartsize=${TEST_MINIO_ENC_KEY}" --insecure --json +stat_out2_rep=$(./mc stat --no-list minio2/test-bucket/defpartsize --enc-c "minio2/test-bucket/defpartsize=${TEST_MINIO_ENC_KEY}" --insecure --json) +rep_obj2_etag=$(echo "${stat_out2_rep}" | jq '.etag') +rep_obj2_size=$(echo "${stat_out2_rep}" | jq '.size') +rep_obj2_md5=$(echo "${stat_out2_rep}" | jq '.metadata."X-Amz-Server-Side-Encryption-Customer-Key-Md5"') + +# Check the etag and size of replicated SSEC objects +if [ "${rep_obj1_etag}" != "${src_obj1_etag}" ]; then + echo "BUG: Etag: '${rep_obj1_etag}' of replicated object: 'minio2/test-bucket/encrypted' doesn't match with source value: '${src_obj1_etag}'" + exit_1 +fi +if [ "${rep_obj1_size}" != "${src_obj1_size}" ]; then + echo "BUG: Size: '${rep_obj1_size}' of replicated object: 'minio2/test-bucket/encrypted' doesn't match with source value: '${src_obj1_size}'" + exit_1 +fi +if [ "${rep_obj2_etag}" != "${src_obj2_etag}" ]; then + echo "BUG: Etag: '${rep_obj2_etag}' of replicated object: 'minio2/test-bucket/defpartsize' doesn't match with source value: '${src_obj2_etag}'" + exit_1 +fi +if [ "${rep_obj2_size}" != "${src_obj2_size}" ]; then + echo "BUG: Size: '${rep_obj2_size}' of replicated object: 'minio2/test-bucket/defpartsize' doesn't match with source value: '${src_obj2_size}'" + exit_1 +fi + +# Check content of replicated SSEC objects +./mc cat minio2/test-bucket/encrypted --enc-c "minio2/test-bucket/encrypted=${TEST_MINIO_ENC_KEY}" --insecure +./mc cat minio2/test-bucket/defpartsize --enc-c "minio2/test-bucket/defpartsize=${TEST_MINIO_ENC_KEY}" --insecure >/dev/null || exit_1 + +# Check the MD5 checksums of encrypted objects from source and target +if [ "${src_obj1_md5}" != "${rep_obj1_md5}" ]; then + echo "BUG: MD5 checksum of object 'minio2/test-bucket/encrypted' doesn't match with source. Expected: '${src_obj1_md5}', Found: '${rep_obj1_md5}'" + exit_1 +fi +if [ "${src_obj2_md5}" != "${rep_obj2_md5}" ]; then + echo "BUG: MD5 checksum of object 'minio2/test-bucket/defpartsize' doesn't match with source. 
Expected: '${src_obj2_md5}', Found: '${rep_obj2_md5}'" + exit_1 +fi + +cleanup diff --git a/docs/site-replication/run-ssec-object-replication.sh b/docs/site-replication/run-ssec-object-replication.sh new file mode 100755 index 0000000000000..0f50f83457f30 --- /dev/null +++ b/docs/site-replication/run-ssec-object-replication.sh @@ -0,0 +1,223 @@ +#!/usr/bin/env bash + +# shellcheck disable=SC2120 +exit_1() { + cleanup + + echo "minio1 ============" + cat /tmp/minio1_1.log + echo "minio2 ============" + cat /tmp/minio2_1.log + + exit 1 +} + +cleanup() { + echo -n "Cleaning up instances of MinIO ..." + pkill minio || sudo pkill minio + pkill -9 minio || sudo pkill -9 minio + rm -rf /tmp/minio{1,2} + echo "done" +} + +cleanup + +export MINIO_CI_CD=1 +export MINIO_BROWSER=off +export MINIO_ROOT_USER="minio" +export MINIO_ROOT_PASSWORD="minio123" +TEST_MINIO_ENC_KEY="MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDA" + +# Create certificates for TLS enabled MinIO +echo -n "Setup certs for MinIO instances ..." +wget -O certgen https://github.com/minio/certgen/releases/latest/download/certgen-linux-amd64 && chmod +x certgen +./certgen --host localhost +mkdir -p /tmp/certs +mv public.crt /tmp/certs || sudo mv public.crt /tmp/certs +mv private.key /tmp/certs || sudo mv private.key /tmp/certs +echo "done" + +# Start MinIO instances +echo -n "Starting MinIO instances ..." +minio server --certs-dir /tmp/certs --address ":9001" --console-address ":10000" /tmp/minio1/{1...4}/disk{1...4} /tmp/minio1/{5...8}/disk{1...4} >/tmp/minio1_1.log 2>&1 & +minio server --certs-dir /tmp/certs --address ":9002" --console-address ":11000" /tmp/minio2/{1...4}/disk{1...4} /tmp/minio2/{5...8}/disk{1...4} >/tmp/minio2_1.log 2>&1 & +echo "done" + +if [ ! -f ./mc ]; then + echo -n "Downloading MinIO client ..." + wget -O mc https://dl.min.io/client/mc/release/linux-amd64/mc && + chmod +x mc + echo "done" +fi + +export MC_HOST_minio1=https://minio:minio123@localhost:9001 +export MC_HOST_minio2=https://minio:minio123@localhost:9002 + +./mc ready minio1 --insecure +./mc ready minio2 --insecure + +# Prepare data for tests +echo -n "Preparing test data ..." 
+mkdir -p /tmp/data +echo "Hello world" >/tmp/data/plainfile +echo "Hello from encrypted world" >/tmp/data/encrypted +touch /tmp/data/defpartsize +shred -s 500M /tmp/data/defpartsize +touch /tmp/data/custpartsize +shred -s 500M /tmp/data/custpartsize +echo "done" + +# Add replication site +./mc admin replicate add minio1 minio2 --insecure +# sleep for replication to complete +sleep 30 + +# Create bucket in source cluster +echo "Create bucket in source MinIO instance" +./mc mb minio1/test-bucket --insecure + +# Load objects to source site +echo "Loading objects to source MinIO instance" +set -x +./mc cp /tmp/data/plainfile minio1/test-bucket --insecure +./mc cp /tmp/data/encrypted minio1/test-bucket/encrypted --enc-c "minio1/test-bucket/encrypted=${TEST_MINIO_ENC_KEY}" --insecure +./mc cp /tmp/data/defpartsize minio1/test-bucket/defpartsize --enc-c "minio1/test-bucket/defpartsize=${TEST_MINIO_ENC_KEY}" --insecure +./mc put /tmp/data/custpartsize minio1/test-bucket/custpartsize --enc-c "minio1/test-bucket/custpartsize=${TEST_MINIO_ENC_KEY}" --insecure --part-size 50MiB +set +x +sleep 120 + +# List the objects from source site +echo "Objects from source instance" +./mc ls minio1/test-bucket --insecure +count1=$(./mc ls minio1/test-bucket/plainfile --insecure | wc -l) +if [ "${count1}" -ne 1 ]; then + echo "BUG: object minio1/test-bucket/plainfile not found" + exit_1 +fi +count2=$(./mc ls minio1/test-bucket/encrypted --insecure | wc -l) +if [ "${count2}" -ne 1 ]; then + echo "BUG: object minio1/test-bucket/encrypted not found" + exit_1 +fi +count3=$(./mc ls minio1/test-bucket/defpartsize --insecure | wc -l) +if [ "${count3}" -ne 1 ]; then + echo "BUG: object minio1/test-bucket/defpartsize not found" + exit_1 +fi +count4=$(./mc ls minio1/test-bucket/custpartsize --insecure | wc -l) +if [ "${count4}" -ne 1 ]; then + echo "BUG: object minio1/test-bucket/custpartsize not found" + exit_1 +fi + +# List the objects from replicated site +echo "Objects from replicated instance" +./mc ls minio2/test-bucket --insecure +repcount1=$(./mc ls minio2/test-bucket/plainfile --insecure | wc -l) +if [ "${repcount1}" -ne 1 ]; then + echo "BUG: object test-bucket/plainfile not replicated" + exit_1 +fi +repcount2=$(./mc ls minio2/test-bucket/encrypted --insecure | wc -l) +if [ "${repcount2}" -ne 1 ]; then + echo "BUG: object test-bucket/encrypted not replicated" + exit_1 +fi +repcount3=$(./mc ls minio2/test-bucket/defpartsize --insecure | wc -l) +if [ "${repcount3}" -ne 1 ]; then + echo "BUG: object test-bucket/defpartsize not replicated" + exit_1 +fi + +repcount4=$(./mc ls minio2/test-bucket/custpartsize --insecure | wc -l) +if [ "${repcount4}" -ne 1 ]; then + echo "BUG: object test-bucket/custpartsize not replicated" + exit_1 +fi + +# Stat the SSEC objects from source site +echo "Stat minio1/test-bucket/encrypted" +./mc stat --no-list minio1/test-bucket/encrypted --enc-c "minio1/test-bucket/encrypted=${TEST_MINIO_ENC_KEY}" --insecure --json +stat_out1=$(./mc stat --no-list minio1/test-bucket/encrypted --enc-c "minio1/test-bucket/encrypted=${TEST_MINIO_ENC_KEY}" --insecure --json) +src_obj1_etag=$(echo "${stat_out1}" | jq '.etag') +src_obj1_size=$(echo "${stat_out1}" | jq '.size') +src_obj1_md5=$(echo "${stat_out1}" | jq '.metadata."X-Amz-Server-Side-Encryption-Customer-Key-Md5"') +echo "Stat minio1/test-bucket/defpartsize" +./mc stat --no-list minio1/test-bucket/defpartsize --enc-c "minio1/test-bucket/defpartsize=${TEST_MINIO_ENC_KEY}" --insecure --json +stat_out2=$(./mc stat --no-list 
minio1/test-bucket/defpartsize --enc-c "minio1/test-bucket/defpartsize=${TEST_MINIO_ENC_KEY}" --insecure --json) +src_obj2_etag=$(echo "${stat_out2}" | jq '.etag') +src_obj2_size=$(echo "${stat_out2}" | jq '.size') +src_obj2_md5=$(echo "${stat_out2}" | jq '.metadata."X-Amz-Server-Side-Encryption-Customer-Key-Md5"') +echo "Stat minio1/test-bucket/custpartsize" +./mc stat --no-list minio1/test-bucket/custpartsize --enc-c "minio1/test-bucket/custpartsize=${TEST_MINIO_ENC_KEY}" --insecure --json +stat_out3=$(./mc stat --no-list minio1/test-bucket/custpartsize --enc-c "minio1/test-bucket/custpartsize=${TEST_MINIO_ENC_KEY}" --insecure --json) +src_obj3_etag=$(echo "${stat_out3}" | jq '.etag') +src_obj3_size=$(echo "${stat_out3}" | jq '.size') +src_obj3_md5=$(echo "${stat_out3}" | jq '.metadata."X-Amz-Server-Side-Encryption-Customer-Key-Md5"') + +# Stat the SSEC objects from replicated site +echo "Stat minio2/test-bucket/encrypted" +./mc stat --no-list minio2/test-bucket/encrypted --enc-c "minio2/test-bucket/encrypted=${TEST_MINIO_ENC_KEY}" --insecure --json +stat_out1_rep=$(./mc stat --no-list minio2/test-bucket/encrypted --enc-c "minio2/test-bucket/encrypted=${TEST_MINIO_ENC_KEY}" --insecure --json) +rep_obj1_etag=$(echo "${stat_out1_rep}" | jq '.etag') +rep_obj1_size=$(echo "${stat_out1_rep}" | jq '.size') +rep_obj1_md5=$(echo "${stat_out1_rep}" | jq '.metadata."X-Amz-Server-Side-Encryption-Customer-Key-Md5"') +echo "Stat minio2/test-bucket/defpartsize" +./mc stat --no-list minio2/test-bucket/defpartsize --enc-c "minio2/test-bucket/defpartsize=${TEST_MINIO_ENC_KEY}" --insecure --json +stat_out2_rep=$(./mc stat --no-list minio2/test-bucket/defpartsize --enc-c "minio2/test-bucket/defpartsize=${TEST_MINIO_ENC_KEY}" --insecure --json) +rep_obj2_etag=$(echo "${stat_out2_rep}" | jq '.etag') +rep_obj2_size=$(echo "${stat_out2_rep}" | jq '.size') +rep_obj2_md5=$(echo "${stat_out2_rep}" | jq '.metadata."X-Amz-Server-Side-Encryption-Customer-Key-Md5"') +echo "Stat minio2/test-bucket/custpartsize" +./mc stat --no-list minio2/test-bucket/custpartsize --enc-c "minio2/test-bucket/custpartsize=${TEST_MINIO_ENC_KEY}" --insecure --json +stat_out3_rep=$(./mc stat --no-list minio2/test-bucket/custpartsize --enc-c "minio2/test-bucket/custpartsize=${TEST_MINIO_ENC_KEY}" --insecure --json) +rep_obj3_etag=$(echo "${stat_out3_rep}" | jq '.etag') +rep_obj3_size=$(echo "${stat_out3_rep}" | jq '.size') +rep_obj3_md5=$(echo "${stat_out3_rep}" | jq '.metadata."X-Amz-Server-Side-Encryption-Customer-Key-Md5"') + +# Check the etag and size of replicated SSEC objects +if [ "${rep_obj1_etag}" != "${src_obj1_etag}" ]; then + echo "BUG: Etag: '${rep_obj1_etag}' of replicated object: 'minio2/test-bucket/encrypted' doesn't match with source value: '${src_obj1_etag}'" + exit_1 +fi +if [ "${rep_obj1_size}" != "${src_obj1_size}" ]; then + echo "BUG: Size: '${rep_obj1_size}' of replicated object: 'minio2/test-bucket/encrypted' doesn't match with source value: '${src_obj1_size}'" + exit_1 +fi +if [ "${rep_obj2_etag}" != "${src_obj2_etag}" ]; then + echo "BUG: Etag: '${rep_obj2_etag}' of replicated object: 'minio2/test-bucket/defpartsize' doesn't match with source value: '${src_obj2_etag}'" + exit_1 +fi +if [ "${rep_obj2_size}" != "${src_obj2_size}" ]; then + echo "BUG: Size: '${rep_obj2_size}' of replicated object: 'minio2/test-bucket/defpartsize' doesn't match with source value: '${src_obj2_size}'" + exit_1 +fi +if [ "${rep_obj3_etag}" != "${src_obj3_etag}" ]; then + echo "BUG: Etag: '${rep_obj3_etag}' of replicated object: 
'minio2/test-bucket/custpartsize' doesn't match with source value: '${src_obj3_etag}'" + exit_1 +fi +if [ "${rep_obj3_size}" != "${src_obj3_size}" ]; then + echo "BUG: Size: '${rep_obj3_size}' of replicated object: 'minio2/test-bucket/custpartsize' doesn't match with source value: '${src_obj3_size}'" + exit_1 +fi + +# Check content of replicated SSEC objects +./mc cat minio2/test-bucket/encrypted --enc-c "minio2/test-bucket/encrypted=${TEST_MINIO_ENC_KEY}" --insecure +./mc cat minio2/test-bucket/defpartsize --enc-c "minio2/test-bucket/defpartsize=${TEST_MINIO_ENC_KEY}" --insecure >/dev/null || exit_1 +./mc cat minio2/test-bucket/custpartsize --enc-c "minio2/test-bucket/custpartsize=${TEST_MINIO_ENC_KEY}" --insecure >/dev/null || exit_1 + +# Check the MD5 checksums of encrypted objects from source and target +if [ "${src_obj1_md5}" != "${rep_obj1_md5}" ]; then + echo "BUG: MD5 checksum of object 'minio2/test-bucket/encrypted' doesn't match with source. Expected: '${src_obj1_md5}', Found: '${rep_obj1_md5}'" + exit_1 +fi +if [ "${src_obj2_md5}" != "${rep_obj2_md5}" ]; then + echo "BUG: MD5 checksum of object 'minio2/test-bucket/defpartsize' doesn't match with source. Expected: '${src_obj2_md5}', Found: '${rep_obj2_md5}'" + exit_1 +fi +if [ "${src_obj3_md5}" != "${rep_obj3_md5}" ]; then + echo "BUG: MD5 checksum of object 'minio2/test-bucket/custpartsize' doesn't match with source. Expected: '${src_obj3_md5}', Found: '${rep_obj3_md5}'" + exit_1 +fi + +cleanup diff --git a/docs/sts/README.md b/docs/sts/README.md index 87098ce6c6d08..62fa48b18584b 100644 --- a/docs/sts/README.md +++ b/docs/sts/README.md @@ -106,5 +106,5 @@ These credentials can now be used to perform MinIO API operations. ## Explore Further -- [MinIO Admin Complete Guide](https://min.io/docs/minio/linux/reference/minio-mc-admin.html) -- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html) +- [MinIO Admin Complete Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc-admin.html) +- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html) diff --git a/docs/sts/assume-role.go b/docs/sts/assume-role.go index f5878ec7e86d1..7c8735ffe9e03 100644 --- a/docs/sts/assume-role.go +++ b/docs/sts/assume-role.go @@ -30,6 +30,7 @@ import ( "os" "time" + "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7" cr "github.com/minio/minio-go/v7/pkg/credentials" ) @@ -85,6 +86,7 @@ func main() { if f, err := os.Open(sessionPolicyFile); err != nil { log.Fatalf("Unable to open session policy file: %v", err) } else { + defer f.Close() bs, err := io.ReadAll(f) if err != nil { log.Fatalf("Error reading session policy file: %v", err) @@ -111,6 +113,11 @@ func main() { Secure: stsEndpointURL.Scheme == "https", } + mopts := &madmin.Options{ + Creds: li, + Secure: stsEndpointURL.Scheme == "https", + } + v, err := li.Get() if err != nil { log.Fatalf("Error retrieving STS credentials: %v", err) @@ -124,6 +131,18 @@ func main() { return } + // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise. + // New returns an MinIO Admin client object. 
+ madmClnt, err := madmin.NewWithOptions(stsEndpointURL.Host, mopts) + if err != nil { + log.Fatalln(err) + } + + err = madmClnt.ServiceRestart(context.Background()) + if err != nil { + log.Fatalln(err) + } + // Use generated credentials to authenticate with MinIO server minioClient, err := minio.New(stsEndpointURL.Host, opts) if err != nil { diff --git a/docs/sts/assume-role.md b/docs/sts/assume-role.md index e94b541038c17..8b9600f7fc0a6 100644 --- a/docs/sts/assume-role.md +++ b/docs/sts/assume-role.md @@ -89,7 +89,7 @@ export MINIO_ROOT_PASSWORD=minio123 minio server ~/test ``` -Create new users following the multi-user guide [here](https://min.io/docs/minio/linux/administration/identity-access-management.html) +Create new users following the multi-user guide [here](https://docs.min.io/community/minio-object-store/administration/identity-access-management.html) ### Testing an example with awscli tool @@ -134,5 +134,5 @@ SessionToken: eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJhY2Nlc3NLZXkiOiIyN1lEUllFT ## Explore Further -- [MinIO Admin Complete Guide](https://min.io/docs/minio/linux/reference/minio-mc-admin.html) -- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html) +- [MinIO Admin Complete Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc-admin.html) +- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html) diff --git a/docs/sts/casdoor.md b/docs/sts/casdoor.md index d11814abea23d..4bdf745b3e639 100644 --- a/docs/sts/casdoor.md +++ b/docs/sts/casdoor.md @@ -112,5 +112,5 @@ This will open the login page of Casdoor, upon successful login, STS credentials ## Explore Further - [Casdoor MinIO Integration](https://casdoor.org/docs/integration/minio) -- [MinIO STS Quickstart Guide](https://min.io/docs/minio/linux/developers/security-token-service.html) -- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html) +- [MinIO STS Quickstart Guide](https://docs.min.io/community/minio-object-store/developers/security-token-service.html) +- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html) diff --git a/docs/sts/client-grants.md b/docs/sts/client-grants.md index 1ae0286c9c766..1d4132adfc317 100644 --- a/docs/sts/client-grants.md +++ b/docs/sts/client-grants.md @@ -111,5 +111,5 @@ $ go run client-grants.go -cid PoEgXP6uVO45IsENRngDXj5Au5Ya -csec eKsw6z8CtOJVBt ## Explore Further -- [MinIO Admin Complete Guide](https://min.io/docs/minio/linux/reference/minio-mc-admin.html) -- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html) +- [MinIO Admin Complete Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc-admin.html) +- [The MinIO documentation website](https://docs.min.io/community/minio-object-store/index.html) diff --git a/docs/sts/dex.md b/docs/sts/dex.md index e2439b0e0e1c0..e7bf3d8351372 100644 --- a/docs/sts/dex.md +++ b/docs/sts/dex.md @@ -99,5 +99,5 @@ and add relevant policies on MinIO using `mc admin policy create myminio/ NOTE: A zero value of `requests_max` means MinIO will automatically calculate requests based on available RAM size and that is the default behavior. -### Configuring connection (wait) deadline - -This value works in conjunction with max connection setting, setting this value allows for long waiting requests to quickly time out when there is no slot available to perform the request. 
- -This will reduce the pileup of waiting requests when clients are not configured with timeouts. Default wait time is *10 seconds* if *MINIO_API_REQUESTS_MAX* is enabled. This may need to be tuned to your application needs. - -Example: Limit a MinIO cluster to accept at max 1600 simultaneous S3 API requests across 8 servers, and set the wait deadline of *2 minutes* per API operation. - -```sh -export MINIO_API_REQUESTS_MAX=1600 -export MINIO_API_REQUESTS_DEADLINE=2m -export MINIO_ROOT_USER=your-access-key -export MINIO_ROOT_PASSWORD=your-secret-key -minio server http://server{1...8}/mnt/hdd{1...16} -``` - -or - -```sh -mc admin config set myminio/ api requests_max=1600 requests_deadline=2m -mc admin service restart myminio/ -``` diff --git a/docs/tls/README.md b/docs/tls/README.md index 5d1908f4f72d7..09ec84e5b31a9 100644 --- a/docs/tls/README.md +++ b/docs/tls/README.md @@ -9,11 +9,11 @@ This guide explains how to configure MinIO Server with TLS certificates on Linux ## 1. Install MinIO Server -Install MinIO Server using the instructions in the [MinIO Quickstart Guide](https://min.io/docs/minio/linux/index.html#quickstart-for-linux). +Install MinIO Server using the instructions in the [MinIO Quickstart Guide](https://docs.min.io/community/minio-object-store/operations/deployments/baremetal-deploy-minio-on-redhat-linux.html). ## 2. Use an Existing Key and Certificate with MinIO -This section describes how to use a private key and public certificate that have been obtained from a certificate authority (CA). If these files have not been obtained, skip to [3. Generate Self-signed Certificates](#generate-use-self-signed-keys-certificates) or generate them with [Let's Encrypt](https://letsencrypt.org) using these instructions: [Generate Let's Encrypt certificate using Certbot for MinIO](https://min.io/docs/minio/linux/integrations/generate-lets-encrypt-certificate-using-certbot-for-minio.html). For more about TLS and certificates in MinIO, see the [Network Encryption documentation](https://min.io/docs/minio/kubernetes/upstream/operations/network-encryption.html). +This section describes how to use a private key and public certificate that have been obtained from a certificate authority (CA). If these files have not been obtained, skip to [3. Generate Self-signed Certificates](#generate-use-self-signed-keys-certificates) or generate them with [Let's Encrypt](https://letsencrypt.org) using these instructions: [Generate Let's Encrypt certificate using Certbot for MinIO](https://docs.min.io/community/minio-object-store/integrations/generate-lets-encrypt-certificate-using-certbot-for-minio.html). For more about TLS and certificates in MinIO, see the [Network Encryption documentation](https://docs.min.io/community/minio-object-store/operations/network-encryption.html). Copy the existing private key and public certificate to the `certs` directory. 
The default certs directory is: @@ -238,7 +238,7 @@ MinIO can connect to other servers, including MinIO nodes or other server types ## Explore Further * [TLS Configuration for MinIO server on Kubernetes](https://github.com/minio/minio/tree/master/docs/tls/kubernetes) -* [MinIO Client Complete Guide](https://min.io/docs/minio/linux/reference/minio-mc.html) -* [MinIO Network Encryption Overview](https://min.io/docs/minio/linux/operations/network-encryption.html) -* [Generate Let's Encrypt Certificate](https://min.io/docs/minio/linux/integrations/generate-lets-encrypt-certificate-using-certbot-for-minio.html) -* [Setup nginx Proxy with MinIO Server](https://min.io/docs/minio/linux/integrations/setup-nginx-proxy-with-minio.html) +* [MinIO Client Complete Guide](https://docs.min.io/community/minio-object-store/reference/minio-mc.html) +* [MinIO Network Encryption Overview](https://docs.min.io/community/minio-object-store/operations/network-encryption.html) +* [Generate Let's Encrypt Certificate](https://docs.min.io/community/minio-object-store/integrations/generate-lets-encrypt-certificate-using-certbot-for-minio.html) +* [Setup nginx Proxy with MinIO Server](https://docs.min.io/community/minio-object-store/integrations/setup-nginx-proxy-with-minio.html) diff --git a/docs/tls/kubernetes/README.md b/docs/tls/kubernetes/README.md index ce0a549b3541c..952909c85a9c7 100644 --- a/docs/tls/kubernetes/README.md +++ b/docs/tls/kubernetes/README.md @@ -4,13 +4,13 @@ This document explains how to configure MinIO server with TLS certificates on Ku ## 1. Prerequisites -- Familiarity with [MinIO deployment process on Kubernetes](https://min.io/docs/minio/kubernetes/upstream/operations/installation.html). +- Familiarity with [MinIO deployment process on Kubernetes](https://docs.min.io/community/minio-object-store/operations/deployments/kubernetes.html). - Kubernetes cluster with `kubectl` configured. -- Acquire TLS certificates, either from a CA or [create self-signed certificates](https://min.io/docs/minio/kubernetes/upstream/operations/network-encryption.html). +- Acquire TLS certificates, either from a CA or [create self-signed certificates](https://docs.min.io/community/minio-object-store/operations/network-encryption.html). -For a [distributed MinIO setup](https://min.io/docs/minio/kubernetes/upstream/operations/installation.html#procedure), where there are multiple pods with different domain names expected to run, you will either need wildcard certificates valid for all the domains or have specific certificates for each domain. If you are going to use specific certificates, make sure to create Kubernetes secrets accordingly. +For a [distributed MinIO setup](https://docs.min.io/community/minio-object-store/operations/deployments/kubernetes.html), where there are multiple pods with different domain names expected to run, you will either need wildcard certificates valid for all the domains or have specific certificates for each domain. If you are going to use specific certificates, make sure to create Kubernetes secrets accordingly. For testing purposes, here is [how to create self-signed certificates](https://github.com/minio/minio/tree/master/docs/tls#3-generate-self-signed-certificates). diff --git a/docs/tuning/README.md b/docs/tuning/README.md new file mode 100644 index 0000000000000..7a0721eefea98 --- /dev/null +++ b/docs/tuning/README.md @@ -0,0 +1,26 @@ +# How to enable 'minio' performance profile with tuned? 
+ +## Prerequisites + +Please make sure the following packages are already installed via `dnf` or `apt`: + +- `tuned` +- `curl` + +### Install `tuned.conf` performance profile + +#### Step 1 - download `tuned.conf` from the referenced link +``` +wget https://raw.githubusercontent.com/minio/minio/master/docs/tuning/tuned.conf +``` + +#### Step 2 - install tuned.conf as supported performance profile on all nodes +``` +sudo mkdir -p /usr/lib/tuned/minio/ +sudo mv tuned.conf /usr/lib/tuned/minio +``` + +#### Step 3 - to enable minio performance profile on all the nodes +``` +sudo tuned-adm profile minio +``` diff --git a/docs/tuning/tuned.conf b/docs/tuning/tuned.conf new file mode 100644 index 0000000000000..18f5dece0b933 --- /dev/null +++ b/docs/tuning/tuned.conf @@ -0,0 +1,83 @@ +[main] +summary=Maximum server performance for MinIO + +[vm] +transparent_hugepage=madvise + +[sysfs] +/sys/kernel/mm/transparent_hugepage/defrag=defer+madvise +/sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none=0 + +[cpu] +force_latency=1 +governor=performance +energy_perf_bias=performance +min_perf_pct=100 + +[sysctl] +fs.xfs.xfssyncd_centisecs=72000 +net.core.busy_read=50 +net.core.busy_poll=50 +kernel.numa_balancing=1 + +# Do not use swap at all +vm.swappiness=0 +vm.vfs_cache_pressure=50 + +# Start writeback at 3% memory +vm.dirty_background_ratio=3 +# Force writeback at 10% memory +vm.dirty_ratio=10 + +# Quite a few memory map +# areas may be consumed +vm.max_map_count=524288 + +# Default is 500000 = 0.5ms +kernel.sched_migration_cost_ns=5000000 + +# stalled hdd io threads +kernel.hung_task_timeout_secs=85 + +# network tuning for bigger throughput +net.core.netdev_max_backlog=250000 +net.core.somaxconn=16384 +net.ipv4.tcp_syncookies=0 +net.ipv4.tcp_max_syn_backlog=16384 +net.core.wmem_max=4194304 +net.core.rmem_max=4194304 +net.core.rmem_default=4194304 +net.core.wmem_default=4194304 +net.ipv4.tcp_rmem="4096 87380 4194304" +net.ipv4.tcp_wmem="4096 65536 4194304" + +# Reduce CPU utilization +net.ipv4.tcp_timestamps=0 + +# Increase throughput +net.ipv4.tcp_sack=1 + +# Low latency mode for TCP +net.ipv4.tcp_low_latency=1 + +# The following variable is used to tell the kernel how +# much of the socket buffer space should be used for TCP +# window size, and how much to save for an application buffer. +net.ipv4.tcp_adv_win_scale=1 + +# disable RFC2861 behavior +net.ipv4.tcp_slow_start_after_idle = 0 + +# Fix faulty network setups +net.ipv4.tcp_mtu_probing=1 +net.ipv4.tcp_base_mss=1280 + +# Disable ipv6 +net.ipv6.conf.all.disable_ipv6=1 +net.ipv6.conf.default.disable_ipv6=1 +net.ipv6.conf.lo.disable_ipv6=1 + +[bootloader] +# Avoid firing timers for all CPUs at the same time. This is irrelevant for +# full nohz systems +cmdline=skew_tick=1 \ No newline at end of file diff --git a/go.mod b/go.mod index 209471f70b6eb..a957085237fdc 100644 --- a/go.mod +++ b/go.mod @@ -1,259 +1,276 @@ module github.com/minio/minio -go 1.21 +go 1.24.0 + +toolchain go1.24.8 + +// Install tools using 'go install tool'. 
+tool ( + github.com/tinylib/msgp + golang.org/x/tools/cmd/stringer +) require ( - cloud.google.com/go/storage v1.38.0 - github.com/Azure/azure-storage-blob-go v0.15.0 - github.com/Azure/go-autorest/autorest v0.11.29 - github.com/Azure/go-autorest/autorest/adal v0.9.23 - github.com/IBM/sarama v1.42.2 + aead.dev/mtls v0.2.1 + cloud.google.com/go/storage v1.52.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 + github.com/IBM/sarama v1.45.1 github.com/alecthomas/participle v0.7.1 - github.com/bcicen/jstream v1.0.1 - github.com/beevik/ntp v1.3.1 + github.com/beevik/ntp v1.4.3 github.com/buger/jsonparser v1.1.1 - github.com/cespare/xxhash/v2 v2.2.0 + github.com/cespare/xxhash/v2 v2.3.0 github.com/cheggaaa/pb v1.0.29 - github.com/coredns/coredns v1.11.1 - github.com/coreos/go-oidc v2.2.1+incompatible + github.com/coreos/go-oidc/v3 v3.14.1 github.com/coreos/go-systemd/v22 v22.5.0 - github.com/cosnicolaou/pbzip2 v1.0.3 + github.com/cosnicolaou/pbzip2 v1.0.5 github.com/dchest/siphash v1.2.3 github.com/dustin/go-humanize v1.0.1 - github.com/eclipse/paho.mqtt.golang v1.4.3 + github.com/eclipse/paho.mqtt.golang v1.5.0 github.com/elastic/go-elasticsearch/v7 v7.17.10 - github.com/fatih/color v1.16.0 - github.com/felixge/fgprof v0.9.3 + github.com/fatih/color v1.18.0 + github.com/felixge/fgprof v0.9.5 github.com/fraugster/parquet-go v0.12.0 - github.com/go-ldap/ldap/v3 v3.4.6 - github.com/go-openapi/loads v0.21.5 - github.com/go-sql-driver/mysql v1.7.1 - github.com/gobwas/ws v1.3.2 - github.com/golang-jwt/jwt/v4 v4.5.0 - github.com/gomodule/redigo v1.8.9 + github.com/go-ldap/ldap/v3 v3.4.11 + github.com/go-openapi/loads v0.22.0 + github.com/go-sql-driver/mysql v1.9.2 + github.com/gobwas/ws v1.4.0 + github.com/golang-jwt/jwt/v4 v4.5.2 + github.com/gomodule/redigo v1.9.2 github.com/google/uuid v1.6.0 - github.com/hashicorp/golang-lru v1.0.2 github.com/inconshreveable/mousetrap v1.1.0 github.com/json-iterator/go v1.1.12 - github.com/klauspost/compress v1.17.6 - github.com/klauspost/cpuid/v2 v2.2.6 + github.com/klauspost/compress v1.18.0 + github.com/klauspost/cpuid/v2 v2.2.10 github.com/klauspost/filepathx v1.1.1 github.com/klauspost/pgzip v1.2.6 github.com/klauspost/readahead v1.4.0 - github.com/klauspost/reedsolomon v1.12.1 + github.com/klauspost/reedsolomon v1.12.4 github.com/lib/pq v1.10.9 - github.com/lithammer/shortuuid/v4 v4.0.0 - github.com/miekg/dns v1.1.58 + github.com/lithammer/shortuuid/v4 v4.2.0 + github.com/miekg/dns v1.1.65 github.com/minio/cli v1.24.2 - github.com/minio/console v1.0.0 + github.com/minio/console v1.7.7-0.20250905210349-2017f33b26e1 github.com/minio/csvparser v1.0.0 github.com/minio/dnscache v0.1.1 - github.com/minio/dperf v0.5.3 - github.com/minio/highwayhash v1.0.2 - github.com/minio/kms-go/kes v0.3.0 - github.com/minio/madmin-go/v3 v3.0.50-0.20240307080957-112c599cb563 - github.com/minio/minio-go/v7 v7.0.68 - github.com/minio/mux v1.9.0 - github.com/minio/pkg/v2 v2.0.9-0.20240209124402-7990a27fd79d + github.com/minio/dperf v0.6.3 + github.com/minio/highwayhash v1.0.3 + github.com/minio/kms-go/kes v0.3.1 + github.com/minio/kms-go/kms v0.5.1-0.20250225090116-4e64ce8d0f35 + github.com/minio/madmin-go/v3 v3.0.109 + github.com/minio/minio-go/v7 v7.0.91 + github.com/minio/mux v1.9.2 + github.com/minio/pkg/v3 v3.1.3 github.com/minio/selfupdate v0.6.0 - github.com/minio/sha256-simd v1.0.1 github.com/minio/simdjson-go v0.4.5 - github.com/minio/sio v0.3.1 + 
github.com/minio/sio v0.4.1 github.com/minio/xxml v0.0.3 - github.com/minio/zipindex v0.3.0 + github.com/minio/zipindex v0.4.0 github.com/mitchellh/go-homedir v1.1.0 - github.com/nats-io/nats-server/v2 v2.9.23 - github.com/nats-io/nats.go v1.32.0 + github.com/nats-io/nats-server/v2 v2.11.1 + github.com/nats-io/nats.go v1.41.2 github.com/nats-io/stan.go v0.10.4 github.com/ncw/directio v1.0.5 github.com/nsqio/go-nsq v1.1.0 - github.com/philhofer/fwd v1.1.2 - github.com/pierrec/lz4 v2.6.1+incompatible + github.com/philhofer/fwd v1.2.0 + github.com/pierrec/lz4/v4 v4.1.22 github.com/pkg/errors v0.9.1 - github.com/pkg/sftp v1.13.6 - github.com/pkg/xattr v0.4.9 - github.com/prometheus/client_golang v1.18.0 - github.com/prometheus/client_model v0.5.0 - github.com/prometheus/common v0.46.0 - github.com/prometheus/procfs v0.12.0 - github.com/rabbitmq/amqp091-go v1.9.0 - github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 - github.com/rs/cors v1.10.1 + github.com/pkg/sftp v1.13.9 + github.com/pkg/xattr v0.4.10 + github.com/prometheus/client_golang v1.22.0 + github.com/prometheus/client_model v0.6.2 + github.com/prometheus/common v0.63.0 + github.com/prometheus/procfs v0.16.1 + github.com/puzpuzpuz/xsync/v3 v3.5.1 + github.com/rabbitmq/amqp091-go v1.10.0 + github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 + github.com/rs/cors v1.11.1 github.com/secure-io/sio-go v0.3.1 - github.com/shirou/gopsutil/v3 v3.24.1 - github.com/tidwall/gjson v1.17.0 - github.com/tinylib/msgp v1.1.9 + github.com/shirou/gopsutil/v3 v3.24.5 + github.com/tinylib/msgp v1.4.0 github.com/valyala/bytebufferpool v1.0.0 github.com/xdg/scram v1.0.5 github.com/zeebo/xxh3 v1.0.2 - go.etcd.io/etcd/api/v3 v3.5.12 - go.etcd.io/etcd/client/v3 v3.5.12 + go.etcd.io/etcd/api/v3 v3.5.21 + go.etcd.io/etcd/client/v3 v3.5.21 go.uber.org/atomic v1.11.0 - go.uber.org/zap v1.26.0 + go.uber.org/zap v1.27.0 goftp.io/server/v2 v2.0.1 - golang.org/x/crypto v0.21.0 - golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3 - golang.org/x/oauth2 v0.17.0 - golang.org/x/sys v0.18.0 - golang.org/x/time v0.5.0 - google.golang.org/api v0.164.0 + golang.org/x/crypto v0.37.0 + golang.org/x/oauth2 v0.29.0 + golang.org/x/sync v0.13.0 + golang.org/x/sys v0.32.0 + golang.org/x/term v0.31.0 + golang.org/x/time v0.11.0 + google.golang.org/api v0.230.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 ) require ( aead.dev/mem v0.2.0 // indirect - aead.dev/minisign v0.2.1 // indirect - cloud.google.com/go v0.112.0 // indirect - cloud.google.com/go/compute v1.24.0 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.6 // indirect - github.com/Azure/azure-pipeline-go v0.2.3 // indirect - github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect - github.com/Azure/go-autorest/logger v0.2.1 // indirect - github.com/Azure/go-autorest/tracing v0.6.0 // indirect + aead.dev/minisign v0.3.0 // indirect + cel.dev/expr v0.23.1 // indirect + cloud.google.com/go v0.120.1 // indirect + cloud.google.com/go/auth v0.16.0 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect + cloud.google.com/go/compute/metadata v0.6.0 // indirect + cloud.google.com/go/iam v1.5.2 // indirect + cloud.google.com/go/monitoring v1.24.2 // indirect + filippo.io/edwards25519 v1.1.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect + 
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect + github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect github.com/VividCortex/ewma v1.2.0 // indirect github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect - github.com/apache/thrift v0.19.0 // indirect - github.com/armon/go-metrics v0.4.0 // indirect + github.com/apache/thrift v0.21.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/charmbracelet/bubbles v0.18.0 // indirect - github.com/charmbracelet/bubbletea v0.25.0 // indirect - github.com/charmbracelet/lipgloss v0.9.1 // indirect - github.com/containerd/console v1.0.4 // indirect + github.com/charmbracelet/bubbles v0.20.0 // indirect + github.com/charmbracelet/bubbletea v1.3.4 // indirect + github.com/charmbracelet/lipgloss v1.0.0 // indirect + github.com/charmbracelet/x/ansi v0.8.0 // indirect + github.com/charmbracelet/x/term v0.2.1 // indirect + github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/eapache/go-resiliency v1.5.0 // indirect + github.com/eapache/go-resiliency v1.7.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect + github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect + github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect + github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect github.com/fatih/structs v1.1.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/frankban/quicktest v1.14.4 // indirect - github.com/gdamore/encoding v1.0.0 // indirect - github.com/gdamore/tcell/v2 v2.7.0 // indirect - github.com/go-asn1-ber/asn1-ber v1.5.5 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 // indirect + github.com/go-ini/ini v1.67.0 // indirect + github.com/go-jose/go-jose/v4 v4.1.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect - github.com/go-openapi/analysis v0.22.2 // indirect - github.com/go-openapi/errors v0.21.0 // indirect - github.com/go-openapi/jsonpointer v0.20.2 // indirect - github.com/go-openapi/jsonreference v0.20.4 // indirect - github.com/go-openapi/runtime v0.27.1 // indirect - github.com/go-openapi/spec v0.20.14 // indirect - github.com/go-openapi/strfmt v0.22.0 // indirect - github.com/go-openapi/swag v0.22.9 // indirect - github.com/go-openapi/validate v0.23.0 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect + github.com/go-openapi/errors v0.22.1 // indirect + github.com/go-openapi/jsonpointer v0.21.1 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/runtime v0.28.0 // indirect + github.com/go-openapi/spec v0.21.0 
// indirect + github.com/go-openapi/strfmt v0.23.0 // indirect + github.com/go-openapi/swag v0.23.1 // indirect + github.com/go-openapi/validate v0.24.0 // indirect github.com/gobwas/httphead v0.1.0 // indirect github.com/gobwas/pool v0.2.1 // indirect - github.com/goccy/go-json v0.10.2 // indirect + github.com/goccy/go-json v0.10.5 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 // indirect - github.com/google/s2a-go v0.1.7 // indirect + github.com/golang-jwt/jwt/v5 v5.2.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v1.0.0 // indirect + github.com/google/go-tpm v0.9.3 // indirect + github.com/google/pprof v0.0.0-20250422154841-e1f9c1950416 // indirect + github.com/google/s2a-go v0.1.9 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect - github.com/gorilla/websocket v1.5.1 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect + github.com/googleapis/gax-go/v2 v2.14.1 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/hashicorp/go-hclog v1.2.0 // indirect - github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/jcmturner/aescts/v2 v2.0.0 // indirect github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect github.com/jcmturner/gofork v1.7.6 // indirect github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect - github.com/jedib0t/go-pretty/v6 v6.5.4 // indirect - github.com/jessevdk/go-flags v1.5.0 // indirect + github.com/jedib0t/go-pretty/v6 v6.6.7 // indirect + github.com/jessevdk/go-flags v1.6.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/juju/ratelimit v1.0.2 // indirect github.com/kr/fs v0.1.0 // indirect - github.com/lestrrat-go/backoff/v2 v2.0.8 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/lestrrat-go/blackmagic v1.0.2 // indirect github.com/lestrrat-go/httpcc v1.0.1 // indirect + github.com/lestrrat-go/httprc v1.0.6 // indirect github.com/lestrrat-go/iter v1.0.2 // indirect - github.com/lestrrat-go/jwx v1.2.29 // indirect + github.com/lestrrat-go/jwx/v2 v2.1.4 // indirect github.com/lestrrat-go/option v1.0.1 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect - github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed // indirect - github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-ieproxy v0.0.11 // indirect + github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 // indirect + github.com/mailru/easyjson v0.9.0 // indirect + github.com/mattn/go-colorable v0.1.14 // indirect + github.com/mattn/go-ieproxy v0.0.12 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-localereader v0.0.1 // indirect - github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect 
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/minio/colorjson v1.0.6 // indirect + github.com/minio/colorjson v1.0.8 // indirect + github.com/minio/crc64nvme v1.0.1 // indirect github.com/minio/filepath v1.0.0 // indirect - github.com/minio/mc v0.0.0-20240209221824-669cb0a9a475 // indirect + github.com/minio/mc v0.0.0-20250313080218-cf909e1063a9 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/minio/websocket v1.6.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/montanaflynn/stats v0.7.1 // indirect github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect github.com/muesli/cancelreader v0.2.2 // indirect github.com/muesli/reflow v0.3.0 // indirect - github.com/muesli/termenv v0.15.2 // indirect - github.com/nats-io/jwt/v2 v2.5.0 // indirect - github.com/nats-io/nats-streaming-server v0.24.3 // indirect - github.com/nats-io/nkeys v0.4.7 // indirect + github.com/muesli/termenv v0.16.0 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/nats-io/jwt/v2 v2.7.4 // indirect + github.com/nats-io/nats-streaming-server v0.24.6 // indirect + github.com/nats-io/nkeys v0.4.11 // indirect github.com/nats-io/nuid v1.0.1 // indirect - github.com/navidys/tvxwidgets v0.5.0 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect - github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/posener/complete v1.2.3 // indirect - github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect - github.com/pquerna/cachecontrol v0.2.0 // indirect - github.com/prometheus/prom2json v1.3.3 // indirect - github.com/rivo/tview v0.0.0-20240204151237-861aa94d61c8 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/prometheus/prom2json v1.4.2 // indirect + github.com/prometheus/prometheus v0.303.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/rjeczalik/notify v0.9.3 // indirect - github.com/rs/xid v1.5.0 // indirect - github.com/safchain/ethtool v0.3.0 // indirect + github.com/rs/xid v1.6.0 // indirect + github.com/safchain/ethtool v0.5.10 // indirect + github.com/segmentio/asm v1.2.0 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect + github.com/tidwall/gjson v1.18.0 // indirect github.com/tidwall/match v1.1.1 // indirect github.com/tidwall/pretty v1.2.1 // indirect - github.com/tklauser/go-sysconf v0.3.13 // indirect - github.com/tklauser/numcpus v0.7.0 // indirect - github.com/unrolled/secure v1.14.0 // indirect - github.com/vbauerster/mpb/v8 v8.7.2 // indirect + github.com/tklauser/go-sysconf v0.3.15 // indirect + github.com/tklauser/numcpus v0.10.0 // indirect + github.com/unrolled/secure v1.17.0 // indirect + github.com/vbauerster/mpb/v8 v8.9.3 // indirect github.com/xdg/stringprep v1.0.3 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.12 // indirect - go.mongodb.org/mongo-driver v1.13.1 // indirect - go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 // indirect - 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect - go.opentelemetry.io/otel v1.23.1 // indirect - go.opentelemetry.io/otel/metric v1.23.1 // indirect - go.opentelemetry.io/otel/trace v1.23.1 // indirect + github.com/zeebo/errs v1.4.0 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.21 // indirect + go.mongodb.org/mongo-driver v1.17.3 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/detectors/gcp v1.35.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect + go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel/metric v1.35.0 // indirect + go.opentelemetry.io/otel/sdk v1.35.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect + go.opentelemetry.io/otel/trace v1.35.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/mod v0.15.0 // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/term v0.18.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/tools v0.18.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 // indirect - google.golang.org/grpc v1.61.0 // indirect - google.golang.org/protobuf v1.33.0 // indirect - gopkg.in/h2non/filetype.v1 v1.0.5 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/square/go-jose.v2 v2.6.0 // indirect + golang.org/x/mod v0.24.0 // indirect + golang.org/x/net v0.39.0 // indirect + golang.org/x/text v0.24.0 // indirect + golang.org/x/tools v0.32.0 // indirect + google.golang.org/genproto v0.0.0-20250422160041-2d3770c4ea7f // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250422160041-2d3770c4ea7f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f // indirect + google.golang.org/grpc v1.72.0 // indirect + google.golang.org/protobuf v1.36.6 // indirect ) diff --git a/go.sum b/go.sum index 497bd284839eb..1ef381cff6a16 100644 --- a/go.sum +++ b/go.sum @@ -1,47 +1,64 @@ aead.dev/mem v0.2.0 h1:ufgkESS9+lHV/GUjxgc2ObF43FLZGSemh+W+y27QFMI= aead.dev/mem v0.2.0/go.mod h1:4qj+sh8fjDhlvne9gm/ZaMRIX9EkmDrKOLwmyDtoMWM= aead.dev/minisign v0.2.0/go.mod h1:zdq6LdSd9TbuSxchxwhpA9zEb9YXcVGoE8JakuiGaIQ= -aead.dev/minisign v0.2.1 h1:Z+7HA9dsY/eGycYj6kpWHpcJpHtjAwGiJFvbiuO9o+M= -aead.dev/minisign v0.2.1/go.mod h1:oCOjeA8VQNEbuSCFaaUXKekOusa/mll6WtMoO5JY4M4= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= -cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= -cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= -cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= 
-cloud.google.com/go/storage v1.38.0 h1:Az68ZRGlnNTpIBbLjSMIV2BDcwwXYlRlQzis0llkpJg= -cloud.google.com/go/storage v1.38.0/go.mod h1:tlUADB0mAb9BgYls9lq+8MGkfzOXuLrnHXlpHmvFJoY= -github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= -github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= -github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk= -github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= -github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= -github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= -github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= -github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= -github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= -github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= -github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +aead.dev/minisign v0.3.0 h1:8Xafzy5PEVZqYDNP60yJHARlW1eOQtsKNp/Ph2c0vRA= +aead.dev/minisign v0.3.0/go.mod h1:NLvG3Uoq3skkRMDuc3YHpWUTMTrSExqm+Ij73W13F6Y= +aead.dev/mtls v0.2.1 h1:47NHWciMvrmEhlkpnis8/RGEa9HR9gcbDPfcArG+Yqs= +aead.dev/mtls v0.2.1/go.mod h1:rZvRApIcPkCNu2AgpFoaMxKBee/XVkKs7wEuYgqLI3Q= +cel.dev/expr v0.23.1 h1:K4KOtPCJQjVggkARsjG9RWXP6O4R73aHeJMa/dmCQQg= +cel.dev/expr v0.23.1/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= +cloud.google.com/go v0.120.1 h1:Z+5V7yd383+9617XDCyszmK5E4wJRJL+tquMfDj9hLM= +cloud.google.com/go v0.120.1/go.mod h1:56Vs7sf/i2jYM6ZL9NYlC82r04PThNcPS5YgFmb0rp8= +cloud.google.com/go/auth v0.16.0 h1:Pd8P1s9WkcrBE2n/PhAwKsdrR35V3Sg2II9B+ndM3CU= +cloud.google.com/go/auth v0.16.0/go.mod h1:1howDHJ5IETh/LwYs3ZxvlkXF48aSqqJUM+5o02dNOI= +cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= +cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8= 
+cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE= +cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc= +cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA= +cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE= +cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY= +cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM= +cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U= +cloud.google.com/go/storage v1.52.0 h1:ROpzMW/IwipKtatA69ikxibdzQSiXJrY9f6IgBa9AlA= +cloud.google.com/go/storage v1.52.0/go.mod h1:4wrBAbAYUvYkbrf19ahGm4I5kDQhESSqN3CGEkMGvOY= +cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4= +cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 h1:Gt0j3wceWMwPmiazCa8MzMA0MfhmPIz0Qp0FJ6qcM0U= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0 h1:OVoM452qUFBrX+URdH3VpR299ma4kfom0yB0URYky9g= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.9.0/go.mod h1:kUjrAo8bgEwLeZ/CmHqNl3Z/kPm7y6FKfxxK0izYUg4= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY= +github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0 h1:LR0kAX9ykz8G4YgLCaRDVJ3+n43R8MneB5dTy2konZo= +github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0/go.mod h1:DWAciXemNf++PQJLeXUB4HHH5OpsAh12HZnu2wXE1jA= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 h1:lhZdRq7TIx0GJQvSyX2Si406vrYsov2FXGp/RnSEtcs= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1/go.mod h1:8cl44BDmi+effbARHMQjgOKA2AYvcohNm7KEt42mSV8= github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 h1:mFRzDkZVAjdal+s7s0MwaRv9igoPqLRdzOLzw/8Xvq8= github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358/go.mod h1:chxPXzSsl7ZWRAuOIE23GDNzjWuZquvFlgA8xmpunjU= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM= +github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs= +github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/IBM/sarama 
v1.42.2 h1:VoY4hVIZ+WQJ8G9KNY/SQlWguBQXQ9uvFPOnrcu8hEw= -github.com/IBM/sarama v1.42.2/go.mod h1:FLPGUGwYqEs62hq2bVG6Io2+5n+pS6s/WOXVKWSLFtE= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0/go.mod h1:BnBReJLvVYx2CS/UHOgVz2BXKXD9wsQPxZug20nZhd0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0 h1:OqVGm6Ei3x5+yZmSJG1Mh2NwHvpVmZ08CB5qJhT9Nuk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 h1:6/0iUd0xrnX7qt+mLNRwg5c0PGv8wpE8K90ryANQwMI= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0= +github.com/IBM/sarama v1.45.1 h1:nY30XqYpqyXOXSNoe2XCgjj9jklGM1Ye94ierUb1jQ0= +github.com/IBM/sarama v1.45.1/go.mod h1:qifDhA3VWSrQ1TjSMyxDl3nYL3oX2C83u+G6L79sq4w= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8= @@ -49,253 +66,230 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat6 github.com/alecthomas/participle v0.7.1 h1:2bN7reTw//5f0cugJcTOnY/NYZcWQOaajW+BwZB5xWs= github.com/alecthomas/participle v0.7.1/go.mod h1:HfdmEuwvr12HXQN44HPWXR0lHmVolVYe4dyL6lQ3duY= github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA= -github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= +github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7VVbI0o4wBRNQIgn917usHWOd6VAffYI= +github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4= +github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op h1:+OSa/t11TFhqfrX0EOSqQBDJ0YlpmK0rDSiB19dg9M0= +github.com/antithesishq/antithesis-sdk-go v0.4.3-default-no-op/go.mod h1:IUpT2DPAKh6i/YhSbt6Gl3v2yvUZjmKncl7U91fup7E= github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= -github.com/apache/thrift v0.19.0 h1:sOqkWPzMj7w6XaYbJQG7m4sGqVolaW/0D28Ln7yPzMk= -github.com/apache/thrift v0.19.0/go.mod h1:SUALL216IiaOw2Oy+5Vs9lboJ/t9g40C+G07Dc0QC1I= 
+github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE= +github.com/apache/thrift v0.21.0/go.mod h1:W1H8aR/QRtYNvrPeFXBtobyRkd0/YVhTc6i07XIAgDw= github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de/go.mod h1:DCaWoUhZrYW9p1lxo/cm8EmUOOzAPSEZNGF2DK1dJgw= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= -github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q= -github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= -github.com/bcicen/jstream v1.0.1 h1:BXY7Cu4rdmc0rhyTVyT3UkxAiX3bnLpKLas9btbH5ck= -github.com/bcicen/jstream v1.0.1/go.mod h1:9ielPxqFry7Y4Tg3j4BfjPocfJ3TbsRtXOAYXYmRuAQ= -github.com/beevik/ntp v1.3.1 h1:Y/srlT8L1yQr58kyPWFPZIxRL8ttx2SRIpVYJqZIlAM= -github.com/beevik/ntp v1.3.1/go.mod h1:fT6PylBq86Tsq23ZMEe47b7QQrZfYBFPnpzt0a9kJxw= +github.com/aymanbagabas/go-udiff v0.2.0 h1:TK0fH4MteXUDspT88n8CKzvK0X9O2xu9yQjWpi6yML8= +github.com/aymanbagabas/go-udiff v0.2.0/go.mod h1:RE4Ex0qsGkTAJoQdQQCA0uG+nAzJO/pI/QwceO5fgrA= +github.com/beevik/ntp v1.4.3 h1:PlbTvE5NNy4QHmA4Mg57n7mcFTmr1W1j3gcK7L1lqho= +github.com/beevik/ntp v1.4.3/go.mod h1:Unr8Zg+2dRn7d8bHFuehIMSvvUYssHMxW3Q5Nx4RW5Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/charmbracelet/bubbles v0.18.0 h1:PYv1A036luoBGroX6VWjQIE9Syf2Wby2oOl/39KLfy0= -github.com/charmbracelet/bubbles v0.18.0/go.mod h1:08qhZhtIwzgrtBjAcJnij1t1H0ZRjwHyGsy6AL11PSw= -github.com/charmbracelet/bubbletea v0.25.0 h1:bAfwk7jRz7FKFl9RzlIULPkStffg5k6pNt5dywy4TcM= -github.com/charmbracelet/bubbletea v0.25.0/go.mod h1:EN3QDR1T5ZdWmdfDzYcqOCAps45+QIJbLOBxmVNWNNg= -github.com/charmbracelet/lipgloss v0.9.1 h1:PNyd3jvaJbg4jRHKWXnCj1akQm4rh8dbEzN1p/u1KWg= -github.com/charmbracelet/lipgloss v0.9.1/go.mod h1:1mPmG4cxScwUQALAAnacHaigiiHB9Pmr+v1VEawJl6I= +github.com/cespare/xxhash/v2 v2.3.0 
h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charmbracelet/bubbles v0.20.0 h1:jSZu6qD8cRQ6k9OMfR1WlM+ruM8fkPWkHvQWD9LIutE= +github.com/charmbracelet/bubbles v0.20.0/go.mod h1:39slydyswPy+uVOHZ5x/GjwVAFkCsV8IIVy+4MhzwwU= +github.com/charmbracelet/bubbletea v1.3.4 h1:kCg7B+jSCFPLYRA52SDZjr51kG/fMUEoPoZrkaDHyoI= +github.com/charmbracelet/bubbletea v1.3.4/go.mod h1:dtcUCyCGEX3g9tosuYiut3MXgY/Jsv9nKVdibKKRRXo= +github.com/charmbracelet/lipgloss v1.0.0 h1:O7VkGDvqEdGi93X+DeqsQ7PKHDgtQfF8j8/O2qFMQNg= +github.com/charmbracelet/lipgloss v1.0.0/go.mod h1:U5fy9Z+C38obMs+T+tJqst9VGzlOYGj4ri9reL3qUlo= +github.com/charmbracelet/x/ansi v0.8.0 h1:9GTq3xq9caJW8ZrBTe0LIe2fvfLR/bYXKTx2llXn7xE= +github.com/charmbracelet/x/ansi v0.8.0/go.mod h1:wdYl/ONOLHLIVmQaxbIYEC/cRKOQyjTkowiI4blgS9Q= +github.com/charmbracelet/x/exp/golden v0.0.0-20240815200342-61de596daa2b h1:MnAMdlwSltxJyULnrYbkZpp4k58Co7Tah3ciKhSNo0Q= +github.com/charmbracelet/x/exp/golden v0.0.0-20240815200342-61de596daa2b/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U= +github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ= +github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg= github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo= github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= +github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= +github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro= -github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= -github.com/coredns/coredns v1.11.1 h1:IYBM+j/Xx3nTV4HE1s626G9msmJZSdKL9k0ZagYcZFQ= -github.com/coredns/coredns v1.11.1/go.mod h1:X0ac9RLzd/WAxKuEe3A52miPSm6XjfoxVNAjEQgjphk= +github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f h1:C5bqEmzEPLsHm9Mv73lSE9e9bKV23aB1vxOsmZrkl3k= +github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f/go.mod 
h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= -github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-oidc/v3 v3.14.1 h1:9ePWwfdwC4QKRlCXsJGou56adA/owXczOzwKdOumLqk= +github.com/coreos/go-oidc/v3 v3.14.1/go.mod h1:HaZ3szPaZ0e4r6ebqvsLWlk2Tn+aejfmrfah6hnSYEU= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/cosnicolaou/pbzip2 v1.0.3 h1:CebGEQSYOg9uFDfTgIXNDI3cuaQovlnBUcdB614dQws= -github.com/cosnicolaou/pbzip2 v1.0.3/go.mod h1:uCNfm0iE2wIKGRlLyq31M4toziFprNhEnvueGmh5u3M= +github.com/cosnicolaou/pbzip2 v1.0.5 h1:+PZ8yRBx6bRXncOJWQvEThyFm8XhF9Yb6WUMN6KsgrA= +github.com/cosnicolaou/pbzip2 v1.0.5/go.mod h1:uCNfm0iE2wIKGRlLyq31M4toziFprNhEnvueGmh5u3M= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dchest/siphash v1.2.3 h1:QXwFc8cFOR2dSa/gE6o/HokBMWtLUaNDVd+22aKHeEA= github.com/dchest/siphash v1.2.3/go.mod h1:0NvQU092bT0ipiFN++/rXm69QG9tVxLAlQHIXMPAkHc= -github.com/decred/dcrd/crypto/blake256 v1.0.1/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/eapache/go-resiliency v1.5.0 h1:dRsaR00whmQD+SgVKlq/vCRFNgtEb5yppyeVos3Yce0= -github.com/eapache/go-resiliency v1.5.0/go.mod 
h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= +github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/eclipse/paho.mqtt.golang v1.4.3 h1:2kwcUGn8seMUfWndX0hGbvH8r7crgcJguQNCyp70xik= -github.com/eclipse/paho.mqtt.golang v1.4.3/go.mod h1:CSYvoAlsMkhYOXh/oKyxa8EcBci6dVkLCbo5tTC1RIE= +github.com/eclipse/paho.mqtt.golang v1.5.0 h1:EH+bUVJNgttidWFkLLVKaQPGmkTUfQQqjOsyvMGvD6o= +github.com/eclipse/paho.mqtt.golang v1.5.0/go.mod h1:du/2qNQVqJf/Sqs4MEL77kR8QTqANF7XU7Fk0aOTAgk= github.com/elastic/go-elasticsearch/v7 v7.17.10 h1:TCQ8i4PmIJuBunvBS6bwT2ybzVFxxUhhltAs3Gyu1yo= github.com/elastic/go-elasticsearch/v7 v7.17.10/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= +github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M= +github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA= +github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A= +github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= +github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= +github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= +github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= +github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= +github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= +github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs 
v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= -github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= +github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY= +github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= -github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fraugster/parquet-go v0.12.0 h1:1slnC5y2VWEOUSlzbeXatM0BvSWcLUDsR/EcZsXXCZc= github.com/fraugster/parquet-go v0.12.0/go.mod h1:dGzUxdNqXsAijatByVgbAWVPlFirnhknQbdazcUIjY0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= -github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= -github.com/gdamore/tcell/v2 v2.7.0 h1:I5LiGTQuwrysAt1KS9wg1yFfOI3arI3ucFrxtd/xqaA= -github.com/gdamore/tcell/v2 v2.7.0/go.mod h1:hl/KtAANGBecfIPxk+FzKvThTqI84oplgbPEmVX60b8= -github.com/go-asn1-ber/asn1-ber v1.5.5 h1:MNHlNMBDgEKD4TcKr36vQN68BA00aDfjIt3/bD50WnA= -github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-ldap/ldap/v3 v3.4.6 h1:ert95MdbiG7aWo/oPYp9btL3KJlMPKnP58r09rI8T+A= -github.com/go-ldap/ldap/v3 v3.4.6/go.mod h1:IGMQANNtxpsOzj7uUAMjpGBaOVTC4DYyIy8VsTdxmtc= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667 h1:BP4M0CvQ4S3TGls2FvczZtj5Re/2ZzkV9VwqPHH/3Bo= +github.com/go-asn1-ber/asn1-ber v1.5.8-0.20250403174932-29230038a667/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-jose/go-jose/v4 v4.1.0 h1:cYSYxd3pw5zd2FSXk2vGdn9igQU2PS8MuxrCOCl0FdY= +github.com/go-jose/go-jose/v4 v4.1.0/go.mod h1:GG/vqmYm3Von2nYiB2vGTXzdoNKE5tix5tuc6iAd+sw= +github.com/go-ldap/ldap/v3 v3.4.11 h1:4k0Yxweg+a3OyBLjdYn5OKglv18JNvfDykSoI8bW0gU= +github.com/go-ldap/ldap/v3 v3.4.11/go.mod h1:bY7t0FLK8OAVpp/vV6sSlpz3EQDGcQwc8pF0ujLgKvM= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0= -github.com/go-openapi/analysis v0.22.2/go.mod h1:pDF4UbZsQTo/oNuRfAWWd4dAh4yuYf//LYorPTjrpvo= -github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY= -github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho= -github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= -github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= -github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= -github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= -github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0= -github.com/go-openapi/loads v0.21.5/go.mod h1:PxTsnFBoBe+z89riT+wYt3prmSBP6GDAQh2l9H1Flz8= -github.com/go-openapi/runtime v0.27.1 h1:ae53yaOoh+fx/X5Eaq8cRmavHgDma65XPZuvBqvJYto= -github.com/go-openapi/runtime v0.27.1/go.mod h1:fijeJEiEclyS8BRurYE1DE5TLb9/KZl6eAdbzjsrlLU= -github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/Do= -github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw= -github.com/go-openapi/strfmt v0.22.0 h1:Ew9PnEYc246TwrEspvBdDHS4BVKXy/AOVsfqGDgAcaI= -github.com/go-openapi/strfmt v0.22.0/go.mod h1:HzJ9kokGIju3/K6ap8jL+OlGAbjpSv27135Yr9OivU4= -github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= -github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= -github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw= -github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU= +github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0= +github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= +github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod 
h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= +github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= +github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= -github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-sql-driver/mysql v1.9.2 h1:4cNKDYQ1I84SXslGddlsrMhc8k4LeDVj6Ad6WRjiHuU= +github.com/go-sql-driver/mysql v1.9.2/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.3.2 h1:zlnbNHxumkRvfPWgfXu8RBwyNR1x8wh9cf5PTOCqs9Q= -github.com/gobwas/ws v1.3.2/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= -github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= -github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= +github.com/gobwas/ws v1.4.0 h1:CTaoG1tojrh4ucGPcoJFiAQUAsEWekEWvLy7GsVNqGs= +github.com/gobwas/ws v1.4.0/go.mod h1:G3gNqMNtPppf5XUz7O4shetPpcZ1VJ7zt18dlUeakrc= +github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock 
v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= +github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gomodule/redigo v1.8.9 h1:Sl3u+2BI/kk+VEatbj0scLdrFhjPmbxOc1myhDP41ws= -github.com/gomodule/redigo v1.8.9/go.mod h1:7ArFNvsTjH8GMMzB4uy1snslv2BwmginuMs06a1uzZE= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs= +github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/gomodule/redigo v1.9.2 h1:HrutZBLhSIU8abiSfW8pj8mPhOyMYjZT/wcA4/L9L9s= +github.com/gomodule/redigo v1.9.2/go.mod h1:KsU3hiK/Ay8U42qpaJk+kuNa3C+spxapWpM+ywhcgtw= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/go-tpm v0.9.3 h1:+yx0/anQuGzi+ssRqeD6WpXjW2L/V0dItUayO0i9sRc= +github.com/google/go-tpm v0.9.3/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= -github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= -github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= -github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo= -github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20250422154841-e1f9c1950416 h1:1/qwHx8P72glDXdyCKesJ+/c40x71SY4q2avOxJ2iYQ= +github.com/google/pprof v0.0.0-20250422154841-e1f9c1950416/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= +github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= +github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4= 
+github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA= +github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q= +github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v1.1.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM= -github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= @@ -314,9 +308,9 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/raft v1.3.6 h1:v5xW5KzByoerQlN/o31VJrFNiozgzGyDoMgDJgXpsto= -github.com/hashicorp/raft v1.3.6/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= -github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/hashicorp/raft v1.3.9 h1:9yuo1aR0bFTr1cw7pj3S2Bk6MhJCsnr2NAxvIBrP2x4= +github.com/hashicorp/raft v1.3.9/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= +github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod 
h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -332,87 +326,82 @@ github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh6 github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= -github.com/jedib0t/go-pretty/v6 v6.5.4 h1:gOGo0613MoqUcf0xCj+h/V3sHDaZasfv152G6/5l91s= -github.com/jedib0t/go-pretty/v6 v6.5.4/go.mod h1:5LQIxa52oJ/DlDSLv0HEkWOFMDGoWkJb9ss5KqPpJBg= -github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/jedib0t/go-pretty/v6 v6.6.7 h1:m+LbHpm0aIAPLzLbMfn8dc3Ht8MW7lsSO4MPItz/Uuo= +github.com/jedib0t/go-pretty/v6 v6.6.7/go.mod h1:YwC5CE4fJ1HFUDeivSV1r//AmANFHyqczZk+U6BDALU= +github.com/jessevdk/go-flags v1.6.1 h1:Cvu5U8UGrLay1rZfv/zP7iLpSHGUZ/Ou68T0iX1bBK4= +github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc= github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/juju/ratelimit v1.0.2 h1:sRxmtRiajbvrcLQT7S+JbqU0ntsb9W2yhSdNN8tWfaI= github.com/juju/ratelimit v1.0.2/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= +github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= -github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= -github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= -github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod 
h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/klauspost/filepathx v1.1.1 h1:201zvAsL1PhZvmXTP+QLer3AavWrO3U1NILWpniHK4w= github.com/klauspost/filepathx v1.1.1/go.mod h1:XWxdp8rEw4gupPBrxrV5Q57dL/71xj0OgV1gKt2zTfU= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/readahead v1.4.0 h1:w4hQ3BpdLjBnRQkZyNi+nwdHU7eGP9buTexWK9lU7gY= github.com/klauspost/readahead v1.4.0/go.mod h1:7bolpMKhT5LKskLwYXGSDOyA2TYtMFgdgV0Y8gy7QhA= -github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q= -github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs= +github.com/klauspost/reedsolomon v1.12.4 h1:5aDr3ZGoJbgu/8+j45KtUJxzYm8k08JGtB9Wx1VQ4OA= +github.com/klauspost/reedsolomon v1.12.4/go.mod h1:d3CzOMOt0JXGIFZm1StgkyF14EYr3xneR2rNWo7NcMU= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lestrrat-go/backoff/v2 v2.0.8 h1:oNb5E5isby2kiro9AgdHLv5N5tint1AnDVVf2E2un5A= -github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= github.com/lestrrat-go/blackmagic v1.0.2 h1:Cg2gVSc9h7sz9NOByczrbUvLopQmXrfFx//N+AkAr5k= github.com/lestrrat-go/blackmagic v1.0.2/go.mod h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= +github.com/lestrrat-go/httprc v1.0.6 h1:qgmgIRhpvBqexMJjA/PmwSvhNk679oqD1RbovdCGW8k= +github.com/lestrrat-go/httprc v1.0.6/go.mod h1:mwwz3JMTPBjHUkkDv/IGJ39aALInZLrhBp0X7KGUZlo= github.com/lestrrat-go/iter v1.0.2 h1:gMXo1q4c2pHmC3dn8LzRhJfP1ceCbgSiT9lUydIzltI= github.com/lestrrat-go/iter v1.0.2/go.mod h1:Momfcq3AnRlRjI5b5O8/G5/BvpzrhoFTZcn06fEOPt4= -github.com/lestrrat-go/jwx v1.2.29 h1:QT0utmUJ4/12rmsVQrJ3u55bycPkKqGYuGT4tyRhxSQ= -github.com/lestrrat-go/jwx v1.2.29/go.mod h1:hU8k2l6WF0ncx20uQdOmik/Gjg6E3/wIRtXSNFeZuB8= -github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= +github.com/lestrrat-go/jwx/v2 v2.1.4 h1:uBCMmJX8oRZStmKuMMOFb0Yh9xmEMgNJLgjuKKt4/qc= +github.com/lestrrat-go/jwx/v2 
v2.1.4/go.mod h1:nWRbDFR1ALG2Z6GJbBXzfQaYyvn751KuuyySN2yR6is= github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU= github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lithammer/shortuuid/v4 v4.0.0 h1:QRbbVkfgNippHOS8PXDkti4NaWeyYfcBTHtw7k08o4c= -github.com/lithammer/shortuuid/v4 v4.0.0/go.mod h1:Zs8puNcrvf2rV9rTH51ZLLcj7ZXqQI3lv67aw4KiB1Y= +github.com/lithammer/shortuuid/v4 v4.2.0 h1:LMFOzVB3996a7b8aBuEXxqOBflbfPQAiVzkIcHO0h8c= +github.com/lithammer/shortuuid/v4 v4.2.0/go.mod h1:D5noHZ2oFw/YaKCfGy0YxyE7M0wMbezmMjPdhyEFe6Y= github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed h1:036IscGBfJsFIgJQzlui7nK1Ncm0tp2ktmPj8xO4N/0= -github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= +github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 h1:PpXWgLPs+Fqr325bN2FD2ISlRRztXibcX6e8f5FR5Dc= +github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35/go.mod h1:autxFIvghDt3jPTLoqZ9OZ7s9qTGNAWmYCjVFWPX/zg= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= -github.com/mattn/go-ieproxy v0.0.11 h1:MQ/5BuGSgDAHZOJe6YY80IF2UVCfGkwfo6AeD7HtHYo= -github.com/mattn/go-ieproxy v0.0.11/go.mod h1:/NsJd+kxZBmjMc5hrJCKMbP57B84rvq9BiDRbtO9AS0= +github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE= +github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8= +github.com/mattn/go-ieproxy v0.0.12 h1:OZkUFJC3ESNZPQ+6LzC3VJIFSnreeFLQyqvBWtvfL2M= +github.com/mattn/go-ieproxy v0.0.12/go.mod h1:Vn+N61199DAnVeTgaF8eoB9PvLO8P3OBnG95ENh7B7c= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-localereader v0.0.1 
h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4= @@ -421,59 +410,62 @@ github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= -github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= +github.com/miekg/dns v1.1.65 h1:0+tIPHzUW0GCge7IiK3guGP57VAw7hoPDfApjkMD1Fc= +github.com/miekg/dns v1.1.65/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck= github.com/minio/cli v1.24.2 h1:J+fCUh9mhPLjN3Lj/YhklXvxj8mnyE/D6FpFduXJ2jg= github.com/minio/cli v1.24.2/go.mod h1:bYxnK0uS629N3Bq+AOZZ+6lwF77Sodk4+UL9vNuXhOY= -github.com/minio/colorjson v1.0.6 h1:m7TUvpvt0u7FBmVIEQNIa0T4NBQlxrcMBp4wJKsg2Ik= -github.com/minio/colorjson v1.0.6/go.mod h1:LUXwS5ZGNb6Eh9f+t+3uJiowD3XsIWtsvTriUBeqgYs= -github.com/minio/console v1.0.0 h1:FelvkcllBALpBsjuaFx4ZOqxGb4ylUq3vcUoRxoYCrs= -github.com/minio/console v1.0.0/go.mod h1:VvJdNfELVCc2VELF1rJtIPCb3FcWGtNfM/Ktvnu2MZ0= +github.com/minio/colorjson v1.0.8 h1:AS6gEQ1dTRYHmC4xuoodPDRILHP/9Wz5wYUGDQfPLpg= +github.com/minio/colorjson v1.0.8/go.mod h1:wrs39G/4kqNlGjwqHvPlAnXuc2tlPszo6JKdSBCLN8w= +github.com/minio/console v1.7.7-0.20250905210349-2017f33b26e1 h1:jOW1ggtITn8sreTzUjcdYE/ZffxeVmWstXNlBLOE6j4= +github.com/minio/console v1.7.7-0.20250905210349-2017f33b26e1/go.mod h1:hKNkzdKBKU84w5wXqMnkH74QocJGHW2zjvFtuGETDsc= +github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY= +github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg= github.com/minio/csvparser v1.0.0 h1:xJEHcYK8ZAjeW4hNV9Zu30u+/2o4UyPnYgyjWp8b7ZU= github.com/minio/csvparser v1.0.0/go.mod h1:lKXskSLzPgC5WQyzP7maKH7Sl1cqvANXo9YCto8zbtM= github.com/minio/dnscache v0.1.1 h1:AMYLqomzskpORiUA1ciN9k7bZT1oB3YZN4cEIi88W5o= github.com/minio/dnscache v0.1.1/go.mod h1:WCumm6offO4rQ/82oTCSoHnlhTmc81+vXgKpBtSYRbg= -github.com/minio/dperf v0.5.3 h1:D58ZrMfxrRw83EvAhr4FggvRT0DwWXsWrvsM8Xne+EM= -github.com/minio/dperf v0.5.3/go.mod h1:WrI7asRe/kv5zmnZ4XwHY74PV8OyUN+efeKINRgk5UI= +github.com/minio/dperf v0.6.3 h1:+UzGe64Xmb/sXFBH38CCnXJiMEoQKURHydc+baGkysI= +github.com/minio/dperf v0.6.3/go.mod h1:+3BJsm3Jrb1yTRmkoeKCNootOmULPiLoAu+qBP7MaMk= github.com/minio/filepath v1.0.0 h1:fvkJu1+6X+ECRA6G3+JJETj4QeAYO9sV43I79H8ubDY= github.com/minio/filepath v1.0.0/go.mod h1:/nRZA2ldl5z6jT9/KQuvZcQlxZIMQoFFQPvEXx9T/Bw= -github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= 
-github.com/minio/kms-go/kes v0.3.0 h1:SU8VGVM/Hk9w1OiSby3OatkcojooUqIdDHl6dtM6NkY= -github.com/minio/kms-go/kes v0.3.0/go.mod h1:w6DeVT878qEOU3nUrYVy1WOT5H1Ig9hbDIh698NYJKY= -github.com/minio/madmin-go/v3 v3.0.50-0.20240307080957-112c599cb563 h1:Sewy11CUNITOocZ2KUfW0l2zm0wNp+d8tb22ivztGRg= -github.com/minio/madmin-go/v3 v3.0.50-0.20240307080957-112c599cb563/go.mod h1:ZDF7kf5fhmxLhbGTqyq5efs4ao0v4eWf7nOuef/ljJs= -github.com/minio/mc v0.0.0-20240209221824-669cb0a9a475 h1:yfLzMougcV2xkVlWgwYwVRoT8pnXrcCV4oOQW+pI2EQ= -github.com/minio/mc v0.0.0-20240209221824-669cb0a9a475/go.mod h1:MmDLdb7NWd/OYhcKcXKvwErq2GNa/Zq6xtTWuhdC4II= +github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q= +github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ= +github.com/minio/kms-go/kes v0.3.1 h1:K3sPFAvFbJx33XlCTUBnQo8JRmSZyDvT6T2/MQ2iC3A= +github.com/minio/kms-go/kes v0.3.1/go.mod h1:Q9Ct0KUAuN9dH0hSVa0eva45Jg99cahbZpPxeqR9rOQ= +github.com/minio/kms-go/kms v0.5.1-0.20250225090116-4e64ce8d0f35 h1:ISNz42SPD+heeHhpl9bwMRRusPTCsbYKd1YoED265E0= +github.com/minio/kms-go/kms v0.5.1-0.20250225090116-4e64ce8d0f35/go.mod h1:JFQu2srrnWxMn6KcwS5347oTwNKW7nkewgBlrodjF9k= +github.com/minio/madmin-go/v3 v3.0.109 h1:hRHlJ6yaIB3tlIj5mz9L9mGcyLC37S9qL1WtFrRtyQ0= +github.com/minio/madmin-go/v3 v3.0.109/go.mod h1:WOe2kYmYl1OIlY2DSRHVQ8j1v4OItARQ6jGyQqcCud8= +github.com/minio/mc v0.0.0-20250313080218-cf909e1063a9 h1:6RyInOHKL6jz8zxcAar/h6rg/aJCxDP/uFuSNvYSuMI= +github.com/minio/mc v0.0.0-20250313080218-cf909e1063a9/go.mod h1:h5UQZ+5Qfq6XV81E4iZSgStPZ6Hy+gMuHMkLkjq4Gys= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v6 v6.0.46/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= -github.com/minio/minio-go/v7 v7.0.68 h1:hTqSIfLlpXaKuNy4baAp4Jjy2sqZEN9hRxD0M4aOfrQ= -github.com/minio/minio-go/v7 v7.0.68/go.mod h1:XAvOPJQ5Xlzk5o3o/ArO2NMbhSGkimC+bpW/ngRKDmQ= -github.com/minio/mux v1.9.0 h1:dWafQFyEfGhJvK6AwLOt83bIG5bxKxKJnKMCi0XAaoA= -github.com/minio/mux v1.9.0/go.mod h1:1pAare17ZRL5GpmNL+9YmqHoWnLmMZF9C/ioUCfy0BQ= -github.com/minio/pkg/v2 v2.0.9-0.20240209124402-7990a27fd79d h1:xGtyFgqwGy7Lc/i5udOKKeqsyRpQPlKQY2Pf4RiUDtk= -github.com/minio/pkg/v2 v2.0.9-0.20240209124402-7990a27fd79d/go.mod h1:yayUTo82b0RK+e97hGb1naC787mOtUEyDs3SIcwSyHI= +github.com/minio/minio-go/v7 v7.0.91 h1:tWLZnEfo3OZl5PoXQwcwTAPNNrjyWwOh6cbZitW5JQc= +github.com/minio/minio-go/v7 v7.0.91/go.mod h1:uvMUcGrpgeSAAI6+sD3818508nUyMULw94j2Nxku/Go= +github.com/minio/mux v1.9.2 h1:dQchne49BUBgOlxIHjx5wVe1gl5VXF2sxd4YCXkikTw= +github.com/minio/mux v1.9.2/go.mod h1:OuHAsZsux+e562bcO2P3Zv/P0LMo6fPQ310SmoyG7mQ= +github.com/minio/pkg/v3 v3.1.3 h1:6iBVcTPq7z29suUROciYUBpvLxfzDV3/+Ls0RFDOta8= +github.com/minio/pkg/v3 v3.1.3/go.mod h1:XIUU35+I9lWuTuMf94pwnQjvli6nZfRND6TjZGgqSEE= github.com/minio/selfupdate v0.6.0 h1:i76PgT0K5xO9+hjzKcacQtO7+MjJ4JKA8Ak8XQ9DDwU= github.com/minio/selfupdate v0.6.0/go.mod h1:bO02GTIPCMQFTEvE5h4DjYB58bCoZ35XLeBf0buTDdM= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= -github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/minio/simdjson-go v0.4.5 h1:r4IQwjRGmWCQ2VeMc7fGiilu1z5du0gJ/I/FsKwgo5A= github.com/minio/simdjson-go v0.4.5/go.mod 
h1:eoNz0DcLQRyEDeaPr4Ru6JpjlZPzbA0IodxVJk8lO8E= -github.com/minio/sio v0.3.1 h1:d59r5RTHb1OsQaSl1EaTWurzMMDRLA5fgNmjzD4eVu4= -github.com/minio/sio v0.3.1/go.mod h1:S0ovgVgc+sTlQyhiXA1ppBLv7REM7TYi5yyq2qL/Y6o= +github.com/minio/sio v0.4.1 h1:EMe3YBC1nf+sRQia65Rutxi+Z554XPV0dt8BIBA+a/0= +github.com/minio/sio v0.4.1/go.mod h1:oBSjJeGbBdRMZZwna07sX9EFzZy+ywu5aofRiV1g79I= github.com/minio/websocket v1.6.0 h1:CPvnQvNvlVaQmvw5gtJNyYQhg4+xRmrPNhBbv8BdpAE= github.com/minio/websocket v1.6.0/go.mod h1:COH1CePZfHT9Ec1O7vZjTlX5uEPpyYnrifPNbu665DM= github.com/minio/xxml v0.0.3 h1:ZIpPQpfyG5uZQnqqC0LZuWtPk/WT8G/qkxvO6jb7zMU= github.com/minio/xxml v0.0.3/go.mod h1:wcXErosl6IezQIMEWSK/LYC2VS7LJ1dAkgvuyIN3aH4= -github.com/minio/zipindex v0.3.0 h1:9alMPe9K5X19zCqC7bJfLya5Opy5FjcYVlkuZom0MX0= -github.com/minio/zipindex v0.3.0/go.mod h1:uCv+DULcGHWzRN/3+Vary0KVVan0aVS2NqDi6KyIRMo= +github.com/minio/zipindex v0.4.0 h1:NFPp7OscsUm5Y91+2tJ9Hr4jEG2R20xaz2Wd0ac7uJQ= +github.com/minio/zipindex v0.4.0/go.mod h1:3xib1QhqfYkkxofF881t/50FQMHFH2XvYGyPrd4N948= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= @@ -482,45 +474,40 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE= -github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI= github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo= github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA= github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo= github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= -github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= -github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= -github.com/nats-io/jwt/v2 v2.5.0 h1:WQQ40AAlqqfx+f6ku+i0pOVm+ASirD4fUh+oQsiE9Ak= 
-github.com/nats-io/jwt/v2 v2.5.0/go.mod h1:24BeQtRwxRV8ruvC4CojXlx/WQ/VjuwlYiH+vu/+ibI= -github.com/nats-io/nats-server/v2 v2.7.4/go.mod h1:1vZ2Nijh8tcyNe8BDVyTviCd9NYzRbubQYiEHsvOQWc= -github.com/nats-io/nats-server/v2 v2.9.23 h1:6Wj6H6QpP9FMlpCyWUaNu2yeZ/qGj+mdRkZ1wbikExU= -github.com/nats-io/nats-server/v2 v2.9.23/go.mod h1:wEjrEy9vnqIGE4Pqz4/c75v9Pmaq7My2IgFmnykc4C0= -github.com/nats-io/nats-streaming-server v0.24.3 h1:uZez8jBkXscua++jaDsK7DhpSAkizdetar6yWbPMRco= -github.com/nats-io/nats-streaming-server v0.24.3/go.mod h1:rqWfyCbxlhKj//fAp8POdQzeADwqkVhZcoWlbhkuU5w= +github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= +github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= +github.com/nats-io/jwt/v2 v2.7.4 h1:jXFuDDxs/GQjGDZGhNgH4tXzSUK6WQi2rsj4xmsNOtI= +github.com/nats-io/jwt/v2 v2.7.4/go.mod h1:me11pOkwObtcBNR8AiMrUbtVOUGkqYjMQZ6jnSdVUIA= +github.com/nats-io/nats-server/v2 v2.8.2/go.mod h1:vIdpKz3OG+DCg4q/xVPdXHoztEyKDWRtykQ4N7hd7C4= +github.com/nats-io/nats-server/v2 v2.11.1 h1:LwdauqMqMNhTxTN3+WFTX6wGDOKntHljgZ+7gL5HCnk= +github.com/nats-io/nats-server/v2 v2.11.1/go.mod h1:leXySghbdtXSUmWem8K9McnJ6xbJOb0t9+NQ5HTRZjI= +github.com/nats-io/nats-streaming-server v0.24.6 h1:iIZXuPSznnYkiy0P3L0AP9zEN9Etp+tITbbX1KKeq4Q= +github.com/nats-io/nats-streaming-server v0.24.6/go.mod h1:tdKXltY3XLeBJ21sHiZiaPl+j8sK3vcCKBWVyxeQs10= github.com/nats-io/nats.go v1.13.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= -github.com/nats-io/nats.go v1.13.1-0.20220308171302-2f2f6968e98d/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nats.go v1.14.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nats.go v1.15.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= github.com/nats-io/nats.go v1.22.1/go.mod h1:tLqubohF7t4z3du1QDPYJIQQyhb4wl6DhjxEajSI7UA= -github.com/nats-io/nats.go v1.32.0 h1:Bx9BZS+aXYlxW08k8Gd3yR2s73pV5XSoAQUyp1Kwvp0= -github.com/nats-io/nats.go v1.32.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8= +github.com/nats-io/nats.go v1.41.2 h1:5UkfLAtu/036s99AhFRlyNDI1Ieylb36qbGjJzHixos= +github.com/nats-io/nats.go v1.41.2/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g= github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= -github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI= -github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc= +github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0= +github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nats-io/stan.go v0.10.2/go.mod h1:vo2ax8K2IxaR3JtEMLZRFKIdoK/3o1/PKueapB7ezX0= github.com/nats-io/stan.go v0.10.4 h1:19GS/eD1SeQJaVkeM9EkvEYattnvnWrZ3wkSWSw4uXw= github.com/nats-io/stan.go v0.10.4/go.mod h1:3XJXH8GagrGqajoO/9+HgPyKV5MWsv7S5ccdda+pc6k= -github.com/navidys/tvxwidgets v0.5.0 h1:yMUjQQnEIVcVRiefk/8cqmBtss6W7mcgxKMBaWPrVuM= 
-github.com/navidys/tvxwidgets v0.5.0/go.mod h1:GfJi01j3qlVRQD6fg7IL08lOrM4PIznB58Q6aXJS5R4= github.com/ncw/directio v1.0.5 h1:JSUBhdjEvVaJvOoyPAbcW0fnd0tvRXD76wEfZ1KcQz4= github.com/ncw/directio v1.0.5/go.mod h1:rX/pKEYkOXBGOggmcyJeJGloCkleSvphPx2eV3t6ROk= github.com/nsqio/go-nsq v1.1.0 h1:PQg+xxiUjA7V+TLdXw7nVrJ5Jbl3sN86EhGCQj4+FYE= @@ -529,97 +516,82 @@ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs= -github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= -github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= -github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM= -github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM= +github.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo= -github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= -github.com/pkg/xattr v0.4.9 h1:5883YPCtkSd8LFbs13nXplj9g9tlrwoJRjgpgMu1/fE= -github.com/pkg/xattr v0.4.9/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= +github.com/pkg/sftp v1.13.9 h1:4NGkvGudBL7GteO3m6qnaQ4pC0Kvf0onSVc9gR3EWBw= +github.com/pkg/sftp v1.13.9/go.mod h1:OBN7bVXdstkFFN/gdnHPUb5TE8eb8G1Rp9wCItqjkkA= +github.com/pkg/xattr v0.4.10 h1:Qe0mtiNFHQZ296vRgUjRCoPHPqH7VdTOrZx3g0T+pGA= +github.com/pkg/xattr 
v0.4.10/go.mod h1:di8WF84zAKk8jzR1UBTEWh9AUlIZZ7M/JNt8e9B6ktU= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig= -github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/pquerna/cachecontrol v0.2.0 h1:vBXSNuE5MYP9IJ5kjsdo8uq+w41jSPgvba2DEnkRx9k= -github.com/pquerna/cachecontrol v0.2.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= -github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= 
-github.com/prometheus/common v0.46.0 h1:doXzt5ybi1HBKpsZOL0sSkaNHJJqkyfEWZGGqqScV0Y= -github.com/prometheus/common v0.46.0/go.mod h1:Tp0qkxpb9Jsg54QMe+EAmqXkSV7Evdy1BTn+g2pa/hQ= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/common v0.63.0 h1:YR/EIY1o3mEFP/kZCD7iDMnLPlGyuU2Gb3HIcXnA98k= +github.com/prometheus/common v0.63.0/go.mod h1:VVFF/fBIoToEnWRVkYoXEkq3R3paCoxG9PXP74SnV18= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= -github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= -github.com/prometheus/prom2json v1.3.3 h1:IYfSMiZ7sSOfliBoo89PcufjWO4eAR0gznGcETyaUgo= -github.com/prometheus/prom2json v1.3.3/go.mod h1:Pv4yIPktEkK7btWsrUTWDDDrnpUrAELaOCj+oFwlgmc= -github.com/rabbitmq/amqp091-go v1.9.0 h1:qrQtyzB4H8BQgEuJwhmVQqVHB9O4+MNDJCCAcpc3Aoo= -github.com/rabbitmq/amqp091-go v1.9.0/go.mod h1:+jPrT9iY2eLjRaMSRHUhc3z14E/l85kv/f+6luSD3pc= -github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= -github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rivo/tview v0.0.0-20240204151237-861aa94d61c8 h1:aW0ILZ0lkphO/2mUWocSfP1iebWtSFcxL8BiSNR+/8g= -github.com/rivo/tview v0.0.0-20240204151237-861aa94d61c8/go.mod h1:sGSvhfWFNS7FpYxS8K+e22OTOI3UsB5rDs0nRtoZkpA= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= +github.com/prometheus/prom2json v1.4.2 h1:PxCTM+Whqi/eykO1MKsEL0p/zMpxp9ybpsmdFamw6po= +github.com/prometheus/prom2json v1.4.2/go.mod h1:zuvPm7u3epZSbXPWHny6G+o8ETgu6eAK3oPr6yFkRWE= +github.com/prometheus/prometheus v0.303.0 h1:wsNNsbd4EycMCphYnTmNY9JASBVbp7NWwJna857cGpA= +github.com/prometheus/prometheus v0.303.0/go.mod h1:8PMRi+Fk1WzopMDeb0/6hbNs9nV6zgySkU/zds5Lu3o= +github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg= +github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= +github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw= +github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o= +github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9 h1:bsUq1dX0N8AOIL7EB/X911+m4EHsnWEHeJ0c+3TTBrg= +github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/redis/go-redis/v9 v9.7.3 h1:YpPyAayJV+XErNsatSElgRZZVCwXX9QzkKYNvO7x0wM= +github.com/redis/go-redis/v9 v9.7.3/go.mod h1:bGUrSggJ9X9GUmZpZNEOQKaANxSGgOEBRltRTZHSvrA= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg 
v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rjeczalik/notify v0.9.3 h1:6rJAzHTGKXGj76sbRgDiDcYj/HniypXmSJo1SWakZeY= github.com/rjeczalik/notify v0.9.3/go.mod h1:gF3zSOrafR9DQEWSE8TjfI9NkooDxbyT4UgRGKZA0lc= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= -github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0= -github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs= +github.com/safchain/ethtool v0.5.10 h1:Im294gZtuf4pSGJRAOGKaASNi3wMeFaGaWuSaomedpc= +github.com/safchain/ethtool v0.5.10/go.mod h1:w9jh2Lx7YBR4UwzLkzCmWl85UY0W2uZdd7/DckVE5+c= github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7PQSMz9NShzorzCiG2fk9+xuCgLkPeCvMHYR2OWg= github.com/secure-io/sio-go v0.3.1 h1:dNvY9awjabXTYGsTF1PiCySl9Ltofk9GA3VdWlo7rRc= github.com/secure-io/sio-go v0.3.1/go.mod h1:+xbkjDzPjwh4Axd07pRKSNriS9SCiYksWnZqdnfpQxs= -github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI= -github.com/shirou/gopsutil/v3 v3.24.1/go.mod h1:UU7a2MSBQa+kW1uuDq8DeEBS8kmrnQwsv2b5O513rwU= +github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= +github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= +github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= @@ -630,11 +602,12 @@ github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb6 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spiffe/go-spiffe/v2 v2.5.0 h1:N2I01KCUkv1FAjZXJMwh95KK1ZIQLYbPfhaxw8WS0hE= +github.com/spiffe/go-spiffe/v2 v2.5.0/go.mod h1:P+NxobPc6wXhVtINNtFjNWGBTreew1GBUCwT2wPmb7g= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -643,88 +616,85 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= -github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= -github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw= -github.com/tinylib/msgp v1.1.9 h1:SHf3yoO2sGA0veCJeCBYLHuttAVFHGm2RHgNodW7wQU= -github.com/tinylib/msgp v1.1.9/go.mod h1:BCXGB54lDD8qUEPmiG0cQQUANC4IUQyB2ItS2UDlO/k= -github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= -github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4= -github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0= -github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= -github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr4= -github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY= +github.com/tinylib/msgp 
v1.4.0 h1:SYOeDRiydzOw9kSiwdYp9UcBgPFtLU2WDHaJXyHruf8= +github.com/tinylib/msgp v1.4.0/go.mod h1:cvjFkb4RiC8qSBOPMGPSzSAx47nAsfhLVTCZZNuHv5o= +github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4= +github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4= +github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso= +github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/unrolled/secure v1.14.0 h1:u9vJTU/pR4Bny0ntLUMxdfLtmIRGvQf2sEFuA0TG9AE= -github.com/unrolled/secure v1.14.0/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40= +github.com/unrolled/secure v1.17.0 h1:Io7ifFgo99Bnh0J7+Q+qcMzWM6kaDPCA5FroFZEdbWU= +github.com/unrolled/secure v1.17.0/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/vbauerster/mpb/v8 v8.7.2 h1:SMJtxhNho1MV3OuFgS1DAzhANN1Ejc5Ct+0iSaIkB14= -github.com/vbauerster/mpb/v8 v8.7.2/go.mod h1:ZFnrjzspgDHoxYLGvxIruiNk73GNTPG4YHgVNpR10VY= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= -github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= +github.com/vbauerster/mpb/v8 v8.9.3 h1:PnMeF+sMvYv9u23l6DO6Q3+Mdj408mjLRXIzmUmU2Z8= +github.com/vbauerster/mpb/v8 v8.9.3/go.mod h1:hxS8Hz4C6ijnppDSIX6LjG8FYJSoPo9iIOcE53Zik0c= github.com/xdg/scram v1.0.5 h1:TuS0RFmt5Is5qm9Tm2SoD89OPqe4IRiFtyFY4iwWXsw= github.com/xdg/scram v1.0.5/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4= github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= 
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd/api/v3 v3.5.12 h1:W4sw5ZoU2Juc9gBWuLk5U6fHfNVyY1WC5g9uiXZio/c= -go.etcd.io/etcd/api/v3 v3.5.12/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4= -go.etcd.io/etcd/client/pkg/v3 v3.5.12 h1:EYDL6pWwyOsylrQyLp2w+HkQ46ATiOvoEdMarindU2A= -go.etcd.io/etcd/client/pkg/v3 v3.5.12/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4= -go.etcd.io/etcd/client/v3 v3.5.12 h1:v5lCPXn1pf1Uu3M4laUE2hp/geOTc5uPcYYsNe1lDxg= -go.etcd.io/etcd/client/v3 v3.5.12/go.mod h1:tSbBCakoWmmddL+BKVAJHa9km+O/E+bumDe9mSbPiqw= -go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= -go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA= -go.opentelemetry.io/otel v1.23.1 h1:Za4UzOqJYS+MUczKI320AtqZHZb7EqxO00jAHE0jmQY= -go.opentelemetry.io/otel v1.23.1/go.mod h1:Td0134eafDLcTS4y+zQ26GE8u3dEuRBiBCTUIRHaikA= -go.opentelemetry.io/otel/metric v1.23.1 h1:PQJmqJ9u2QaJLBOELl1cxIdPcpbwzbkjfEyelTl2rlo= -go.opentelemetry.io/otel/metric v1.23.1/go.mod h1:mpG2QPlAfnK8yNhNJAxDZruU9Y1/HubbC+KyH8FaCWI= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/trace v1.23.1 h1:4LrmmEd8AU2rFvU1zegmvqW7+kWarxtNOPyeL6HmYY8= -go.opentelemetry.io/otel/trace v1.23.1/go.mod h1:4IpnpJFwr1mo/6HL8XIPJaE9y0+u1KcVmuW7dwFSVrI= +go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8= +go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY= +go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc= +go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs= +go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY= +go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU= +go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ= +go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/detectors/gcp v1.35.0 h1:bGvFt68+KTiAKFlacHW6AhA56GF2rS0bdD3aJYEnmzA= +go.opentelemetry.io/contrib/detectors/gcp v1.35.0/go.mod h1:qGWP8/+ILwMRIUf9uIVLloR1uo5ZYAslM4O6OqUi1DA= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= 
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0 h1:PB3Zrjs1sG1GBX51SXyTSoOTqcDglmsk7nT6tkKPb/k= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0/go.mod h1:U2R3XyVPzn0WX7wOIypPuptulsMcPDPs/oiSVOMVnHY= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= +go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= goftp.io/server/v2 v2.0.1 h1:H+9UbCX2N206ePDSVNCjBftOKOgil6kQ5RAQNx5hJwE= goftp.io/server/v2 v2.0.1/go.mod h1:7+H/EIq7tXdfo1Muu5p+l3oQ6rYkDZ8lY7IM5d5kVdQ= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -732,66 +702,51 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201016220609-9e8e0b390897/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220307211146-efcb8507fb70/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3 h1:/RIbNt/Zr7rVhIkQhooTxCxFcdWLGIKnZA4IXNFSrvo= -golang.org/x/exp v0.0.0-20240205201215-2c58cdc269a3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.17.0/go.mod 
h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= -golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98= +golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -800,12 +755,13 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -816,8 +772,6 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -825,134 +779,101 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220307203707-22a9840ba4d7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.17.0/go.mod 
h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod 
h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU= +golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/api v0.164.0 h1:of5G3oE2WRMVb2yoWKME4ZP8y8zpUKC6bMhxDr8ifyk= -google.golang.org/api v0.164.0/go.mod h1:2OatzO7ZDQsoS7IFf3rvsE17/TldiU3F/zxFHeqUB5o= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 h1:g/4bk7P6TPMkAUbUhquq98xey1slwvuVJPosdBqYJlU= -google.golang.org/genproto v0.0.0-20240205150955-31a09d347014/go.mod h1:xEgQu1e4stdSSsxPDK8Azkrk/ECl5HvdPf6nbZrTS5M= -google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 h1:x9PwdEgd11LgK+orcck69WVRo7DezSO4VUMPI4xpc8A= -google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 h1:FSL3lRCkhaPFxqi0s9o+V4UI2WTzAVOvkgbd4kVV4Wg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014/go.mod h1:SaPjaZGWb0lPqs6Ittu0spdfrOArqji4ZdeP5IC/9N4= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.61.0 h1:TOvOcuXn30kRao+gfcvsebNEa5iZIiLkisYEkf7R7o0= -google.golang.org/grpc v1.61.0/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/api 
v0.230.0 h1:2u1hni3E+UXAXrONrrkfWpi/V6cyKVAbfGVeGtC3OxM= +google.golang.org/api v0.230.0/go.mod h1:aqvtoMk7YkiXx+6U12arQFExiRV9D/ekvMCwCd/TksQ= +google.golang.org/genproto v0.0.0-20250422160041-2d3770c4ea7f h1:iZiXS7qm4saaCcdK7S/i1Qx9ZHO2oa16HQqwYc1tPKY= +google.golang.org/genproto v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:Cej/8iHf9mPl71o/a+R1rrvSFrAAVCUFX9s/sbNttBc= +google.golang.org/genproto/googleapis/api v0.0.0-20250422160041-2d3770c4ea7f h1:tjZsroqekhC63+WMqzmWyW5Twj/ZfR5HAlpd5YQ1Vs0= +google.golang.org/genproto/googleapis/api v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:Cd8IzgPo5Akum2c9R6FsXNaZbH3Jpa2gpHlW89FqlyQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f h1:N/PrbTw4kdkqNRzVfWPrBekzLuarFREcbFOiOLkXon4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250422160041-2d3770c4ea7f/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.72.0 h1:S7UkcVa60b5AAQTaO6ZKamFp1zMZSU0fGDK2WZLbBnM= +google.golang.org/grpc v1.72.0/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/h2non/filetype.v1 v1.0.5 h1:CC1jjJjoEhNVbMhXYalmGBhOBK2V70Q1N850wt/98/Y= -gopkg.in/h2non/filetype.v1 v1.0.5/go.mod h1:M0yem4rwSX5lLVrkEuRRp2/NinFMD5vgJ4DlAhZcfNo= gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod 
h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= -gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/helm-releases/minio-5.2.0.tgz b/helm-releases/minio-5.2.0.tgz new file mode 100644 index 0000000000000..92d60a609d458 Binary files /dev/null and b/helm-releases/minio-5.2.0.tgz differ diff --git a/helm-releases/minio-5.3.0.tgz b/helm-releases/minio-5.3.0.tgz new file mode 100644 index 0000000000000..cac2baa52e844 Binary files /dev/null and b/helm-releases/minio-5.3.0.tgz differ diff --git a/helm-releases/minio-5.4.0.tgz b/helm-releases/minio-5.4.0.tgz new file mode 100644 index 0000000000000..22f8d733e63f3 Binary files /dev/null and b/helm-releases/minio-5.4.0.tgz differ diff --git a/helm/minio/Chart.yaml b/helm/minio/Chart.yaml index df7b450e30602..6ae6f11618a4f 100644 --- a/helm/minio/Chart.yaml +++ b/helm/minio/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v1 description: High Performance Object Storage name: minio -version: 5.1.0 -appVersion: RELEASE.2024-03-03T17-50-39Z +version: 5.4.0 +appVersion: RELEASE.2024-12-18T13-15-44Z keywords: - minio - storage diff --git a/helm/minio/README.md b/helm/minio/README.md index a1d5c99f8ff2a..00d1e45df14e0 100644 --- a/helm/minio/README.md +++ b/helm/minio/README.md @@ -6,7 +6,7 @@ MinIO is a High Performance Object Storage released under GNU Affero General Pub | IMPORTANT | | -------------------------- | -| This Helm chart is community built, maintained, and supported. MinIO does not guarantee support for any given bug, feature request, or update referencing this chart.

MinIO publishes a separate [MinIO Kubernetes Operator and Tenant Helm Chart](https://github.com/minio/operator/tree/master/helm) that is officially maintained and supported. MinIO strongly recommends using the MinIO Kubernetes Operator for production deployments. See [Deploy Operator With Helm](https://min.io/docs/minio/kubernetes/upstream/operations/install-deploy-manage/deploy-operator-helm.html?ref=github) for additional documentation. | +| This Helm chart is community built, maintained, and supported. MinIO does not guarantee support for any given bug, feature request, or update referencing this chart.

MinIO publishes a separate [MinIO Kubernetes Operator and Tenant Helm Chart](https://github.com/minio/operator/tree/master/helm) that is officially maintained and supported. MinIO strongly recommends using the MinIO Kubernetes Operator for production deployments. See [Deploy Operator With Helm](https://docs.min.io/community/minio-object-store/operations/deployments/k8s-deploy-operator-helm-on-kubernetes.html?ref=github) for additional documentation. | ## Introduction diff --git a/helm/minio/templates/NOTES.txt b/helm/minio/templates/NOTES.txt index 7051b1e62c118..73d77e18878a3 100644 --- a/helm/minio/templates/NOTES.txt +++ b/helm/minio/templates/NOTES.txt @@ -1,6 +1,6 @@ {{- if eq .Values.service.type "ClusterIP" "NodePort" }} MinIO can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: -{{ template "minio.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local +{{ template "minio.fullname" . }}.{{ .Release.Namespace }}.{{ .Values.clusterDomain }} To access MinIO from localhost, run the below commands: @@ -12,11 +12,11 @@ Read more about port forwarding here: http://kubernetes.io/docs/user-guide/kubec You can now access MinIO server on http://localhost:9000. Follow the below steps to connect to MinIO server with mc client: - 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart + 1. Download the MinIO mc client - https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart - 2. export MC_HOST_{{ template "minio.fullname" . }}-local=http://$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "minio.secretName" . }} -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "minio.secretName" . }} -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:{{ .Values.service.port }} + 2. export MC_HOST_{{ template "minio.fullname" . }}_local=http://$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "minio.secretName" . }} -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "minio.secretName" . }} -o jsonpath="{.data.rootPassword}" | base64 --decode)@localhost:{{ .Values.service.port }} - 3. mc ls {{ template "minio.fullname" . }}-local + 3. mc ls {{ template "minio.fullname" . }}_local {{- end }} {{- if eq .Values.service.type "LoadBalancer" }} @@ -27,13 +27,13 @@ Note that the public IP may take a couple of minutes to be available. You can now access MinIO server on http://:9000. Follow the below steps to connect to MinIO server with mc client: - 1. Download the MinIO mc client - https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart + 1. Download the MinIO mc client - https://docs.min.io/community/minio-object-store/reference/minio-mc.html#quickstart - 2. export MC_HOST_{{ template "minio.fullname" . }}-local=http://$(kubectl get secret {{ template "minio.secretName" . }} --namespace {{ .Release.Namespace }} -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret {{ template "minio.secretName" . }} -o jsonpath="{.data.rootPassword}" | base64 --decode)@:{{ .Values.service.port }} + 2. export MC_HOST_{{ template "minio.fullname" . }}_local=http://$(kubectl get secret {{ template "minio.secretName" . }} --namespace {{ .Release.Namespace }} -o jsonpath="{.data.rootUser}" | base64 --decode):$(kubectl get secret {{ template "minio.secretName" . 
}} -o jsonpath="{.data.rootPassword}" | base64 --decode)@:{{ .Values.service.port }} 3. mc ls {{ template "minio.fullname" . }} -Alternately, you can use your browser or the MinIO SDK to access the server - https://min.io/docs/minio/linux/reference/minio-server/minio-server.html +Alternately, you can use your browser or the MinIO SDK to access the server - https://docs.min.io/community/minio-object-store/reference/minio-server/minio-server.html {{- end }} {{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} diff --git a/helm/minio/templates/_helper_create_bucket.txt b/helm/minio/templates/_helper_create_bucket.txt index 83b8dcb2dfc82..62b1917806a31 100644 --- a/helm/minio/templates/_helper_create_bucket.txt +++ b/helm/minio/templates/_helper_create_bucket.txt @@ -94,6 +94,8 @@ createBucket() { echo "Suspending versioning for '$BUCKET'" ${MC} version suspend myminio/$BUCKET fi + else + echo "No versioning action for '$BUCKET'" fi else echo "Bucket '$BUCKET' versioning unchanged." @@ -117,6 +119,5 @@ connectToMinio $scheme {{ $global := . }} # Create the buckets {{- range .Values.buckets }} -createBucket {{ tpl .name $global }} {{ .policy | default "none" | quote }} {{ .purge | default false }} {{ .versioning | default false }} {{ .objectlocking | default false }} -{{- end }} +createBucket {{ tpl .name $global }} {{ .policy | default "none" | quote }} {{ .purge | default false }} {{ .versioning | default "" }} {{ .objectlocking | default false }}{{- end }} {{- end }} diff --git a/helm/minio/templates/_helper_create_svcacct.txt b/helm/minio/templates/_helper_create_svcacct.txt index 59f51b1774df2..5c8aec4f00772 100644 --- a/helm/minio/templates/_helper_create_svcacct.txt +++ b/helm/minio/templates/_helper_create_svcacct.txt @@ -93,7 +93,7 @@ echo {{ tpl .accessKey $global }} > $MINIO_ACCESSKEY_SECRETKEY_TMP {{- if .existingSecret }} cat /config/secrets-svc/{{ tpl .existingSecret $global }}/{{ tpl .existingSecretKey $global }} >> $MINIO_ACCESSKEY_SECRETKEY_TMP # Add a new line if it doesn't exist -sed -i '$a\' $MINIO_ACCESSKEY_SECRETKEY_TMP +echo >> $MINIO_ACCESSKEY_SECRETKEY_TMP {{ else }} echo {{ .secretKey }} >> $MINIO_ACCESSKEY_SECRETKEY_TMP {{- end }} diff --git a/helm/minio/templates/_helpers.tpl b/helm/minio/templates/_helpers.tpl index 1cb209e5e9f07..64f34aeb7b8c6 100644 --- a/helm/minio/templates/_helpers.tpl +++ b/helm/minio/templates/_helpers.tpl @@ -131,9 +131,7 @@ Also, we can not use a single if because lazy evaluation is not an option {{- if .Values.global }} {{- if .Values.global.imagePullSecrets }} imagePullSecrets: -{{- range .Values.global.imagePullSecrets }} - - name: {{ . 
}} -{{- end }} + {{ toYaml .Values.global.imagePullSecrets }} {{- else if .Values.imagePullSecrets }} imagePullSecrets: {{ toYaml .Values.imagePullSecrets }} diff --git a/helm/minio/templates/ciliumnetworkpolicy.yaml b/helm/minio/templates/ciliumnetworkpolicy.yaml index 01a14d35a6416..1dc91bcf2890a 100644 --- a/helm/minio/templates/ciliumnetworkpolicy.yaml +++ b/helm/minio/templates/ciliumnetworkpolicy.yaml @@ -16,9 +16,9 @@ spec: ingress: - toPorts: - ports: - - port: {{ .Values.minioAPIPort }} + - port: "{{ .Values.minioAPIPort }}" protocol: TCP - - port: {{ .Values.minioConsolePort }} + - port: "{{ .Values.minioConsolePort }}" protocol: TCP {{- if not .Values.networkPolicy.allowExternal }} fromEndpoints: diff --git a/helm/minio/templates/console-service.yaml b/helm/minio/templates/console-service.yaml index 2bbe7e385d7d0..f09e3f3c6a31f 100644 --- a/helm/minio/templates/console-service.yaml +++ b/helm/minio/templates/console-service.yaml @@ -12,16 +12,18 @@ metadata: annotations: {{- toYaml .Values.consoleService.annotations | nindent 4 }} {{- end }} spec: - {{- if (or (eq .Values.consoleService.type "ClusterIP" "") (empty .Values.consoleService.type)) }} - type: ClusterIP - {{- if not (empty .Values.consoleService.clusterIP) }} + type: {{ .Values.consoleService.type }} + {{- if and (eq .Values.consoleService.type "ClusterIP") .Values.consoleService.clusterIP }} clusterIP: {{ .Values.consoleService.clusterIP }} {{- end }} - {{- else if eq .Values.consoleService.type "LoadBalancer" }} - type: {{ .Values.consoleService.type }} - loadBalancerIP: {{ default "" .Values.consoleService.loadBalancerIP }} - {{- else }} - type: {{ .Values.consoleService.type }} + {{- if or (eq .Values.consoleService.type "LoadBalancer") (eq .Values.consoleService.type "NodePort") }} + externalTrafficPolicy: {{ .Values.consoleService.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.consoleService.type "LoadBalancer") .Values.consoleService.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ .Values.consoleService.loadBalancerSourceRanges }} + {{ end }} + {{- if and (eq .Values.consoleService.type "LoadBalancer") (not (empty .Values.consoleService.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.consoleService.loadBalancerIP }} {{- end }} ports: - name: {{ $scheme }} diff --git a/helm/minio/templates/deployment.yaml b/helm/minio/templates/deployment.yaml index d7b405aef0cb5..4c57010fd51d4 100644 --- a/helm/minio/templates/deployment.yaml +++ b/helm/minio/templates/deployment.yaml @@ -55,12 +55,7 @@ spec: {{- end }} {{- if and .Values.securityContext.enabled .Values.persistence.enabled }} securityContext: - runAsUser: {{ .Values.securityContext.runAsUser }} - runAsGroup: {{ .Values.securityContext.runAsGroup }} - fsGroup: {{ .Values.securityContext.fsGroup }} - {{- if and (ge .Capabilities.KubeVersion.Major "1") (ge .Capabilities.KubeVersion.Minor "20") }} - fsGroupChangePolicy: {{ .Values.securityContext.fsGroupChangePolicy }} - {{- end }} + {{ omit .Values.securityContext "enabled" | toYaml | nindent 8 }} {{- end }} {{ if .Values.serviceAccount.create }} serviceAccountName: {{ .Values.serviceAccount.name }} @@ -173,6 +168,11 @@ spec: value: {{ tpl $val $ | quote }} {{- end }} resources: {{- toYaml .Values.resources | nindent 12 }} + {{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + {{- with .Values.containerSecurityContext }} + securityContext: {{ toYaml . | nindent 12}} + {{- end }} + {{- end }} {{- with .Values.extraContainers }} {{- if eq (typeOf .) 
"string" }} {{- tpl . $ | nindent 8 }} diff --git a/helm/minio/templates/networkpolicy.yaml b/helm/minio/templates/networkpolicy.yaml index b9c077171aa88..bb45a6c63d5be 100644 --- a/helm/minio/templates/networkpolicy.yaml +++ b/helm/minio/templates/networkpolicy.yaml @@ -16,11 +16,51 @@ spec: ingress: - ports: - port: {{ .Values.minioAPIPort }} + protocol: TCP - port: {{ .Values.minioConsolePort }} + protocol: TCP {{- if not .Values.networkPolicy.allowExternal }} from: - podSelector: matchLabels: {{ template "minio.name" . }}-client: "true" {{- end }} + {{- if .Values.networkPolicy.egress.enabled }} + egress: + - ports: + {{ .Values.networkPolicy.egress.ports | toJson }} + {{- with .Values.networkPolicy.egress.to }} + to: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- end }} +--- +kind: NetworkPolicy +apiVersion: {{ template "minio.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "minio.fullname" . }}-post-job + labels: + app: {{ template "minio.name" . }}-post-job + chart: {{ template "minio.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "minio.name" . }}-job + release: {{ .Release.Name }} + egress: + - ports: + - port: {{ .Values.minioAPIPort }} + protocol: TCP + - port: {{ .Values.minioConsolePort }} + protocol: TCP + {{- if .Values.networkPolicy.egress.enabled }} + - ports: + {{ .Values.networkPolicy.egress.ports | toJson }} + {{- with .Values.networkPolicy.egress.to }} + to: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- end }} {{- end }} diff --git a/helm/minio/templates/post-job.yaml b/helm/minio/templates/post-job.yaml index 5feb783177c32..955d6558c56a8 100644 --- a/helm/minio/templates/post-job.yaml +++ b/helm/minio/templates/post-job.yaml @@ -39,10 +39,7 @@ spec: tolerations: {{- toYaml . | nindent 8 }} {{- end }} {{- if .Values.postJob.securityContext.enabled }} - securityContext: - runAsUser: {{ .Values.postJob.securityContext.runAsUser }} - runAsGroup: {{ .Values.postJob.securityContext.runAsGroup }} - fsGroup: {{ .Values.postJob.securityContext.fsGroup }} + securityContext: {{ omit .Values.postJob.securityContext "enabled" | toYaml | nindent 12 }} {{- end }} volumes: - name: etc-path @@ -82,6 +79,9 @@ spec: - key: {{ .Values.tls.publicCrt }} path: CAs/public.crt {{- end }} + {{- if .Values.customCommandJob.extraVolumes }} + {{- toYaml .Values.customCommandJob.extraVolumes | nindent 8 }} + {{- end }} {{- if .Values.serviceAccount.create }} serviceAccountName: {{ .Values.serviceAccount.name }} {{- end }} @@ -90,9 +90,9 @@ spec: - name: minio-make-policy image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" {{- if .Values.makePolicyJob.securityContext.enabled }} - securityContext: - runAsUser: {{ .Values.makePolicyJob.securityContext.runAsUser }} - runAsGroup: {{ .Values.makePolicyJob.securityContext.runAsGroup }} + {{- with .Values.makePolicyJob.containerSecurityContext }} + securityContext: {{ toYaml . | nindent 12 }} + {{- end }} {{- end }} imagePullPolicy: {{ .Values.mcImage.pullPolicy }} {{- if .Values.makePolicyJob.exitCommand }} @@ -124,9 +124,9 @@ spec: - name: minio-make-bucket image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" {{- if .Values.makeBucketJob.securityContext.enabled }} - securityContext: - runAsUser: {{ .Values.makeBucketJob.securityContext.runAsUser }} - runAsGroup: {{ .Values.makeBucketJob.securityContext.runAsGroup }} + {{- with .Values.makeBucketJob.containerSecurityContext }} + securityContext: {{ toYaml . 
| nindent 12 }} + {{- end }} {{- end }} imagePullPolicy: {{ .Values.mcImage.pullPolicy }} {{- if .Values.makeBucketJob.exitCommand }} @@ -157,9 +157,9 @@ spec: - name: minio-make-user image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" {{- if .Values.makeUserJob.securityContext.enabled }} - securityContext: - runAsUser: {{ .Values.makeUserJob.securityContext.runAsUser }} - runAsGroup: {{ .Values.makeUserJob.securityContext.runAsGroup }} + {{- with .Values.makeUserJob.containerSecurityContext }} + securityContext: {{ toYaml . | nindent 12 }} + {{- end }} {{- end }} imagePullPolicy: {{ .Values.mcImage.pullPolicy }} {{- if .Values.makeUserJob.exitCommand }} @@ -190,9 +190,9 @@ spec: - name: minio-custom-command image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" {{- if .Values.customCommandJob.securityContext.enabled }} - securityContext: - runAsUser: {{ .Values.customCommandJob.securityContext.runAsUser }} - runAsGroup: {{ .Values.customCommandJob.securityContext.runAsGroup }} + {{- with .Values.customCommandJob.containerSecurityContext }} + securityContext: {{ toYaml . | nindent 12 }} + {{- end }} {{- end }} imagePullPolicy: {{ .Values.mcImage.pullPolicy }} {{- if .Values.customCommandJob.exitCommand }} @@ -217,15 +217,18 @@ spec: - name: cert-secret-volume-mc mountPath: {{ .Values.configPathmc }}certs {{- end }} + {{- if .Values.customCommandJob.extraVolumeMounts }} + {{- toYaml .Values.customCommandJob.extraVolumeMounts | nindent 12 }} + {{- end }} resources: {{- toYaml .Values.customCommandJob.resources | nindent 12 }} {{- end }} {{- if .Values.svcaccts }} - name: minio-make-svcacct image: "{{ .Values.mcImage.repository }}:{{ .Values.mcImage.tag }}" {{- if .Values.makeServiceAccountJob.securityContext.enabled }} - securityContext: - runAsUser: {{ .Values.makeServiceAccountJob.securityContext.runAsUser }} - runAsGroup: {{ .Values.makeServiceAccountJob.securityContext.runAsGroup }} + {{- with .Values.makeServiceAccountJob.containerSecurityContext }} + securityContext: {{ toYaml . 
| nindent 12 }} + {{- end }} {{- end }} imagePullPolicy: {{ .Values.mcImage.pullPolicy }} {{- if .Values.makeServiceAccountJob.exitCommand }} diff --git a/helm/minio/templates/service.yaml b/helm/minio/templates/service.yaml index ba1f3feaa56cd..d872cd07adcfa 100644 --- a/helm/minio/templates/service.yaml +++ b/helm/minio/templates/service.yaml @@ -13,16 +13,18 @@ metadata: annotations: {{- toYaml .Values.service.annotations | nindent 4 }} {{- end }} spec: - {{- if (or (eq .Values.service.type "ClusterIP" "") (empty .Values.service.type)) }} - type: ClusterIP - {{- if not (empty .Values.service.clusterIP) }} + type: {{ .Values.service.type }} + {{- if and (eq .Values.service.type "ClusterIP") .Values.service.clusterIP }} clusterIP: {{ .Values.service.clusterIP }} {{- end }} - {{- else if eq .Values.service.type "LoadBalancer" }} - type: {{ .Values.service.type }} - loadBalancerIP: {{ default "" .Values.service.loadBalancerIP }} - {{- else }} - type: {{ .Values.service.type }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }} + {{ end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ default "" .Values.service.loadBalancerIP | quote }} {{- end }} ports: - name: {{ $scheme }} diff --git a/helm/minio/templates/statefulset.yaml b/helm/minio/templates/statefulset.yaml index 938148ea2f112..d671eaaf4a14d 100644 --- a/helm/minio/templates/statefulset.yaml +++ b/helm/minio/templates/statefulset.yaml @@ -83,12 +83,7 @@ spec: {{- end }} {{- if and .Values.securityContext.enabled .Values.persistence.enabled }} securityContext: - runAsUser: {{ .Values.securityContext.runAsUser }} - runAsGroup: {{ .Values.securityContext.runAsGroup }} - fsGroup: {{ .Values.securityContext.fsGroup }} - {{- if and (ge .Capabilities.KubeVersion.Major "1") (ge .Capabilities.KubeVersion.Minor "20") }} - fsGroupChangePolicy: {{ .Values.securityContext.fsGroupChangePolicy }} - {{- end }} + {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} {{- end }} {{- if .Values.serviceAccount.create }} serviceAccountName: {{ .Values.serviceAccount.name }} @@ -100,7 +95,7 @@ spec: command: [ "/bin/sh", "-ce", - "/usr/bin/docker-entrypoint.sh minio server {{- range $i := until $poolCount }}{{ $factor := mul $i $nodeCount }}{{ $endIndex := add $factor $nodeCount }}{{ $beginIndex := mul $i $nodeCount }} {{ $scheme }}://{{ template `minio.fullname` $ }}-{{ `{` }}{{ $beginIndex }}...{{ sub $endIndex 1 }}{{ `}`}}.{{ template `minio.fullname` $ }}-svc.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }}{{if (gt $drivesPerNode 1)}}{{ $bucketRoot }}-{{ `{` }}0...{{ sub $drivesPerNode 1 }}{{ `}` }}{{ else }}{{ $bucketRoot }}{{end }}{{- end }} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template `minio.extraArgs` . 
}}" + "/usr/bin/docker-entrypoint.sh minio server {{- range $i := until $poolCount }}{{ $factor := mul $i $nodeCount }}{{ $endIndex := add $factor $nodeCount }}{{ $beginIndex := mul $i $nodeCount }} {{ $scheme }}://{{ template `minio.fullname` $ }}-{{ `{` }}{{ $beginIndex }}...{{ sub $endIndex 1 }}{{ `}`}}.{{ template `minio.fullname` $ }}-svc.{{ $.Release.Namespace }}.svc{{if (gt $drivesPerNode 1)}}{{ $bucketRoot }}-{{ `{` }}0...{{ sub $drivesPerNode 1 }}{{ `}` }}{{ else }}{{ $bucketRoot }}{{end }}{{- end }} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template `minio.extraArgs` . }}" ] volumeMounts: {{- if $penabled }} @@ -181,6 +176,8 @@ spec: value: {{ .Values.oidc.scopes }} - name: MINIO_IDENTITY_OPENID_COMMENT value: {{ .Values.oidc.comment }} + - name: MINIO_IDENTITY_OPENID_REDIRECT_URI + value: {{ .Values.oidc.redirectUri }} - name: MINIO_IDENTITY_OPENID_DISPLAY_NAME value: {{ .Values.oidc.displayName }} {{- end }} @@ -189,6 +186,11 @@ spec: value: {{ tpl $val $ | quote }} {{- end }} resources: {{- toYaml .Values.resources | nindent 12 }} + {{- if and .Values.securityContext.enabled .Values.persistence.enabled }} + {{- with .Values.containerSecurityContext }} + securityContext: {{ toYaml . | nindent 12}} + {{- end }} + {{- end }} {{- with .Values.extraContainers }} {{- if eq (typeOf .) "string" }} {{- tpl . $ | nindent 8 }} @@ -228,8 +230,8 @@ spec: volumeClaimTemplates: {{- if gt $drivesPerNode 1 }} {{- range $diskId := until $drivesPerNode}} - - apiVersion: v1 - kind: PersistentVolumeClaim + - apiVersion: v1 + kind: PersistentVolumeClaim metadata: name: export-{{ $diskId }} {{- if $.Values.persistence.annotations }} @@ -245,8 +247,8 @@ spec: storage: {{ $psize }} {{- end }} {{- else }} - - apiVersion: v1 - kind: PersistentVolumeClaim + - apiVersion: v1 + kind: PersistentVolumeClaim metadata: name: export {{- if $.Values.persistence.annotations }} diff --git a/helm/minio/values.yaml b/helm/minio/values.yaml index 9d7a4d126fc9f..2ea13b10d5914 100644 --- a/helm/minio/values.yaml +++ b/helm/minio/values.yaml @@ -14,7 +14,7 @@ clusterDomain: cluster.local ## image: repository: quay.io/minio/minio - tag: RELEASE.2024-03-03T17-50-39Z + tag: RELEASE.2024-12-18T13-15-44Z pullPolicy: IfNotPresent imagePullSecrets: [] @@ -25,7 +25,7 @@ imagePullSecrets: [] ## mcImage: repository: quay.io/minio/mc - tag: RELEASE.2024-03-03T00-13-08Z + tag: RELEASE.2024-11-21T17-21-54Z pullPolicy: IfNotPresent ## minio mode, i.e. standalone or distributed @@ -45,6 +45,9 @@ ignoreChartChecksums: false ## Additional arguments to pass to minio binary extraArgs: [] +# example for enabling FTP: +# - --ftp=\"address=:8021\" +# - --ftp=\"passive-port-range=10000-10010\" ## Additional volumes to minio container extraVolumes: [] @@ -85,7 +88,7 @@ runtimeClassName: "" ## Set default rootUser, rootPassword ## rootUser and rootPassword is generated when not set -## Distributed MinIO ref: https://min.io/docs/minio/linux/operations/install-deploy-manage/deploy-minio-multi-node-multi-drive.html +## Distributed MinIO ref: https://docs.min.io/community/minio-object-store/operations/install-deploy-manage/deploy-minio-multi-node-multi-drive.html ## rootUser: "" rootPassword: "" @@ -129,7 +132,7 @@ tls: publicCrt: public.crt privateKey: private.key -## Trusted Certificates Settings for MinIO. Ref: https://min.io/docs/minio/linux/operations/network-encryption.html#third-party-certificate-authorities +## Trusted Certificates Settings for MinIO. 
Ref: https://docs.min.io/community/minio-object-store/operations/network-encryption.html#third-party-certificate-authorities ## Bundle multiple trusted certificates into one secret and pass that here. Ref: https://github.com/minio/minio/tree/master/docs/tls/kubernetes#2-create-kubernetes-secret ## When using self-signed certificates, remember to include MinIO's own certificate in the bundle with key public.crt. ## If certSecret is left empty and tls is enabled, this chart installs the public certificate from .Values.tls.certSecret. @@ -179,15 +182,29 @@ service: externalIPs: [] annotations: {} + ## service.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + #loadBalancerSourceRanges: + # - 10.10.10.0/24 + loadBalancerSourceRanges: [] + + ## service.externalTrafficPolicy minio service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## Configure Ingress based on the documentation here: https://kubernetes.io/docs/concepts/services-networking/ingress/ ## ingress: enabled: false ingressClassName: ~ - labels: {} + labels: + {} # node-role.kubernetes.io/ingress: platform - annotations: {} + annotations: + {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" # kubernetes.io/ingress.allow-http: "false" @@ -211,13 +228,26 @@ consoleService: loadBalancerIP: ~ externalIPs: [] annotations: {} + ## consoleService.loadBalancerSourceRanges Addresses that are allowed when service is LoadBalancer + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + #loadBalancerSourceRanges: + # - 10.10.10.0/24 + loadBalancerSourceRanges: [] + + ## consoleService.externalTrafficPolicy minio service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster consoleIngress: enabled: false ingressClassName: ~ - labels: {} + labels: + {} # node-role.kubernetes.io/ingress: platform - annotations: {} + annotations: + {} # kubernetes.io/ingress.class: nginx # kubernetes.io/tls-acme: "true" # kubernetes.io/ingress.allow-http: "false" @@ -250,6 +280,9 @@ securityContext: fsGroup: 1000 fsGroupChangePolicy: "OnRootMismatch" +containerSecurityContext: + readOnlyRootFilesystem: false + # Additional pod annotations podAnnotations: {} @@ -336,7 +369,7 @@ makePolicyJob: users: ## Username, password and policy to be assigned to the user ## Default policies are [readonly|readwrite|writeonly|consoleAdmin|diagnostics] - ## Add new policies as explained here https://min.io/docs/minio/kubernetes/upstream/administration/identity-access-management.html#access-management + ## Add new policies as explained here https://docs.min.io/community/minio-object-store/administration/identity-access-management.html#access-management ## NOTE: this will fail if LDAP is enabled in your MinIO deployment ## make sure to disable this if you are using LDAP.
- accessKey: console @@ -362,9 +395,10 @@ makeUserJob: ## List of service accounts to be created after minio install ## -svcaccts: [] +svcaccts: + [] ## accessKey, secretKey and parent user to be assigned to the service accounts - ## Add new service accounts as explained here https://min.io/docs/minio/kubernetes/upstream/administration/identity-access-management/minio-user-management.html#service-accounts + ## Add new service accounts as explained here https://docs.min.io/community/minio-object-store/administration/identity-access-management/minio-user-management.html#access-keys # - accessKey: console-svcacct # secretKey: console123 # user: console @@ -401,7 +435,8 @@ makeServiceAccountJob: ## List of buckets to be created after minio install ## -buckets: [] +buckets: + [] # # Name of the bucket # - name: bucket1 # # Policy to be set on the @@ -449,6 +484,18 @@ customCommandJob: resources: requests: memory: 128Mi + ## Additional volumes to add to the post-job. + extraVolumes: + [] + # - name: extra-policies + # configMap: + # name: my-extra-policies-cm + ## Additional volumeMounts to add to the custom commands container when + ## running the post-job. + extraVolumeMounts: + [] + # - name: extra-policies + # mountPath: /mnt/extras/ # Command to run after the main command on exit exitCommand: "" @@ -468,7 +515,7 @@ postJob: ## Use this field to add environment variables relevant to MinIO server. These fields will be passed on to MinIO container(s) ## when Chart is deployed environment: - ## Please refer for comprehensive list https://min.io/docs/minio/linux/reference/minio-server/minio-server.html + ## Please refer for comprehensive list https://docs.min.io/community/minio-object-store/reference/minio-server/minio-server.html ## MINIO_SUBNET_LICENSE: "License key obtained from https://subnet.min.io" ## MINIO_BROWSER: "off" @@ -480,7 +527,7 @@ extraSecret: ~ ## OpenID Identity Management ## The following section documents environment variables for enabling external identity management using an OpenID Connect (OIDC)-compatible provider. -## See https://min.io/docs/minio/linux/operations/external-iam/configure-openid-external-identity-management.html for a tutorial on using these variables. +## See https://docs.min.io/community/minio-object-store/operations/external-iam/configure-openid-external-identity-management.html for a tutorial on using these variables. oidc: enabled: false configUrl: "https://identity-provider-url/.well-known/openid-configuration" @@ -503,10 +550,35 @@ networkPolicy: # Specifies whether the policies created will be standard Network Policies (flavor: kubernetes) # or Cilium Network Policies (flavor: cilium) flavor: kubernetes + # allows external access to the minio api allowExternal: true + ## @params networkPolicy.egress configuration of the egress traffic + egress: + ## @param networkPolicy.egress.enabled When enabled, an egress network policy will be + ## created allowing minio to connect to external data sources from kubernetes cluster. + ## + enabled: false + ## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress + ## Add ports to the egress by specifying - port: + ## E.X. + ## - port: 80 + ## - port: 443 + ## - port: 53 + ## protocol: UDP + ## + ports: [] + ## @param networkPolicy.egress.to Allow egress traffic to specific destinations + ## Add destinations to the egress by specifying - ipBlock: + ## E.X. 
+ ## to: + ## - namespaceSelector: + ## matchExpressions: + ## - {key: role, operator: In, values: [minio]} + ## + to: [] # only when using flavor: cilium egressEntities: - - kube-apiserver + - kube-apiserver ## PodDisruptionBudget settings ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ @@ -534,7 +606,8 @@ metrics: # for node metrics relabelConfigs: {} # for cluster metrics - relabelConfigsCluster: {} + relabelConfigsCluster: + {} # metricRelabelings: # - regex: (server|pod) # action: labeldrop diff --git a/index.yaml b/index.yaml index da78e9bcd2653..7d62f602f6591 100644 --- a/index.yaml +++ b/index.yaml @@ -1,9 +1,75 @@ apiVersion: v1 entries: minio: + - apiVersion: v1 + appVersion: RELEASE.2024-12-18T13-15-44Z + created: "2025-01-02T21:34:25.234658257-08:00" + description: High Performance Object Storage + digest: 25fa2740480d1ebc9e64340854a6c42d3a7bc39c2a77378da91b21f144faa9af + home: https://min.io + icon: https://min.io/resources/img/logo/MINIO_wordmark.png + keywords: + - minio + - storage + - object-storage + - s3 + - cluster + maintainers: + - email: dev@minio.io + name: MinIO, Inc + name: minio + sources: + - https://github.com/minio/minio + urls: + - https://charts.min.io/helm-releases/minio-5.4.0.tgz + version: 5.4.0 + - apiVersion: v1 + appVersion: RELEASE.2024-04-18T19-09-19Z + created: "2025-01-02T21:34:25.231025201-08:00" + description: High Performance Object Storage + digest: 5f927286767c285b925a3395e75b4f372367f83d2124395185e21dc7fd4ca177 + home: https://min.io + icon: https://min.io/resources/img/logo/MINIO_wordmark.png + keywords: + - minio + - storage + - object-storage + - s3 + - cluster + maintainers: + - email: dev@minio.io + name: MinIO, Inc + name: minio + sources: + - https://github.com/minio/minio + urls: + - https://charts.min.io/helm-releases/minio-5.3.0.tgz + version: 5.3.0 + - apiVersion: v1 + appVersion: RELEASE.2024-04-18T19-09-19Z + created: "2025-01-02T21:34:25.227480037-08:00" + description: High Performance Object Storage + digest: 8ef4212d7d51be6c8192b3e91138a9ca918ca56142c42500028cfd3b80e0b2dd + home: https://min.io + icon: https://min.io/resources/img/logo/MINIO_wordmark.png + keywords: + - minio + - storage + - object-storage + - s3 + - cluster + maintainers: + - email: dev@minio.io + name: MinIO, Inc + name: minio + sources: + - https://github.com/minio/minio + urls: + - https://charts.min.io/helm-releases/minio-5.2.0.tgz + version: 5.2.0 - apiVersion: v1 appVersion: RELEASE.2024-03-03T17-50-39Z - created: "2024-03-03T10:49:25.626817653-08:00" + created: "2025-01-02T21:34:25.221946278-08:00" description: High Performance Object Storage digest: 742d658c029616f0a977f255a27e806f2e3ef31f0d30467353a0882b5607001e home: https://min.io @@ -25,7 +91,7 @@ entries: version: 5.1.0 - apiVersion: v1 appVersion: RELEASE.2024-01-11T07-46-16Z - created: "2024-03-03T10:49:25.595841639-08:00" + created: "2025-01-02T21:34:25.188561933-08:00" description: Multi-Cloud Object Storage digest: 3a2d8e03ffdd98501026aa7561633c91d9871647f4b01d77b75a2ad9b72ee618 home: https://min.io @@ -47,7 +113,7 @@ entries: version: 5.0.15 - apiVersion: v1 appVersion: RELEASE.2023-09-30T07-02-29Z - created: "2024-03-03T10:49:25.592472551-08:00" + created: "2025-01-02T21:34:25.184512596-08:00" description: Multi-Cloud Object Storage digest: 6c3656924fbad2cb17f810cd78f352f9b60626aaec64b837c96829095b215ad3 home: https://min.io @@ -69,7 +135,7 @@ entries: version: 5.0.14 - apiVersion: v1 appVersion: RELEASE.2023-07-07T07-13-57Z - created: 
"2024-03-03T10:49:25.58803621-08:00" + created: "2025-01-02T21:34:25.180913342-08:00" description: Multi-Cloud Object Storage digest: 3c18f7381efe6d86497f952e6d5f59003ee5a009c54778ddea1ee8d3c7bed9c8 home: https://min.io @@ -91,7 +157,7 @@ entries: version: 5.0.13 - apiVersion: v1 appVersion: RELEASE.2023-07-07T07-13-57Z - created: "2024-03-03T10:49:25.584896474-08:00" + created: "2025-01-02T21:34:25.177247018-08:00" description: Multi-Cloud Object Storage digest: 5318bc56c73a8f4539c3dd178f4d55c7f41bee4a25d7dc02ac6a5843eeee7976 home: https://min.io @@ -113,7 +179,7 @@ entries: version: 5.0.12 - apiVersion: v1 appVersion: RELEASE.2023-06-19T19-52-50Z - created: "2024-03-03T10:49:25.582384416-08:00" + created: "2025-01-02T21:34:25.17337971-08:00" description: Multi-Cloud Object Storage digest: cba44c8cddcda1fb5c082dce82004a39f53cc20677ab9698a6998f01efefd8db home: https://min.io @@ -135,7 +201,7 @@ entries: version: 5.0.11 - apiVersion: v1 appVersion: RELEASE.2023-05-18T00-05-36Z - created: "2024-03-03T10:49:25.580339588-08:00" + created: "2025-01-02T21:34:25.169502301-08:00" description: Multi-Cloud Object Storage digest: a3d55b12f38a2049ddf3efe35b38b6dc4e59777452b72d18d5a82f3378deb9cd home: https://min.io @@ -157,7 +223,7 @@ entries: version: 5.0.10 - apiVersion: v1 appVersion: RELEASE.2023-04-28T18-11-17Z - created: "2024-03-03T10:49:25.622484664-08:00" + created: "2025-01-02T21:34:25.218260054-08:00" description: Multi-Cloud Object Storage digest: cf98985e32675e4ce327304ea9ac61046a788b3d5190d6b501330f7803d41a11 home: https://min.io @@ -179,7 +245,7 @@ entries: version: 5.0.9 - apiVersion: v1 appVersion: RELEASE.2023-04-13T03-08-07Z - created: "2024-03-03T10:49:25.619287745-08:00" + created: "2025-01-02T21:34:25.214515045-08:00" description: Multi-Cloud Object Storage digest: 034d68f85799f6693836975797f85a91842cf2d003a6c4ff401bd4ea4c946af6 home: https://min.io @@ -201,7 +267,7 @@ entries: version: 5.0.8 - apiVersion: v1 appVersion: RELEASE.2023-02-10T18-48-39Z - created: "2024-03-03T10:49:25.616095208-08:00" + created: "2025-01-02T21:34:25.210879405-08:00" description: Multi-Cloud Object Storage digest: 3f935a310e1b5b873052629b66005c160356ca7b2bd394cb07b34dbaf9905e3f home: https://min.io @@ -223,7 +289,7 @@ entries: version: 5.0.7 - apiVersion: v1 appVersion: RELEASE.2023-02-10T18-48-39Z - created: "2024-03-03T10:49:25.612890689-08:00" + created: "2025-01-02T21:34:25.207094353-08:00" description: Multi-Cloud Object Storage digest: 82ef858ce483c2d736444792986cb36bd0fb4fc90a80b97fe30d7b2f2034d24a home: https://min.io @@ -245,7 +311,7 @@ entries: version: 5.0.6 - apiVersion: v1 appVersion: RELEASE.2023-01-31T02-24-19Z - created: "2024-03-03T10:49:25.609715082-08:00" + created: "2025-01-02T21:34:25.201959046-08:00" description: Multi-Cloud Object Storage digest: fefeea10e4e525e45f82fb80a03900d34605ec432dd92f56d94eaf4fb1b98c41 home: https://min.io @@ -267,7 +333,7 @@ entries: version: 5.0.5 - apiVersion: v1 appVersion: RELEASE.2022-12-12T19-27-27Z - created: "2024-03-03T10:49:25.605406783-08:00" + created: "2025-01-02T21:34:25.198369173-08:00" description: Multi-Cloud Object Storage digest: 6b305783c98b0b97ffab079ff4430094fd0ca6e98e82bb8153cb93033a1bf40f home: https://min.io @@ -289,7 +355,7 @@ entries: version: 5.0.4 - apiVersion: v1 appVersion: RELEASE.2022-12-12T19-27-27Z - created: "2024-03-03T10:49:25.602202025-08:00" + created: "2025-01-02T21:34:25.194953084-08:00" description: Multi-Cloud Object Storage digest: bac89157c53b324aece263c294aa49f5c9b64f426b4b06c9bca3d72e77e244f2 home: 
https://min.io @@ -311,7 +377,7 @@ entries: version: 5.0.3 - apiVersion: v1 appVersion: RELEASE.2022-12-12T19-27-27Z - created: "2024-03-03T10:49:25.59905123-08:00" + created: "2025-01-02T21:34:25.191760917-08:00" description: Multi-Cloud Object Storage digest: 935ce4f09366231b11d414d626f887fa6fa6024dd30a42e81e810ca1438d5904 home: https://min.io @@ -333,7 +399,7 @@ entries: version: 5.0.2 - apiVersion: v1 appVersion: RELEASE.2022-11-11T03-44-20Z - created: "2024-03-03T10:49:25.578253501-08:00" + created: "2025-01-02T21:34:25.164816778-08:00" description: Multi-Cloud Object Storage digest: 3e952c5d737980b8ccdfb819021eafb4b4e8da226f764a1dc3de1ba63ceb1ffa home: https://min.io @@ -355,7 +421,7 @@ entries: version: 5.0.1 - apiVersion: v1 appVersion: RELEASE.2022-10-24T18-35-07Z - created: "2024-03-03T10:49:25.575070418-08:00" + created: "2025-01-02T21:34:25.16141762-08:00" description: Multi-Cloud Object Storage digest: 6215c800d84fd4c40e4fb4142645fc1c6a039c251776a3cc8c11a24b9e3b59c7 home: https://min.io @@ -377,7 +443,7 @@ entries: version: 5.0.0 - apiVersion: v1 appVersion: RELEASE.2022-10-24T18-35-07Z - created: "2024-03-03T10:49:25.572343589-08:00" + created: "2025-01-02T21:34:25.157595167-08:00" description: Multi-Cloud Object Storage digest: 2d3d884490ea1127742f938bc9382844bae713caae08b3308f766f3c9000659a home: https://min.io @@ -399,7 +465,7 @@ entries: version: 4.1.0 - apiVersion: v1 appVersion: RELEASE.2022-09-17T00-09-45Z - created: "2024-03-03T10:49:25.544823241-08:00" + created: "2025-01-02T21:34:25.122758935-08:00" description: Multi-Cloud Object Storage digest: 6f16f2dbfed91ab81a7fae60b6ea32f554365bd27bf5fda55b64a0fa264f4252 home: https://min.io @@ -421,7 +487,7 @@ entries: version: 4.0.15 - apiVersion: v1 appVersion: RELEASE.2022-09-01T23-53-36Z - created: "2024-03-03T10:49:25.540359749-08:00" + created: "2025-01-02T21:34:25.118898654-08:00" description: Multi-Cloud Object Storage digest: 35d89d8f49d53ea929466fb88ee26123431326033f1387e6b2d536a629c0a398 home: https://min.io @@ -443,7 +509,7 @@ entries: version: 4.0.14 - apiVersion: v1 appVersion: RELEASE.2022-08-22T23-53-06Z - created: "2024-03-03T10:49:25.537303708-08:00" + created: "2025-01-02T21:34:25.115194076-08:00" description: Multi-Cloud Object Storage digest: 5b86937ca88d9f6046141fdc2b1cc54760435ed92d289cd0a115fa7148781d4e home: https://min.io @@ -465,7 +531,7 @@ entries: version: 4.0.13 - apiVersion: v1 appVersion: RELEASE.2022-08-13T21-54-44Z - created: "2024-03-03T10:49:25.534187767-08:00" + created: "2025-01-02T21:34:25.111485897-08:00" description: Multi-Cloud Object Storage digest: 2d9c227c0f46ea8bdef4d760c212156fd4c6623ddc5406779c569fe925527787 home: https://min.io @@ -487,7 +553,7 @@ entries: version: 4.0.12 - apiVersion: v1 appVersion: RELEASE.2022-08-05T23-27-09Z - created: "2024-03-03T10:49:25.530996789-08:00" + created: "2025-01-02T21:34:25.107832294-08:00" description: Multi-Cloud Object Storage digest: 6caaffcb636e040cd7e8bc4883a1674a673757f4781c32d53b5ec0f41fea3944 home: https://min.io @@ -509,7 +575,7 @@ entries: version: 4.0.11 - apiVersion: v1 appVersion: RELEASE.2022-08-02T23-59-16Z - created: "2024-03-03T10:49:25.527836582-08:00" + created: "2025-01-02T21:34:25.103772055-08:00" description: Multi-Cloud Object Storage digest: 841d87788fb094d6a7d8a91e91821fe1e847bc952e054c781fc93742d112e18a home: https://min.io @@ -531,7 +597,7 @@ entries: version: 4.0.10 - apiVersion: v1 appVersion: RELEASE.2022-08-02T23-59-16Z - created: "2024-03-03T10:49:25.569597527-08:00" + created: 
"2025-01-02T21:34:25.153995865-08:00" description: Multi-Cloud Object Storage digest: 6f1a78382df3215deac07495a5e7de7009a1153b4cf6cb565630652a69aec4cf home: https://min.io @@ -553,7 +619,7 @@ entries: version: 4.0.9 - apiVersion: v1 appVersion: RELEASE.2022-07-29T19-40-48Z - created: "2024-03-03T10:49:25.566489402-08:00" + created: "2025-01-02T21:34:25.150678505-08:00" description: Multi-Cloud Object Storage digest: d11db37963636922cb778b6bc0ad2ca4724cb391ea7b785995ada52467d7dd83 home: https://min.io @@ -575,7 +641,7 @@ entries: version: 4.0.8 - apiVersion: v1 appVersion: RELEASE.2022-07-26T00-53-03Z - created: "2024-03-03T10:49:25.563659143-08:00" + created: "2025-01-02T21:34:25.146767779-08:00" description: Multi-Cloud Object Storage digest: ca775e08c84331bb5029d4d29867d30c16e2c62e897788eb432212a756e91e4e home: https://min.io @@ -597,7 +663,7 @@ entries: version: 4.0.7 - apiVersion: v1 appVersion: RELEASE.2022-05-08T23-50-31Z - created: "2024-03-03T10:49:25.559524383-08:00" + created: "2025-01-02T21:34:25.142770749-08:00" description: Multi-Cloud Object Storage digest: 06542b8f3d149d5908b15de9a8d6f8cf304af0213830be56dc315785d14f9ccd home: https://min.io @@ -619,7 +685,7 @@ entries: version: 4.0.6 - apiVersion: v1 appVersion: RELEASE.2022-05-08T23-50-31Z - created: "2024-03-03T10:49:25.556709841-08:00" + created: "2025-01-02T21:34:25.139151034-08:00" description: Multi-Cloud Object Storage digest: dd2676362f067454a496cdd293609d0c904b08f521625af49f95402a024ba1f5 home: https://min.io @@ -641,7 +707,7 @@ entries: version: 4.0.5 - apiVersion: v1 appVersion: RELEASE.2022-05-08T23-50-31Z - created: "2024-03-03T10:49:25.553735023-08:00" + created: "2025-01-02T21:34:25.135573416-08:00" description: Multi-Cloud Object Storage digest: bab9ef192d4eda4c572ad0ce0cf551736c847f582d1837d6833ee10543c23167 home: https://min.io @@ -663,7 +729,7 @@ entries: version: 4.0.4 - apiVersion: v1 appVersion: RELEASE.2022-05-08T23-50-31Z - created: "2024-03-03T10:49:25.55082388-08:00" + created: "2025-01-02T21:34:25.132238833-08:00" description: Multi-Cloud Object Storage digest: c770bb9841c76576e4e8573f78b0ec33e0d729504c9667e67ad62d48df5ed64c home: https://min.io @@ -685,7 +751,7 @@ entries: version: 4.0.3 - apiVersion: v1 appVersion: RELEASE.2022-05-08T23-50-31Z - created: "2024-03-03T10:49:25.547870111-08:00" + created: "2025-01-02T21:34:25.128974045-08:00" description: Multi-Cloud Object Storage digest: 95835f4199d963e2a23a2493610b348e6f2ff8b71c1a648c4a3b84af9b7a83eb home: https://min.io @@ -707,7 +773,7 @@ entries: version: 4.0.2 - apiVersion: v1 appVersion: RELEASE.2022-04-30T22-23-53Z - created: "2024-03-03T10:49:25.5235032-08:00" + created: "2025-01-02T21:34:25.099393644-08:00" description: Multi-Cloud Object Storage digest: 55a088c403b056e1f055a97426aa11759c3d6cbad38face170fe6cbbec7d568f home: https://min.io @@ -729,7 +795,7 @@ entries: version: 4.0.1 - apiVersion: v1 appVersion: RELEASE.2022-04-26T01-20-24Z - created: "2024-03-03T10:49:25.520382818-08:00" + created: "2025-01-02T21:34:25.095908528-08:00" description: Multi-Cloud Object Storage digest: f541237e24336ec3f7f45ae0d523fef694e3a2f9ef648c5b11c15734db6ba2b2 home: https://min.io @@ -751,7 +817,7 @@ entries: version: 4.0.0 - apiVersion: v1 appVersion: RELEASE.2022-04-16T04-26-02Z - created: "2024-03-03T10:49:25.517336462-08:00" + created: "2025-01-02T21:34:25.092803423-08:00" description: Multi-Cloud Object Storage digest: edc0c3dd6d5246a06b74ba16bb4aff80a6d7225dc9aecf064fd89a8af371b9c1 home: https://min.io @@ -773,7 +839,7 @@ entries: version: 3.6.6 - 
apiVersion: v1 appVersion: RELEASE.2022-04-12T06-55-35Z - created: "2024-03-03T10:49:25.514298369-08:00" + created: "2025-01-02T21:34:25.089672015-08:00" description: Multi-Cloud Object Storage digest: 211e89f6b9eb0b9a3583abaa127be60e1f9717a098e6b2858cb9dc1cc50c1650 home: https://min.io @@ -795,7 +861,7 @@ entries: version: 3.6.5 - apiVersion: v1 appVersion: RELEASE.2022-04-09T15-09-52Z - created: "2024-03-03T10:49:25.511254943-08:00" + created: "2025-01-02T21:34:25.086239968-08:00" description: Multi-Cloud Object Storage digest: 534a879d73b370a18b554b93d0930e1c115419619c4ce4ec7dbaae632acacf06 home: https://min.io @@ -817,7 +883,7 @@ entries: version: 3.6.4 - apiVersion: v1 appVersion: RELEASE.2022-03-24T00-43-44Z - created: "2024-03-03T10:49:25.508174549-08:00" + created: "2025-01-02T21:34:25.081664315-08:00" description: Multi-Cloud Object Storage digest: 99508b20eb0083a567dcccaf9a6c237e09575ed1d70cd2e8333f89c472d13d75 home: https://min.io @@ -839,7 +905,7 @@ entries: version: 3.6.3 - apiVersion: v1 appVersion: RELEASE.2022-03-17T06-34-49Z - created: "2024-03-03T10:49:25.503349188-08:00" + created: "2025-01-02T21:34:25.078433537-08:00" description: Multi-Cloud Object Storage digest: b4cd25611ca322b1d23d23112fdfa6b068fd91eefe0b0663b88ff87ea4282495 home: https://min.io @@ -861,7 +927,7 @@ entries: version: 3.6.2 - apiVersion: v1 appVersion: RELEASE.2022-03-14T18-25-24Z - created: "2024-03-03T10:49:25.500396776-08:00" + created: "2025-01-02T21:34:25.075113944-08:00" description: Multi-Cloud Object Storage digest: d75b88162bfe54740a233bcecf87328bba2ae23d170bec3a35c828bc6fdc224c home: https://min.io @@ -883,7 +949,7 @@ entries: version: 3.6.1 - apiVersion: v1 appVersion: RELEASE.2022-03-11T23-57-45Z - created: "2024-03-03T10:49:25.497558088-08:00" + created: "2025-01-02T21:34:25.07170837-08:00" description: Multi-Cloud Object Storage digest: 22e53a1184a21a679bc7d8b94e955777f3506340fc29da5ab0cb6d729bdbde8d home: https://min.io @@ -905,7 +971,7 @@ entries: version: 3.6.0 - apiVersion: v1 appVersion: RELEASE.2022-03-03T21-21-16Z - created: "2024-03-03T10:49:25.494829479-08:00" + created: "2025-01-02T21:34:25.067175653-08:00" description: Multi-Cloud Object Storage digest: 6fda968d3fdfd60470c0055a4e1a3bd8e5aee9ad0af5ba2fb7b7b926fdc9e4a0 home: https://min.io @@ -927,7 +993,7 @@ entries: version: 3.5.9 - apiVersion: v1 appVersion: RELEASE.2022-02-26T02-54-46Z - created: "2024-03-03T10:49:25.492044888-08:00" + created: "2025-01-02T21:34:25.063997563-08:00" description: Multi-Cloud Object Storage digest: 8e015369048a3a82bbd53ad36696786f18561c6b25d14eee9e2c93a7336cef46 home: https://min.io @@ -949,7 +1015,7 @@ entries: version: 3.5.8 - apiVersion: v1 appVersion: RELEASE.2022-02-18T01-50-10Z - created: "2024-03-03T10:49:25.487976635-08:00" + created: "2025-01-02T21:34:25.058867444-08:00" description: Multi-Cloud Object Storage digest: cb3543fe748e5f0d59b3ccf4ab9af8e10b731405ae445d1f5715e30013632373 home: https://min.io @@ -971,7 +1037,7 @@ entries: version: 3.5.7 - apiVersion: v1 appVersion: RELEASE.2022-02-18T01-50-10Z - created: "2024-03-03T10:49:25.485021417-08:00" + created: "2025-01-02T21:34:25.055866713-08:00" description: Multi-Cloud Object Storage digest: f2e359fa5eefffc59abb3d14a8fa94b11ddeaa99f6cd8dd5f40f4e04121000d6 home: https://min.io @@ -993,7 +1059,7 @@ entries: version: 3.5.6 - apiVersion: v1 appVersion: RELEASE.2022-02-16T00-35-27Z - created: "2024-03-03T10:49:25.482183453-08:00" + created: "2025-01-02T21:34:25.052552978-08:00" description: Multi-Cloud Object Storage digest: 
529d56cca9d83a3d0e5672e63b6e87b5bcbe10a6b45f7a55ba998cceb32f9c81 home: https://min.io @@ -1015,7 +1081,7 @@ entries: version: 3.5.5 - apiVersion: v1 appVersion: RELEASE.2022-02-12T00-51-25Z - created: "2024-03-03T10:49:25.4794088-08:00" + created: "2025-01-02T21:34:25.049153108-08:00" description: Multi-Cloud Object Storage digest: 3d530598f8ece67bec5b7f990d206584893987c713502f9228e4ee24b5535414 home: https://min.io @@ -1037,7 +1103,7 @@ entries: version: 3.5.4 - apiVersion: v1 appVersion: RELEASE.2022-02-12T00-51-25Z - created: "2024-03-03T10:49:25.476663037-08:00" + created: "2025-01-02T21:34:25.045984459-08:00" description: Multi-Cloud Object Storage digest: 53937031348b29615f07fc4869b2d668391d8ba9084630a497abd7a7dea9dfb0 home: https://min.io @@ -1059,7 +1125,7 @@ entries: version: 3.5.3 - apiVersion: v1 appVersion: RELEASE.2022-02-07T08-17-33Z - created: "2024-03-03T10:49:25.474126134-08:00" + created: "2025-01-02T21:34:25.042945494-08:00" description: Multi-Cloud Object Storage digest: 68d643414ff0d565716c5715034fcbf1af262e041915a5c02eb51ec1a65c1ea0 home: https://min.io @@ -1081,7 +1147,7 @@ entries: version: 3.5.2 - apiVersion: v1 appVersion: RELEASE.2022-02-01T18-00-14Z - created: "2024-03-03T10:49:25.471322177-08:00" + created: "2025-01-02T21:34:25.038683645-08:00" description: Multi-Cloud Object Storage digest: a3e855ed0f31233b989fffd775a29d6fbfa0590089010ff16783fd7f142ef6e7 home: https://min.io @@ -1103,7 +1169,7 @@ entries: version: 3.5.1 - apiVersion: v1 appVersion: RELEASE.2022-02-01T18-00-14Z - created: "2024-03-03T10:49:25.467606554-08:00" + created: "2025-01-02T21:34:25.03587265-08:00" description: Multi-Cloud Object Storage digest: b1b0ae3c54b4260a698753e11d7781bb8ddc67b7e3fbf0af82796e4cd4ef92a3 home: https://min.io @@ -1125,7 +1191,7 @@ entries: version: 3.5.0 - apiVersion: v1 appVersion: RELEASE.2022-01-28T02-28-16Z - created: "2024-03-03T10:49:25.465056679-08:00" + created: "2025-01-02T21:34:25.032826604-08:00" description: Multi-Cloud Object Storage digest: fecf25d2d3fb208c6f894fed642a60780a570b7f6d0adddde846af7236dc80aa home: https://min.io @@ -1147,7 +1213,7 @@ entries: version: 3.4.8 - apiVersion: v1 appVersion: RELEASE.2022-01-25T19-56-04Z - created: "2024-03-03T10:49:25.462564387-08:00" + created: "2025-01-02T21:34:25.029589236-08:00" description: Multi-Cloud Object Storage digest: c78008caa5ce98f64c887630f59d0cbd481cb3f19a7d4e9d3e81bf4e1e45cadc home: https://min.io @@ -1169,7 +1235,7 @@ entries: version: 3.4.7 - apiVersion: v1 appVersion: RELEASE.2022-01-08T03-11-54Z - created: "2024-03-03T10:49:25.459983463-08:00" + created: "2025-01-02T21:34:25.026512118-08:00" description: Multi-Cloud Object Storage digest: 8f2e2691bf897f74ff094dd370ec56ba9d417e5e8926710c14c2ba346330238d home: https://min.io @@ -1191,7 +1257,7 @@ entries: version: 3.4.6 - apiVersion: v1 appVersion: RELEASE.2022-01-04T07-41-07Z - created: "2024-03-03T10:49:25.457436846-08:00" + created: "2025-01-02T21:34:25.023266957-08:00" description: Multi-Cloud Object Storage digest: bacd140f0016fab35f516bde787da6449b3a960c071fad9e4b6563118033ac84 home: https://min.io @@ -1213,7 +1279,7 @@ entries: version: 3.4.5 - apiVersion: v1 appVersion: RELEASE.2021-12-29T06-49-06Z - created: "2024-03-03T10:49:25.454730706-08:00" + created: "2025-01-02T21:34:25.020285989-08:00" description: Multi-Cloud Object Storage digest: 48a453ea5ffeef25933904caefd9470bfb26224dfc2d1096bd0031467ba53007 home: https://min.io @@ -1235,7 +1301,7 @@ entries: version: 3.4.4 - apiVersion: v1 appVersion: RELEASE.2021-12-20T22-07-16Z - 
created: "2024-03-03T10:49:25.449934548-08:00" + created: "2025-01-02T21:34:25.014477173-08:00" description: Multi-Cloud Object Storage digest: 47ef4a930713b98f9438ceca913c6e700f85bb25dba5624b056486254b5f0c60 home: https://min.io @@ -1257,7 +1323,7 @@ entries: version: 3.4.3 - apiVersion: v1 appVersion: RELEASE.2021-12-20T22-07-16Z - created: "2024-03-03T10:49:25.447188067-08:00" + created: "2025-01-02T21:34:25.011715909-08:00" description: Multi-Cloud Object Storage digest: d6763f7e2ea66810bd55eb225579a9c3b968f9ae1256f45fd469362e55d846ff home: https://min.io @@ -1279,7 +1345,7 @@ entries: version: 3.4.2 - apiVersion: v1 appVersion: RELEASE.2021-12-10T23-03-39Z - created: "2024-03-03T10:49:25.44470331-08:00" + created: "2025-01-02T21:34:25.009018639-08:00" description: Multi-Cloud Object Storage digest: 2fb822c87216ba3fc2ae51a54a0a3e239aa560d86542991504a841cc2a2b9a37 home: https://min.io @@ -1301,7 +1367,7 @@ entries: version: 3.4.1 - apiVersion: v1 appVersion: RELEASE.2021-12-18T04-42-33Z - created: "2024-03-03T10:49:25.442546757-08:00" + created: "2025-01-02T21:34:25.006295652-08:00" description: Multi-Cloud Object Storage digest: fa8ba1aeb1a15316c6be8403416a5e6b5e6139b7166592087e7bddc9e6db5453 home: https://min.io @@ -1323,7 +1389,7 @@ entries: version: 3.4.0 - apiVersion: v1 appVersion: RELEASE.2021-12-10T23-03-39Z - created: "2024-03-03T10:49:25.440656475-08:00" + created: "2025-01-02T21:34:25.003243793-08:00" description: Multi-Cloud Object Storage digest: b9b0af9ca50b8d00868e1f1b989dca275829d9110af6de91bb9b3a398341e894 home: https://min.io @@ -1345,7 +1411,7 @@ entries: version: 3.3.4 - apiVersion: v1 appVersion: RELEASE.2021-12-10T23-03-39Z - created: "2024-03-03T10:49:25.438721455-08:00" + created: "2025-01-02T21:34:24.999956538-08:00" description: Multi-Cloud Object Storage digest: f8b22a5b8fe95a7ddf61b825e17d11c9345fb10e4c126b0d78381608aa300a08 home: https://min.io @@ -1367,7 +1433,7 @@ entries: version: 3.3.3 - apiVersion: v1 appVersion: RELEASE.2021-12-10T23-03-39Z - created: "2024-03-03T10:49:25.435168243-08:00" + created: "2025-01-02T21:34:24.995166842-08:00" description: Multi-Cloud Object Storage digest: c48d474f269427abe5ab446f00687d0625b3d1adfc5c73bdb4b21ca9e42853fb home: https://min.io @@ -1389,7 +1455,7 @@ entries: version: 3.3.2 - apiVersion: v1 appVersion: RELEASE.2021-11-24T23-19-33Z - created: "2024-03-03T10:49:25.432802184-08:00" + created: "2025-01-02T21:34:24.992276741-08:00" description: Multi-Cloud Object Storage digest: 7c3da39d9b0090cbf5efedf0cc163a1e2df05becc5152c3add8e837384690bc4 home: https://min.io @@ -1411,7 +1477,7 @@ entries: version: 3.3.1 - apiVersion: v1 appVersion: RELEASE.2021-11-24T23-19-33Z - created: "2024-03-03T10:49:25.430459679-08:00" + created: "2025-01-02T21:34:24.989284049-08:00" description: Multi-Cloud Object Storage digest: 50d6590b4cc779c40f81cc13b1586fbe508aa7f3230036c760bfc5f4154fbce4 home: https://min.io @@ -1433,7 +1499,7 @@ entries: version: 3.3.0 - apiVersion: v1 appVersion: RELEASE.2021-10-13T00-23-17Z - created: "2024-03-03T10:49:25.428100419-08:00" + created: "2025-01-02T21:34:24.986516619-08:00" description: Multi-Cloud Object Storage digest: 5b797b7208cd904c11a76cd72938c8652160cb5fcd7f09fa41e4e703e6d64054 home: https://min.io @@ -1455,7 +1521,7 @@ entries: version: 3.2.0 - apiVersion: v1 appVersion: RELEASE.2021-10-10T16-53-30Z - created: "2024-03-03T10:49:25.425697209-08:00" + created: "2025-01-02T21:34:24.983573512-08:00" description: Multi-Cloud Object Storage digest: 
e084ac4bb095f071e59f8f08bd092e4ab2404c1ddadacfdce7dbe248f1bafff8 home: https://min.io @@ -1477,7 +1543,7 @@ entries: version: 3.1.9 - apiVersion: v1 appVersion: RELEASE.2021-10-06T23-36-31Z - created: "2024-03-03T10:49:25.423363654-08:00" + created: "2025-01-02T21:34:24.980470597-08:00" description: Multi-Cloud Object Storage digest: 2890430a8d9487d1fa5508c26776e4881d0086b2c052aa6bdc65c0e4423b9159 home: https://min.io @@ -1499,7 +1565,7 @@ entries: version: 3.1.8 - apiVersion: v1 appVersion: RELEASE.2021-10-02T16-31-05Z - created: "2024-03-03T10:49:25.420722657-08:00" + created: "2025-01-02T21:34:24.977221503-08:00" description: Multi-Cloud Object Storage digest: 01a92196af6c47e3a01e1c68d7cf693a8bc487cba810c2cecff155071e4d6a11 home: https://min.io @@ -1521,7 +1587,7 @@ entries: version: 3.1.7 - apiVersion: v1 appVersion: RELEASE.2021-09-18T18-09-59Z - created: "2024-03-03T10:49:25.416529039-08:00" + created: "2025-01-02T21:34:24.972867415-08:00" description: Multi-Cloud Object Storage digest: e779d73f80b75f33b9c9d995ab10fa455c9c57ee575ebc54e06725a64cd04310 home: https://min.io @@ -1543,7 +1609,7 @@ entries: version: 3.1.6 - apiVersion: v1 appVersion: RELEASE.2021-09-18T18-09-59Z - created: "2024-03-03T10:49:25.414014562-08:00" + created: "2025-01-02T21:34:24.969718459-08:00" description: Multi-Cloud Object Storage digest: 19de4bbc8a400f0c2a94c5e85fc25c9bfc666e773fb3e368dd621d5a57dd1c2a home: https://min.io @@ -1565,7 +1631,7 @@ entries: version: 3.1.5 - apiVersion: v1 appVersion: RELEASE.2021-09-18T18-09-59Z - created: "2024-03-03T10:49:25.411477125-08:00" + created: "2025-01-02T21:34:24.966608057-08:00" description: Multi-Cloud Object Storage digest: f789d93a171296dd01af0105a5ce067c663597afbb2432faeda293b752b355c0 home: https://min.io @@ -1587,7 +1653,7 @@ entries: version: 3.1.4 - apiVersion: v1 appVersion: RELEASE.2021-09-09T21-37-07Z - created: "2024-03-03T10:49:25.408929058-08:00" + created: "2025-01-02T21:34:24.963751369-08:00" description: Multi-Cloud Object Storage digest: e2eb34d31560b012ef6581f0ff6004ea4376c968cbe0daed2d8f3a614a892afb home: https://min.io @@ -1609,7 +1675,7 @@ entries: version: 3.1.3 - apiVersion: v1 appVersion: RELEASE.2021-09-09T21-37-07Z - created: "2024-03-03T10:49:25.4063768-08:00" + created: "2025-01-02T21:34:24.960755082-08:00" description: Multi-Cloud Object Storage digest: 8d7e0cc46b3583abd71b97dc0c071f98321101f90eca17348f1e9e0831be64cd home: https://min.io @@ -1631,7 +1697,7 @@ entries: version: 3.1.2 - apiVersion: v1 appVersion: RELEASE.2021-09-09T21-37-07Z - created: "2024-03-03T10:49:25.403782855-08:00" + created: "2025-01-02T21:34:24.957713429-08:00" description: Multi-Cloud Object Storage digest: 50dcbf366b1b21f4a6fc429d0b884c0c7ff481d0fb95c5e9b3ae157c348dd124 home: https://min.io @@ -1653,7 +1719,7 @@ entries: version: 3.1.1 - apiVersion: v1 appVersion: RELEASE.2021-09-09T21-37-07Z - created: "2024-03-03T10:49:25.401038051-08:00" + created: "2025-01-02T21:34:24.954546983-08:00" description: Multi-Cloud Object Storage digest: 6c01af55d2e2e5f716eabf6fef3a92a8464d0674529e9bacab292e5478a73b7a home: https://min.io @@ -1675,7 +1741,7 @@ entries: version: 3.1.0 - apiVersion: v1 appVersion: RELEASE.2021-09-03T03-56-13Z - created: "2024-03-03T10:49:25.396809855-08:00" + created: "2025-01-02T21:34:24.949999464-08:00" description: Multi-Cloud Object Storage digest: 18e10be4d0458bc590ca9abf753227e0c70f60511495387b8d4fb15a4daf932e home: https://min.io @@ -1697,7 +1763,7 @@ entries: version: 3.0.2 - apiVersion: v1 appVersion: RELEASE.2021-08-31T05-46-54Z - 
created: "2024-03-03T10:49:25.394352363-08:00" + created: "2025-01-02T21:34:24.947018538-08:00" description: Multi-Cloud Object Storage digest: f5b6e7f6272a9e71aef3b75555f6f756a39eef65cb78873f26451dba79b19906 home: https://min.io @@ -1719,7 +1785,7 @@ entries: version: 3.0.1 - apiVersion: v1 appVersion: RELEASE.2021-08-31T05-46-54Z - created: "2024-03-03T10:49:25.391872352-08:00" + created: "2025-01-02T21:34:24.943547135-08:00" description: Multi-Cloud Object Storage digest: 6d2ee1336c412affaaf209fdb80215be2a6ebb23ab2443adbaffef9e7df13fab home: https://min.io @@ -1741,7 +1807,7 @@ entries: version: 3.0.0 - apiVersion: v1 appVersion: RELEASE.2021-08-31T05-46-54Z - created: "2024-03-03T10:49:25.389401038-08:00" + created: "2025-01-02T21:34:24.940463458-08:00" description: Multi-Cloud Object Storage digest: 0a004aaf5bb61deed6a5c88256d1695ebe2f9ff1553874a93e4acfd75e8d339b home: https://min.io @@ -1761,7 +1827,7 @@ entries: version: 2.0.1 - apiVersion: v1 appVersion: RELEASE.2021-08-25T00-41-18Z - created: "2024-03-03T10:49:25.387120583-08:00" + created: "2025-01-02T21:34:24.937381269-08:00" description: Multi-Cloud Object Storage digest: fcd944e837ee481307de6aa3d387ea18c234f995a84c15abb211aab4a4054afc home: https://min.io @@ -1781,7 +1847,7 @@ entries: version: 2.0.0 - apiVersion: v1 appVersion: RELEASE.2021-08-25T00-41-18Z - created: "2024-03-03T10:49:25.385078602-08:00" + created: "2025-01-02T21:34:24.934337395-08:00" description: Multi-Cloud Object Storage digest: 7b6c033d43a856479eb493ab8ca05b230f77c3e42e209e8f298fac6af1a9796f home: https://min.io @@ -1801,7 +1867,7 @@ entries: version: 1.0.5 - apiVersion: v1 appVersion: RELEASE.2021-08-25T00-41-18Z - created: "2024-03-03T10:49:25.382931888-08:00" + created: "2025-01-02T21:34:24.931230726-08:00" description: Multi-Cloud Object Storage digest: abd221245ace16c8e0c6c851cf262d1474a5219dcbf25c4b2e7b77142f9c59ed home: https://min.io @@ -1821,7 +1887,7 @@ entries: version: 1.0.4 - apiVersion: v1 appVersion: RELEASE.2021-08-20T18-32-01Z - created: "2024-03-03T10:49:25.379042892-08:00" + created: "2025-01-02T21:34:24.926018385-08:00" description: Multi-Cloud Object Storage digest: 922a333f5413d1042f7aa81929f43767f6ffca9b260c46713f04ce1dda86d57d home: https://min.io @@ -1841,7 +1907,7 @@ entries: version: 1.0.3 - apiVersion: v1 appVersion: RELEASE.2021-08-20T18-32-01Z - created: "2024-03-03T10:49:25.376451906-08:00" + created: "2025-01-02T21:34:24.924448521-08:00" description: High Performance, Kubernetes Native Object Storage digest: 10e22773506bbfb1c66442937956534cf4057b94f06a977db78b8cd223588388 home: https://min.io @@ -1861,7 +1927,7 @@ entries: version: 1.0.2 - apiVersion: v1 appVersion: RELEASE.2021-08-20T18-32-01Z - created: "2024-03-03T10:49:25.373621509-08:00" + created: "2025-01-02T21:34:24.923185443-08:00" description: High Performance, Kubernetes Native Object Storage digest: ef86ab6df23d6942705da9ef70991b649638c51bc310587d37a425268ba4a06c home: https://min.io @@ -1881,7 +1947,7 @@ entries: version: 1.0.1 - apiVersion: v1 appVersion: RELEASE.2021-08-17T20-53-08Z - created: "2024-03-03T10:49:25.371031843-08:00" + created: "2025-01-02T21:34:24.921774338-08:00" description: High Performance, Kubernetes Native Object Storage digest: 1add7608692cbf39aaf9b1252530e566f7b2f306a14e390b0f49b97a20f2b188 home: https://min.io @@ -1899,4 +1965,4 @@ entries: urls: - https://charts.min.io/helm-releases/minio-1.0.0.tgz version: 1.0.0 -generated: "2024-03-03T10:49:25.368065655-08:00" +generated: "2025-01-02T21:34:24.920106038-08:00" diff --git 
a/internal/amztime/iso8601_time_test.go b/internal/amztime/iso8601_time_test.go index 73270a4e7d853..34c60204c7021 100644 --- a/internal/amztime/iso8601_time_test.go +++ b/internal/amztime/iso8601_time_test.go @@ -46,7 +46,6 @@ func TestISO8601Format(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run(testCase.expectedOutput, func(t *testing.T) { gotOutput := ISO8601Format(testCase.date) t.Log("Go", testCase.date.Format(iso8601TimeFormat)) diff --git a/internal/amztime/parse_test.go b/internal/amztime/parse_test.go index f5716e3c05872..144ef81c6b1c2 100644 --- a/internal/amztime/parse_test.go +++ b/internal/amztime/parse_test.go @@ -44,7 +44,6 @@ func TestParse(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run(testCase.timeStr, func(t *testing.T) { gott, goterr := Parse(testCase.timeStr) if !errors.Is(goterr, testCase.expectedErr) { diff --git a/internal/arn/arn.go b/internal/arn/arn.go index 291a53b331b74..4f40748c715e5 100644 --- a/internal/arn/arn.go +++ b/internal/arn/arn.go @@ -18,6 +18,7 @@ package arn import ( + "errors" "fmt" "regexp" "strings" @@ -31,30 +32,19 @@ import ( // // Reference: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html -type arnPartition string - -const ( - arnPartitionMinio arnPartition = "minio" -) - -type arnService string - const ( - arnServiceIAM arnService = "iam" -) - -type arnResourceType string - -const ( - arnResourceTypeRole arnResourceType = "role" + arnPrefixArn = "arn" + arnPartitionMinio = "minio" + arnServiceIAM = "iam" + arnResourceTypeRole = "role" ) // ARN - representation of resources based on AWS ARNs. type ARN struct { - Partition arnPartition - Service arnService + Partition string + Service string Region string - ResourceType arnResourceType + ResourceType string ResourceID string } @@ -65,7 +55,7 @@ var validResourceIDRegex = regexp.MustCompile(`[A-Za-z0-9_/\.-]+$`) // NewIAMRoleARN - returns an ARN for a role in MinIO. func NewIAMRoleARN(resourceID, serverRegion string) (ARN, error) { if !validResourceIDRegex.MatchString(resourceID) { - return ARN{}, fmt.Errorf("Invalid resource ID: %s", resourceID) + return ARN{}, fmt.Errorf("invalid resource ID: %s", resourceID) } return ARN{ Partition: arnPartitionMinio, @@ -80,12 +70,12 @@ func NewIAMRoleARN(resourceID, serverRegion string) (ARN, error) { func (arn ARN) String() string { return strings.Join( []string{ - "arn", - string(arn.Partition), - string(arn.Service), + arnPrefixArn, + arn.Partition, + arn.Service, arn.Region, "", // account-id is always empty in this implementation - string(arn.ResourceType) + "/" + arn.ResourceID, + arn.ResourceType + "/" + arn.ResourceID, }, ":", ) @@ -94,44 +84,42 @@ func (arn ARN) String() string { // Parse - parses an ARN string into a type. func Parse(arnStr string) (arn ARN, err error) { ps := strings.Split(arnStr, ":") - if len(ps) != 6 || - ps[0] != "arn" { - err = fmt.Errorf("Invalid ARN string format") - return + if len(ps) != 6 || ps[0] != string(arnPrefixArn) { + err = errors.New("invalid ARN string format") + return arn, err } if ps[1] != string(arnPartitionMinio) { - err = fmt.Errorf("Invalid ARN - bad partition field") - return + err = errors.New("invalid ARN - bad partition field") + return arn, err } if ps[2] != string(arnServiceIAM) { - err = fmt.Errorf("Invalid ARN - bad service field") - return + err = errors.New("invalid ARN - bad service field") + return arn, err } // ps[3] is region and is not validated here. 
If the region is invalid, // the ARN would not match any configured ARNs in the server. - if ps[4] != "" { - err = fmt.Errorf("Invalid ARN - unsupported account-id field") - return + err = errors.New("invalid ARN - unsupported account-id field") + return arn, err } res := strings.SplitN(ps[5], "/", 2) if len(res) != 2 { - err = fmt.Errorf("Invalid ARN - resource does not contain a \"/\"") - return + err = errors.New("invalid ARN - resource does not contain a \"/\"") + return arn, err } if res[0] != string(arnResourceTypeRole) { - err = fmt.Errorf("Invalid ARN: resource type is invalid.") - return + err = errors.New("invalid ARN: resource type is invalid") + return arn, err } if !validResourceIDRegex.MatchString(res[1]) { - err = fmt.Errorf("Invalid resource ID: %s", res[1]) - return + err = fmt.Errorf("invalid resource ID: %s", res[1]) + return arn, err } arn = ARN{ @@ -141,5 +129,5 @@ func Parse(arnStr string) (arn ARN, err error) { ResourceType: arnResourceTypeRole, ResourceID: res[1], } - return + return arn, err } diff --git a/internal/arn/arn_test.go b/internal/arn/arn_test.go index 59dacb2d7b2a7..7012cfa1ba980 100644 --- a/internal/arn/arn_test.go +++ b/internal/arn/arn_test.go @@ -1,6 +1,6 @@ -// Copyright (c) 2015-2023 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. // -// # This file is part of MinIO Object Storage stack +// This file is part of MinIO Object Storage stack // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU Affero General Public License as published by @@ -18,59 +18,219 @@ package arn import ( - "fmt" + "reflect" "testing" ) +func TestARN_String(t *testing.T) { + tests := []struct { + arn ARN + want string + }{ + { + arn: ARN{ + Partition: "minio", + Service: "iam", + Region: "us-east-1", + ResourceType: "role", + ResourceID: "my-role", + }, + want: "arn:minio:iam:us-east-1::role/my-role", + }, + { + arn: ARN{ + Partition: "minio", + Service: "", + Region: "us-east-1", + ResourceType: "role", + ResourceID: "my-role", + }, + want: "arn:minio::us-east-1::role/my-role", + }, + } + for _, tt := range tests { + t.Run(tt.want, func(t *testing.T) { + if got := tt.arn.String(); got != tt.want { + t.Errorf("ARN.String() = %v, want %v", got, tt.want) + } + }) + } +} + func TestNewIAMRoleARN(t *testing.T) { - testCases := []struct { - resourceID string - serverRegion string - expectedARN string - isErrExpected bool + type args struct { + resourceID string + serverRegion string + } + tests := []struct { + name string + args args + want ARN + wantErr bool }{ { - resourceID: "myrole", - serverRegion: "us-east-1", - expectedARN: "arn:minio:iam:us-east-1::role/myrole", - isErrExpected: false, + name: "valid resource ID must succeed", + args: args{ + resourceID: "my-role", + serverRegion: "us-east-1", + }, + want: ARN{ + Partition: "minio", + Service: "iam", + Region: "us-east-1", + ResourceType: "role", + ResourceID: "my-role", + }, + wantErr: false, }, { - resourceID: "myrole", - serverRegion: "", - expectedARN: "arn:minio:iam:::role/myrole", - isErrExpected: false, + name: "valid resource ID must succeed", + args: args{ + resourceID: "-my-role", + serverRegion: "us-east-1", + }, + want: ARN{ + Partition: "minio", + Service: "iam", + Region: "us-east-1", + ResourceType: "role", + ResourceID: "-my-role", + }, + wantErr: false, }, { - // Resource ID can start with a hyphen - resourceID: "-myrole", - serverRegion: "", - expectedARN: "arn:minio:iam:::role/-myrole", - isErrExpected: false, + name: "empty server region must 
succeed", + args: args{ + resourceID: "my-role", + serverRegion: "", + }, + want: ARN{ + Partition: "minio", + Service: "iam", + Region: "", + ResourceType: "role", + ResourceID: "my-role", + }, + wantErr: false, }, { - resourceID: "", - serverRegion: "", - expectedARN: "", - isErrExpected: true, + name: "empty resource ID must fail", + args: args{ + resourceID: "", + serverRegion: "us-east-1", + }, + want: ARN{}, + wantErr: true, + }, + { + name: "resource ID starting with '=' must fail", + args: args{ + resourceID: "=", + serverRegion: "us-east-1", + }, + want: ARN{}, + wantErr: true, }, } - for i, testCase := range testCases { - arn, err := NewIAMRoleARN(testCase.resourceID, testCase.serverRegion) - fmt.Println(arn, err) - if err != nil { - if !testCase.isErrExpected { - t.Errorf("Test %d: Unexpected error: %v", i+1, err) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := NewIAMRoleARN(tt.args.resourceID, tt.args.serverRegion) + if (err != nil) != tt.wantErr { + t.Errorf("NewIAMRoleARN() error = %v, wantErr %v", err, tt.wantErr) + return } - continue - } - - if testCase.isErrExpected { - t.Errorf("Test %d: Expected error but got none", i+1) - } - if arn.String() != testCase.expectedARN { - t.Errorf("Test %d: Expected ARN %s but got %s", i+1, testCase.expectedARN, arn.String()) - } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("NewIAMRoleARN() got = %v, want %v", got, tt.want) + } + }) + } +} +func TestParse(t *testing.T) { + type args struct { + arnStr string + } + tests := []struct { + name string + args args + wantArn ARN + wantErr bool + }{ + { + name: "valid ARN must succeed", + args: args{ + arnStr: "arn:minio:iam:us-east-1::role/my-role", + }, + wantArn: ARN{ + Partition: "minio", + Service: "iam", + Region: "us-east-1", + ResourceType: "role", + ResourceID: "my-role", + }, + wantErr: false, + }, + { + name: "valid ARN must succeed", + args: args{ + arnStr: "arn:minio:iam:us-east-1::role/-my-role", + }, + wantArn: ARN{ + Partition: "minio", + Service: "iam", + Region: "us-east-1", + ResourceType: "role", + ResourceID: "-my-role", + }, + wantErr: false, + }, + { + name: "invalid ARN length must fail", + args: args{ + arnStr: "arn:minio:", + }, + wantArn: ARN{}, + wantErr: true, + }, + { + name: "invalid ARN partition must fail", + args: args{ + arnStr: "arn:invalid:iam:us-east-1::role/my-role", + }, + wantArn: ARN{}, + wantErr: true, + }, + { + name: "invalid ARN service must fail", + args: args{ + arnStr: "arn:minio:invalid:us-east-1::role/my-role", + }, + wantArn: ARN{}, + wantErr: true, + }, + { + name: "invalid ARN resource type must fail", + args: args{ + arnStr: "arn:minio:iam:us-east-1::invalid", + }, + wantArn: ARN{}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotArn, err := Parse(tt.args.arnStr) + if err == nil && tt.wantErr { + t.Errorf("Parse() error = %v, wantErr %v", err, tt.wantErr) + } + if err != nil && !tt.wantErr { + t.Errorf("Parse() error = %v, wantErr %v", err, tt.wantErr) + } + if err == nil { + if !reflect.DeepEqual(gotArn, tt.wantArn) { + t.Errorf("Parse() gotArn = %v, want %v", gotArn, tt.wantArn) + } + } + }) } } diff --git a/internal/auth/credentials.go b/internal/auth/credentials.go index 35e2dbb8da477..764b7cad0a125 100644 --- a/internal/auth/credentials.go +++ b/internal/auth/credentials.go @@ -54,6 +54,8 @@ const ( // Total length of the alpha numeric table. 
alphaNumericTableLen = byte(len(alphaNumericTable)) + + reservedChars = "=," ) // Common errors generated for access and secret key validation. @@ -62,11 +64,17 @@ var ( ErrInvalidSecretKeyLength = fmt.Errorf("secret key length should be between %d and %d", secretKeyMinLen, secretKeyMaxLen) ErrNoAccessKeyWithSecretKey = fmt.Errorf("access key must be specified if secret key is specified") ErrNoSecretKeyWithAccessKey = fmt.Errorf("secret key must be specified if access key is specified") + ErrContainsReservedChars = fmt.Errorf("access key contains one of reserved characters '=' or ','") ) // AnonymousCredentials simply points to empty credentials var AnonymousCredentials = Credentials{} +// ContainsReservedChars - returns whether the input string contains reserved characters. +func ContainsReservedChars(s string) bool { + return strings.ContainsAny(s, reservedChars) +} + // IsAccessKeyValid - validate access key for right length. func IsAccessKeyValid(accessKey string) bool { return len(accessKey) >= accessKeyMinLen @@ -103,16 +111,16 @@ const ( // Credentials holds access and secret keys. type Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty" yaml:"accessKey"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty" yaml:"secretKey"` - SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty" yaml:"sessionToken"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty" yaml:"-"` - Status string `xml:"-" json:"status,omitempty"` - ParentUser string `xml:"-" json:"parentUser,omitempty"` - Groups []string `xml:"-" json:"groups,omitempty"` - Claims map[string]interface{} `xml:"-" json:"claims,omitempty"` - Name string `xml:"-" json:"name,omitempty"` - Description string `xml:"-" json:"description,omitempty"` + AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty" yaml:"accessKey"` + SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty" yaml:"secretKey"` + SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty" yaml:"sessionToken"` + Expiration time.Time `xml:"Expiration" json:"expiration" yaml:"-"` + Status string `xml:"-" json:"status,omitempty"` + ParentUser string `xml:"-" json:"parentUser,omitempty"` + Groups []string `xml:"-" json:"groups,omitempty"` + Claims map[string]any `xml:"-" json:"claims,omitempty"` + Name string `xml:"-" json:"name,omitempty"` + Description string `xml:"-" json:"description,omitempty"` // Deprecated: In favor of Description - when reading credentials from // storage the value of this field is placed in the Description field above @@ -156,6 +164,14 @@ func (cred Credentials) IsServiceAccount() bool { return cred.ParentUser != "" && ok } +// IsImpliedPolicy - returns if the policy is implied via ParentUser or not. +func (cred Credentials) IsImpliedPolicy() bool { + if cred.IsServiceAccount() { + return cred.Claims[iamPolicyClaimNameSA] == "inherited-policy" + } + return false +} + // IsValid - returns whether credential is valid or not. func (cred Credentials) IsValid() bool { // Verify credentials if its enabled or not set. @@ -180,7 +196,7 @@ var timeSentinel = time.Unix(0, 0).UTC() var ErrInvalidDuration = errors.New("invalid token expiry") // ExpToInt64 - convert input interface value to int64. 
-func ExpToInt64(expI interface{}) (expAt int64, err error) { +func ExpToInt64(expI any) (expAt int64, err error) { switch exp := expI.(type) { case string: expAt, err = strconv.ParseInt(exp, 10, 64) @@ -277,7 +293,7 @@ func GenerateSecretKey(length int, random io.Reader) (string, error) { } // GetNewCredentialsWithMetadata generates and returns new credential with expiry. -func GetNewCredentialsWithMetadata(m map[string]interface{}, tokenSecret string) (Credentials, error) { +func GetNewCredentialsWithMetadata(m map[string]any, tokenSecret string) (Credentials, error) { accessKey, secretKey, err := GenerateCredentials() if err != nil { return Credentials{}, err @@ -287,7 +303,7 @@ func GetNewCredentialsWithMetadata(m map[string]interface{}, tokenSecret string) // CreateNewCredentialsWithMetadata - creates new credentials using the specified access & secret keys // and generate a session token if a secret token is provided. -func CreateNewCredentialsWithMetadata(accessKey, secretKey string, m map[string]interface{}, tokenSecret string) (cred Credentials, err error) { +func CreateNewCredentialsWithMetadata(accessKey, secretKey string, m map[string]any, tokenSecret string) (cred Credentials, err error) { if len(accessKey) < accessKeyMinLen || len(accessKey) > accessKeyMaxLen { return Credentials{}, ErrInvalidAccessKeyLength } @@ -320,7 +336,7 @@ func CreateNewCredentialsWithMetadata(accessKey, secretKey string, m map[string] } // JWTSignWithAccessKey - generates a session token. -func JWTSignWithAccessKey(accessKey string, m map[string]interface{}, tokenSecret string) (string, error) { +func JWTSignWithAccessKey(accessKey string, m map[string]any, tokenSecret string) (string, error) { m["accessKey"] = accessKey jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.MapClaims(m)) return jwt.SignedString([]byte(tokenSecret)) @@ -346,7 +362,7 @@ func ExtractClaims(token, secretKey string) (*jwt.MapClaims, error) { // GetNewCredentials generates and returns new credential. func GetNewCredentials() (cred Credentials, err error) { - return GetNewCredentialsWithMetadata(map[string]interface{}{}, "") + return GetNewCredentialsWithMetadata(map[string]any{}, "") } // CreateCredentials returns new credential with the given access key and secret key. diff --git a/internal/auth/credentials_test.go b/internal/auth/credentials_test.go index 643cab31919de..9e83f4b5978ef 100644 --- a/internal/auth/credentials_test.go +++ b/internal/auth/credentials_test.go @@ -25,7 +25,7 @@ import ( func TestExpToInt64(t *testing.T) { testCases := []struct { - exp interface{} + exp any expectedFailure bool }{ {"", true}, @@ -42,7 +42,6 @@ func TestExpToInt64(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { _, err := ExpToInt64(testCase.exp) if err != nil && !testCase.expectedFailure { diff --git a/internal/bpool/bpool.go b/internal/bpool/bpool.go index d733673467ed6..fc88c3204eb4e 100644 --- a/internal/bpool/bpool.go +++ b/internal/bpool/bpool.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2023 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. // // This file is part of MinIO Object Storage stack // @@ -17,7 +17,9 @@ package bpool -import "github.com/klauspost/reedsolomon" +import ( + "github.com/klauspost/reedsolomon" +) // BytePoolCap implements a leaky pool of []byte in the form of a bounded channel. 
type BytePoolCap struct { @@ -29,11 +31,14 @@ type BytePoolCap struct { // NewBytePoolCap creates a new BytePool bounded to the given maxSize, with new // byte arrays sized based on width. func NewBytePoolCap(maxSize uint64, width int, capwidth int) (bp *BytePoolCap) { - if capwidth > 0 && capwidth < 64 { + if capwidth <= 0 { + panic("total buffer capacity must be provided") + } + if capwidth < 64 { panic("buffer capped with smaller than 64 bytes is not supported") } - if capwidth > 0 && width > capwidth { - panic("buffer length cannot be > capacity of the buffer") + if width > capwidth { + panic("minimum buffer length cannot be > capacity of the buffer") } return &BytePoolCap{ c: make(chan []byte, maxSize), @@ -52,24 +57,35 @@ func (bp *BytePoolCap) Populate() { // Get gets a []byte from the BytePool, or creates a new one if none are // available in the pool. func (bp *BytePoolCap) Get() (b []byte) { + if bp == nil { + return nil + } select { case b = <-bp.c: // reuse existing buffer default: // create new aligned buffer - if bp.wcap > 0 { - b = reedsolomon.AllocAligned(1, bp.wcap)[0][:bp.w] - } else { - b = reedsolomon.AllocAligned(1, bp.w)[0] - } + b = reedsolomon.AllocAligned(1, bp.wcap)[0][:bp.w] } - return + return b } // Put returns the given Buffer to the BytePool. func (bp *BytePoolCap) Put(b []byte) { + if bp == nil { + return + } + + if cap(b) != bp.wcap { + // someone tried to put back buffer which is not part of this buffer pool + // we simply don't put this back into pool, a modified buffer provided + // by this package is no more usable, callers make sure to not modify + // the capacity of the buffer. + return + } + select { - case bp.c <- b: + case bp.c <- b[:bp.w]: // buffer went back into pool default: // buffer didn't go back into pool, just discard @@ -78,10 +94,24 @@ func (bp *BytePoolCap) Put(b []byte) { // Width returns the width of the byte arrays in this pool. func (bp *BytePoolCap) Width() (n int) { + if bp == nil { + return 0 + } return bp.w } // WidthCap returns the cap width of the byte arrays in this pool. func (bp *BytePoolCap) WidthCap() (n int) { + if bp == nil { + return 0 + } return bp.wcap } + +// CurrentSize returns current size of buffer pool +func (bp *BytePoolCap) CurrentSize() int { + if bp == nil { + return 0 + } + return len(bp.c) * bp.w +} diff --git a/internal/bpool/bpool_test.go b/internal/bpool/bpool_test.go index 1b122284eb09c..da673017ed04e 100644 --- a/internal/bpool/bpool_test.go +++ b/internal/bpool/bpool_test.go @@ -17,7 +17,9 @@ package bpool -import "testing" +import ( + "testing" +) // Tests - bytePool functionality. 
func TestBytePool(t *testing.T) { @@ -25,20 +27,20 @@ func TestBytePool(t *testing.T) { width := 1024 capWidth := 2048 - bufPool := NewBytePoolCap(size, width, capWidth) + bp := NewBytePoolCap(size, width, capWidth) // Check the width - if bufPool.Width() != width { - t.Fatalf("bytepool width invalid: got %v want %v", bufPool.Width(), width) + if bp.Width() != width { + t.Fatalf("bytepool width invalid: got %v want %v", bp.Width(), width) } // Check with width cap - if bufPool.WidthCap() != capWidth { - t.Fatalf("bytepool capWidth invalid: got %v want %v", bufPool.WidthCap(), capWidth) + if bp.WidthCap() != capWidth { + t.Fatalf("bytepool capWidth invalid: got %v want %v", bp.WidthCap(), capWidth) } // Check that retrieved buffer are of the expected width - b := bufPool.Get() + b := bp.Get() if len(b) != width { t.Fatalf("bytepool length invalid: got %v want %v", len(b), width) } @@ -46,14 +48,14 @@ func TestBytePool(t *testing.T) { t.Fatalf("bytepool cap invalid: got %v want %v", cap(b), capWidth) } - bufPool.Put(b) + bp.Put(b) // Fill the pool beyond the capped pool size. for i := uint64(0); i < size*2; i++ { - bufPool.Put(make([]byte, bufPool.w)) + bp.Put(make([]byte, bp.w, bp.wcap)) } - b = bufPool.Get() + b = bp.Get() if len(b) != width { t.Fatalf("bytepool length invalid: got %v want %v", len(b), width) } @@ -61,31 +63,37 @@ func TestBytePool(t *testing.T) { t.Fatalf("bytepool length invalid: got %v want %v", cap(b), capWidth) } - bufPool.Put(b) - - // Close the channel so we can iterate over it. - close(bufPool.c) + bp.Put(b) // Check the size of the pool. - if uint64(len(bufPool.c)) != size { - t.Fatalf("bytepool size invalid: got %v want %v", len(bufPool.c), size) + if uint64(len(bp.c)) != size { + t.Fatalf("bytepool size invalid: got %v want %v", len(bp.c), size) } - bufPoolNoCap := NewBytePoolCap(size, width, 0) - // Check the width - if bufPoolNoCap.Width() != width { - t.Fatalf("bytepool width invalid: got %v want %v", bufPool.Width(), width) + // lets drain the buf channel first before we validate invalid buffers. + for range size { + bp.Get() // discard } - // Check with width cap - if bufPoolNoCap.WidthCap() != 0 { - t.Fatalf("bytepool capWidth invalid: got %v want %v", bufPool.WidthCap(), 0) + // Try putting some invalid buffers into pool + bp.Put(make([]byte, bp.w, bp.wcap-1)) // wrong capacity is rejected (less) + bp.Put(make([]byte, bp.w, bp.wcap+1)) // wrong capacity is rejected (more) + bp.Put(make([]byte, width)) // wrong capacity is rejected (very less) + if len(bp.c) > 0 { + t.Fatal("bytepool should have rejected invalid packets") + } + + // Try putting a short slice into pool + bp.Put(make([]byte, bp.w, bp.wcap)[:2]) + if len(bp.c) != 1 { + t.Fatal("bytepool should have accepted short slice with sufficient capacity") } - b = bufPoolNoCap.Get() + + b = bp.Get() if len(b) != width { t.Fatalf("bytepool length invalid: got %v want %v", len(b), width) } - if cap(b) != width { - t.Fatalf("bytepool length invalid: got %v want %v", cap(b), width) - } + + // Close the channel. + close(bp.c) } diff --git a/internal/kms/status-manager.go b/internal/bpool/pool.go similarity index 60% rename from internal/kms/status-manager.go rename to internal/bpool/pool.go index d005b163f3d1d..63b56eff623de 100644 --- a/internal/kms/status-manager.go +++ b/internal/bpool/pool.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2022 MinIO, Inc. +// Copyright (c) 2015-2025 MinIO, Inc. 
// // This file is part of MinIO Object Storage stack // @@ -15,18 +15,31 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -package kms +package bpool -import ( - "context" +import "sync" - "github.com/minio/kms-go/kes" -) +// Pool is a single type sync.Pool with a few extra properties: +// If New is not set Get may return the zero value of T. +type Pool[T any] struct { + New func() T + p sync.Pool +} + +// Get will retuen a new T +func (p *Pool[T]) Get() T { + v, ok := p.p.Get().(T) + if ok { + return v + } + if p.New == nil { + var t T + return t + } + return p.New() +} -// StatusManager is the generic interface that handles KMS status operations -type StatusManager interface { - // Version retrieves version information - Version(ctx context.Context) (string, error) - // APIs retrieves a list of supported API endpoints - APIs(ctx context.Context) ([]kes.API, error) +// Put a used T. +func (p *Pool[T]) Put(t T) { + p.p.Put(t) } diff --git a/internal/bucket/bandwidth/monitor.go b/internal/bucket/bandwidth/monitor.go index 74814e9ad900a..b523030bc5be4 100644 --- a/internal/bucket/bandwidth/monitor.go +++ b/internal/bucket/bandwidth/monitor.go @@ -21,6 +21,7 @@ package bandwidth import ( "context" + "slices" "sync" "time" @@ -83,12 +84,7 @@ func SelectBuckets(buckets ...string) SelectionFunction { } } return func(bucket string) bool { - for _, bkt := range buckets { - if bkt == bucket { - return true - } - } - return false + return slices.Contains(buckets, bucket) } } @@ -127,7 +123,6 @@ func (m *Monitor) getReport(selectBucket SelectionFunction) *BucketBandwidthRepo } } m.tlock.RUnlock() - } return report } diff --git a/internal/bucket/bandwidth/monitor_gen.go b/internal/bucket/bandwidth/monitor_gen.go index b0852289fb7a0..4492ca2da4e55 100644 --- a/internal/bucket/bandwidth/monitor_gen.go +++ b/internal/bucket/bandwidth/monitor_gen.go @@ -1,7 +1,7 @@ -package bandwidth - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package bandwidth + import ( "github.com/tinylib/msgp/msgp" ) @@ -38,6 +38,7 @@ func (z *BucketBandwidthReport) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z BucketBandwidthReport) EncodeMsg(en *msgp.Writer) (err error) { // map header, size 0 + _ = z err = en.Append(0x80) if err != nil { return @@ -49,6 +50,7 @@ func (z BucketBandwidthReport) EncodeMsg(en *msgp.Writer) (err error) { func (z BucketBandwidthReport) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) // map header, size 0 + _ = z o = append(o, 0x80) return } diff --git a/internal/bucket/bandwidth/monitor_gen_test.go b/internal/bucket/bandwidth/monitor_gen_test.go index 6d439b5fe580f..9f9c1e569d5d4 100644 --- a/internal/bucket/bandwidth/monitor_gen_test.go +++ b/internal/bucket/bandwidth/monitor_gen_test.go @@ -1,7 +1,7 @@ -package bandwidth - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package bandwidth + import ( "bytes" "testing" diff --git a/internal/bucket/bandwidth/monitor_test.go b/internal/bucket/bandwidth/monitor_test.go index fbeac14e75a87..4799ce073b3ba 100644 --- a/internal/bucket/bandwidth/monitor_test.go +++ b/internal/bucket/bandwidth/monitor_test.go @@ -99,7 +99,6 @@ func TestMonitor_GetReport(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run(tt.name, func(t *testing.T) { t.Parallel() thr := bucketThrottle{ diff --git a/internal/bucket/bandwidth/reader.go b/internal/bucket/bandwidth/reader.go index 3ec7653216c1f..30cb8559c0930 100644 --- a/internal/bucket/bandwidth/reader.go +++ b/internal/bucket/bandwidth/reader.go @@ -52,7 +52,7 @@ func (r *MonitoredReader) Read(buf []byte) (n int, err error) { } if r.lastErr != nil { err = r.lastErr - return + return n, err } b := r.throttle.Burst() // maximum available tokens need := len(buf) // number of bytes requested by caller @@ -64,7 +64,6 @@ func (r *MonitoredReader) Read(buf []byte) (n int, err error) { r.opts.HeaderSize = 0 need = int(math.Min(float64(b-hdr), float64(need))) // use remaining tokens towards payload tokens = need + hdr - } else { // part of header can be accommodated r.opts.HeaderSize -= b - 1 need = 1 // to ensure we read at least one byte for every Read @@ -74,19 +73,23 @@ func (r *MonitoredReader) Read(buf []byte) (n int, err error) { need = int(math.Min(float64(b), float64(need))) tokens = need } - + // reduce tokens requested according to availability + av := int(r.throttle.Tokens()) + if av < tokens && av > 0 { + tokens = av + need = int(math.Min(float64(tokens), float64(need))) + } err = r.throttle.WaitN(r.ctx, tokens) if err != nil { - return + return n, err } - n, err = r.r.Read(buf[:need]) if err != nil { r.lastErr = err - return + return n, err } r.m.updateMeasurement(r.opts.BucketOptions, uint64(tokens)) - return + return n, err } // NewMonitoredReader returns reference to a monitored reader that throttles reads to configured bandwidth for the diff --git a/internal/bucket/lifecycle/action_string.go b/internal/bucket/lifecycle/action_string.go index 0a7a55eedc651..e3f11ee5940fc 100644 --- a/internal/bucket/lifecycle/action_string.go +++ b/internal/bucket/lifecycle/action_string.go @@ -16,12 +16,13 @@ func _() { _ = x[DeleteRestoredAction-5] _ = x[DeleteRestoredVersionAction-6] _ = x[DeleteAllVersionsAction-7] - _ = x[ActionCount-8] + _ = x[DelMarkerDeleteAllVersionsAction-8] + _ = x[ActionCount-9] } -const _Action_name = "NoneActionDeleteActionDeleteVersionActionTransitionActionTransitionVersionActionDeleteRestoredActionDeleteRestoredVersionActionDeleteAllVersionsActionActionCount" +const _Action_name = "NoneActionDeleteActionDeleteVersionActionTransitionActionTransitionVersionActionDeleteRestoredActionDeleteRestoredVersionActionDeleteAllVersionsActionDelMarkerDeleteAllVersionsActionActionCount" -var _Action_index = [...]uint8{0, 10, 22, 41, 57, 80, 100, 127, 150, 161} +var _Action_index = [...]uint8{0, 10, 22, 41, 57, 80, 100, 127, 150, 182, 193} func (i Action) String() string { if i < 0 || i >= Action(len(_Action_index)-1) { diff --git a/internal/bucket/lifecycle/delmarker-expiration.go b/internal/bucket/lifecycle/delmarker-expiration.go new file mode 100644 index 0000000000000..db22d2917fac1 --- /dev/null +++ b/internal/bucket/lifecycle/delmarker-expiration.go @@ -0,0 +1,74 @@ +// Copyright (c) 2024 MinIO, Inc. 
+// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package lifecycle + +import ( + "encoding/xml" + "time" +) + +var errInvalidDaysDelMarkerExpiration = Errorf("Days must be a positive integer with DelMarkerExpiration") + +// DelMarkerExpiration used to xml encode/decode ILM action by the same name +type DelMarkerExpiration struct { + XMLName xml.Name `xml:"DelMarkerExpiration"` + Days int `xml:"Days,omitempty"` +} + +// Empty returns if a DelMarkerExpiration XML element is empty. +// Used to detect if lifecycle.Rule contained a DelMarkerExpiration element. +func (de DelMarkerExpiration) Empty() bool { + return de.Days == 0 +} + +// UnmarshalXML decodes a single XML element into a DelMarkerExpiration value +func (de *DelMarkerExpiration) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error { + type delMarkerExpiration DelMarkerExpiration + var dexp delMarkerExpiration + err := dec.DecodeElement(&dexp, &start) + if err != nil { + return err + } + + if dexp.Days <= 0 { + return errInvalidDaysDelMarkerExpiration + } + + *de = DelMarkerExpiration(dexp) + return nil +} + +// MarshalXML encodes a DelMarkerExpiration value into an XML element +func (de DelMarkerExpiration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error { + if de.Empty() { + return nil + } + + type delMarkerExpiration DelMarkerExpiration + return enc.EncodeElement(delMarkerExpiration(de), start) +} + +// NextDue returns upcoming DelMarkerExpiration date for obj if +// applicable, returns false otherwise. +func (de DelMarkerExpiration) NextDue(obj ObjectOpts) (time.Time, bool) { + if !obj.IsLatest || !obj.DeleteMarker { + return time.Time{}, false + } + + return ExpectedExpiryTime(obj.ModTime, de.Days), true +} diff --git a/internal/bucket/lifecycle/delmarker-expiration_test.go b/internal/bucket/lifecycle/delmarker-expiration_test.go new file mode 100644 index 0000000000000..8cba948c70f46 --- /dev/null +++ b/internal/bucket/lifecycle/delmarker-expiration_test.go @@ -0,0 +1,63 @@ +// Copyright (c) 2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package lifecycle + +import ( + "encoding/xml" + "fmt" + "testing" +) + +func TestDelMarkerExpParseAndValidate(t *testing.T) { + tests := []struct { + xml string + err error + }{ + { + xml: `<DelMarkerExpiration> <Days> 1 </Days> </DelMarkerExpiration>`, + err: nil, + }, + { + xml: `<DelMarkerExpiration> <Days> -1 </Days> </DelMarkerExpiration>`, + err: errInvalidDaysDelMarkerExpiration, + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("TestDelMarker-%d", i), func(t *testing.T) { + var dexp DelMarkerExpiration + var fail bool + err := xml.Unmarshal([]byte(test.xml), &dexp) + if test.err == nil { + if err != nil { + fail = true + } + } else { + if err == nil { + fail = true + } + if test.err.Error() != err.Error() { + fail = true + } + } + if fail { + t.Fatalf("Expected %v but got %v", test.err, err) + } + }) + } +} diff --git a/internal/bucket/lifecycle/error.go b/internal/bucket/lifecycle/error.go index 9676871a44bfc..c3a8572f08125 100644 --- a/internal/bucket/lifecycle/error.go +++ b/internal/bucket/lifecycle/error.go @@ -29,7 +29,7 @@ type Error struct { // Errorf - formats according to a format specifier and returns // the string as a value that satisfies error of type tagging.Error -func Errorf(format string, a ...interface{}) error { +func Errorf(format string, a ...any) error { return Error{err: fmt.Errorf(format, a...)} } diff --git a/internal/bucket/lifecycle/evaluator.go b/internal/bucket/lifecycle/evaluator.go new file mode 100644 index 0000000000000..ec6f04e64dfbe --- /dev/null +++ b/internal/bucket/lifecycle/evaluator.go @@ -0,0 +1,156 @@ +// Copyright (c) 2015-2025 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>. + +package lifecycle + +import ( + "fmt" + "time" + + objlock "github.com/minio/minio/internal/bucket/object/lock" + "github.com/minio/minio/internal/bucket/replication" +) + +// Evaluator - evaluates lifecycle policy on objects for the given lifecycle +// configuration, lock retention configuration and replication configuration. +type Evaluator struct { + policy Lifecycle + lockRetention *objlock.Retention + replCfg *replication.Config +} + +// NewEvaluator - creates a new evaluator with the given lifecycle +func NewEvaluator(policy Lifecycle) *Evaluator { + return &Evaluator{ + policy: policy, + } +} + +// WithLockRetention - sets the lock retention configuration for the evaluator +func (e *Evaluator) WithLockRetention(lr *objlock.Retention) *Evaluator { + e.lockRetention = lr + return e +} + +// WithReplicationConfig - sets the replication configuration for the evaluator +func (e *Evaluator) WithReplicationConfig(rcfg *replication.Config) *Evaluator { + e.replCfg = rcfg + return e +} + +// IsPendingReplication checks if the object is pending replication.
+func (e *Evaluator) IsPendingReplication(obj ObjectOpts) bool { + if e.replCfg == nil { + return false + } + if e.replCfg.HasActiveRules(obj.Name, true) && !obj.VersionPurgeStatus.Empty() { + return true + } + + return false +} + +// IsObjectLocked checks if it is appropriate to remove an +// object according to locking configuration when this is lifecycle/ bucket quota asking. +// (copied over from enforceRetentionForDeletion) +func (e *Evaluator) IsObjectLocked(obj ObjectOpts) bool { + if e.lockRetention == nil || !e.lockRetention.LockEnabled { + return false + } + + if obj.DeleteMarker { + return false + } + + lhold := objlock.GetObjectLegalHoldMeta(obj.UserDefined) + if lhold.Status.Valid() && lhold.Status == objlock.LegalHoldOn { + return true + } + + ret := objlock.GetObjectRetentionMeta(obj.UserDefined) + if ret.Mode.Valid() && (ret.Mode == objlock.RetCompliance || ret.Mode == objlock.RetGovernance) { + t, err := objlock.UTCNowNTP() + if err != nil { + // it is safe to assume that the object is locked when + // we can't get the current time + return true + } + if ret.RetainUntilDate.After(t) { + return true + } + } + return false +} + +// eval will return a lifecycle event for each object in objs for a given time. +func (e *Evaluator) eval(objs []ObjectOpts, now time.Time) []Event { + events := make([]Event, len(objs)) + var newerNoncurrentVersions int +loop: + for i, obj := range objs { + event := e.policy.eval(obj, now, newerNoncurrentVersions) + switch event.Action { + case DeleteAllVersionsAction, DelMarkerDeleteAllVersionsAction: + // Skip if bucket has object locking enabled; To prevent the + // possibility of violating an object retention on one of the + // noncurrent versions of this object. + if e.lockRetention != nil && e.lockRetention.LockEnabled { + event = Event{} + } else { + // No need to evaluate remaining versions' lifecycle + // events after DeleteAllVersionsAction* + events[i] = event + break loop + } + + case DeleteVersionAction, DeleteRestoredVersionAction: + // Defensive code, should never happen + if obj.VersionID == "" { + event.Action = NoneAction + } + if e.IsObjectLocked(obj) { + event = Event{} + } + + if e.IsPendingReplication(obj) { + event = Event{} + } + } + if !obj.IsLatest { + switch event.Action { + case DeleteVersionAction: + // this noncurrent version will be expired, nothing to add + default: + // this noncurrent version will be spared + newerNoncurrentVersions++ + } + } + events[i] = event + } + return events +} + +// Eval will return a lifecycle event for each object in objs +func (e *Evaluator) Eval(objs []ObjectOpts) ([]Event, error) { + if len(objs) == 0 { + return nil, nil + } + if len(objs) != objs[0].NumVersions { + return nil, fmt.Errorf("number of versions mismatch, expected %d, got %d", objs[0].NumVersions, len(objs)) + } + return e.eval(objs, time.Now().UTC()), nil +} diff --git a/internal/bucket/lifecycle/evaluator_test.go b/internal/bucket/lifecycle/evaluator_test.go new file mode 100644 index 0000000000000..8225fcd68c229 --- /dev/null +++ b/internal/bucket/lifecycle/evaluator_test.go @@ -0,0 +1,183 @@ +// Copyright (c) 2015-2025 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package lifecycle + +import ( + "fmt" + "testing" + "time" + + "github.com/google/uuid" +) + +func TestNewerNoncurrentVersions(t *testing.T) { + prepLifecycleCfg := func(tagKeys []string, retainVersions []int) Lifecycle { + var lc Lifecycle + for i := range retainVersions { + ruleID := fmt.Sprintf("rule-%d", i) + tag := Tag{ + Key: tagKeys[i], + Value: "minio", + } + lc.Rules = append(lc.Rules, Rule{ + ID: ruleID, + Status: "Enabled", + Filter: Filter{ + Tag: tag, + set: true, + }, + NoncurrentVersionExpiration: NoncurrentVersionExpiration{ + NewerNoncurrentVersions: retainVersions[i], + set: true, + }, + }) + } + return lc + } + + lc := prepLifecycleCfg([]string{"tag3", "tag4", "tag5"}, []int{3, 4, 5}) + evaluator := NewEvaluator(lc) + tagKeys := []string{"tag3", "tag3", "tag3", "tag4", "tag4", "tag5", "tag5"} + verIDs := []string{ + "0NdAikoUVNGEpCUuB9vl.XyoMftMXCSg", "19M6Z405yFZuYygnnU9jKzsOBamTZK_7", "0PmlJdFWi_9d6l_dAkWrrhP.bBgtFk6V", // spellchecker:disable-line + ".MmRalFNNJyOLymgCtQ3.qsdoYpy8qkB", "Bjb4OlMW9Agx.Nrggh15iU6frGu2CLde", "ngBmUd_cVl6ckONI9XsKGpJjzimohrzZ", // spellchecker:disable-line + "T6m1heTHLUtnByW2IOWJ3zM4JP9xXt2O", // spellchecker:disable-line + } + wantEvents := []Event{ + {Action: NoneAction}, + {Action: NoneAction}, + {Action: NoneAction}, + {Action: NoneAction}, + {Action: NoneAction}, + {Action: NoneAction}, + {Action: DeleteVersionAction}, + } + var objs []ObjectOpts + curModTime := time.Date(2025, time.February, 10, 23, 0, 0, 0, time.UTC) + for i := range tagKeys { + obj := ObjectOpts{ + Name: "obj", + VersionID: verIDs[i], + ModTime: curModTime.Add(time.Duration(-i) * time.Second), + UserTags: fmt.Sprintf("%s=minio", tagKeys[i]), + NumVersions: len(verIDs), + } + if i == 0 { + obj.IsLatest = true + } else { + obj.SuccessorModTime = curModTime.Add(time.Duration(-i+1) * time.Second) + } + objs = append(objs, obj) + } + now := time.Date(2025, time.February, 10, 23, 0, 0, 0, time.UTC) + gotEvents := evaluator.eval(objs, now) + for i := range wantEvents { + if gotEvents[i].Action != wantEvents[i].Action { + t.Fatalf("got %v, want %v", gotEvents[i], wantEvents[i]) + } + } + + lc = prepLifecycleCfg([]string{"tag3", "tag4", "tag5"}, []int{1, 2, 3}) + objs = objs[:len(objs)-1] + wantEvents = []Event{ + {Action: NoneAction}, + {Action: NoneAction}, + {Action: DeleteVersionAction}, + {Action: NoneAction}, + {Action: DeleteVersionAction}, + {Action: NoneAction}, + } + evaluator = NewEvaluator(lc) + gotEvents = evaluator.eval(objs, now) + for i := range wantEvents { + if gotEvents[i].Action != wantEvents[i].Action { + t.Fatalf("test-%d: got %v, want %v", i+1, gotEvents[i], wantEvents[i]) + } + } + + lc = Lifecycle{ + Rules: []Rule{ + { + ID: "AllVersionsExpiration", + Status: "Enabled", + Filter: Filter{}, + Expiration: Expiration{ + Days: 1, + DeleteAll: Boolean{ + val: true, + set: true, + }, + set: true, + }, + }, + }, + } + + now = time.Date(2025, time.February, 12, 23, 0, 0, 0, time.UTC) + evaluator = NewEvaluator(lc) + gotEvents = evaluator.eval(objs, now) + wantEvents = []Event{ + {Action: DeleteAllVersionsAction}, + {Action: NoneAction}, + {Action: NoneAction}, + {Action: 
NoneAction}, + {Action: NoneAction}, + {Action: NoneAction}, + } + for i := range wantEvents { + if gotEvents[i].Action != wantEvents[i].Action { + t.Fatalf("test-%d: got %v, want %v", i+1, gotEvents[i], wantEvents[i]) + } + } + + // Test with zero versions + events, err := evaluator.Eval(nil) + if len(events) != 0 || err != nil { + t.Fatal("expected no events nor error") + } +} + +func TestEmptyEvaluator(t *testing.T) { + var objs []ObjectOpts + curModTime := time.Date(2025, time.February, 10, 23, 0, 0, 0, time.UTC) + for i := range 5 { + obj := ObjectOpts{ + Name: "obj", + VersionID: uuid.New().String(), + ModTime: curModTime.Add(time.Duration(-i) * time.Second), + NumVersions: 5, + } + if i == 0 { + obj.IsLatest = true + } else { + obj.SuccessorModTime = curModTime.Add(time.Duration(-i+1) * time.Second) + } + objs = append(objs, obj) + } + + evaluator := NewEvaluator(Lifecycle{}) + events, err := evaluator.Eval(objs) + if err != nil { + t.Fatal(err) + } + for _, event := range events { + if event.Action != NoneAction { + t.Fatalf("got %v, want %v", event.Action, NoneAction) + } + } +} diff --git a/internal/bucket/lifecycle/expiration.go b/internal/bucket/lifecycle/expiration.go index 16a5fd14b2750..8acc203986e29 100644 --- a/internal/bucket/lifecycle/expiration.go +++ b/internal/bucket/lifecycle/expiration.go @@ -79,10 +79,10 @@ func (eDate *ExpirationDate) UnmarshalXML(d *xml.Decoder, startElement xml.Start return errLifecycleInvalidDate } // Allow only date timestamp specifying midnight GMT - hr, min, sec := expDate.Clock() + hr, m, sec := expDate.Clock() nsec := expDate.Nanosecond() loc := expDate.Location() - if !(hr == 0 && min == 0 && sec == 0 && nsec == 0 && loc.String() == time.UTC.String()) { + if hr != 0 || m != 0 || sec != 0 || nsec != 0 || loc.String() != time.UTC.String() { return errLifecycleDateNotMidnight } @@ -93,7 +93,7 @@ func (eDate *ExpirationDate) UnmarshalXML(d *xml.Decoder, startElement xml.Start // MarshalXML encodes expiration date if it is non-zero and encodes // empty string otherwise func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { - if eDate.Time.IsZero() { + if eDate.IsZero() { return nil } return e.EncodeElement(eDate.Format(time.RFC3339), startElement) @@ -202,7 +202,7 @@ func (e Expiration) IsDaysNull() bool { // IsDateNull returns true if date field is null func (e Expiration) IsDateNull() bool { - return e.Date.Time.IsZero() + return e.Date.IsZero() } // IsNull returns true if both date and days fields are null diff --git a/internal/bucket/lifecycle/filter.go b/internal/bucket/lifecycle/filter.go index 446397a50e1ea..6e605d3e321cb 100644 --- a/internal/bucket/lifecycle/filter.go +++ b/internal/bucket/lifecycle/filter.go @@ -49,6 +49,10 @@ type Filter struct { // MarshalXML - produces the xml representation of the Filter struct // only one of Prefix, And and Tag should be present in the output. func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if !f.set { + return nil + } + if err := e.EncodeToken(start); err != nil { return err } diff --git a/internal/bucket/lifecycle/lifecycle.go b/internal/bucket/lifecycle/lifecycle.go index 788027a778881..97c4200f27762 100644 --- a/internal/bucket/lifecycle/lifecycle.go +++ b/internal/bucket/lifecycle/lifecycle.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2021 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. 
// // This file is part of MinIO Object Storage stack // @@ -22,11 +22,13 @@ import ( "fmt" "io" "net/http" - "sort" + "slices" "strings" "time" "github.com/google/uuid" + "github.com/minio/minio/internal/bucket/object/lock" + "github.com/minio/minio/internal/bucket/replication" xhttp "github.com/minio/minio/internal/http" ) @@ -35,6 +37,7 @@ var ( errLifecycleNoRule = Errorf("Lifecycle configuration should have at least one rule") errLifecycleDuplicateID = Errorf("Rule ID must be unique. Found same ID for more than one rule") errXMLNotWellFormed = Errorf("The XML you provided was not well-formed or did not validate against our published schema") + errLifecycleBucketLocked = Errorf("ExpiredObjectAllVersions element and DelMarkerExpiration action cannot be used on an object locked bucket") ) const ( @@ -67,7 +70,8 @@ const ( DeleteRestoredVersionAction // DeleteAllVersionsAction deletes all versions when an object expires DeleteAllVersionsAction - + // DelMarkerDeleteAllVersionsAction deletes all versions when an object with delete marker as latest version expires + DelMarkerDeleteAllVersionsAction // ActionCount must be the last action and shouldn't be used as a regular action. ActionCount ) @@ -84,7 +88,7 @@ func (a Action) DeleteVersioned() bool { // DeleteAll - Returns true if the action demands deleting all versions of an object func (a Action) DeleteAll() bool { - return a == DeleteAllVersionsAction + return a == DeleteAllVersionsAction || a == DelMarkerDeleteAllVersionsAction } // Delete - Returns true if action demands delete on all objects (including restored) @@ -92,7 +96,7 @@ func (a Action) Delete() bool { if a.DeleteRestored() { return true } - return a == DeleteVersionAction || a == DeleteAction || a == DeleteAllVersionsAction + return a == DeleteVersionAction || a == DeleteAction || a == DeleteAllVersionsAction || a == DelMarkerDeleteAllVersionsAction } // Lifecycle - Configuration for bucket lifecycle. @@ -204,7 +208,6 @@ func (lc Lifecycle) HasActiveRules(prefix string) bool { if !rule.Transition.IsNull() { // this allows for Transition.Days to be zero. 
return true } - } return false } @@ -235,7 +238,7 @@ func ParseLifecycleConfig(reader io.Reader) (*Lifecycle, error) { } // Validate - validates the lifecycle configuration -func (lc Lifecycle) Validate() error { +func (lc Lifecycle) Validate(lr lock.Retention) error { // Lifecycle config can't have more than 1000 rules if len(lc.Rules) > 1000 { return errLifecycleTooManyRules @@ -250,6 +253,9 @@ func (lc Lifecycle) Validate() error { if err := r.Validate(); err != nil { return err } + if lr.LockEnabled && (r.Expiration.DeleteAll.val || !r.DelMarkerExpiration.Empty()) { + return errLifecycleBucketLocked + } } // Make sure Rule ID is unique for i := range lc.Rules { @@ -279,7 +285,7 @@ func (lc Lifecycle) FilterRules(obj ObjectOpts) []Rule { if !strings.HasPrefix(obj.Name, rule.GetPrefix()) { continue } - if !obj.DeleteMarker && !rule.Filter.TestTags(obj.UserTags) { + if !rule.Filter.TestTags(obj.UserTags) { continue } if !obj.DeleteMarker && !rule.Filter.BySize(obj.Size) { @@ -305,6 +311,10 @@ type ObjectOpts struct { TransitionStatus string RestoreOngoing bool RestoreExpires time.Time + // to determine if object is locked due to retention + UserDefined map[string]string + VersionPurgeStatus replication.VersionPurgeStatusType + ReplicationStatus replication.StatusType } // ExpiredObjectDeleteMarker returns true if an object version referred to by o @@ -326,12 +336,12 @@ type Event struct { // Eval returns the lifecycle event applicable now. func (lc Lifecycle) Eval(obj ObjectOpts) Event { - return lc.eval(obj, time.Now().UTC()) + return lc.eval(obj, time.Now().UTC(), 0) } // eval returns the lifecycle event applicable at the given now. If now is the // zero value of time.Time, it returns the upcoming lifecycle event. -func (lc Lifecycle) eval(obj ObjectOpts, now time.Time) Event { +func (lc Lifecycle) eval(obj ObjectOpts, now time.Time, remainingVersions int) Event { var events []Event if obj.ModTime.IsZero() { return Event{} @@ -353,23 +363,6 @@ func (lc Lifecycle) eval(obj ObjectOpts, now time.Time) Event { } for _, rule := range lc.FilterRules(obj) { - if obj.IsLatest && rule.Expiration.DeleteAll.val { - if !rule.Expiration.IsDaysNull() { - // Specifying the Days tag will automatically perform all versions cleanup - // once the latest object is old enough to satisfy the age criteria. - // This is a MinIO only extension. - if expectedExpiry := ExpectedExpiryTime(obj.ModTime, int(rule.Expiration.Days)); now.IsZero() || now.After(expectedExpiry) { - events = append(events, Event{ - Action: DeleteAllVersionsAction, - RuleID: rule.ID, - Due: expectedExpiry, - }) - // No other conflicting actions apply to an all version expired object. - break - } - } - } - if obj.ExpiredObjectDeleteMarker() { if rule.Expiration.DeleteMarker.val { // Indicates whether MinIO will remove a delete marker with no noncurrent versions. @@ -401,17 +394,37 @@ func (lc Lifecycle) eval(obj ObjectOpts, now time.Time) Event { } } - // Skip rules with newer noncurrent versions specified. These rules are - // not handled at an individual version level. eval applies only to a - // specific version. 
- if !obj.IsLatest && rule.NoncurrentVersionExpiration.NewerNoncurrentVersions > 0 { + // DelMarkerExpiration + if obj.IsLatest && obj.DeleteMarker && !rule.DelMarkerExpiration.Empty() { + if due, ok := rule.DelMarkerExpiration.NextDue(obj); ok && (now.IsZero() || now.After(due)) { + events = append(events, Event{ + Action: DelMarkerDeleteAllVersionsAction, + RuleID: rule.ID, + Due: due, + }) + } + // No other conflicting actions in this rule can apply to an object with current version as DEL marker + // Note: There could be other rules with earlier expiration which need to be considered. + // See TestDelMarkerExpiration continue } - if !obj.IsLatest && !rule.NoncurrentVersionExpiration.IsDaysNull() { - // Non current versions should be deleted if their age exceeds non current days configuration - // https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions - if expectedExpiry := ExpectedExpiryTime(obj.SuccessorModTime, int(rule.NoncurrentVersionExpiration.NoncurrentDays)); now.IsZero() || now.After(expectedExpiry) { + // NoncurrentVersionExpiration + if !obj.IsLatest && rule.NoncurrentVersionExpiration.set { + var ( + retainedEnough bool + oldEnough bool + ) + if rule.NoncurrentVersionExpiration.NewerNoncurrentVersions == 0 || remainingVersions >= rule.NoncurrentVersionExpiration.NewerNoncurrentVersions { + retainedEnough = true + } + expectedExpiry := ExpectedExpiryTime(obj.SuccessorModTime, int(rule.NoncurrentVersionExpiration.NoncurrentDays)) + if now.IsZero() || now.After(expectedExpiry) { + oldEnough = true + } + // > For the deletion to occur, both the <NoncurrentDays> and the <NewerNoncurrentVersions> values must be exceeded. + // ref: https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions + if retainedEnough && oldEnough { events = append(events, Event{ Action: DeleteVersionAction, RuleID: rule.ID, @@ -448,11 +461,17 @@ func (lc Lifecycle) eval(obj ObjectOpts, now time.Time) Event { } case !rule.Expiration.IsDaysNull(): if expectedExpiry := ExpectedExpiryTime(obj.ModTime, int(rule.Expiration.Days)); now.IsZero() || now.After(expectedExpiry) { - events = append(events, Event{ + event := Event{ Action: DeleteAction, RuleID: rule.ID, Due: expectedExpiry, - }) + } + if rule.Expiration.DeleteAll.val { + // Expires all versions of this object once the latest object is old enough. + // This is a MinIO only extension.
+ event.Action = DeleteAllVersionsAction + } + events = append(events, event) } } @@ -470,25 +489,30 @@ func (lc Lifecycle) eval(obj ObjectOpts, now time.Time) Event { } if len(events) > 0 { - sort.Slice(events, func(i, j int) bool { + slices.SortFunc(events, func(a, b Event) int { // Prefer Expiration over Transition for both current // and noncurrent versions when, // - now is past the expected time to action // - expected time to action is the same for both actions - if now.After(events[i].Due) && now.After(events[j].Due) || events[i].Due.Equal(events[j].Due) { - switch events[i].Action { - case DeleteAction, DeleteVersionAction: - return true + if now.After(a.Due) && now.After(b.Due) || a.Due.Equal(b.Due) { + switch a.Action { + case DeleteAllVersionsAction, DelMarkerDeleteAllVersionsAction, + DeleteAction, DeleteVersionAction: + return -1 } - switch events[j].Action { - case DeleteAction, DeleteVersionAction: - return false + switch b.Action { + case DeleteAllVersionsAction, DelMarkerDeleteAllVersionsAction, + DeleteAction, DeleteVersionAction: + return 1 } - return true + return -1 } // Prefer earlier occurring event - return events[i].Due.Before(events[j].Due) + if a.Due.Before(b.Due) { + return -1 + } + return 1 }) return events[0] } @@ -515,9 +539,9 @@ func ExpectedExpiryTime(modTime time.Time, days int) time.Time { // SetPredictionHeaders sets time to expiry and transition headers on w for a // given obj. func (lc Lifecycle) SetPredictionHeaders(w http.ResponseWriter, obj ObjectOpts) { - event := lc.eval(obj, time.Time{}) + event := lc.eval(obj, time.Time{}, 0) switch event.Action { - case DeleteAction, DeleteVersionAction, DeleteAllVersionsAction: + case DeleteAction, DeleteVersionAction, DeleteAllVersionsAction, DelMarkerDeleteAllVersionsAction: w.Header()[xhttp.AmzExpiration] = []string{ fmt.Sprintf(`expiry-date="%s", rule-id="%s"`, event.Due.Format(http.TimeFormat), event.RuleID), } diff --git a/internal/bucket/lifecycle/lifecycle_test.go b/internal/bucket/lifecycle/lifecycle_test.go index 0e8aaa79ad5fe..2af5556edfa6c 100644 --- a/internal/bucket/lifecycle/lifecycle_test.go +++ b/internal/bucket/lifecycle/lifecycle_test.go @@ -30,6 +30,7 @@ import ( "github.com/dustin/go-humanize" "github.com/google/uuid" + "github.com/minio/minio/internal/bucket/object/lock" xhttp "github.com/minio/minio/internal/http" ) @@ -38,6 +39,7 @@ func TestParseAndValidateLifecycleConfig(t *testing.T) { inputConfig string expectedParsingErr error expectedValidationErr error + lr lock.Retention }{ { // Valid lifecycle config inputConfig: ` @@ -61,6 +63,51 @@ func TestParseAndValidateLifecycleConfig(t *testing.T) { expectedParsingErr: nil, expectedValidationErr: nil, }, + { // Using ExpiredObjectAllVersions element with an object locked bucket + inputConfig: ` + + ExpiredObjectAllVersions with object locking + + prefix + + Enabled + + 3 + true + + + `, + expectedParsingErr: nil, + expectedValidationErr: errLifecycleBucketLocked, + lr: lock.Retention{ + LockEnabled: true, + }, + }, + { // Using DelMarkerExpiration action with an object locked bucket + inputConfig: ` + + DeleteMarkerExpiration with object locking + + prefix + + Enabled + + 3 + + + `, + expectedParsingErr: nil, + expectedValidationErr: errLifecycleBucketLocked, + lr: lock.Retention{ + LockEnabled: true, + }, + }, + { // lifecycle config with no rules + inputConfig: ` + `, + expectedParsingErr: nil, + expectedValidationErr: errLifecycleNoRule, + }, { // Valid lifecycle config inputConfig: ` @@ -73,11 +120,15 @@ func 
TestParseAndValidateLifecycleConfig(t *testing.T) { expectedParsingErr: errDuplicatedXMLTag, expectedValidationErr: nil, }, - { // lifecycle config with no rules + { // lifecycle config without prefixes inputConfig: ` + + 3 + Enabled + `, expectedParsingErr: nil, - expectedValidationErr: errLifecycleNoRule, + expectedValidationErr: nil, }, { // lifecycle config with rules having overlapping prefix inputConfig: `rule1Enabled/a/b3rule2Enabled/a/b/ckey1val13 `, @@ -115,10 +166,22 @@ func TestParseAndValidateLifecycleConfig(t *testing.T) { }, // Lifecycle with max noncurrent versions { - inputConfig: `rule>Enabled5`, + inputConfig: `ruleEnabled5`, + expectedParsingErr: nil, + expectedValidationErr: nil, + }, + // Lifecycle with delmarker expiration + { + inputConfig: `ruleEnabled5`, expectedParsingErr: nil, expectedValidationErr: nil, }, + // Lifecycle with empty delmarker expiration + { + inputConfig: `ruleEnabled`, + expectedParsingErr: errInvalidDaysDelMarkerExpiration, + expectedValidationErr: nil, + }, } for i, tc := range testCases { @@ -132,7 +195,7 @@ func TestParseAndValidateLifecycleConfig(t *testing.T) { // no need to continue this test. return } - err = lc.Validate() + err = lc.Validate(tc.lr) if err != tc.expectedValidationErr { t.Fatalf("%d: Expected %v during validation but got %v", i+1, tc.expectedValidationErr, err) } @@ -228,7 +291,8 @@ func TestEval(t *testing.T) { objectName string objectTags string objectModTime time.Time - isExpiredDelMarker bool + isDelMarker bool + hasManyVersions bool expectedAction Action isNoncurrent bool objectSuccessorModTime time.Time @@ -383,36 +447,52 @@ func TestEval(t *testing.T) { }, // Should delete expired delete marker right away { - inputConfig: `trueEnabled`, - objectName: "foodir/fooobject", - objectModTime: time.Now().UTC().Add(-1 * time.Hour), // Created one hour ago - isExpiredDelMarker: true, - expectedAction: DeleteVersionAction, + inputConfig: `trueEnabled`, + objectName: "foodir/fooobject", + objectModTime: time.Now().UTC().Add(-1 * time.Hour), // Created one hour ago + isDelMarker: true, + expectedAction: DeleteVersionAction, }, - // Should delete expired object right away with 1 day expiration + // Should not expire a delete marker; ExpiredObjectDeleteAllVersions applies only when current version is not a DEL marker. 
+ { + inputConfig: `1trueEnabled`, + objectName: "foodir/fooobject", + objectModTime: time.Now().UTC().Add(-10 * 24 * time.Hour), // Created 10 days ago + isDelMarker: true, + hasManyVersions: true, + expectedAction: NoneAction, + }, + // Should delete all versions of this object since the latest version has past the expiry days criteria { - inputConfig: `1trueEnabled`, - objectName: "foodir/fooobject", - objectModTime: time.Now().UTC().Add(-10 * 24 * time.Hour), // Created 10 days ago - isExpiredDelMarker: true, - expectedAction: DeleteAllVersionsAction, + inputConfig: `1trueEnabled`, + objectName: "foodir/fooobject", + objectModTime: time.Now().UTC().Add(-10 * 24 * time.Hour), // Created 10 days ago + hasManyVersions: true, + expectedAction: DeleteAllVersionsAction, + }, + // TransitionAction applies since object doesn't meet the age criteria for DeleteAllVersions + { + inputConfig: `30true10WARM-1Enabled`, + objectName: "foodir/fooobject", + objectModTime: time.Now().UTC().Add(-11 * 24 * time.Hour), // Created 11 days ago + hasManyVersions: true, + expectedAction: TransitionAction, }, - // Should not delete expired marker if its time has not come yet { - inputConfig: `Enabled1`, - objectName: "foodir/fooobject", - objectModTime: time.Now().UTC().Add(-12 * time.Hour), // Created 12 hours ago - isExpiredDelMarker: true, - expectedAction: NoneAction, + inputConfig: `Enabled1`, + objectName: "foodir/fooobject", + objectModTime: time.Now().UTC().Add(-12 * time.Hour), // Created 12 hours ago + isDelMarker: true, + expectedAction: NoneAction, }, // Should delete expired marker since its time has come { - inputConfig: `Enabled1`, - objectName: "foodir/fooobject", - objectModTime: time.Now().UTC().Add(-10 * 24 * time.Hour), // Created 10 days ago - isExpiredDelMarker: true, - expectedAction: DeleteVersionAction, + inputConfig: `Enabled1`, + objectName: "foodir/fooobject", + objectModTime: time.Now().UTC().Add(-10 * 24 * time.Hour), // Created 10 days ago + isDelMarker: true, + expectedAction: DeleteVersionAction, }, // Should transition immediately when Transition days is zero { @@ -579,25 +659,104 @@ func TestEval(t *testing.T) { objectSuccessorModTime: time.Now().UTC().Add(-90 * 24 * time.Hour), expectedAction: DeleteVersionAction, }, + { + // DelMarkerExpiration is preferred since object age is past both transition and expiration days. + inputConfig: ` + + DelMarkerExpiration with Transition + + Enabled + + 60 + + + WARM-1 + 30 + + + `, + objectName: "obj-1", + objectModTime: time.Now().UTC().Add(-90 * 24 * time.Hour), + isDelMarker: true, + expectedAction: DelMarkerDeleteAllVersionsAction, + }, + { + // NoneAction since object doesn't qualify for DelMarkerExpiration yet. 
+ // Note: TransitionAction doesn't apply to DEL marker + inputConfig: ` + + DelMarkerExpiration with Transition + + Enabled + + 60 + + + WARM-1 + 30 + + + `, + objectName: "obj-1", + objectModTime: time.Now().UTC().Add(-50 * 24 * time.Hour), + isDelMarker: true, + expectedAction: NoneAction, + }, + { + inputConfig: ` + + DelMarkerExpiration with non DEL-marker object + + Enabled + + 60 + + + `, + objectName: "obj-1", + objectModTime: time.Now().UTC().Add(-90 * 24 * time.Hour), + expectedAction: NoneAction, + }, + { + inputConfig: ` + + DelMarkerExpiration with noncurrent DEL-marker + + Enabled + + 60 + + + `, + objectName: "obj-1", + objectModTime: time.Now().UTC().Add(-90 * 24 * time.Hour), + objectSuccessorModTime: time.Now().UTC().Add(-60 * 24 * time.Hour), + isDelMarker: true, + isNoncurrent: true, + expectedAction: NoneAction, + }, } for _, tc := range testCases { - tc := tc t.Run("", func(t *testing.T) { lc, err := ParseLifecycleConfig(bytes.NewReader([]byte(tc.inputConfig))) if err != nil { t.Fatalf("Got unexpected error: %v", err) } - if res := lc.Eval(ObjectOpts{ + opts := ObjectOpts{ Name: tc.objectName, UserTags: tc.objectTags, ModTime: tc.objectModTime, - DeleteMarker: tc.isExpiredDelMarker, - NumVersions: 1, + DeleteMarker: tc.isDelMarker, IsLatest: !tc.isNoncurrent, SuccessorModTime: tc.objectSuccessorModTime, VersionID: tc.versionID, - }); res.Action != tc.expectedAction { + } + opts.NumVersions = 1 + if tc.hasManyVersions { + opts.NumVersions = 2 // at least one noncurrent version + } + if res := lc.Eval(opts); res.Action != tc.expectedAction { t.Fatalf("Expected action: `%v`, got: `%v`", tc.expectedAction, res.Action) } }) @@ -663,14 +822,13 @@ func TestHasActiveRules(t *testing.T) { } for i, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Test_%d", i+1), func(t *testing.T) { lc, err := ParseLifecycleConfig(bytes.NewReader([]byte(tc.inputConfig))) if err != nil { t.Fatalf("Got unexpected error: %v", err) } // To ensure input lifecycle configurations are valid - if err := lc.Validate(); err != nil { + if err := lc.Validate(lock.Retention{}); err != nil { t.Fatalf("Invalid test case: %d %v", i+1, err) } if got := lc.HasActiveRules(tc.prefix); got != tc.want { @@ -800,7 +958,9 @@ func TestTransitionTier(t *testing.T) { // Go back seven days in the past now = now.Add(7 * 24 * time.Hour) - evt := lc.eval(obj1, now) + evaluator := NewEvaluator(lc) + evts := evaluator.eval([]ObjectOpts{obj1, obj2}, now) + evt := evts[0] if evt.Action != TransitionAction { t.Fatalf("Expected action: %s but got %s", TransitionAction, evt.Action) } @@ -808,7 +968,7 @@ func TestTransitionTier(t *testing.T) { t.Fatalf("Expected TIER-1 but got %s", evt.StorageClass) } - evt = lc.eval(obj2, now) + evt = evts[1] if evt.Action != TransitionVersionAction { t.Fatalf("Expected action: %s but got %s", TransitionVersionAction, evt.Action) } @@ -876,14 +1036,16 @@ func TestTransitionTierWithPrefixAndTags(t *testing.T) { // Go back seven days in the past now = now.Add(7 * 24 * time.Hour) + evaluator := NewEvaluator(lc) + evts := evaluator.eval([]ObjectOpts{obj1, obj2, obj3}, now) // Eval object 1 - evt := lc.eval(obj1, now) + evt := evts[0] if evt.Action != NoneAction { t.Fatalf("Expected action: %s but got %s", NoneAction, evt.Action) } // Eval object 2 - evt = lc.eval(obj2, now) + evt = evts[1] if evt.Action != TransitionAction { t.Fatalf("Expected action: %s but got %s", TransitionAction, evt.Action) } @@ -892,7 +1054,7 @@ func TestTransitionTierWithPrefixAndTags(t *testing.T) { } // Eval object 3 - 
evt = lc.eval(obj3, now) + evt = evts[2] if evt.Action != TransitionAction { t.Fatalf("Expected action: %s but got %s", TransitionAction, evt.Action) } @@ -1160,7 +1322,7 @@ func TestFilterRules(t *testing.T) { opts ObjectOpts hasRules bool }{ - { // Delete marker should match filter without tags + { // Delete marker shouldn't match filter without tags lc: Lifecycle{ Rules: []Rule{ rules[0], @@ -1171,7 +1333,7 @@ func TestFilterRules(t *testing.T) { IsLatest: true, Name: "obj-1", }, - hasRules: true, + hasRules: false, }, { // PUT version with no matching tags lc: Lifecycle{ @@ -1256,7 +1418,7 @@ func TestFilterRules(t *testing.T) { for i, tc := range tests { t.Run(fmt.Sprintf("test-%d", i+1), func(t *testing.T) { - if err := tc.lc.Validate(); err != nil { + if err := tc.lc.Validate(lock.Retention{}); err != nil { t.Fatalf("Lifecycle validation failed - %v", err) } rules := tc.lc.FilterRules(tc.opts) @@ -1269,3 +1431,90 @@ func TestFilterRules(t *testing.T) { }) } } + +// TestDeleteAllVersions tests ordering among events, especially ones which +// expire all versions like ExpiredObjectDeleteAllVersions and +// DelMarkerExpiration +func TestDeleteAllVersions(t *testing.T) { + // ExpiredObjectDeleteAllVersions + lc := Lifecycle{ + Rules: []Rule{ + { + ID: "ExpiredObjectDeleteAllVersions-20", + Status: "Enabled", + Expiration: Expiration{ + set: true, + DeleteAll: Boolean{val: true, set: true}, + Days: 20, + }, + }, + { + ID: "Transition-10", + Status: "Enabled", + Transition: Transition{ + set: true, + StorageClass: "WARM-1", + Days: 10, + }, + }, + }, + } + opts := ObjectOpts{ + Name: "foo.txt", + ModTime: time.Now().UTC().Add(-10 * 24 * time.Hour), // created 10 days ago + Size: 0, + VersionID: uuid.New().String(), + IsLatest: true, + NumVersions: 4, + } + + evaluator := NewEvaluator(lc) + events := evaluator.eval([]ObjectOpts{opts}, time.Time{}) + event := events[0] + if event.Action != TransitionAction { + t.Fatalf("Expected %v action but got %v", TransitionAction, event.Action) + } + // The earlier upcoming lifecycle event must be picked, i.e rule with id "Transition-10" + if exp := ExpectedExpiryTime(opts.ModTime, 10); exp != event.Due { + t.Fatalf("Expected due %v but got %v, ruleID=%v", exp, event.Due, event.RuleID) + } + + // DelMarkerExpiration + lc = Lifecycle{ + Rules: []Rule{ + { + ID: "delmarker-exp-20", + Status: "Enabled", + DelMarkerExpiration: DelMarkerExpiration{ + Days: 20, + }, + }, + { + ID: "delmarker-exp-10", + Status: "Enabled", + DelMarkerExpiration: DelMarkerExpiration{ + Days: 10, + }, + }, + }, + } + opts = ObjectOpts{ + Name: "foo.txt", + ModTime: time.Now().UTC().Add(-10 * 24 * time.Hour), // created 10 days ago + Size: 0, + VersionID: uuid.New().String(), + IsLatest: true, + DeleteMarker: true, + NumVersions: 4, + } + evaluator = NewEvaluator(lc) + events = evaluator.eval([]ObjectOpts{opts}, time.Time{}) + event = events[0] + if event.Action != DelMarkerDeleteAllVersionsAction { + t.Fatalf("Expected %v action but got %v", DelMarkerDeleteAllVersionsAction, event.Action) + } + // The earlier upcoming lifecycle event must be picked, i.e rule with id "delmarker-exp-10" + if exp := ExpectedExpiryTime(opts.ModTime, 10); exp != event.Due { + t.Fatalf("Expected due %v but got %v, ruleID=%v", exp, event.Due, event.RuleID) + } +} diff --git a/internal/bucket/lifecycle/rule.go b/internal/bucket/lifecycle/rule.go index 147b0d6ebc9c4..2fb006066ca05 100644 --- a/internal/bucket/lifecycle/rule.go +++ b/internal/bucket/lifecycle/rule.go @@ -33,22 +33,24 @@ const ( // Rule - a 
rule for lifecycle configuration. type Rule struct { - XMLName xml.Name `xml:"Rule"` - ID string `xml:"ID,omitempty"` - Status Status `xml:"Status"` - Filter Filter `xml:"Filter,omitempty"` - Prefix Prefix `xml:"Prefix,omitempty"` - Expiration Expiration `xml:"Expiration,omitempty"` - Transition Transition `xml:"Transition,omitempty"` + XMLName xml.Name `xml:"Rule"` + ID string `xml:"ID,omitempty"` + Status Status `xml:"Status"` + Filter Filter `xml:"Filter,omitempty"` + Prefix Prefix `xml:"Prefix,omitempty"` + Expiration Expiration `xml:"Expiration,omitempty"` + Transition Transition `xml:"Transition,omitempty"` + DelMarkerExpiration DelMarkerExpiration `xml:"DelMarkerExpiration,omitempty"` // FIXME: add a type to catch unsupported AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty"` NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"` NoncurrentVersionTransition NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"` } var ( - errInvalidRuleID = Errorf("ID length is limited to 255 characters") - errEmptyRuleStatus = Errorf("Status should not be empty") - errInvalidRuleStatus = Errorf("Status must be set to either Enabled or Disabled") + errInvalidRuleID = Errorf("ID length is limited to 255 characters") + errEmptyRuleStatus = Errorf("Status should not be empty") + errInvalidRuleStatus = Errorf("Status must be set to either Enabled or Disabled") + errInvalidRuleDelMarkerExpiration = Errorf("Rule with DelMarkerExpiration cannot have tags based filtering") ) // validateID - checks if ID is valid or not. @@ -82,10 +84,21 @@ func (r Rule) validateNoncurrentExpiration() error { } func (r Rule) validatePrefixAndFilter() error { - if !r.Prefix.set && r.Filter.IsEmpty() || r.Prefix.set && !r.Filter.IsEmpty() { + // In the now deprecated PutBucketLifecycle API, Rule had a mandatory Prefix element and there existed no Filter field. + // See https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html + // In the newer PutBucketLifecycleConfiguration API, Rule has a prefix field that is deprecated, and there exists an optional + // Filter field, and within it, an optional Prefix field. + // See https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html + // A valid rule could be a pre-existing one created using the now deprecated PutBucketLifecycle. + // Or, a valid rule could also be either a pre-existing or a new rule that is created using PutBucketLifecycleConfiguration. + // Prefix validation below may check that either Rule.Prefix or Rule.Filter.Prefix exist but not both. + // Here, we assume the pre-existing rule created using PutBucketLifecycle API is already valid and won't fail the validation if Rule.Prefix is empty. 
+ + if r.Prefix.set && !r.Filter.IsEmpty() && r.Filter.Prefix.set { return errXMLNotWellFormed } - if !r.Prefix.set { + + if r.Filter.set { return r.Filter.Validate() } return nil @@ -158,7 +171,10 @@ func (r Rule) Validate() error { if err := r.validateNoncurrentTransition(); err != nil { return err } - if !r.Expiration.set && !r.Transition.set && !r.NoncurrentVersionExpiration.set && !r.NoncurrentVersionTransition.set { + if (!r.Filter.Tag.IsEmpty() || len(r.Filter.And.Tags) != 0) && !r.DelMarkerExpiration.Empty() { + return errInvalidRuleDelMarkerExpiration + } + if !r.Expiration.set && !r.Transition.set && !r.NoncurrentVersionExpiration.set && !r.NoncurrentVersionTransition.set && r.DelMarkerExpiration.Empty() { return errXMLNotWellFormed } return nil diff --git a/internal/bucket/lifecycle/rule_test.go b/internal/bucket/lifecycle/rule_test.go index 94b9bccd348e6..f6f139174356d 100644 --- a/internal/bucket/lifecycle/rule_test.go +++ b/internal/bucket/lifecycle/rule_test.go @@ -105,6 +105,31 @@ func TestInvalidRules(t *testing.T) { `, expectedErr: errXMLNotWellFormed, }, + { + inputXML: ` + Rule with a tag and DelMarkerExpiration + k1v1 + + 365 + + Enabled + `, + expectedErr: errInvalidRuleDelMarkerExpiration, + }, + { + inputXML: ` + Rule with multiple tags and DelMarkerExpiration + + k1v1 + k2v2 + + + 365 + + Enabled + `, + expectedErr: errInvalidRuleDelMarkerExpiration, + }, } for i, tc := range invalidTestCases { diff --git a/internal/bucket/lifecycle/transition.go b/internal/bucket/lifecycle/transition.go index 948510d017d2c..397f4c0151488 100644 --- a/internal/bucket/lifecycle/transition.go +++ b/internal/bucket/lifecycle/transition.go @@ -50,10 +50,10 @@ func (tDate *TransitionDate) UnmarshalXML(d *xml.Decoder, startElement xml.Start return errTransitionInvalidDate } // Allow only date timestamp specifying midnight GMT - hr, min, sec := trnDate.Clock() + hr, m, sec := trnDate.Clock() nsec := trnDate.Nanosecond() loc := trnDate.Location() - if !(hr == 0 && min == 0 && sec == 0 && nsec == 0 && loc.String() == time.UTC.String()) { + if hr != 0 || m != 0 || sec != 0 || nsec != 0 || loc.String() != time.UTC.String() { return errTransitionDateNotMidnight } @@ -64,7 +64,7 @@ func (tDate *TransitionDate) UnmarshalXML(d *xml.Decoder, startElement xml.Start // MarshalXML encodes expiration date if it is non-zero and encodes // empty string otherwise func (tDate TransitionDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { - if tDate.Time.IsZero() { + if tDate.IsZero() { return nil } return e.EncodeElement(tDate.Format(time.RFC3339), startElement) @@ -151,7 +151,7 @@ func (t Transition) Validate() error { // IsDateNull returns true if date field is null func (t Transition) IsDateNull() bool { - return t.Date.Time.IsZero() + return t.Date.IsZero() } // IsNull returns true if both date and days fields are null diff --git a/internal/bucket/object/lock/lock.go b/internal/bucket/object/lock/lock.go index bce57d949a737..410011b96427e 100644 --- a/internal/bucket/object/lock/lock.go +++ b/internal/bucket/object/lock/lock.go @@ -24,6 +24,7 @@ import ( "errors" "fmt" "io" + "maps" "net/http" "net/textproto" "strings" @@ -34,9 +35,17 @@ import ( xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) +const ( + logSubsys = "locking" +) + +func lockLogIf(ctx context.Context, err error) { + logger.LogIf(ctx, logSubsys, err) +} + // Enabled indicates object locking is enabled const Enabled 
= "Enabled" @@ -153,7 +162,7 @@ type Retention struct { func (r Retention) Retain(created time.Time) bool { t, err := UTCNowNTP() if err != nil { - logger.LogIf(context.Background(), err) + lockLogIf(context.Background(), err) // Retain return true } @@ -229,6 +238,25 @@ type Config struct { } `xml:"Rule,omitempty"` } +// String returns the human readable format of object lock configuration, used in audit logs. +func (config Config) String() string { + parts := []string{ + fmt.Sprintf("Enabled: %v", config.Enabled()), + } + if config.Rule != nil { + if config.Rule.DefaultRetention.Mode != "" { + parts = append(parts, fmt.Sprintf("Mode: %s", config.Rule.DefaultRetention.Mode)) + } + if config.Rule.DefaultRetention.Days != nil { + parts = append(parts, fmt.Sprintf("Days: %d", *config.Rule.DefaultRetention.Days)) + } + if config.Rule.DefaultRetention.Years != nil { + parts = append(parts, fmt.Sprintf("Years: %d", *config.Rule.DefaultRetention.Years)) + } + } + return strings.Join(parts, ", ") +} + // Enabled returns true if config.ObjectLockEnabled is set to Enabled func (config *Config) Enabled() bool { return config.ObjectLockEnabled == Enabled @@ -262,7 +290,7 @@ func (config *Config) ToRetention() Retention { t, err := UTCNowNTP() if err != nil { - logger.LogIf(context.Background(), err) + lockLogIf(context.Background(), err) // Do not change any configuration // upon NTP failure. return r @@ -341,6 +369,10 @@ type ObjectRetention struct { RetainUntilDate RetentionDate `xml:"RetainUntilDate,omitempty"` } +func (o ObjectRetention) String() string { + return fmt.Sprintf("Mode: %s, RetainUntilDate: %s", o.Mode, o.RetainUntilDate.Time) +} + // Maximum 4KiB size per object retention config. const maxObjectRetentionSize = 1 << 12 @@ -364,7 +396,7 @@ func ParseObjectRetention(reader io.Reader) (*ObjectRetention, error) { t, err := UTCNowNTP() if err != nil { - logger.LogIf(context.Background(), err) + lockLogIf(context.Background(), err) return &ret, ErrPastObjectLockRetainDate } @@ -427,7 +459,7 @@ func ParseObjectLockRetentionHeaders(h http.Header) (rmode RetMode, r RetentionD t, err := UTCNowNTP() if err != nil { - logger.LogIf(context.Background(), err) + lockLogIf(context.Background(), err) return rmode, r, ErrPastObjectLockRetainDate } @@ -555,7 +587,7 @@ func ParseObjectLegalHold(reader io.Reader) (hold *ObjectLegalHold, err error) { if !hold.Status.Valid() { return nil, ErrMalformedXML } - return + return hold, err } // FilterObjectLockMetadata filters object lock metadata if s3:GetObjectRetention permission is denied or if isCopy flag set. 
@@ -564,14 +596,13 @@ func FilterObjectLockMetadata(metadata map[string]string, filterRetention, filte dst := metadata var copied bool delKey := func(key string) { + key = strings.ToLower(key) if _, ok := metadata[key]; !ok { return } if !copied { dst = make(map[string]string, len(metadata)) - for k, v := range metadata { - dst[k] = v - } + maps.Copy(dst, metadata) copied = true } delete(dst, key) diff --git a/internal/bucket/object/lock/lock_test.go b/internal/bucket/object/lock/lock_test.go index d800a53320ae0..be7975e28c084 100644 --- a/internal/bucket/object/lock/lock_test.go +++ b/internal/bucket/object/lock/lock_test.go @@ -174,7 +174,6 @@ func TestParseObjectLockConfig(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run("", func(t *testing.T) { _, err := ParseObjectLockConfig(strings.NewReader(tt.value)) //nolint:gocritic @@ -219,7 +218,6 @@ func TestParseObjectRetention(t *testing.T) { }, } for _, tt := range tests { - tt := tt t.Run("", func(t *testing.T) { _, err := ParseObjectRetention(strings.NewReader(tt.value)) //nolint:gocritic @@ -606,8 +604,77 @@ func TestFilterObjectLockMetadata(t *testing.T) { for i, tt := range tests { o := FilterObjectLockMetadata(tt.metadata, tt.filterRetention, tt.filterLegalHold) - if !reflect.DeepEqual(o, tt.metadata) { + if !reflect.DeepEqual(o, tt.expected) { t.Fatalf("Case %d expected %v, got %v", i, tt.metadata, o) } } } + +func TestToString(t *testing.T) { + days := uint64(30) + daysPtr := &days + years := uint64(2) + yearsPtr := &years + + tests := []struct { + name string + c Config + want string + }{ + { + name: "happy case", + c: Config{ + ObjectLockEnabled: "Enabled", + }, + want: "Enabled: true", + }, + { + name: "with default retention days", + c: Config{ + ObjectLockEnabled: "Enabled", + Rule: &struct { + DefaultRetention DefaultRetention `xml:"DefaultRetention"` + }{ + DefaultRetention: DefaultRetention{ + Mode: RetGovernance, + Days: daysPtr, + }, + }, + }, + want: "Enabled: true, Mode: GOVERNANCE, Days: 30", + }, + { + name: "with default retention years", + c: Config{ + ObjectLockEnabled: "Enabled", + Rule: &struct { + DefaultRetention DefaultRetention `xml:"DefaultRetention"` + }{ + DefaultRetention: DefaultRetention{ + Mode: RetCompliance, + Years: yearsPtr, + }, + }, + }, + want: "Enabled: true, Mode: COMPLIANCE, Years: 2", + }, + { + name: "disabled case", + c: Config{ + ObjectLockEnabled: "Disabled", + }, + want: "Enabled: false", + }, + { + name: "empty case", + c: Config{}, + want: "Enabled: false", + }, + } + for _, tt := range tests { + got := tt.c.String() + if got != tt.want { + t.Errorf("test: %s, got: '%v', want: '%v'", tt.name, got, tt.want) + } + } +} diff --git a/internal/bucket/replication/datatypes.go b/internal/bucket/replication/datatypes.go index a67cabe13dfa2..980f9be5583ba 100644 --- a/internal/bucket/replication/datatypes.go +++ b/internal/bucket/replication/datatypes.go @@ -51,3 +51,27 @@ func (s StatusType) String() string { func (s StatusType) Empty() bool { return string(s) == "" } + +// VersionPurgeStatusType represents status of a versioned delete or permanent delete w.r.t bucket replication +type VersionPurgeStatusType string + +const ( + // VersionPurgePending - versioned delete replication is pending. + VersionPurgePending VersionPurgeStatusType = "PENDING" + + // VersionPurgeComplete - versioned delete replication is now complete, erase version on disk. + VersionPurgeComplete VersionPurgeStatusType = "COMPLETE" + + // VersionPurgeFailed - versioned delete replication failed. 
+ VersionPurgeFailed VersionPurgeStatusType = "FAILED" +) + +// Empty returns true if purge status was not set. +func (v VersionPurgeStatusType) Empty() bool { + return string(v) == "" +} + +// Pending returns true if the version is pending purge. +func (v VersionPurgeStatusType) Pending() bool { + return v == VersionPurgePending || v == VersionPurgeFailed +} diff --git a/internal/bucket/replication/datatypes_gen.go b/internal/bucket/replication/datatypes_gen.go index 3dc029a6a925e..3a9c20a8229a0 100644 --- a/internal/bucket/replication/datatypes_gen.go +++ b/internal/bucket/replication/datatypes_gen.go @@ -1,7 +1,7 @@ -package replication - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package replication + import ( "github.com/tinylib/msgp/msgp" ) @@ -109,3 +109,55 @@ func (z Type) Msgsize() (s int) { s = msgp.IntSize return } + +// DecodeMsg implements msgp.Decodable +func (z *VersionPurgeStatusType) DecodeMsg(dc *msgp.Reader) (err error) { + { + var zb0001 string + zb0001, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = VersionPurgeStatusType(zb0001) + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z VersionPurgeStatusType) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteString(string(z)) + if err != nil { + err = msgp.WrapError(err) + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z VersionPurgeStatusType) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendString(o, string(z)) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *VersionPurgeStatusType) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var zb0001 string + zb0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = VersionPurgeStatusType(zb0001) + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z VersionPurgeStatusType) Msgsize() (s int) { + s = msgp.StringPrefixSize + len(string(z)) + return +} diff --git a/internal/bucket/replication/datatypes_gen_test.go b/internal/bucket/replication/datatypes_gen_test.go index e3cbaadfaaf3d..048f6d7beef4c 100644 --- a/internal/bucket/replication/datatypes_gen_test.go +++ b/internal/bucket/replication/datatypes_gen_test.go @@ -1,3 +1,3 @@ -package replication - // Code generated by github.com/tinylib/msgp DO NOT EDIT. + +package replication diff --git a/internal/bucket/replication/destination.go b/internal/bucket/replication/destination.go index fae272f42b049..9f31b3231f738 100644 --- a/internal/bucket/replication/destination.go +++ b/internal/bucket/replication/destination.go @@ -22,7 +22,7 @@ import ( "fmt" "strings" - "github.com/minio/pkg/v2/wildcard" + "github.com/minio/pkg/v3/wildcard" ) // DestinationARNPrefix - destination ARN prefix as per AWS S3 specification. 
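The VersionPurgeStatusType introduced above follows the same pattern as the existing StatusType, with msgp codecs generated in datatypes_gen.go. Note that Pending treats FAILED the same as PENDING, so a failed versioned-delete replication is retried rather than dropped. A small sketch of the semantics, written as if inside package replication; not part of the patch:

	// Hypothetical snippet inside package replication.
	for _, st := range []VersionPurgeStatusType{VersionPurgePending, VersionPurgeFailed, VersionPurgeComplete, ""} {
		fmt.Printf("%q pending=%v empty=%v\n", st, st.Pending(), st.Empty())
	}
	// "PENDING" pending=true empty=false
	// "FAILED" pending=true empty=false
	// "COMPLETE" pending=false empty=false
	// "" pending=false empty=true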
diff --git a/internal/bucket/replication/error.go b/internal/bucket/replication/error.go index 7d5178d849bc3..b653c86303b4c 100644 --- a/internal/bucket/replication/error.go +++ b/internal/bucket/replication/error.go @@ -29,7 +29,7 @@ type Error struct { // Errorf - formats according to a format specifier and returns // the string as a value that satisfies error of type tagging.Error -func Errorf(format string, a ...interface{}) error { +func Errorf(format string, a ...any) error { return Error{err: fmt.Errorf(format, a...)} } diff --git a/internal/bucket/replication/replication.go b/internal/bucket/replication/replication.go index c95b154525f5b..409e1c19128b3 100644 --- a/internal/bucket/replication/replication.go +++ b/internal/bucket/replication/replication.go @@ -176,7 +176,7 @@ func (c Config) HasExistingObjectReplication(arn string) (hasARN, isEnabled bool // FilterActionableRules returns the rules actions that need to be executed // after evaluating prefix/tag filtering func (c Config) FilterActionableRules(obj ObjectOpts) []Rule { - if obj.Name == "" && !(obj.OpType == ResyncReplicationType || obj.OpType == AllReplicationType) { + if obj.Name == "" && (obj.OpType != ResyncReplicationType && obj.OpType != AllReplicationType) { return nil } var rules []Rule @@ -220,9 +220,6 @@ func (c Config) GetDestination() Destination { // Replicate returns true if the object should be replicated. func (c Config) Replicate(obj ObjectOpts) bool { - if obj.SSEC { - return false - } for _, rule := range c.FilterActionableRules(obj) { if rule.Status == Disabled { continue diff --git a/internal/bucket/replication/replication_test.go b/internal/bucket/replication/replication_test.go index 4e872fd6667dd..7732f8caa9f4e 100644 --- a/internal/bucket/replication/replication_test.go +++ b/internal/bucket/replication/replication_test.go @@ -250,12 +250,12 @@ func TestReplicate(t *testing.T) { {ObjectOpts{Name: "c1test"}, cfgs[0], true}, // 2. valid ObjectOpts passing empty Filter {ObjectOpts{Name: "c1test", VersionID: "vid"}, cfgs[0], true}, // 3. valid ObjectOpts passing empty Filter - {ObjectOpts{Name: "c1test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[0], true}, // 4. DeleteMarker version replication valid case - matches DeleteMarkerReplication status - {ObjectOpts{Name: "c1test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[0], true}, // 5. permanent delete of version, matches DeleteReplication status - valid case - {ObjectOpts{Name: "c1test", VersionID: "vid", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[0], true}, // 6. permanent delete of version, matches DeleteReplication status - {ObjectOpts{Name: "c1test", VersionID: "vid", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[0], false}, // 7. permanent delete of version, disqualified by SSE-C - {ObjectOpts{Name: "c1test", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[0], false}, // 8. setting DeleteMarker on SSE-C encrypted object, disqualified by SSE-C - {ObjectOpts{Name: "c1test", SSEC: true}, cfgs[0], false}, // 9. replication of SSE-C encrypted object, disqualified + {ObjectOpts{Name: "c1test", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[0], true}, // 4. DeleteMarker version replication valid case - matches DeleteMarkerReplication status + {ObjectOpts{Name: "c1test", VersionID: "vid", OpType: DeleteReplicationType}, cfgs[0], true}, // 5. 
permanent delete of version, matches DeleteReplication status - valid case + {ObjectOpts{Name: "c1test", VersionID: "vid", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[0], true}, // 6. permanent delete of version, matches DeleteReplication status + {ObjectOpts{Name: "c1test", VersionID: "vid", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[0], true}, // 7. permanent delete of version + {ObjectOpts{Name: "c1test", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[0], true}, // 8. setting DeleteMarker on SSE-C encrypted object + {ObjectOpts{Name: "c1test", SSEC: true}, cfgs[0], true}, // 9. replication of SSE-C encrypted object // using config 2 - no filters, only replication of object, metadata enabled {ObjectOpts{Name: "c2test"}, cfgs[1], true}, // 10. valid ObjectOpts passing empty Filter @@ -264,7 +264,7 @@ func TestReplicate(t *testing.T) { {ObjectOpts{Name: "c2test", VersionID: "vid", DeleteMarker: true, OpType: DeleteReplicationType}, cfgs[1], false}, // 13. permanent delete of DeleteMarker version, disallowed by DeleteReplication status {ObjectOpts{Name: "c2test", VersionID: "vid", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[1], false}, // 14. permanent delete of version, disqualified by SSE-C & DeleteReplication status {ObjectOpts{Name: "c2test", DeleteMarker: true, SSEC: true, OpType: DeleteReplicationType}, cfgs[1], false}, // 15. setting DeleteMarker on SSE-C encrypted object, disqualified by SSE-C & DeleteMarkerReplication status - {ObjectOpts{Name: "c2test", SSEC: true}, cfgs[1], false}, // 16. replication of SSE-C encrypted object, disqualified by default + {ObjectOpts{Name: "c2test", SSEC: true}, cfgs[1], true}, // 16. replication of SSE-C encrypted object // using config 2 - has more than one rule with overlapping prefixes {ObjectOpts{Name: "xy/c3test", UserTags: "k1=v1"}, cfgs[2], true}, // 17. matches rule 1 for replication of content/metadata {ObjectOpts{Name: "xyz/c3test", UserTags: "k1=v1"}, cfgs[2], true}, // 18. 
matches rule 1 for replication of content/metadata @@ -296,7 +296,6 @@ func TestReplicate(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run(testCase.opts.Name, func(t *testing.T) { result := testCase.c.Replicate(testCase.opts) if result != testCase.expectedResult { @@ -352,7 +351,6 @@ func TestHasActiveRules(t *testing.T) { } for i, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Test_%d", i+1), func(t *testing.T) { cfg, err := ParseConfig(bytes.NewReader([]byte(tc.inputConfig))) if err != nil { @@ -365,7 +363,6 @@ func TestHasActiveRules(t *testing.T) { t.Fatalf("Expected result with recursive set to true: `%v`, got: `%v`", tc.expectedRec, got) } }) - } } @@ -403,7 +400,6 @@ func TestFilterActionableRules(t *testing.T) { }, } for _, tc := range testCases { - tc := tc cfg, err := ParseConfig(bytes.NewReader([]byte(tc.inputConfig))) if err != nil { t.Fatalf("Got unexpected error: %v", err) diff --git a/internal/bucket/replication/rule.go b/internal/bucket/replication/rule.go index 0c6b6bd04772b..347dfe28d8738 100644 --- a/internal/bucket/replication/rule.go +++ b/internal/bucket/replication/rule.go @@ -139,7 +139,7 @@ type Rule struct { Destination Destination `xml:"Destination" json:"Destination"` SourceSelectionCriteria SourceSelectionCriteria `xml:"SourceSelectionCriteria" json:"SourceSelectionCriteria"` Filter Filter `xml:"Filter" json:"Filter"` - ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication,omitempty"` + ExistingObjectReplication ExistingObjectReplication `xml:"ExistingObjectReplication,omitempty" json:"ExistingObjectReplication"` } var ( diff --git a/internal/bucket/replication/rule_test.go b/internal/bucket/replication/rule_test.go index df7192553dcb6..32722c970470f 100644 --- a/internal/bucket/replication/rule_test.go +++ b/internal/bucket/replication/rule_test.go @@ -57,7 +57,6 @@ func TestMetadataReplicate(t *testing.T) { } for i, tc := range testCases { - tc := tc t.Run(fmt.Sprintf("Test_%d", i+1), func(t *testing.T) { cfg, err := ParseConfig(bytes.NewReader([]byte(tc.inputConfig))) if err != nil { @@ -67,6 +66,5 @@ func TestMetadataReplicate(t *testing.T) { t.Fatalf("Expected result with recursive set to false: `%v`, got: `%v`", tc.expectedResult, got) } }) - } } diff --git a/internal/bucket/versioning/error.go b/internal/bucket/versioning/error.go index 6b652c0aeb3d8..20bb4caa259df 100644 --- a/internal/bucket/versioning/error.go +++ b/internal/bucket/versioning/error.go @@ -29,7 +29,7 @@ type Error struct { // Errorf - formats according to a format specifier and returns // the string as a value that satisfies error of type tagging.Error -func Errorf(format string, a ...interface{}) error { +func Errorf(format string, a ...any) error { return Error{err: fmt.Errorf(format, a...)} } diff --git a/internal/bucket/versioning/versioning.go b/internal/bucket/versioning/versioning.go index 8d31c0fad272d..3647f908de356 100644 --- a/internal/bucket/versioning/versioning.go +++ b/internal/bucket/versioning/versioning.go @@ -22,7 +22,7 @@ import ( "io" "strings" - "github.com/minio/pkg/v2/wildcard" + "github.com/minio/pkg/v3/wildcard" ) // State - enabled/disabled/suspended states diff --git a/internal/cachevalue/cache.go b/internal/cachevalue/cache.go index 46b5c756ef61d..346e0a5f24a29 100644 --- a/internal/cachevalue/cache.go +++ b/internal/cachevalue/cache.go @@ -18,6 +18,7 @@ package cachevalue import ( + "context" "sync" "sync/atomic" "time" @@ -30,11 +31,6 @@ type 
Opts struct { // Returns the last good value AND the error. ReturnLastGood bool - // If CacheError is set, errors will be cached as well - // and not continuously try to update. - // Should not be combined with ReturnLastGood. - CacheError bool - // If NoWait is set, Get() will return the last good value, // if TTL has expired but 2x TTL has not yet passed, // but will fetch a new value in the background. @@ -50,7 +46,7 @@ type Cache[T any] struct { // Only one caller will call this function at any time, others will be blocking. // The returned value can no longer be modified once returned. // Should be set before calling Get(). - updateFn func() (T, error) + updateFn func(ctx context.Context) (T, error) // ttl for a cached value. ttl time.Duration @@ -62,10 +58,7 @@ type Cache[T any] struct { Once sync.Once // Managed values. - valErr atomic.Pointer[struct { - v T - e error - }] + val atomic.Pointer[T] lastUpdateMs atomic.Int64 updating sync.Mutex } @@ -78,7 +71,7 @@ func New[T any]() *Cache[T] { // NewFromFunc allocates a new cached value instance and initializes it with an // update function, making it ready for use. -func NewFromFunc[T any](ttl time.Duration, opts Opts, update func() (T, error)) *Cache[T] { +func NewFromFunc[T any](ttl time.Duration, opts Opts, update func(ctx context.Context) (T, error)) *Cache[T] { return &Cache[T]{ ttl: ttl, updateFn: update, @@ -88,7 +81,7 @@ func NewFromFunc[T any](ttl time.Duration, opts Opts, update func() (T, error)) // InitOnce initializes the cache with a TTL and an update function. It is // guaranteed to be called only once. -func (t *Cache[T]) InitOnce(ttl time.Duration, opts Opts, update func() (T, error)) { +func (t *Cache[T]) InitOnce(ttl time.Duration, opts Opts, update func(ctx context.Context) (T, error)) { t.Once.Do(func() { t.ttl = ttl t.updateFn = update @@ -96,61 +89,68 @@ func (t *Cache[T]) InitOnce(ttl time.Duration, opts Opts, update func() (T, erro }) } -// Get will return a cached value or fetch a new one. -// Tf the Update function returns an error the value is forwarded as is and not cached. -func (t *Cache[T]) Get() (T, error) { - v := t.valErr.Load() +// GetWithCtx will return a cached value or fetch a new one. +// passes a caller context, if caller context cancels nothing +// is cached. +// If the Update function returns an error the value is forwarded as is and not cached. +func (t *Cache[T]) GetWithCtx(ctx context.Context) (T, error) { + v := t.val.Load() ttl := t.ttl vTime := t.lastUpdateMs.Load() tNow := time.Now().UnixMilli() if v != nil && tNow-vTime < ttl.Milliseconds() { - if v.e == nil { - return v.v, nil - } - if v.e != nil && t.opts.CacheError || t.opts.ReturnLastGood { - return v.v, v.e - } + return *v, nil } - // Fetch new value. - if t.opts.NoWait && v != nil && tNow-vTime < ttl.Milliseconds()*2 && (v.e == nil || t.opts.CacheError) { + // Fetch new value asynchronously, while we do not return an error + // if v != nil value or + if t.opts.NoWait && v != nil && tNow-vTime < ttl.Milliseconds()*2 { if t.updating.TryLock() { go func() { defer t.updating.Unlock() - t.update() + t.update(context.Background()) }() } - return v.v, v.e + return *v, nil } // Get lock. Either we get it or we wait for it. t.updating.Lock() + defer t.updating.Unlock() + if time.Since(time.UnixMilli(t.lastUpdateMs.Load())) < ttl { // There is a new value, release lock and return it. 
- v = t.valErr.Load() - t.updating.Unlock() - return v.v, v.e + if v = t.val.Load(); v != nil { + return *v, nil + } } - t.update() - v = t.valErr.Load() - t.updating.Unlock() - return v.v, v.e + + if err := t.update(ctx); err != nil { + var empty T + return empty, err + } + + return *t.val.Load(), nil +} + +// Get will return a cached value or fetch a new one. +// Tf the Update function returns an error the value is forwarded as is and not cached. +func (t *Cache[T]) Get() (T, error) { + return t.GetWithCtx(context.Background()) } -func (t *Cache[T]) update() { - val, err := t.updateFn() +func (t *Cache[T]) update(ctx context.Context) error { + val, err := t.updateFn(ctx) if err != nil { - if t.opts.ReturnLastGood { - // Keep last good value. - v := t.valErr.Load() - if v != nil { - val = v.v - } + if t.opts.ReturnLastGood && t.val.Load() != nil { + // Keep last good value, so update + // does not return an error. + return nil } + return err } - t.valErr.Store(&struct { - v T - e error - }{v: val, e: err}) + + t.val.Store(&val) t.lastUpdateMs.Store(time.Now().UnixMilli()) + return nil } diff --git a/internal/cachevalue/cache_test.go b/internal/cachevalue/cache_test.go index 4be5d56738972..978cc1ca955c2 100644 --- a/internal/cachevalue/cache_test.go +++ b/internal/cachevalue/cache_test.go @@ -18,15 +18,76 @@ package cachevalue import ( + "context" + "errors" "testing" "time" ) +func slowCaller(ctx context.Context) error { + sl := time.NewTimer(time.Second) + defer sl.Stop() + + select { + case <-sl.C: + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} + +func TestCacheCtx(t *testing.T) { + cache := New[time.Time]() + t.Parallel() + cache.InitOnce(2*time.Second, Opts{}, + func(ctx context.Context) (time.Time, error) { + return time.Now(), slowCaller(ctx) + }, + ) + + ctx, cancel := context.WithCancel(t.Context()) + cancel() // cancel context to test. 
+ + _, err := cache.GetWithCtx(ctx) + if !errors.Is(err, context.Canceled) { + t.Fatalf("expected context.Canceled err, got %v", err) + } + + ctx, cancel = context.WithCancel(t.Context()) + defer cancel() + + t1, err := cache.GetWithCtx(ctx) + if err != nil { + t.Fatalf("expected nil err, got %v", err) + } + + t2, err := cache.GetWithCtx(ctx) + if err != nil { + t.Fatalf("expected nil err, got %v", err) + } + + if !t1.Equal(t2) { + t.Fatalf("expected time to be equal: %s != %s", t1, t2) + } + + time.Sleep(3 * time.Second) + + t3, err := cache.GetWithCtx(ctx) + if err != nil { + t.Fatalf("expected nil err, got %v", err) + } + + if t1.Equal(t3) { + t.Fatalf("expected time to be un-equal: %s == %s", t1, t3) + } +} + func TestCache(t *testing.T) { cache := New[time.Time]() t.Parallel() cache.InitOnce(2*time.Second, Opts{}, - func() (time.Time, error) { + func(ctx context.Context) (time.Time, error) { return time.Now(), nil }, ) @@ -50,7 +111,7 @@ func TestCache(t *testing.T) { func BenchmarkCache(b *testing.B) { cache := New[time.Time]() cache.InitOnce(1*time.Millisecond, Opts{}, - func() (time.Time, error) { + func(ctx context.Context) (time.Time, error) { return time.Now(), nil }, ) diff --git a/internal/color/color.go b/internal/color/color.go index 3cd3c1188d68a..e45851e9eea05 100644 --- a/internal/color/color.go +++ b/internal/color/color.go @@ -31,122 +31,130 @@ var ( return !color.NoColor } - Bold = func() func(format string, a ...interface{}) string { + Bold = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.Bold).SprintfFunc() } return fmt.Sprintf }() - RedBold = func() func(a ...interface{}) string { + RedBold = func() func(a ...any) string { if IsTerminal() { return color.New(color.FgRed, color.Bold).SprintFunc() } return fmt.Sprint }() - RedBoldf = func() func(format string, a ...interface{}) string { + RedBoldf = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.FgRed, color.Bold).SprintfFunc() } return fmt.Sprintf }() - Red = func() func(format string, a ...interface{}) string { + Red = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.FgRed).SprintfFunc() } return fmt.Sprintf }() - Blue = func() func(format string, a ...interface{}) string { + Blue = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.FgBlue).SprintfFunc() } return fmt.Sprintf }() - Yellow = func() func(format string, a ...interface{}) string { + Yellow = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.FgYellow).SprintfFunc() } return fmt.Sprintf }() - Green = func() func(a ...interface{}) string { + Green = func() func(a ...any) string { if IsTerminal() { return color.New(color.FgGreen).SprintFunc() } return fmt.Sprint }() - Greenf = func() func(format string, a ...interface{}) string { + Greenf = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.FgGreen).SprintfFunc() } return fmt.Sprintf }() - GreenBold = func() func(a ...interface{}) string { + GreenBold = func() func(a ...any) string { if IsTerminal() { return color.New(color.FgGreen, color.Bold).SprintFunc() } return fmt.Sprint }() - CyanBold = func() func(a ...interface{}) string { + CyanBold = func() func(a ...any) string { if IsTerminal() { return color.New(color.FgCyan, color.Bold).SprintFunc() } return fmt.Sprint }() - YellowBold = func() func(format string, a ...interface{}) string { + YellowBold = func() func(format string, a 
...any) string { if IsTerminal() { return color.New(color.FgYellow, color.Bold).SprintfFunc() } return fmt.Sprintf }() - BlueBold = func() func(format string, a ...interface{}) string { + BlueBold = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.FgBlue, color.Bold).SprintfFunc() } return fmt.Sprintf }() - BgYellow = func() func(format string, a ...interface{}) string { + BgYellow = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.BgYellow).SprintfFunc() } return fmt.Sprintf }() - Black = func() func(format string, a ...interface{}) string { + Black = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.FgBlack).SprintfFunc() } return fmt.Sprintf }() - FgRed = func() func(a ...interface{}) string { + FgRed = func() func(a ...any) string { if IsTerminal() { return color.New(color.FgRed).SprintFunc() } return fmt.Sprint }() - BgRed = func() func(format string, a ...interface{}) string { + BgRed = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.BgRed).SprintfFunc() } return fmt.Sprintf }() - FgWhite = func() func(format string, a ...interface{}) string { + FgWhite = func() func(format string, a ...any) string { if IsTerminal() { return color.New(color.FgWhite).SprintfFunc() } return fmt.Sprintf }() + + TurnOff = func() { + color.NoColor = true + } + + TurnOn = func() { + color.NoColor = false + } ) diff --git a/internal/config/api/api.go b/internal/config/api/api.go index ad57e2db072d0..f203f7e95cd24 100644 --- a/internal/config/api/api.go +++ b/internal/config/api/api.go @@ -21,24 +21,26 @@ import ( "encoding/json" "errors" "fmt" + "math" + "slices" "strconv" "strings" "time" "github.com/minio/minio/internal/config" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) // API sub-system constants const ( apiRequestsMax = "requests_max" - apiRequestsDeadline = "requests_deadline" apiClusterDeadline = "cluster_deadline" apiCorsAllowOrigin = "cors_allow_origin" apiRemoteTransportDeadline = "remote_transport_deadline" apiListQuorum = "list_quorum" apiReplicationPriority = "replication_priority" apiReplicationMaxWorkers = "replication_max_workers" + apiReplicationMaxLWorkers = "replication_max_lrg_workers" apiTransitionWorkers = "transition_workers" apiStaleUploadsCleanupInterval = "stale_uploads_cleanup_interval" @@ -49,17 +51,20 @@ const ( apiGzipObjects = "gzip_objects" apiRootAccess = "root_access" apiSyncEvents = "sync_events" + apiObjectMaxVersions = "object_max_versions" + + EnvAPIRequestsMax = "MINIO_API_REQUESTS_MAX" + EnvAPIRequestsDeadline = "MINIO_API_REQUESTS_DEADLINE" + EnvAPIClusterDeadline = "MINIO_API_CLUSTER_DEADLINE" + EnvAPICorsAllowOrigin = "MINIO_API_CORS_ALLOW_ORIGIN" + EnvAPIRemoteTransportDeadline = "MINIO_API_REMOTE_TRANSPORT_DEADLINE" + EnvAPITransitionWorkers = "MINIO_API_TRANSITION_WORKERS" + EnvAPIListQuorum = "MINIO_API_LIST_QUORUM" + EnvAPISecureCiphers = "MINIO_API_SECURE_CIPHERS" // default config.EnableOn + EnvAPIReplicationPriority = "MINIO_API_REPLICATION_PRIORITY" + EnvAPIReplicationMaxWorkers = "MINIO_API_REPLICATION_MAX_WORKERS" + EnvAPIReplicationMaxLWorkers = "MINIO_API_REPLICATION_MAX_LRG_WORKERS" - EnvAPIRequestsMax = "MINIO_API_REQUESTS_MAX" - EnvAPIRequestsDeadline = "MINIO_API_REQUESTS_DEADLINE" - EnvAPIClusterDeadline = "MINIO_API_CLUSTER_DEADLINE" - EnvAPICorsAllowOrigin = "MINIO_API_CORS_ALLOW_ORIGIN" - EnvAPIRemoteTransportDeadline = "MINIO_API_REMOTE_TRANSPORT_DEADLINE" - 
EnvAPITransitionWorkers = "MINIO_API_TRANSITION_WORKERS" - EnvAPIListQuorum = "MINIO_API_LIST_QUORUM" - EnvAPISecureCiphers = "MINIO_API_SECURE_CIPHERS" // default config.EnableOn - EnvAPIReplicationPriority = "MINIO_API_REPLICATION_PRIORITY" - EnvAPIReplicationMaxWorkers = "MINIO_API_REPLICATION_MAX_WORKERS" EnvAPIStaleUploadsCleanupInterval = "MINIO_API_STALE_UPLOADS_CLEANUP_INTERVAL" EnvAPIStaleUploadsExpiry = "MINIO_API_STALE_UPLOADS_EXPIRY" EnvAPIDeleteCleanupInterval = "MINIO_API_DELETE_CLEANUP_INTERVAL" @@ -69,11 +74,14 @@ const ( EnvAPIGzipObjects = "MINIO_API_GZIP_OBJECTS" EnvAPIRootAccess = "MINIO_API_ROOT_ACCESS" // default config.EnableOn EnvAPISyncEvents = "MINIO_API_SYNC_EVENTS" // default "off" + EnvAPIObjectMaxVersions = "MINIO_API_OBJECT_MAX_VERSIONS" + EnvAPIObjectMaxVersionsLegacy = "_MINIO_OBJECT_MAX_VERSIONS" ) // Deprecated key and ENVs const ( apiReadyDeadline = "ready_deadline" + apiRequestsDeadline = "requests_deadline" apiReplicationWorkers = "replication_workers" apiReplicationFailedWorkers = "replication_failed_workers" ) @@ -85,10 +93,6 @@ var ( Key: apiRequestsMax, Value: "0", }, - config.KV{ - Key: apiRequestsDeadline, - Value: "10s", - }, config.KV{ Key: apiClusterDeadline, Value: "10s", @@ -113,6 +117,10 @@ var ( Key: apiReplicationMaxWorkers, Value: "500", }, + config.KV{ + Key: apiReplicationMaxLWorkers, + Value: "10", + }, config.KV{ Key: apiTransitionWorkers, Value: "100", @@ -150,21 +158,24 @@ var ( Key: apiSyncEvents, Value: config.EnableOff, }, + config.KV{ + Key: apiObjectMaxVersions, + Value: "9223372036854775807", + }, } ) // Config storage class configuration type Config struct { RequestsMax int `json:"requests_max"` - RequestsDeadline time.Duration `json:"requests_deadline"` ClusterDeadline time.Duration `json:"cluster_deadline"` CorsAllowOrigin []string `json:"cors_allow_origin"` RemoteTransportDeadline time.Duration `json:"remote_transport_deadline"` ListQuorum string `json:"list_quorum"` ReplicationPriority string `json:"replication_priority"` ReplicationMaxWorkers int `json:"replication_max_workers"` + ReplicationMaxLWorkers int `json:"replication_max_lrg_workers"` TransitionWorkers int `json:"transition_workers"` - ExpiryWorkers int `json:"expiry_workers"` StaleUploadsCleanupInterval time.Duration `json:"stale_uploads_cleanup_interval"` StaleUploadsExpiry time.Duration `json:"stale_uploads_expiry"` DeleteCleanupInterval time.Duration `json:"delete_cleanup_interval"` @@ -172,6 +183,7 @@ type Config struct { GzipObjects bool `json:"gzip_objects"` RootAccess bool `json:"root_access"` SyncEvents bool `json:"sync_events"` + ObjectMaxVersions int64 `json:"object_max_versions"` } // UnmarshalJSON - Validate SS and RRS parity when unmarshalling JSON. 
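The default for the new object_max_versions key above is the decimal form of math.MaxInt64, i.e. effectively unlimited until an operator lowers it; the lookup code in the following hunk falls back to the same value when neither MINIO_API_OBJECT_MAX_VERSIONS nor the legacy _MINIO_OBJECT_MAX_VERSIONS is set. A quick check of that equivalence in plain Go (not part of the patch):

	// Confirms the DefaultKVS literal used above.
	fmt.Println(int64(math.MaxInt64))                 // 9223372036854775807
	fmt.Println(strconv.FormatInt(math.MaxInt64, 10)) // "9223372036854775807"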
@@ -189,9 +201,11 @@ func (sCfg *Config) UnmarshalJSON(data []byte) error { func LookupConfig(kvs config.KVS) (cfg Config, err error) { deprecatedKeys := []string{ apiReadyDeadline, + apiRequestsDeadline, "extend_list_cache_life", apiReplicationWorkers, apiReplicationFailedWorkers, + "expiry_workers", } disableODirect := env.Get(EnvAPIDisableODirect, kvs.Get(apiDisableODirect)) == config.EnableOn @@ -211,10 +225,8 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) { corsAllowOrigin = []string{"*"} // defaults to '*' } else { corsAllowOrigin = strings.Split(corsList, ",") - for _, cors := range corsAllowOrigin { - if cors == "" { - return cfg, errors.New("invalid cors value") - } + if slices.Contains(corsAllowOrigin, "") { + return cfg, errors.New("invalid cors value") } } cfg.CorsAllowOrigin = corsAllowOrigin @@ -234,12 +246,6 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) { return cfg, errors.New("invalid API max requests value") } - requestsDeadline, err := time.ParseDuration(env.Get(EnvAPIRequestsDeadline, kvs.GetWithDefault(apiRequestsDeadline, DefaultKVS))) - if err != nil { - return cfg, err - } - cfg.RequestsDeadline = requestsDeadline - clusterDeadline, err := time.ParseDuration(env.Get(EnvAPIClusterDeadline, kvs.GetWithDefault(apiClusterDeadline, DefaultKVS))) if err != nil { return cfg, err @@ -271,11 +277,21 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) { if err != nil { return cfg, err } - if replicationMaxWorkers <= 0 || replicationMaxWorkers > 500 { return cfg, config.ErrInvalidReplicationWorkersValue(nil).Msg("Number of replication workers should be between 1 and 500") } cfg.ReplicationMaxWorkers = replicationMaxWorkers + + replicationMaxLWorkers, err := strconv.Atoi(env.Get(EnvAPIReplicationMaxLWorkers, kvs.GetWithDefault(apiReplicationMaxLWorkers, DefaultKVS))) + if err != nil { + return cfg, err + } + if replicationMaxLWorkers <= 0 || replicationMaxLWorkers > 10 { + return cfg, config.ErrInvalidReplicationWorkersValue(nil).Msg("Number of replication workers for transfers >=128MiB should be between 1 and 10 per node") + } + + cfg.ReplicationMaxLWorkers = replicationMaxLWorkers + transitionWorkers, err := strconv.Atoi(env.Get(EnvAPITransitionWorkers, kvs.GetWithDefault(apiTransitionWorkers, DefaultKVS))) if err != nil { return cfg, err @@ -307,5 +323,22 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) { cfg.SyncEvents = env.Get(EnvAPISyncEvents, kvs.Get(apiSyncEvents)) == config.EnableOn + maxVerStr := env.Get(EnvAPIObjectMaxVersions, "") + if maxVerStr == "" { + maxVerStr = env.Get(EnvAPIObjectMaxVersionsLegacy, kvs.Get(apiObjectMaxVersions)) + } + if maxVerStr != "" { + maxVersions, err := strconv.ParseInt(maxVerStr, 10, 64) + if err != nil { + return cfg, err + } + if maxVersions <= 0 { + return cfg, fmt.Errorf("invalid object max versions value: %v", maxVersions) + } + cfg.ObjectMaxVersions = maxVersions + } else { + cfg.ObjectMaxVersions = math.MaxInt64 + } + return cfg, nil } diff --git a/internal/config/api/help.go b/internal/config/api/help.go index 967f56404af61..2c4b8b2bbc9c1 100644 --- a/internal/config/api/help.go +++ b/internal/config/api/help.go @@ -28,16 +28,10 @@ var ( Help = config.HelpKVS{ config.HelpKV{ Key: apiRequestsMax, - Description: `set the maximum number of concurrent requests` + defaultHelpPostfix(apiRequestsMax), + Description: `set the maximum number of concurrent requests (default: auto)`, Optional: true, Type: "number", }, - config.HelpKV{ - Key: apiRequestsDeadline, - Description: `set the 
deadline for API requests waiting to be processed` + defaultHelpPostfix(apiRequestsDeadline), - Optional: true, - Type: "duration", - }, config.HelpKV{ Key: apiClusterDeadline, Description: `set the deadline for cluster readiness check` + defaultHelpPostfix(apiClusterDeadline), @@ -116,5 +110,11 @@ var ( Optional: true, Type: "boolean", }, + config.HelpKV{ + Key: apiObjectMaxVersions, + Description: "set max allowed number of versions per object" + defaultHelpPostfix(apiObjectMaxVersions), + Optional: true, + Type: "number", + }, } ) diff --git a/internal/config/batch/batch.go b/internal/config/batch/batch.go index e1c095418c07b..7404cf86967ed 100644 --- a/internal/config/batch/batch.go +++ b/internal/config/batch/batch.go @@ -22,7 +22,7 @@ import ( "time" "github.com/minio/minio/internal/config" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) // Batch job environment variables diff --git a/internal/config/browser/browser.go b/internal/config/browser/browser.go index a67451a9215bf..f5ad11d2017c3 100644 --- a/internal/config/browser/browser.go +++ b/internal/config/browser/browser.go @@ -23,7 +23,7 @@ import ( "sync" "github.com/minio/minio/internal/config" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) // Browser sub-system constants @@ -51,7 +51,7 @@ var ( DefaultKVS = config.KVS{ config.KV{ Key: browserCSPPolicy, - Value: "default-src 'self' 'unsafe-eval' 'unsafe-inline';", + Value: "default-src 'self' 'unsafe-eval' 'unsafe-inline'; script-src 'self' https://unpkg.com; connect-src 'self' https://unpkg.com;", }, config.KV{ Key: browserHSTSSeconds, @@ -97,15 +97,30 @@ func (browseCfg *Config) Update(newCfg Config) { // LookupConfig - lookup api config and override with valid environment settings if any. func LookupConfig(kvs config.KVS) (cfg Config, err error) { - cspPolicy := env.Get(EnvBrowserCSPPolicy, kvs.GetWithDefault(browserCSPPolicy, DefaultKVS)) - hstsSeconds, err := strconv.Atoi(env.Get(EnvBrowserHSTSSeconds, kvs.GetWithDefault(browserHSTSSeconds, DefaultKVS))) - if err != nil { + cfg = Config{ + CSPPolicy: env.Get(EnvBrowserCSPPolicy, kvs.GetWithDefault(browserCSPPolicy, DefaultKVS)), + HSTSSeconds: 0, + HSTSIncludeSubdomains: true, + HSTSPreload: true, + ReferrerPolicy: "strict-origin-when-cross-origin", + } + + if err = config.CheckValidKeys(config.BrowserSubSys, kvs, DefaultKVS); err != nil { return cfg, err } hstsIncludeSubdomains := env.Get(EnvBrowserHSTSIncludeSubdomains, kvs.GetWithDefault(browserHSTSIncludeSubdomains, DefaultKVS)) == config.EnableOn hstsPreload := env.Get(EnvBrowserHSTSPreload, kvs.Get(browserHSTSPreload)) == config.EnableOn + hstsSeconds, err := strconv.Atoi(env.Get(EnvBrowserHSTSSeconds, kvs.GetWithDefault(browserHSTSSeconds, DefaultKVS))) + if err != nil { + return cfg, err + } + + cfg.HSTSSeconds = hstsSeconds + cfg.HSTSIncludeSubdomains = hstsIncludeSubdomains + cfg.HSTSPreload = hstsPreload + referrerPolicy := env.Get(EnvBrowserReferrerPolicy, kvs.GetWithDefault(browserReferrerPolicy, DefaultKVS)) switch referrerPolicy { case "no-referrer", "no-referrer-when-downgrade", "origin", "origin-when-cross-origin", "same-origin", "strict-origin", "strict-origin-when-cross-origin", "unsafe-url": @@ -114,11 +129,6 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) { return cfg, fmt.Errorf("invalid value %v for %s", referrerPolicy, browserReferrerPolicy) } - cfg.CSPPolicy = cspPolicy - cfg.HSTSSeconds = hstsSeconds - cfg.HSTSIncludeSubdomains = hstsIncludeSubdomains - cfg.HSTSPreload = hstsPreload - return cfg, 
nil } diff --git a/internal/config/cache/cache.go b/internal/config/cache/cache.go deleted file mode 100644 index 56c50019ed996..0000000000000 --- a/internal/config/cache/cache.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright (c) 2015-2023 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package cache - -import ( - "bytes" - "context" - "errors" - "fmt" - "net/http" - "sync" - "time" - - "github.com/dustin/go-humanize" - "github.com/minio/minio/internal/config" - xhttp "github.com/minio/minio/internal/http" - "github.com/minio/pkg/v2/env" - "github.com/tinylib/msgp/msgp" -) - -// Cache related keys -const ( - Enable = "enable" - Endpoint = "endpoint" - BlockSize = "block_size" - - EnvEnable = "MINIO_CACHE_ENABLE" - EnvEndpoint = "MINIO_CACHE_ENDPOINT" - EnvBlockSize = "MINIO_CACHE_BLOCK_SIZE" -) - -// DefaultKVS - default KV config for cache settings -var DefaultKVS = config.KVS{ - config.KV{ - Key: Enable, - Value: "off", - }, - config.KV{ - Key: Endpoint, - Value: "", - }, - config.KV{ - Key: BlockSize, - Value: "", - }, -} - -// Config represents the subnet related configuration -type Config struct { - // Flag indicating whether cache is enabled. - Enable bool `json:"enable"` - - // Endpoint for caching uses remote mcache server to - // store and retrieve pre-condition check entities such as - // Etag and ModTime of an object + version - Endpoint string `json:"endpoint"` - - // BlockSize indicates the maximum object size below which - // data is cached and fetched remotely from DRAM. - BlockSize int64 - - // Is the HTTP client used for communicating with mcache server - clnt *http.Client -} - -var configLock sync.RWMutex - -// Enabled - indicates if cache is enabled or not -func (c *Config) Enabled() bool { - return c.Enable && c.Endpoint != "" -} - -// MatchesSize verifies if input 'size' falls under cacheable threshold -func (c Config) MatchesSize(size int64) bool { - configLock.RLock() - defer configLock.RUnlock() - - return c.Enable && c.BlockSize > 0 && size <= c.BlockSize -} - -// Update updates new cache frequency -func (c *Config) Update(ncfg Config) { - configLock.Lock() - defer configLock.Unlock() - - c.Enable = ncfg.Enable - c.Endpoint = ncfg.Endpoint - c.BlockSize = ncfg.BlockSize - c.clnt = ncfg.clnt -} - -// cache related errors -var ( - ErrInvalidArgument = errors.New("invalid argument") - ErrKeyMissing = errors.New("key is missing") -) - -const ( - mcacheV1Check = "/_mcache/v1/check" - mcacheV1Update = "/_mcache/v1/update" - mcacheV1Delete = "/_mcache/v1/delete" -) - -// Get performs conditional check and returns the cached object info if any. 
-func (c Config) Get(r *CondCheck) (*ObjectInfo, error) { - configLock.RLock() - defer configLock.RUnlock() - - if !c.Enable { - return nil, nil - } - - if c.Endpoint == "" { - // Endpoint not set, make this a no-op - return nil, nil - } - - buf, err := r.MarshalMsg(nil) - if err != nil { - return nil, err - } - - // We do not want Gets to take so much time, anything - // beyond 250ms we should cut it, remote cache is too - // busy already. - ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond) - defer cancel() - - req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.Endpoint+mcacheV1Check, bytes.NewReader(buf)) - if err != nil { - return nil, err - } - - resp, err := c.clnt.Do(req) - if err != nil { - return nil, err - } - defer xhttp.DrainBody(resp.Body) - - switch resp.StatusCode { - case http.StatusNotFound: - return nil, ErrKeyMissing - case http.StatusOK: - co := &ObjectInfo{} - return co, co.DecodeMsg(msgp.NewReader(resp.Body)) - default: - return nil, ErrInvalidArgument - } -} - -// Set sets the cache object info -func (c Config) Set(ci *ObjectInfo) { - configLock.RLock() - defer configLock.RUnlock() - - if !c.Enable { - return - } - - if c.Endpoint == "" { - // Endpoint not set, make this a no-op - return - } - - buf, err := ci.MarshalMsg(nil) - if err != nil { - return - } - - req, err := http.NewRequestWithContext(context.Background(), http.MethodPut, c.Endpoint+mcacheV1Update, bytes.NewReader(buf)) - if err != nil { - return - } - - resp, err := c.clnt.Do(req) - if err != nil { - return - } - defer xhttp.DrainBody(resp.Body) -} - -// Delete deletes remote cached content for object and its version. -func (c Config) Delete(bucket, key string) { - configLock.RLock() - defer configLock.RUnlock() - - if !c.Enable { - return - } - - if c.Endpoint == "" { - return - } - - req, err := http.NewRequestWithContext(context.Background(), http.MethodDelete, c.Endpoint+fmt.Sprintf("%s?bucket=%s&key=%s", mcacheV1Delete, bucket, key), nil) - if err != nil { - return - } - - resp, err := c.clnt.Do(req) - if err != nil { - return - } - defer xhttp.DrainBody(resp.Body) -} - -// LookupConfig - lookup config and override with valid environment settings if any. -func LookupConfig(kvs config.KVS, transport http.RoundTripper) (cfg Config, err error) { - cfg.Enable = env.Get(EnvEnable, kvs.GetWithDefault(Enable, DefaultKVS)) == config.EnableOn - - if d := env.Get(EnvBlockSize, kvs.GetWithDefault(BlockSize, DefaultKVS)); d != "" { - objectSize, err := humanize.ParseBytes(d) - if err != nil { - return cfg, err - } - cfg.BlockSize = int64(objectSize) - } - - cfg.Endpoint = env.Get(EnvEndpoint, kvs.GetWithDefault(Endpoint, DefaultKVS)) - cfg.clnt = &http.Client{Transport: transport} - - return cfg, nil -} diff --git a/internal/config/cache/help.go b/internal/config/cache/help.go deleted file mode 100644 index 7754b06d163e4..0000000000000 --- a/internal/config/cache/help.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2015-2023 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package cache - -import "github.com/minio/minio/internal/config" - -var ( - defaultHelpPostfix = func(key string) string { - return config.DefaultHelpPostfix(DefaultKVS, key) - } - - // Help - provides help for cache config - Help = config.HelpKVS{ - config.HelpKV{ - Key: Enable, - Type: "on|off", - Description: "set to enable remote cache plugin" + defaultHelpPostfix(Enable), - Optional: true, - }, - config.HelpKV{ - Key: Endpoint, - Type: "string", - Description: "remote cache endpoint for GET/HEAD object(s) metadata, data" + defaultHelpPostfix(Endpoint), - Optional: true, - }, - config.HelpKV{ - Key: BlockSize, - Type: "string", - Description: "cache all objects below the specified block size" + defaultHelpPostfix(BlockSize), - Optional: true, - }, - } -) diff --git a/internal/config/cache/remote.go b/internal/config/cache/remote.go deleted file mode 100644 index 8db4e2f55add9..0000000000000 --- a/internal/config/cache/remote.go +++ /dev/null @@ -1,111 +0,0 @@ -package cache - -import ( - "net/http" - "regexp" - "strconv" - "time" - - "github.com/minio/minio/internal/amztime" - xhttp "github.com/minio/minio/internal/http" -) - -//go:generate msgp -file=$GOFILE - -// ObjectInfo represents the object information cached remotely -type ObjectInfo struct { - Key string `json:"key"` - Bucket string `json:"bucket"` - ETag string `json:"etag"` - ModTime time.Time `json:"modTime"` - StatusCode int `json:"statusCode"` - - // Optional elements - CacheControl string `json:"cacheControl,omitempty" msg:",omitempty"` - Expires string `json:"expires,omitempty" msg:",omitempty"` - Metadata map[string]string `json:"metadata,omitempty" msg:",omitempty"` - Range string `json:"range,omitempty" msg:",omitempty"` - PartNumber int `json:"partNumber,omitempty" msg:",omitempty"` - Size int64 `json:"size,omitempty" msg:",omitempty"` // Full size of the object - Data []byte `json:"data,omitempty" msg:",omitempty"` // Data can container full data of the object or partial -} - -// WriteHeaders writes the response headers for conditional requests -func (oi ObjectInfo) WriteHeaders(w http.ResponseWriter, preamble, statusCode func()) { - preamble() - - if !oi.ModTime.IsZero() { - w.Header().Set(xhttp.LastModified, oi.ModTime.UTC().Format(http.TimeFormat)) - } - - if oi.ETag != "" { - w.Header()[xhttp.ETag] = []string{"\"" + oi.ETag + "\""} - } - - if oi.Expires != "" { - w.Header().Set(xhttp.Expires, oi.Expires) - } - - if oi.CacheControl != "" { - w.Header().Set(xhttp.CacheControl, oi.CacheControl) - } - - statusCode() -} - -// CondCheck represents the conditional request made to the remote cache -// for validation during GET/HEAD object requests. 
-type CondCheck struct { - ObjectInfo - IfMatch string `json:"ifMatch,omitempty" msg:",omitempty"` - IfNoneMatch string `json:"ifNoneMatch,omitempty" msg:",omitempty"` - IfModifiedSince *time.Time `json:"ifModSince,omitempty" msg:",omitempty"` - IfUnModifiedSince *time.Time `json:"ifUnmodSince,omitempty" msg:",omitempty"` - IfRange string `json:"ifRange,omitempty" msg:",omitempty"` - IfPartNumber int `json:"ifPartNumber,omitempty" msg:",omitempty"` -} - -// IsSet tells the cache lookup to avoid sending a request -func (r *CondCheck) IsSet() bool { - if r == nil { - return false - } - return r.IfMatch != "" || r.IfNoneMatch != "" || r.IfModifiedSince != nil || r.IfUnModifiedSince != nil || r.IfRange != "" -} - -var etagRegex = regexp.MustCompile("\"*?([^\"]*?)\"*?$") - -// canonicalizeETag returns ETag with leading and trailing double-quotes removed, -// if any present -func canonicalizeETag(etag string) string { - return etagRegex.ReplaceAllString(etag, "$1") -} - -// Init - populates the input values, initializes CondCheck -// before sending the request remotely. -func (r *CondCheck) Init(bucket, object string, header http.Header) { - r.Key = object - r.Bucket = bucket - - ifModifiedSinceHeader := header.Get(xhttp.IfModifiedSince) - if ifModifiedSinceHeader != "" { - if givenTime, err := amztime.ParseHeader(ifModifiedSinceHeader); err == nil { - r.IfModifiedSince = &givenTime - } - } - ifUnmodifiedSinceHeader := header.Get(xhttp.IfUnmodifiedSince) - if ifUnmodifiedSinceHeader != "" { - if givenTime, err := amztime.ParseHeader(ifUnmodifiedSinceHeader); err == nil { - r.IfUnModifiedSince = &givenTime - } - } - r.IfMatch = canonicalizeETag(header.Get(xhttp.IfMatch)) - r.IfNoneMatch = canonicalizeETag(header.Get(xhttp.IfNoneMatch)) - r.IfRange = header.Get(xhttp.Range) - ifPartNumberHeader := header.Get(xhttp.PartNumber) - if ifPartNumberHeader != "" { - if partNumber, err := strconv.Atoi(ifPartNumberHeader); err == nil { - r.IfPartNumber = partNumber - } - } -} diff --git a/internal/config/cache/remote_gen.go b/internal/config/cache/remote_gen.go deleted file mode 100644 index b6b7e7e622dad..0000000000000 --- a/internal/config/cache/remote_gen.go +++ /dev/null @@ -1,795 +0,0 @@ -package cache - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. 
- -import ( - "time" - - "github.com/tinylib/msgp/msgp" -) - -// DecodeMsg implements msgp.Decodable -func (z *CondCheck) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "ObjectInfo": - err = z.ObjectInfo.DecodeMsg(dc) - if err != nil { - err = msgp.WrapError(err, "ObjectInfo") - return - } - case "IfMatch": - z.IfMatch, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "IfMatch") - return - } - case "IfNoneMatch": - z.IfNoneMatch, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "IfNoneMatch") - return - } - case "IfModifiedSince": - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - err = msgp.WrapError(err, "IfModifiedSince") - return - } - z.IfModifiedSince = nil - } else { - if z.IfModifiedSince == nil { - z.IfModifiedSince = new(time.Time) - } - *z.IfModifiedSince, err = dc.ReadTime() - if err != nil { - err = msgp.WrapError(err, "IfModifiedSince") - return - } - } - case "IfUnModifiedSince": - if dc.IsNil() { - err = dc.ReadNil() - if err != nil { - err = msgp.WrapError(err, "IfUnModifiedSince") - return - } - z.IfUnModifiedSince = nil - } else { - if z.IfUnModifiedSince == nil { - z.IfUnModifiedSince = new(time.Time) - } - *z.IfUnModifiedSince, err = dc.ReadTime() - if err != nil { - err = msgp.WrapError(err, "IfUnModifiedSince") - return - } - } - case "IfRange": - z.IfRange, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "IfRange") - return - } - case "IfPartNumber": - z.IfPartNumber, err = dc.ReadInt() - if err != nil { - err = msgp.WrapError(err, "IfPartNumber") - return - } - default: - err = dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *CondCheck) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 7 - // write "ObjectInfo" - err = en.Append(0x87, 0xaa, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x66, 0x6f) - if err != nil { - return - } - err = z.ObjectInfo.EncodeMsg(en) - if err != nil { - err = msgp.WrapError(err, "ObjectInfo") - return - } - // write "IfMatch" - err = en.Append(0xa7, 0x49, 0x66, 0x4d, 0x61, 0x74, 0x63, 0x68) - if err != nil { - return - } - err = en.WriteString(z.IfMatch) - if err != nil { - err = msgp.WrapError(err, "IfMatch") - return - } - // write "IfNoneMatch" - err = en.Append(0xab, 0x49, 0x66, 0x4e, 0x6f, 0x6e, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68) - if err != nil { - return - } - err = en.WriteString(z.IfNoneMatch) - if err != nil { - err = msgp.WrapError(err, "IfNoneMatch") - return - } - // write "IfModifiedSince" - err = en.Append(0xaf, 0x49, 0x66, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x53, 0x69, 0x6e, 0x63, 0x65) - if err != nil { - return - } - if z.IfModifiedSince == nil { - err = en.WriteNil() - if err != nil { - return - } - } else { - err = en.WriteTime(*z.IfModifiedSince) - if err != nil { - err = msgp.WrapError(err, "IfModifiedSince") - return - } - } - // write "IfUnModifiedSince" - err = en.Append(0xb1, 0x49, 0x66, 0x55, 0x6e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x53, 0x69, 0x6e, 0x63, 0x65) - if err != nil { - return - } - if z.IfUnModifiedSince == nil { - err = en.WriteNil() - if err != nil { - return - } - } else { - err = 
en.WriteTime(*z.IfUnModifiedSince) - if err != nil { - err = msgp.WrapError(err, "IfUnModifiedSince") - return - } - } - // write "IfRange" - err = en.Append(0xa7, 0x49, 0x66, 0x52, 0x61, 0x6e, 0x67, 0x65) - if err != nil { - return - } - err = en.WriteString(z.IfRange) - if err != nil { - err = msgp.WrapError(err, "IfRange") - return - } - // write "IfPartNumber" - err = en.Append(0xac, 0x49, 0x66, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72) - if err != nil { - return - } - err = en.WriteInt(z.IfPartNumber) - if err != nil { - err = msgp.WrapError(err, "IfPartNumber") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *CondCheck) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 7 - // string "ObjectInfo" - o = append(o, 0x87, 0xaa, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x6e, 0x66, 0x6f) - o, err = z.ObjectInfo.MarshalMsg(o) - if err != nil { - err = msgp.WrapError(err, "ObjectInfo") - return - } - // string "IfMatch" - o = append(o, 0xa7, 0x49, 0x66, 0x4d, 0x61, 0x74, 0x63, 0x68) - o = msgp.AppendString(o, z.IfMatch) - // string "IfNoneMatch" - o = append(o, 0xab, 0x49, 0x66, 0x4e, 0x6f, 0x6e, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68) - o = msgp.AppendString(o, z.IfNoneMatch) - // string "IfModifiedSince" - o = append(o, 0xaf, 0x49, 0x66, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x53, 0x69, 0x6e, 0x63, 0x65) - if z.IfModifiedSince == nil { - o = msgp.AppendNil(o) - } else { - o = msgp.AppendTime(o, *z.IfModifiedSince) - } - // string "IfUnModifiedSince" - o = append(o, 0xb1, 0x49, 0x66, 0x55, 0x6e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x53, 0x69, 0x6e, 0x63, 0x65) - if z.IfUnModifiedSince == nil { - o = msgp.AppendNil(o) - } else { - o = msgp.AppendTime(o, *z.IfUnModifiedSince) - } - // string "IfRange" - o = append(o, 0xa7, 0x49, 0x66, 0x52, 0x61, 0x6e, 0x67, 0x65) - o = msgp.AppendString(o, z.IfRange) - // string "IfPartNumber" - o = append(o, 0xac, 0x49, 0x66, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72) - o = msgp.AppendInt(o, z.IfPartNumber) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *CondCheck) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "ObjectInfo": - bts, err = z.ObjectInfo.UnmarshalMsg(bts) - if err != nil { - err = msgp.WrapError(err, "ObjectInfo") - return - } - case "IfMatch": - z.IfMatch, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "IfMatch") - return - } - case "IfNoneMatch": - z.IfNoneMatch, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "IfNoneMatch") - return - } - case "IfModifiedSince": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.IfModifiedSince = nil - } else { - if z.IfModifiedSince == nil { - z.IfModifiedSince = new(time.Time) - } - *z.IfModifiedSince, bts, err = msgp.ReadTimeBytes(bts) - if err != nil { - err = msgp.WrapError(err, "IfModifiedSince") - return - } - } - case "IfUnModifiedSince": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.IfUnModifiedSince = nil - } else { - if z.IfUnModifiedSince == nil { - 
z.IfUnModifiedSince = new(time.Time) - } - *z.IfUnModifiedSince, bts, err = msgp.ReadTimeBytes(bts) - if err != nil { - err = msgp.WrapError(err, "IfUnModifiedSince") - return - } - } - case "IfRange": - z.IfRange, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "IfRange") - return - } - case "IfPartNumber": - z.IfPartNumber, bts, err = msgp.ReadIntBytes(bts) - if err != nil { - err = msgp.WrapError(err, "IfPartNumber") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *CondCheck) Msgsize() (s int) { - s = 1 + 11 + z.ObjectInfo.Msgsize() + 8 + msgp.StringPrefixSize + len(z.IfMatch) + 12 + msgp.StringPrefixSize + len(z.IfNoneMatch) + 16 - if z.IfModifiedSince == nil { - s += msgp.NilSize - } else { - s += msgp.TimeSize - } - s += 18 - if z.IfUnModifiedSince == nil { - s += msgp.NilSize - } else { - s += msgp.TimeSize - } - s += 8 + msgp.StringPrefixSize + len(z.IfRange) + 13 + msgp.IntSize - return -} - -// DecodeMsg implements msgp.Decodable -func (z *ObjectInfo) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Key": - z.Key, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Key") - return - } - case "Bucket": - z.Bucket, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Bucket") - return - } - case "ETag": - z.ETag, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "ETag") - return - } - case "ModTime": - z.ModTime, err = dc.ReadTime() - if err != nil { - err = msgp.WrapError(err, "ModTime") - return - } - case "StatusCode": - z.StatusCode, err = dc.ReadInt() - if err != nil { - err = msgp.WrapError(err, "StatusCode") - return - } - case "CacheControl": - z.CacheControl, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "CacheControl") - return - } - case "Expires": - z.Expires, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Expires") - return - } - case "Metadata": - var zb0002 uint32 - zb0002, err = dc.ReadMapHeader() - if err != nil { - err = msgp.WrapError(err, "Metadata") - return - } - if z.Metadata == nil { - z.Metadata = make(map[string]string, zb0002) - } else if len(z.Metadata) > 0 { - for key := range z.Metadata { - delete(z.Metadata, key) - } - } - for zb0002 > 0 { - zb0002-- - var za0001 string - var za0002 string - za0001, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Metadata") - return - } - za0002, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Metadata", za0001) - return - } - z.Metadata[za0001] = za0002 - } - case "Range": - z.Range, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Range") - return - } - case "PartNumber": - z.PartNumber, err = dc.ReadInt() - if err != nil { - err = msgp.WrapError(err, "PartNumber") - return - } - case "Size": - z.Size, err = dc.ReadInt64() - if err != nil { - err = msgp.WrapError(err, "Size") - return - } - case "Data": - z.Data, err = dc.ReadBytes(z.Data) - if err != nil { - err = msgp.WrapError(err, "Data") - return - } - default: - err = 
dc.Skip() - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - return -} - -// EncodeMsg implements msgp.Encodable -func (z *ObjectInfo) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 12 - // write "Key" - err = en.Append(0x8c, 0xa3, 0x4b, 0x65, 0x79) - if err != nil { - return - } - err = en.WriteString(z.Key) - if err != nil { - err = msgp.WrapError(err, "Key") - return - } - // write "Bucket" - err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74) - if err != nil { - return - } - err = en.WriteString(z.Bucket) - if err != nil { - err = msgp.WrapError(err, "Bucket") - return - } - // write "ETag" - err = en.Append(0xa4, 0x45, 0x54, 0x61, 0x67) - if err != nil { - return - } - err = en.WriteString(z.ETag) - if err != nil { - err = msgp.WrapError(err, "ETag") - return - } - // write "ModTime" - err = en.Append(0xa7, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65) - if err != nil { - return - } - err = en.WriteTime(z.ModTime) - if err != nil { - err = msgp.WrapError(err, "ModTime") - return - } - // write "StatusCode" - err = en.Append(0xaa, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65) - if err != nil { - return - } - err = en.WriteInt(z.StatusCode) - if err != nil { - err = msgp.WrapError(err, "StatusCode") - return - } - // write "CacheControl" - err = en.Append(0xac, 0x43, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c) - if err != nil { - return - } - err = en.WriteString(z.CacheControl) - if err != nil { - err = msgp.WrapError(err, "CacheControl") - return - } - // write "Expires" - err = en.Append(0xa7, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73) - if err != nil { - return - } - err = en.WriteString(z.Expires) - if err != nil { - err = msgp.WrapError(err, "Expires") - return - } - // write "Metadata" - err = en.Append(0xa8, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61) - if err != nil { - return - } - err = en.WriteMapHeader(uint32(len(z.Metadata))) - if err != nil { - err = msgp.WrapError(err, "Metadata") - return - } - for za0001, za0002 := range z.Metadata { - err = en.WriteString(za0001) - if err != nil { - err = msgp.WrapError(err, "Metadata") - return - } - err = en.WriteString(za0002) - if err != nil { - err = msgp.WrapError(err, "Metadata", za0001) - return - } - } - // write "Range" - err = en.Append(0xa5, 0x52, 0x61, 0x6e, 0x67, 0x65) - if err != nil { - return - } - err = en.WriteString(z.Range) - if err != nil { - err = msgp.WrapError(err, "Range") - return - } - // write "PartNumber" - err = en.Append(0xaa, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72) - if err != nil { - return - } - err = en.WriteInt(z.PartNumber) - if err != nil { - err = msgp.WrapError(err, "PartNumber") - return - } - // write "Size" - err = en.Append(0xa4, 0x53, 0x69, 0x7a, 0x65) - if err != nil { - return - } - err = en.WriteInt64(z.Size) - if err != nil { - err = msgp.WrapError(err, "Size") - return - } - // write "Data" - err = en.Append(0xa4, 0x44, 0x61, 0x74, 0x61) - if err != nil { - return - } - err = en.WriteBytes(z.Data) - if err != nil { - err = msgp.WrapError(err, "Data") - return - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *ObjectInfo) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 12 - // string "Key" - o = append(o, 0x8c, 0xa3, 0x4b, 0x65, 0x79) - o = msgp.AppendString(o, z.Key) - // string "Bucket" - o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74) - o = msgp.AppendString(o, z.Bucket) - // string "ETag" - o = append(o, 0xa4, 
0x45, 0x54, 0x61, 0x67) - o = msgp.AppendString(o, z.ETag) - // string "ModTime" - o = append(o, 0xa7, 0x4d, 0x6f, 0x64, 0x54, 0x69, 0x6d, 0x65) - o = msgp.AppendTime(o, z.ModTime) - // string "StatusCode" - o = append(o, 0xaa, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65) - o = msgp.AppendInt(o, z.StatusCode) - // string "CacheControl" - o = append(o, 0xac, 0x43, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c) - o = msgp.AppendString(o, z.CacheControl) - // string "Expires" - o = append(o, 0xa7, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73) - o = msgp.AppendString(o, z.Expires) - // string "Metadata" - o = append(o, 0xa8, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61) - o = msgp.AppendMapHeader(o, uint32(len(z.Metadata))) - for za0001, za0002 := range z.Metadata { - o = msgp.AppendString(o, za0001) - o = msgp.AppendString(o, za0002) - } - // string "Range" - o = append(o, 0xa5, 0x52, 0x61, 0x6e, 0x67, 0x65) - o = msgp.AppendString(o, z.Range) - // string "PartNumber" - o = append(o, 0xaa, 0x50, 0x61, 0x72, 0x74, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72) - o = msgp.AppendInt(o, z.PartNumber) - // string "Size" - o = append(o, 0xa4, 0x53, 0x69, 0x7a, 0x65) - o = msgp.AppendInt64(o, z.Size) - // string "Data" - o = append(o, 0xa4, 0x44, 0x61, 0x74, 0x61) - o = msgp.AppendBytes(o, z.Data) - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *ObjectInfo) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Key": - z.Key, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Key") - return - } - case "Bucket": - z.Bucket, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Bucket") - return - } - case "ETag": - z.ETag, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "ETag") - return - } - case "ModTime": - z.ModTime, bts, err = msgp.ReadTimeBytes(bts) - if err != nil { - err = msgp.WrapError(err, "ModTime") - return - } - case "StatusCode": - z.StatusCode, bts, err = msgp.ReadIntBytes(bts) - if err != nil { - err = msgp.WrapError(err, "StatusCode") - return - } - case "CacheControl": - z.CacheControl, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "CacheControl") - return - } - case "Expires": - z.Expires, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Expires") - return - } - case "Metadata": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Metadata") - return - } - if z.Metadata == nil { - z.Metadata = make(map[string]string, zb0002) - } else if len(z.Metadata) > 0 { - for key := range z.Metadata { - delete(z.Metadata, key) - } - } - for zb0002 > 0 { - var za0001 string - var za0002 string - zb0002-- - za0001, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Metadata") - return - } - za0002, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Metadata", za0001) - return - } - z.Metadata[za0001] = za0002 - } - case "Range": - z.Range, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Range") - 
return - } - case "PartNumber": - z.PartNumber, bts, err = msgp.ReadIntBytes(bts) - if err != nil { - err = msgp.WrapError(err, "PartNumber") - return - } - case "Size": - z.Size, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - err = msgp.WrapError(err, "Size") - return - } - case "Data": - z.Data, bts, err = msgp.ReadBytesBytes(bts, z.Data) - if err != nil { - err = msgp.WrapError(err, "Data") - return - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *ObjectInfo) Msgsize() (s int) { - s = 1 + 4 + msgp.StringPrefixSize + len(z.Key) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 5 + msgp.StringPrefixSize + len(z.ETag) + 8 + msgp.TimeSize + 11 + msgp.IntSize + 13 + msgp.StringPrefixSize + len(z.CacheControl) + 8 + msgp.StringPrefixSize + len(z.Expires) + 9 + msgp.MapHeaderSize - if z.Metadata != nil { - for za0001, za0002 := range z.Metadata { - _ = za0002 - s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002) - } - } - s += 6 + msgp.StringPrefixSize + len(z.Range) + 11 + msgp.IntSize + 5 + msgp.Int64Size + 5 + msgp.BytesPrefixSize + len(z.Data) - return -} diff --git a/internal/config/cache/remote_gen_test.go b/internal/config/cache/remote_gen_test.go deleted file mode 100644 index 86a1a2b9200db..0000000000000 --- a/internal/config/cache/remote_gen_test.go +++ /dev/null @@ -1,236 +0,0 @@ -package cache - -// Code generated by github.com/tinylib/msgp DO NOT EDIT. - -import ( - "bytes" - "testing" - - "github.com/tinylib/msgp/msgp" -) - -func TestMarshalUnmarshalCondCheck(t *testing.T) { - v := CondCheck{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgCondCheck(b *testing.B) { - v := CondCheck{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgCondCheck(b *testing.B) { - v := CondCheck{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalCondCheck(b *testing.B) { - v := CondCheck{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeCondCheck(t *testing.T) { - v := CondCheck{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeCondCheck Msgsize() is inaccurate") - } - - vn := CondCheck{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeCondCheck(b *testing.B) { - v := CondCheck{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < 
b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeCondCheck(b *testing.B) { - v := CondCheck{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} - -func TestMarshalUnmarshalObjectInfo(t *testing.T) { - v := ObjectInfo{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgObjectInfo(b *testing.B) { - v := ObjectInfo{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgObjectInfo(b *testing.B) { - v := ObjectInfo{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalObjectInfo(b *testing.B) { - v := ObjectInfo{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - -func TestEncodeDecodeObjectInfo(t *testing.T) { - v := ObjectInfo{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - - m := v.Msgsize() - if buf.Len() > m { - t.Log("WARNING: TestEncodeDecodeObjectInfo Msgsize() is inaccurate") - } - - vn := ObjectInfo{} - err := msgp.Decode(&buf, &vn) - if err != nil { - t.Error(err) - } - - buf.Reset() - msgp.Encode(&buf, &v) - err = msgp.NewReader(&buf).Skip() - if err != nil { - t.Error(err) - } -} - -func BenchmarkEncodeObjectInfo(b *testing.B) { - v := ObjectInfo{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - en := msgp.NewWriter(msgp.Nowhere) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.EncodeMsg(en) - } - en.Flush() -} - -func BenchmarkDecodeObjectInfo(b *testing.B) { - v := ObjectInfo{} - var buf bytes.Buffer - msgp.Encode(&buf, &v) - b.SetBytes(int64(buf.Len())) - rd := msgp.NewEndlessReader(buf.Bytes(), b) - dc := msgp.NewReader(rd) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - err := v.DecodeMsg(dc) - if err != nil { - b.Fatal(err) - } - } -} diff --git a/internal/config/callhome/callhome.go b/internal/config/callhome/callhome.go index fd7a27558221a..ef6f8d51f5054 100644 --- a/internal/config/callhome/callhome.go +++ b/internal/config/callhome/callhome.go @@ -22,7 +22,7 @@ import ( "time" "github.com/minio/minio/internal/config" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) // Callhome related keys diff --git a/internal/config/certs.go b/internal/config/certs.go index bf13a429bcff3..e2ba44ebc1934 100644 --- a/internal/config/certs.go +++ b/internal/config/certs.go @@ -25,7 +25,7 @@ import ( "errors" "os" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) // EnvCertPassword is the environment variable which contains the password used @@ -49,19 +49,19 @@ func ParsePublicCertFile(certFile string) (x509Certs []*x509.Certificate, err er for len(current) > 0 { var 
pemBlock *pem.Block if pemBlock, current = pem.Decode(current); pemBlock == nil { - return nil, ErrTLSUnexpectedData(nil).Msg("Could not read PEM block from file %s", certFile) + return nil, ErrTLSUnexpectedData(nil).Msgf("Could not read PEM block from file %s", certFile) } var x509Cert *x509.Certificate if x509Cert, err = x509.ParseCertificate(pemBlock.Bytes); err != nil { - return nil, ErrTLSUnexpectedData(nil).Msg("Failed to parse `%s`: %s", certFile, err.Error()) + return nil, ErrTLSUnexpectedData(nil).Msgf("Failed to parse `%s`: %s", certFile, err.Error()) } x509Certs = append(x509Certs, x509Cert) } if len(x509Certs) == 0 { - return nil, ErrTLSUnexpectedData(nil).Msg("Empty public certificate file %s", certFile) + return nil, ErrTLSUnexpectedData(nil).Msgf("Empty public certificate file %s", certFile) } return x509Certs, nil @@ -73,18 +73,18 @@ func ParsePublicCertFile(certFile string) (x509Certs []*x509.Certificate, err er func LoadX509KeyPair(certFile, keyFile string) (tls.Certificate, error) { certPEMBlock, err := os.ReadFile(certFile) if err != nil { - return tls.Certificate{}, ErrTLSReadError(nil).Msg("Unable to read the public key: %s", err) + return tls.Certificate{}, ErrTLSReadError(nil).Msgf("Unable to read the public key: %s", err) } keyPEMBlock, err := os.ReadFile(keyFile) if err != nil { - return tls.Certificate{}, ErrTLSReadError(nil).Msg("Unable to read the private key: %s", err) + return tls.Certificate{}, ErrTLSReadError(nil).Msgf("Unable to read the private key: %s", err) } key, rest := pem.Decode(keyPEMBlock) if len(rest) > 0 { - return tls.Certificate{}, ErrTLSUnexpectedData(nil).Msg("The private key contains additional data") + return tls.Certificate{}, ErrTLSUnexpectedData(nil).Msgf("The private key contains additional data") } if key == nil { - return tls.Certificate{}, ErrTLSUnexpectedData(nil).Msg("The private key is not readable") + return tls.Certificate{}, ErrTLSUnexpectedData(nil).Msgf("The private key is not readable") } if x509.IsEncryptedPEMBlock(key) { password := env.Get(EnvCertPassword, "") diff --git a/internal/config/certs_test.go b/internal/config/certs_test.go index 4c989d37b04af..d102a2492c948 100644 --- a/internal/config/certs_test.go +++ b/internal/config/certs_test.go @@ -22,10 +22,11 @@ import ( "testing" ) -func createTempFile(prefix, content string) (tempFile string, err error) { +func createTempFile(t testing.TB, prefix, content string) (tempFile string, err error) { + t.Helper() var tmpfile *os.File - if tmpfile, err = os.CreateTemp("", prefix); err != nil { + if tmpfile, err = os.CreateTemp(t.TempDir(), prefix); err != nil { return tempFile, err } @@ -42,14 +43,13 @@ func createTempFile(prefix, content string) (tempFile string, err error) { } func TestParsePublicCertFile(t *testing.T) { - tempFile1, err := createTempFile("public-cert-file", "") + tempFile1, err := createTempFile(t, "public-cert-file", "") if err != nil { t.Fatalf("Unable to create temporary file. 
%v", err) } defer os.Remove(tempFile1) - tempFile2, err := createTempFile("public-cert-file", - `-----BEGIN CERTIFICATE----- + tempFile2, err := createTempFile(t, "public-cert-file", `-----BEGIN CERTIFICATE----- MIICdTCCAd4CCQCO5G/W1xcE9TANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJa WTEOMAwGA1UECBMFTWluaW8xETAPBgNVBAcTCEludGVybmV0MQ4wDAYDVQQKEwVN aW5pbzEOMAwGA1UECxMFTWluaW8xDjAMBgNVBAMTBU1pbmlvMR0wGwYJKoZIhvcN @@ -70,8 +70,7 @@ M9ofSEt/bdRD } defer os.Remove(tempFile2) - tempFile3, err := createTempFile("public-cert-file", - `-----BEGIN CERTIFICATE----- + tempFile3, err := createTempFile(t, "public-cert-file", `-----BEGIN CERTIFICATE----- MIICdTCCAd4CCQCO5G/W1xcE9TANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJa WTEOMAwGA1UECBMFTWluaW8xETAPBgNVBAcTCEludGVybmV0MQ4wDAYDVQQKEwVN aW5pbzEOMAwGA1UECxMFTWluaW8xDjAMBgNVBAMTBU1pbmlvMR0wGwYJKoZIhvcN @@ -92,8 +91,7 @@ M9ofSEt/bdRD } defer os.Remove(tempFile3) - tempFile4, err := createTempFile("public-cert-file", - `-----BEGIN CERTIFICATE----- + tempFile4, err := createTempFile(t, "public-cert-file", `-----BEGIN CERTIFICATE----- MIICdTCCAd4CCQCO5G/W1xcE9TANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJa WTEOMAwGA1UECBMFTWluaW8xETAPBgNVBAcTCEludGVybmV0MQ4wDAYDVQQKEwVN aW5pbzEOMAwGA1UECxMFTWluaW8xDjAMBgNVBAMTBU1pbmlvMR0wGwYJKoZIhvcN @@ -114,8 +112,7 @@ M9ofSEt/bdRD } defer os.Remove(tempFile4) - tempFile5, err := createTempFile("public-cert-file", - `-----BEGIN CERTIFICATE----- + tempFile5, err := createTempFile(t, "public-cert-file", `-----BEGIN CERTIFICATE----- MIICdTCCAd4CCQCO5G/W1xcE9TANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJa WTEOMAwGA1UECBMFTWluaW8xETAPBgNVBAcTCEludGVybmV0MQ4wDAYDVQQKEwVN aW5pbzEOMAwGA1UECxMFTWluaW8xDjAMBgNVBAMTBU1pbmlvMR0wGwYJKoZIhvcN @@ -184,11 +181,11 @@ func TestLoadX509KeyPair(t *testing.T) { os.Unsetenv(EnvCertPassword) }) for i, testCase := range loadX509KeyPairTests { - privateKey, err := createTempFile("private.key", testCase.privateKey) + privateKey, err := createTempFile(t, "private.key", testCase.privateKey) if err != nil { t.Fatalf("Test %d: failed to create tmp private key file: %v", i, err) } - certificate, err := createTempFile("public.crt", testCase.certificate) + certificate, err := createTempFile(t, "public.crt", testCase.certificate) if err != nil { os.Remove(privateKey) t.Fatalf("Test %d: failed to create tmp certificate file: %v", i, err) diff --git a/internal/config/compress/compress.go b/internal/config/compress/compress.go index 8e9a0b75c51c7..dc050e3ba99b7 100644 --- a/internal/config/compress/compress.go +++ b/internal/config/compress/compress.go @@ -22,7 +22,7 @@ import ( "strings" "github.com/minio/minio/internal/config" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) // Config represents the compression settings. 
@@ -93,9 +93,12 @@ func LookupConfig(kvs config.KVS) (Config, error) { return cfg, err } - compress := env.Get(EnvCompressState, kvs.Get(config.Enable)) + compress := env.Get(EnvCompressState, "") if compress == "" { - compress = env.Get(EnvCompress, "") + compress = env.Get(EnvCompressEnableLegacy, "") + if compress == "" { + compress = env.Get(EnvCompress, kvs.Get(config.Enable)) + } } cfg.Enabled, err = config.ParseBool(compress) if err != nil { @@ -109,9 +112,9 @@ func LookupConfig(kvs config.KVS) (Config, error) { return cfg, nil } - allowEnc := env.Get(EnvCompressAllowEncryption, kvs.Get(AllowEncrypted)) + allowEnc := env.Get(EnvCompressAllowEncryption, "") if allowEnc == "" { - allowEnc = env.Get(EnvCompressAllowEncryptionLegacy, "") + allowEnc = env.Get(EnvCompressAllowEncryptionLegacy, kvs.Get(AllowEncrypted)) } cfg.AllowEncrypted, err = config.ParseBool(allowEnc) diff --git a/internal/config/compress/compress_test.go b/internal/config/compress/compress_test.go index 124d182dbc8a0..12daa9f16a2e0 100644 --- a/internal/config/compress/compress_test.go +++ b/internal/config/compress/compress_test.go @@ -41,7 +41,6 @@ func TestParseCompressIncludes(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run(testCase.str, func(t *testing.T) { gotPatterns, err := parseCompressIncludes(testCase.str) if !testCase.success && err == nil { diff --git a/internal/config/compress/legacy.go b/internal/config/compress/legacy.go index 13be9069a3843..034df304f8bba 100644 --- a/internal/config/compress/legacy.go +++ b/internal/config/compress/legacy.go @@ -30,6 +30,7 @@ const ( // These envs were wrong but we supported them for a long time // so keep them here to support existing deployments. + EnvCompressEnableLegacy = "MINIO_COMPRESS_ENABLE" EnvCompressAllowEncryptionLegacy = "MINIO_COMPRESS_ALLOW_ENCRYPTION" EnvCompressExtensionsLegacy = "MINIO_COMPRESS_EXTENSIONS" EnvCompressMimeTypesLegacy2 = "MINIO_COMPRESS_MIME_TYPES" diff --git a/internal/config/config.go b/internal/config/config.go index fd8568544bca0..20a9f3024925e 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -21,14 +21,17 @@ import ( "bufio" "fmt" "io" + "maps" "regexp" + "slices" "sort" "strings" + "sync" "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7/pkg/set" "github.com/minio/minio/internal/auth" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) // ErrorConfig holds the config error types @@ -60,7 +63,7 @@ type ErrConfigNotFound struct { func Error[T ErrorConfig, PT interface { *T setMsg(string) -}](format string, vals ...interface{}, +}](format string, vals ...any, ) T { pt := PT(new(T)) pt.setMsg(fmt.Sprintf(format, vals...)) @@ -68,7 +71,7 @@ func Error[T ErrorConfig, PT interface { } // Errorf formats an error and returns it as a generic config error -func Errorf(format string, vals ...interface{}) ErrConfigGeneric { +func Errorf(format string, vals ...any) ErrConfigGeneric { return Error[ErrConfigGeneric](format, vals...) 
} @@ -102,7 +105,6 @@ const ( IdentityLDAPSubSys = madmin.IdentityLDAPSubSys IdentityTLSSubSys = madmin.IdentityTLSSubSys IdentityPluginSubSys = madmin.IdentityPluginSubSys - CacheSubSys = madmin.CacheSubSys SiteSubSys = madmin.SiteSubSys RegionSubSys = madmin.RegionSubSys EtcdSubSys = madmin.EtcdSubSys @@ -188,7 +190,6 @@ var SubSystemsDynamic = set.CreateStringSet( AuditWebhookSubSys, AuditKafkaSubSys, StorageClassSubSys, - CacheSubSys, ILMSubSys, BatchSubSys, BrowserSubSys, @@ -199,7 +200,6 @@ var SubSystemsSingleTargets = set.CreateStringSet( SiteSubSys, RegionSubSys, EtcdSubSys, - CacheSubSys, APISubSys, StorageClassSubSys, CompressionSubSys, @@ -240,9 +240,7 @@ var DefaultKVS = map[string]KVS{} // globally, this should be called only once preferably // during `init()`. func RegisterDefaultKVS(kvsMap map[string]KVS) { - for subSys, kvs := range kvsMap { - DefaultKVS[subSys] = kvs - } + maps.Copy(DefaultKVS, kvsMap) } // HelpSubSysMap - help for all individual KVS for each sub-systems @@ -255,9 +253,7 @@ var HelpSubSysMap = map[string]HelpKVS{} // this function should be called only once // preferably in during `init()`. func RegisterHelpSubSys(helpKVSMap map[string]HelpKVS) { - for subSys, hkvs := range helpKVSMap { - HelpSubSysMap[subSys] = hkvs - } + maps.Copy(HelpSubSysMap, helpKVSMap) } // HelpDeprecatedSubSysMap - help for all deprecated sub-systems, that may be @@ -267,9 +263,7 @@ var HelpDeprecatedSubSysMap = map[string]HelpKV{} // RegisterHelpDeprecatedSubSys - saves input help KVS for deprecated // sub-systems globally. Should be called only once at init. func RegisterHelpDeprecatedSubSys(helpDeprecatedKVMap map[string]HelpKV) { - for k, v := range helpDeprecatedKVMap { - HelpDeprecatedSubSysMap[k] = v - } + maps.Copy(HelpDeprecatedSubSysMap, helpDeprecatedKVMap) } // KV - is a shorthand of each key value. @@ -355,9 +349,7 @@ func Merge(cfgKVS map[string]KVS, envname string, defaultKVS KVS) map[string]KVS } newCfgKVS[tgt] = defaultKVS } - for tgt, kv := range cfgKVS { - newCfgKVS[tgt] = kv - } + maps.Copy(newCfgKVS, cfgKVS) return newCfgKVS } @@ -461,7 +453,7 @@ func ParseConfigTargetID(r io.Reader) (ids map[string]bool, err error) { if err := scanner.Err(); err != nil { return nil, err } - return + return ids, err } // ReadConfig - read content from input and write into c. @@ -544,10 +536,36 @@ var ( } ) +var siteLK sync.RWMutex + // Site - holds site info - name and region. type Site struct { - Name string - Region string + name string + region string +} + +// Update safe update the new site name and region +func (s *Site) Update(n Site) { + siteLK.Lock() + s.name = n.name + s.region = n.region + siteLK.Unlock() +} + +// Name returns currently configured site name +func (s *Site) Name() string { + siteLK.RLock() + defer siteLK.RUnlock() + + return s.name +} + +// Region returns currently configured site region +func (s *Site) Region() string { + siteLK.RLock() + defer siteLK.RUnlock() + + return s.region } var validRegionRegex = regexp.MustCompile("^[a-zA-Z][a-zA-Z0-9-_-]+$") @@ -560,7 +578,7 @@ var validSiteNameRegex = regexp.MustCompile("^[a-z][a-z0-9-]+$") // region sub-system as well. 
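Editor's note: the config.go hunk above changes Site to hold unexported name/region fields guarded by a package-level sync.RWMutex (siteLK) and exposes Update/Name/Region accessors. Below is a rough standalone sketch of that read-mostly locking pattern; it embeds the mutex in the struct for brevity, unlike the package-level lock in the patch, and all names are illustrative.

package main

import (
	"fmt"
	"sync"
)

// settings shows the accessor pattern: the writer takes the exclusive lock in
// Update, readers take the shared lock in Name/Region, and callers never
// touch the fields directly.
type settings struct {
	mu     sync.RWMutex
	name   string
	region string
}

func (s *settings) Update(name, region string) {
	s.mu.Lock()
	s.name, s.region = name, region
	s.mu.Unlock()
}

func (s *settings) Name() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.name
}

func (s *settings) Region() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.region
}

func main() {
	var s settings
	s.Update("cal-rack0", "us-east-1")
	fmt.Println(s.Name(), s.Region())
}

Because reads vastly outnumber writes for this kind of configuration value, the RWMutex keeps the hot read path cheap while still making reconfiguration via Update safe.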
func LookupSite(siteKV KVS, regionKV KVS) (s Site, err error) { if err = CheckValidKeys(SiteSubSys, siteKV, DefaultSiteKVS); err != nil { - return + return s, err } region := env.Get(EnvRegion, "") if region == "" { @@ -578,7 +596,7 @@ func LookupSite(siteKV KVS, regionKV KVS) (s Site, err error) { // is legacy, we return an error to tell the user to // reset the region via the new command. err = Errorf("could not load region from legacy configuration as it was invalid - use 'mc admin config set myminio site region=myregion name=myname' to set a region and name (%v)", err) - return + return s, err } region = regionKV.Get(RegionName) @@ -588,9 +606,9 @@ func LookupSite(siteKV KVS, regionKV KVS) (s Site, err error) { err = Errorf( "region '%s' is invalid, expected simple characters such as [us-east-1, myregion...]", region) - return + return s, err } - s.Region = region + s.region = region } name := env.Get(EnvSiteName, siteKV.Get(NameKey)) @@ -599,11 +617,11 @@ func LookupSite(siteKV KVS, regionKV KVS) (s Site, err error) { err = Errorf( "site name '%s' is invalid, expected simple characters such as [cal-rack0, myname...]", name) - return + return s, err } - s.Name = name + s.name = name } - return + return s, err } // CheckValidKeys - checks if inputs KVS has the necessary keys, @@ -618,11 +636,8 @@ func CheckValidKeys(subSys string, kv KVS, validKVS KVS, deprecatedKeys ...strin continue } var skip bool - for _, deprecatedKey := range deprecatedKeys { - if kv.Key == deprecatedKey { - skip = true - break - } + if slices.Contains(deprecatedKeys, kv.Key) { + skip = true } if skip { continue @@ -828,7 +843,7 @@ func (c Config) DelKVS(s string) error { if len(inputs) == 2 { currKVS := ck.Clone() defKVS := DefaultKVS[subSys] - for _, delKey := range strings.Fields(inputs[1]) { + for delKey := range strings.FieldsSeq(inputs[1]) { _, ok := currKVS.Lookup(delKey) if !ok { return Error[ErrConfigNotFound]("key %s doesn't exist", delKey) @@ -917,6 +932,9 @@ func (c Config) SetKVS(s string, defaultKVS map[string]KVS) (dynamic bool, err e if err != nil { return false, err } + if len(inputs) < 2 { + return false, Errorf("sub-system '%s' must have key", subSys) + } dynamic = SubSystemsDynamic.Contains(subSys) @@ -1178,13 +1196,13 @@ func (c Config) ResolveConfigParam(subSys, target, cfgParam string, redactSecret // Initially only support OpenID if !resolvableSubsystems.Contains(subSys) { - return + return value, cs, isRedacted } // Check if config param requested is valid. defKVS, ok := DefaultKVS[subSys] if !ok { - return + return value, cs, isRedacted } defValue, isFound := defKVS.Lookup(cfgParam) @@ -1193,7 +1211,7 @@ func (c Config) ResolveConfigParam(subSys, target, cfgParam string, redactSecret defValue, isFound = "", true } if !isFound { - return + return value, cs, isRedacted } if target == "" { @@ -1218,7 +1236,7 @@ func (c Config) ResolveConfigParam(subSys, target, cfgParam string, redactSecret value = env.Get(envVar, "") if value != "" { cs = ValueSourceEnv - return + return value, cs, isRedacted } // Lookup config store. @@ -1228,7 +1246,7 @@ func (c Config) ResolveConfigParam(subSys, target, cfgParam string, redactSecret value, ok3 = kvs.Lookup(cfgParam) if ok3 { cs = ValueSourceCfg - return + return value, cs, isRedacted } } } @@ -1236,7 +1254,7 @@ func (c Config) ResolveConfigParam(subSys, target, cfgParam string, redactSecret // Return the default value. 
value = defValue cs = ValueSourceDef - return + return value, cs, isRedacted } // KVSrc represents a configuration parameter key and value along with the @@ -1380,13 +1398,7 @@ func (c Config) GetSubsysInfo(subSys, target string, redactSecrets bool) ([]Subs } if target != "" { - found := false - for _, t := range targets { - if t == target { - found = true - break - } - } + found := slices.Contains(targets, target) if !found { return nil, Errorf("there is no target `%s` for subsystem `%s`", target, subSys) } diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 9a0a3f65ee6cb..e55d446b37415 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -88,7 +88,6 @@ func TestKVFields(t *testing.T) { }, } for _, test := range tests { - test := test t.Run("", func(t *testing.T) { gotFields := kvFields(test.input, test.keys) if len(gotFields) != len(test.expectedFields) { diff --git a/internal/config/constants.go b/internal/config/constants.go index 0dc0f83434d88..bd00ae347a4dc 100644 --- a/internal/config/constants.go +++ b/internal/config/constants.go @@ -75,6 +75,7 @@ const ( EnvMinIOPrometheusJobID = "MINIO_PROMETHEUS_JOB_ID" EnvMinIOPrometheusExtraLabels = "MINIO_PROMETHEUS_EXTRA_LABELS" EnvMinIOPrometheusAuthToken = "MINIO_PROMETHEUS_AUTH_TOKEN" + EnvConsoleDebugLogLevel = "MINIO_CONSOLE_DEBUG_LOGLEVEL" EnvUpdate = "MINIO_UPDATE" diff --git a/internal/config/crypto.go b/internal/config/crypto.go index a3b80dc30ec0c..757b1db4e249a 100644 --- a/internal/config/crypto.go +++ b/internal/config/crypto.go @@ -27,7 +27,6 @@ import ( "io" jsoniter "github.com/json-iterator/go" - "github.com/minio/minio/internal/fips" "github.com/minio/minio/internal/kms" "github.com/secure-io/sio-go" "github.com/secure-io/sio-go/sioutil" @@ -38,7 +37,7 @@ import ( // // The same context must be provided when decrypting the // ciphertext. -func EncryptBytes(k kms.KMS, plaintext []byte, context kms.Context) ([]byte, error) { +func EncryptBytes(k *kms.KMS, plaintext []byte, context kms.Context) ([]byte, error) { ciphertext, err := Encrypt(k, bytes.NewReader(plaintext), context) if err != nil { return nil, err @@ -49,7 +48,7 @@ func EncryptBytes(k kms.KMS, plaintext []byte, context kms.Context) ([]byte, err // DecryptBytes decrypts the ciphertext using a key managed by the KMS. // The same context that have been used during encryption must be // provided. -func DecryptBytes(k kms.KMS, ciphertext []byte, context kms.Context) ([]byte, error) { +func DecryptBytes(k *kms.KMS, ciphertext []byte, context kms.Context) ([]byte, error) { plaintext, err := Decrypt(k, bytes.NewReader(ciphertext), context) if err != nil { return nil, err @@ -62,13 +61,13 @@ func DecryptBytes(k kms.KMS, ciphertext []byte, context kms.Context) ([]byte, er // // The same context must be provided when decrypting the // ciphertext. -func Encrypt(k kms.KMS, plaintext io.Reader, ctx kms.Context) (io.Reader, error) { +func Encrypt(k *kms.KMS, plaintext io.Reader, ctx kms.Context) (io.Reader, error) { algorithm := sio.AES_256_GCM - if !fips.Enabled && !sioutil.NativeAES() { + if !sioutil.NativeAES() { algorithm = sio.ChaCha20Poly1305 } - key, err := k.GenerateKey(context.Background(), "", ctx) + key, err := k.GenerateKey(context.Background(), &kms.GenerateKeyRequest{AssociatedData: ctx}) if err != nil { return nil, err } @@ -116,7 +115,7 @@ func Encrypt(k kms.KMS, plaintext io.Reader, ctx kms.Context) (io.Reader, error) // Decrypt decrypts the ciphertext using a key managed by the KMS. 
// The same context that have been used during encryption must be // provided. -func Decrypt(k kms.KMS, ciphertext io.Reader, context kms.Context) (io.Reader, error) { +func Decrypt(k *kms.KMS, ciphertext io.Reader, associatedData kms.Context) (io.Reader, error) { const ( MaxMetadataSize = 1 << 20 // max. size of the metadata Version = 1 @@ -145,11 +144,12 @@ func Decrypt(k kms.KMS, ciphertext io.Reader, context kms.Context) (io.Reader, e if err := json.Unmarshal(metadataBuffer, &metadata); err != nil { return nil, err } - if fips.Enabled && metadata.Algorithm != sio.AES_256_GCM { - return nil, fmt.Errorf("config: unsupported encryption algorithm: %q is not supported in FIPS mode", metadata.Algorithm) - } - key, err := k.DecryptKey(metadata.KeyID, metadata.KMSKey, context) + key, err := k.Decrypt(context.TODO(), &kms.DecryptRequest{ + Name: metadata.KeyID, + Ciphertext: metadata.KMSKey, + AssociatedData: associatedData, + }) if err != nil { return nil, err } diff --git a/internal/config/crypto_test.go b/internal/config/crypto_test.go index 1573e587f1aec..224d230bdc7d5 100644 --- a/internal/config/crypto_test.go +++ b/internal/config/crypto_test.go @@ -53,7 +53,7 @@ func TestEncryptDecrypt(t *testing.T) { if err != nil { t.Fatalf("Failed to decode master key: %v", err) } - KMS, err := kms.New("my-key", key) + KMS, err := kms.NewBuiltin("my-key", key) if err != nil { t.Fatalf("Failed to create KMS: %v", err) } @@ -88,7 +88,7 @@ func BenchmarkEncrypt(b *testing.B) { if err != nil { b.Fatalf("Failed to decode master key: %v", err) } - KMS, err := kms.New("my-key", key) + KMS, err := kms.NewBuiltin("my-key", key) if err != nil { b.Fatalf("Failed to create KMS: %v", err) } @@ -100,7 +100,7 @@ func BenchmarkEncrypt(b *testing.B) { context = kms.Context{"key": "value"} ) b.SetBytes(int64(size)) - for i := 0; i < b.N; i++ { + for b.Loop() { ciphertext, err := Encrypt(KMS, plaintext, context) if err != nil { b.Fatal(err) diff --git a/internal/config/dns/dns_path.go b/internal/config/dns/dns_path.go new file mode 100644 index 0000000000000..9e9a72087e012 --- /dev/null +++ b/internal/config/dns/dns_path.go @@ -0,0 +1,57 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package dns + +import ( + "path" + "strings" + + "github.com/miekg/dns" +) + +// msgPath converts a domainname to an etcd path. If s looks like service.staging.skydns.local., +// the resulting key will be /skydns/local/skydns/staging/service . +func msgPath(s, prefix string) string { + l := dns.SplitDomainName(s) + for i, j := 0, len(l)-1; i < j; i, j = i+1, j-1 { + l[i], l[j] = l[j], l[i] + } + return path.Join(append([]string{etcdPathSeparator + prefix + etcdPathSeparator}, l...)...) +} + +// dnsJoin joins labels to form a fully qualified domain name. If the last label is +// the root label it is ignored. Not other syntax checks are performed. 
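Editor's note: msgPath above builds an etcd key by reversing the domain labels under a prefix. A simplified, dependency-free sketch of that mapping follows; it splits on "." directly and, unlike dns.SplitDomainName, does not handle escaped dots inside labels, so treat it only as an illustration of the transformation.

package main

import (
	"fmt"
	"path"
	"strings"
)

// reverseToEtcdKey reverses the labels of a trailing-dot domain name and
// joins them under the given prefix, e.g. "service.staging.skydns.local."
// with prefix "skydns" becomes "/skydns/local/skydns/staging/service".
func reverseToEtcdKey(domain, prefix string) string {
	labels := strings.Split(strings.TrimSuffix(domain, "."), ".")
	for i, j := 0, len(labels)-1; i < j; i, j = i+1, j-1 {
		labels[i], labels[j] = labels[j], labels[i]
	}
	return path.Join(append([]string{"/" + prefix + "/"}, labels...)...)
}

func main() {
	fmt.Println(reverseToEtcdKey("service.staging.skydns.local.", "skydns"))
	// Prints: /skydns/local/skydns/staging/service
}

msgUnPath performs the inverse transformation, which is what the new TestUnPath below exercises when it expects "service.staging.cluster.local.skydns" back from "/skydns/local/cluster/staging/service/".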
+func dnsJoin(labels ...string) string { + if len(labels) == 0 { + return "" + } + ll := len(labels) + if labels[ll-1] == "." { + return strings.Join(labels[:ll-1], ".") + "." + } + return dns.Fqdn(strings.Join(labels, ".")) +} + +// msgUnPath converts a etcd path to domainName. +func msgUnPath(s string) string { + ks := strings.Split(strings.Trim(s, etcdPathSeparator), etcdPathSeparator) + for i, j := 0, len(ks)-1; i < j; i, j = i+1, j-1 { + ks[i], ks[j] = ks[j], ks[i] + } + return strings.Join(ks, ".") +} diff --git a/internal/config/dns/etcd_dns.go b/internal/config/dns/etcd_dns.go index 4c6b55cf00579..120eab5ef0c1b 100644 --- a/internal/config/dns/etcd_dns.go +++ b/internal/config/dns/etcd_dns.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2021 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. // // This file is part of MinIO Object Storage stack // @@ -27,7 +27,6 @@ import ( "strings" "time" - "github.com/coredns/coredns/plugin/etcd/msg" "github.com/minio/minio-go/v7/pkg/set" clientv3 "go.etcd.io/etcd/client/v3" ) @@ -60,7 +59,7 @@ func (c *CoreDNS) Close() error { func (c *CoreDNS) List() (map[string][]SrvRecord, error) { srvRecords := map[string][]SrvRecord{} for _, domainName := range c.domainNames { - key := msg.Path(fmt.Sprintf("%s.", domainName), c.prefixPath) + key := msgPath(fmt.Sprintf("%s.", domainName), c.prefixPath) records, err := c.list(key+etcdPathSeparator, true) if err != nil { return srvRecords, err @@ -79,7 +78,7 @@ func (c *CoreDNS) List() (map[string][]SrvRecord, error) { func (c *CoreDNS) Get(bucket string) ([]SrvRecord, error) { var srvRecords []SrvRecord for _, domainName := range c.domainNames { - key := msg.Path(fmt.Sprintf("%s.%s.", bucket, domainName), c.prefixPath) + key := msgPath(fmt.Sprintf("%s.%s.", bucket, domainName), c.prefixPath) records, err := c.list(key, false) if err != nil { return nil, err @@ -102,15 +101,6 @@ func (c *CoreDNS) Get(bucket string) ([]SrvRecord, error) { return srvRecords, nil } -// msgUnPath converts a etcd path to domainname. -func msgUnPath(s string) string { - ks := strings.Split(strings.Trim(s, etcdPathSeparator), etcdPathSeparator) - for i, j := 0, len(ks)-1; i < j; i, j = i+1, j-1 { - ks[i], ks[j] = ks[j], ks[i] - } - return strings.Join(ks, ".") -} - // Retrieves list of entries under the key passed. // Note that this method fetches entries upto only two levels deep. func (c *CoreDNS) list(key string, domain bool) ([]SrvRecord, error) { @@ -153,7 +143,6 @@ func (c *CoreDNS) list(key string, domain bool) ([]SrvRecord, error) { srvRecord.Key = msgUnPath(srvRecord.Key) srvRecords = append(srvRecords, srvRecord) - } sort.Slice(srvRecords, func(i int, j int) bool { return srvRecords[i].Key < srvRecords[j].Key @@ -172,7 +161,7 @@ func (c *CoreDNS) Put(bucket string) error { return err } for _, domainName := range c.domainNames { - key := msg.Path(fmt.Sprintf("%s.%s", bucket, domainName), c.prefixPath) + key := msgPath(fmt.Sprintf("%s.%s", bucket, domainName), c.prefixPath) key = key + etcdPathSeparator + ip ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout) _, err = c.etcdClient.Put(ctx, key, string(bucketMsg)) @@ -191,7 +180,7 @@ func (c *CoreDNS) Put(bucket string) error { // Delete - Removes DNS entries added in Put(). 
func (c *CoreDNS) Delete(bucket string) error { for _, domainName := range c.domainNames { - key := msg.Path(fmt.Sprintf("%s.%s.", bucket, domainName), c.prefixPath) + key := msgPath(fmt.Sprintf("%s.%s.", bucket, domainName), c.prefixPath) ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout) _, err := c.etcdClient.Delete(ctx, key+etcdPathSeparator, clientv3.WithPrefix()) cancel() @@ -205,7 +194,7 @@ func (c *CoreDNS) Delete(bucket string) error { // DeleteRecord - Removes a specific DNS entry func (c *CoreDNS) DeleteRecord(record SrvRecord) error { for _, domainName := range c.domainNames { - key := msg.Path(fmt.Sprintf("%s.%s.", record.Key, domainName), c.prefixPath) + key := msgPath(fmt.Sprintf("%s.%s.", record.Key, domainName), c.prefixPath) ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout) _, err := c.etcdClient.Delete(ctx, key+etcdPathSeparator+record.Host) diff --git a/internal/config/dns/etcd_dns_test.go b/internal/config/dns/etcd_dns_test.go new file mode 100644 index 0000000000000..4be271bf7a2e5 --- /dev/null +++ b/internal/config/dns/etcd_dns_test.go @@ -0,0 +1,74 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see <http://www.gnu.org/licenses/>. + +package dns + +import "testing" + +func TestDNSJoin(t *testing.T) { + tests := []struct { + in []string + out string + }{ + {[]string{"bla", "bliep", "example", "org"}, "bla.bliep.example.org."}, + {[]string{"example", "."}, "example."}, + {[]string{"example", "org."}, "example.org."}, // technically we should not be called like this. + {[]string{"."}, "."}, + } + + for i, tc := range tests { + if x := dnsJoin(tc.in...); x != tc.out { + t.Errorf("Test %d, expected %s, got %s", i, tc.out, x) + } + } +} + +func TestPath(t *testing.T) { + for _, path := range []string{"mydns", "skydns"} { + result := msgPath("service.staging.skydns.local.", path) + if result != etcdPathSeparator+path+"/local/skydns/staging/service" { + t.Errorf("Failure to get domain's path with prefix: %s", result) + } + } +} + +func TestUnPath(t *testing.T) { + result1 := msgUnPath("/skydns/local/cluster/staging/service/") + if result1 != "service.staging.cluster.local.skydns" { + t.Errorf("Failure to get domain from etcd key (with a trailing '/'), expect: 'service.staging.cluster.local.', actually get: '%s'", result1) + } + + result2 := msgUnPath("/skydns/local/cluster/staging/service") + if result2 != "service.staging.cluster.local.skydns" { + t.Errorf("Failure to get domain from etcd key (without trailing '/'), expect: 'service.staging.cluster.local.' actually get: '%s'", result2) + } + + result3 := msgUnPath("/singleleveldomain/") + if result3 != "singleleveldomain" { + t.Errorf("Failure to get domain from etcd key (with leading and trailing '/'), expect: 'singleleveldomain.'
actually get: '%s'", result3) + } + + result4 := msgUnPath("/singleleveldomain") + if result4 != "singleleveldomain" { + t.Errorf("Failure to get domain from etcd key (without trailing '/'), expect: 'singleleveldomain.' actually get: '%s'", result4) + } + + result5 := msgUnPath("singleleveldomain") + if result5 != "singleleveldomain" { + t.Errorf("Failure to get domain from etcd key (without leading and trailing '/'), expect: 'singleleveldomain.' actually get: '%s'", result5) + } +} diff --git a/internal/config/dns/operator_dns.go b/internal/config/dns/operator_dns.go index 3f720267f095e..e703103c305ae 100644 --- a/internal/config/dns/operator_dns.go +++ b/internal/config/dns/operator_dns.go @@ -63,14 +63,14 @@ func (c *OperatorDNS) addAuthHeader(r *http.Request) error { return nil } -func (c *OperatorDNS) endpoint(bucket string, delete bool) (string, error) { +func (c *OperatorDNS) endpoint(bucket string, del bool) (string, error) { u, err := url.Parse(c.Endpoint) if err != nil { return "", err } q := u.Query() q.Add("bucket", bucket) - q.Add("delete", strconv.FormatBool(delete)) + q.Add("delete", strconv.FormatBool(del)) u.RawQuery = q.Encode() return u.String(), nil } diff --git a/internal/config/drive/drive.go b/internal/config/drive/drive.go index 431086a955fab..862c62ab76b79 100644 --- a/internal/config/drive/drive.go +++ b/internal/config/drive/drive.go @@ -22,76 +22,89 @@ import ( "time" "github.com/minio/minio/internal/config" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" +) + +// Drive specific timeout environment variables +const ( + EnvMaxDriveTimeout = "MINIO_DRIVE_MAX_TIMEOUT" + EnvMaxDriveTimeoutLegacy = "_MINIO_DRIVE_MAX_TIMEOUT" + EnvMaxDiskTimeoutLegacy = "_MINIO_DISK_MAX_TIMEOUT" ) // DefaultKVS - default KVS for drive var DefaultKVS = config.KVS{ config.KV{ Key: MaxTimeout, - Value: "", + Value: "30s", }, } +var configLk sync.RWMutex + // Config represents the subnet related configuration type Config struct { // MaxTimeout - maximum timeout for a drive operation MaxTimeout time.Duration `json:"maxTimeout"` - mutex sync.RWMutex } // Update - updates the config with latest values -func (c *Config) Update(new *Config) error { - c.mutex.Lock() - defer c.mutex.Unlock() - c.MaxTimeout = getMaxTimeout(new.MaxTimeout) +func (c *Config) Update(updated Config) error { + configLk.Lock() + defer configLk.Unlock() + c.MaxTimeout = getMaxTimeout(updated.MaxTimeout) return nil } -// GetMaxTimeout - returns the max timeout value. +// GetMaxTimeout - returns the per call drive operation timeout func (c *Config) GetMaxTimeout() time.Duration { - c.mutex.RLock() - defer c.mutex.RUnlock() + return c.GetOPTimeout() +} + +// GetOPTimeout - returns the per call drive operation timeout +func (c *Config) GetOPTimeout() time.Duration { + configLk.RLock() + defer configLk.RUnlock() + return getMaxTimeout(c.MaxTimeout) } // LookupConfig - lookup config and override with valid environment settings if any. -func LookupConfig(kvs config.KVS) (cfg *Config, err error) { +func LookupConfig(kvs config.KVS) (cfg Config, err error) { + cfg = Config{ + MaxTimeout: 30 * time.Second, + } if err = config.CheckValidKeys(config.DriveSubSys, kvs, DefaultKVS); err != nil { return cfg, err } + // if not set. 
Get default value from environment - d := kvs.GetWithDefault(MaxTimeout, DefaultKVS) + d := env.Get(EnvMaxDriveTimeout, env.Get(EnvMaxDriveTimeoutLegacy, env.Get(EnvMaxDiskTimeoutLegacy, kvs.GetWithDefault(MaxTimeout, DefaultKVS)))) if d == "" { - d = env.Get("_MINIO_DRIVE_MAX_TIMEOUT", "") - if d == "" { - d = env.Get("_MINIO_DISK_MAX_TIMEOUT", "") - } - } - cfg = &Config{ - mutex: sync.RWMutex{}, - } - dur, _ := time.ParseDuration(d) - if dur < time.Second { cfg.MaxTimeout = 30 * time.Second } else { - cfg.MaxTimeout = getMaxTimeout(dur) + dur, _ := time.ParseDuration(d) + if dur < time.Second { + cfg.MaxTimeout = 30 * time.Second + } else { + cfg.MaxTimeout = getMaxTimeout(dur) + } } return cfg, err } func getMaxTimeout(t time.Duration) time.Duration { - if t < time.Second { - // get default value - d := env.Get("_MINIO_DRIVE_MAX_TIMEOUT", "") - if d == "" { - d = env.Get("_MINIO_DISK_MAX_TIMEOUT", "") - } - dur, _ := time.ParseDuration(d) - if dur < time.Second { - return 30 * time.Second - } - return dur + if t > time.Second { + return t + } + // get default value + d := env.Get(EnvMaxDriveTimeoutLegacy, env.Get(EnvMaxDiskTimeoutLegacy, "")) + if d == "" { + return 30 * time.Second + } + dur, _ := time.ParseDuration(d) + if dur < time.Second { + return 30 * time.Second } - return t + return dur } diff --git a/internal/config/drive/help.go b/internal/config/drive/help.go index 3ed68cbab8329..5964dcce4181f 100644 --- a/internal/config/drive/help.go +++ b/internal/config/drive/help.go @@ -22,12 +22,13 @@ import "github.com/minio/minio/internal/config" var ( // MaxTimeout is the max timeout for drive MaxTimeout = "max_timeout" + // HelpDrive is help for drive HelpDrive = config.HelpKVS{ config.HelpKV{ Key: MaxTimeout, Type: "string", - Description: "set per call max_timeout for the drive, defaults to 2 minutes", + Description: "set per call max_timeout for the drive, defaults to 30 seconds", Optional: true, }, } diff --git a/internal/config/errors-utils.go b/internal/config/errors-utils.go index 3ee3e70d7fa52..3d75ab73ed8e8 100644 --- a/internal/config/errors-utils.go +++ b/internal/config/errors-utils.go @@ -58,14 +58,25 @@ func (u Err) Error() string { } // Msg - Replace the current error's message -func (u Err) Msg(m string, args ...interface{}) Err { +func (u Err) Msg(m string) Err { e := u.Clone() - e.msg = fmt.Sprintf(m, args...) + e.msg = m + return e +} + +// Msgf - Replace the current error's message +func (u Err) Msgf(m string, args ...any) Err { + e := u.Clone() + if len(args) == 0 { + e.msg = m + } else { + e.msg = fmt.Sprintf(m, args...) + } return e } // Hint - Replace the current error's message -func (u Err) Hint(m string, args ...interface{}) Err { +func (u Err) Hint(m string, args ...any) Err { e := u.Clone() e.hint = fmt.Sprintf(m, args...) 
return e diff --git a/internal/config/errors.go b/internal/config/errors.go index 24a2d00428c48..3bcb79ce9486a 100644 --- a/internal/config/errors.go +++ b/internal/config/errors.go @@ -73,6 +73,12 @@ var ( `Access key length should be at least 3, and secret key length at least 8 characters`, ) + ErrInvalidRootUserCredentials = newErrFn( + "Invalid credentials", + "Please provide correct credentials", + EnvRootUser+` length should be at least 3, and `+EnvRootPassword+` length at least 8 characters`, + ) + ErrMissingEnvCredentialRootUser = newErrFn( "Missing credential environment variable, \""+EnvRootUser+"\"", "Environment variable \""+EnvRootUser+"\" is missing", @@ -100,13 +106,13 @@ var ( ErrInvalidErasureEndpoints = newErrFn( "Invalid endpoint(s) in erasure mode", "Please provide correct combination of local/remote paths", - "For more information, please refer to https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html", + "For more information, please refer to https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html", ) ErrInvalidNumberOfErasureEndpoints = newErrFn( "Invalid total number of endpoints for erasure mode", "Please provide number of endpoints greater or equal to 2", - "For more information, please refer to https://min.io/docs/minio/linux/operations/concepts/erasure-coding.html", + "For more information, please refer to https://docs.min.io/community/minio-object-store/operations/concepts/erasure-coding.html", ) ErrStorageClassValue = newErrFn( @@ -186,7 +192,7 @@ Examples: ErrNoCertsAndHTTPSEndpoints = newErrFn( "HTTPS specified in endpoints, but no TLS certificate is found on the local machine", "Please add TLS certificate or use HTTP endpoints only", - "Refer to https://min.io/docs/minio/linux/operations/network-encryption.html for information about how to load a TLS certificate in your server", + "Refer to https://docs.min.io/community/minio-object-store/operations/network-encryption.html for information about how to load a TLS certificate in your server", ) ErrCertsAndHTTPEndpoints = newErrFn( @@ -224,11 +230,6 @@ Examples: "", "MINIO_API_TRANSITION_WORKERS: should be >= GOMAXPROCS/2", ) - ErrInvalidExpiryWorkersValue = newErrFn( - "Invalid value for expiry workers", - "", - "MINIO_API_EXPIRY_WORKERS: should be between 1 and 500", - ) ErrInvalidBatchKeyRotationWorkersWait = newErrFn( "Invalid value for batch key rotation workers wait", "Please input a non-negative duration", diff --git a/internal/config/etcd/etcd.go b/internal/config/etcd/etcd.go index 7351ee4bf7a04..87e18012bfe4c 100644 --- a/internal/config/etcd/etcd.go +++ b/internal/config/etcd/etcd.go @@ -24,8 +24,9 @@ import ( "time" "github.com/minio/minio/internal/config" - "github.com/minio/pkg/v2/env" - xnet "github.com/minio/pkg/v2/net" + "github.com/minio/minio/internal/crypto" + "github.com/minio/pkg/v3/env" + xnet "github.com/minio/pkg/v3/net" clientv3 "go.etcd.io/etcd/client/v3" "go.etcd.io/etcd/client/v3/namespace" "go.uber.org/zap" @@ -159,7 +160,13 @@ func LookupConfig(kvs config.KVS, rootCAs *x509.CertPool) (Config, error) { cfg.PathPrefix = env.Get(EnvEtcdPathPrefix, kvs.Get(PathPrefix)) if etcdSecure { cfg.TLS = &tls.Config{ - RootCAs: rootCAs, + RootCAs: rootCAs, + PreferServerCipherSuites: true, + MinVersion: tls.VersionTLS12, + NextProtos: []string{"http/1.1", "h2"}, + ClientSessionCache: tls.NewLRUClientSessionCache(64), + CipherSuites: crypto.TLSCiphersBackwardCompatible(), + CurvePreferences: crypto.TLSCurveIDs(), } // This is only to support 
client side certificate authentication // https://coreos.com/etcd/docs/latest/op-guide/security.html diff --git a/internal/config/etcd/etcd_test.go b/internal/config/etcd/etcd_test.go index d9889ca9601b7..e7aad0f79c6f6 100644 --- a/internal/config/etcd/etcd_test.go +++ b/internal/config/etcd/etcd_test.go @@ -49,7 +49,6 @@ func TestParseEndpoints(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run(testCase.s, func(t *testing.T) { endpoints, secure, err := parseEndpoints(testCase.s) if err != nil && testCase.success { diff --git a/internal/config/heal/heal.go b/internal/config/heal/heal.go index 5c57820a75afc..edca0eec3896b 100644 --- a/internal/config/heal/heal.go +++ b/internal/config/heal/heal.go @@ -26,7 +26,7 @@ import ( "time" "github.com/minio/minio/internal/config" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) // Compression environment variables @@ -63,11 +63,9 @@ type Config struct { } // BitrotScanCycle returns the configured cycle for the scanner healing -// -1 for not enabled -// -// 0 for contiunous bitrot scanning -// -// >0 interval duration between cycles +// - '-1' for not enabled +// - '0' for continuous bitrot scanning +// - '> 0' interval duration between cycles func (opts Config) BitrotScanCycle() (d time.Duration) { configMutex.RLock() defer configMutex.RUnlock() @@ -157,11 +155,14 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) { if err = config.CheckValidKeys(config.HealSubSys, kvs, DefaultKVS); err != nil { return cfg, err } - cfg.Bitrot = env.Get(EnvBitrot, kvs.GetWithDefault(Bitrot, DefaultKVS)) - _, err = parseBitrotConfig(cfg.Bitrot) - if err != nil { + + bitrot := env.Get(EnvBitrot, kvs.GetWithDefault(Bitrot, DefaultKVS)) + if _, err = parseBitrotConfig(bitrot); err != nil { return cfg, fmt.Errorf("'heal:bitrotscan' value invalid: %w", err) } + + cfg.Bitrot = bitrot + cfg.Sleep, err = time.ParseDuration(env.Get(EnvSleep, kvs.GetWithDefault(Sleep, DefaultKVS))) if err != nil { return cfg, fmt.Errorf("'heal:max_sleep' value invalid: %w", err) diff --git a/internal/config/identity/ldap/config.go b/internal/config/identity/ldap/config.go index 0ed0bb480a004..79bcf86a782bb 100644 --- a/internal/config/identity/ldap/config.go +++ b/internal/config/identity/ldap/config.go @@ -18,14 +18,17 @@ package ldap import ( + "crypto/tls" "crypto/x509" "errors" + "net" "sort" "time" "github.com/minio/madmin-go/v3" "github.com/minio/minio/internal/config" - "github.com/minio/pkg/v2/ldap" + "github.com/minio/minio/internal/crypto" + "github.com/minio/pkg/v3/ldap" ) const ( @@ -67,6 +70,7 @@ const ( LookupBindPassword = "lookup_bind_password" UserDNSearchBaseDN = "user_dn_search_base_dn" UserDNSearchFilter = "user_dn_search_filter" + UserDNAttributes = "user_dn_attributes" GroupSearchFilter = "group_search_filter" GroupSearchBaseDN = "group_search_base_dn" TLSSkipVerify = "tls_skip_verify" @@ -81,6 +85,7 @@ const ( EnvUsernameFormat = "MINIO_IDENTITY_LDAP_USERNAME_FORMAT" EnvUserDNSearchBaseDN = "MINIO_IDENTITY_LDAP_USER_DN_SEARCH_BASE_DN" EnvUserDNSearchFilter = "MINIO_IDENTITY_LDAP_USER_DN_SEARCH_FILTER" + EnvUserDNAttributes = "MINIO_IDENTITY_LDAP_USER_DN_ATTRIBUTES" EnvGroupSearchFilter = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER" EnvGroupSearchBaseDN = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_BASE_DN" EnvLookupBindDN = "MINIO_IDENTITY_LDAP_LOOKUP_BIND_DN" @@ -118,6 +123,10 @@ var ( Key: UserDNSearchFilter, Value: "", }, + config.KV{ + Key: UserDNAttributes, + Value: "", + }, config.KV{ Key: GroupSearchFilter, Value: "", 
@@ -182,16 +191,30 @@ func Lookup(s config.Config, rootCAs *x509.CertPool) (l Config, err error) { if ldapServer == "" { return l, nil } + + // Set ServerName in TLS config for proper certificate validation + host, _, err := net.SplitHostPort(ldapServer) + if err != nil { + host = ldapServer + } + l.LDAP = ldap.Config{ - Enabled: true, - RootCAs: rootCAs, ServerAddr: ldapServer, SRVRecordName: getCfgVal(SRVRecordName), + TLS: &tls.Config{ + ServerName: host, + MinVersion: tls.VersionTLS12, + NextProtos: []string{"h2", "http/1.1"}, + ClientSessionCache: tls.NewLRUClientSessionCache(100), + CipherSuites: crypto.TLSCiphersBackwardCompatible(), // Contains RSA key exchange + RootCAs: rootCAs, + }, } - // Parse explicitly enable=on/off flag. If not set, defaults to `true` - // because ServerAddr is set. + // Parse explicitly set enable=on/off flag. + isEnableFlagExplicitlySet := false if v := getCfgVal(config.Enable); v != "" { + isEnableFlagExplicitlySet = true l.LDAP.Enabled, err = config.ParseBool(v) if err != nil { return l, err @@ -214,7 +237,7 @@ func Lookup(s config.Config, rootCAs *x509.CertPool) (l Config, err error) { } } if v := getCfgVal(TLSSkipVerify); v != "" { - l.LDAP.TLSSkipVerify, err = config.ParseBool(v) + l.LDAP.TLS.InsecureSkipVerify, err = config.ParseBool(v) if err != nil { return l, err } @@ -227,14 +250,22 @@ func Lookup(s config.Config, rootCAs *x509.CertPool) (l Config, err error) { // User DN search configuration l.LDAP.UserDNSearchFilter = getCfgVal(UserDNSearchFilter) l.LDAP.UserDNSearchBaseDistName = getCfgVal(UserDNSearchBaseDN) + l.LDAP.UserDNAttributes = getCfgVal(UserDNAttributes) // Group search params configuration l.LDAP.GroupSearchFilter = getCfgVal(GroupSearchFilter) l.LDAP.GroupSearchBaseDistName = getCfgVal(GroupSearchBaseDN) + // If enable flag was not explicitly set, we treat it as implicitly set at + // this point as necessary configuration is available. + if !isEnableFlagExplicitlySet && !l.LDAP.Enabled { + l.LDAP.Enabled = true + } // Validate and test configuration. valResult := l.LDAP.Validate() if !valResult.IsOk() { + // Set to false if configuration fails to validate. + l.LDAP.Enabled = false return l, valResult } diff --git a/internal/config/identity/ldap/help.go b/internal/config/identity/ldap/help.go index 035a9d80f7871..300039baa9e33 100644 --- a/internal/config/identity/ldap/help.go +++ b/internal/config/identity/ldap/help.go @@ -66,6 +66,12 @@ var ( Optional: true, Type: "string", }, + config.HelpKV{ + Key: UserDNAttributes, + Description: `"," separated list of user DN attributes e.g. "uid,cn,mail,sshPublicKey"` + defaultHelpPostfix(UserDNAttributes), + Optional: true, + Type: "list", + }, config.HelpKV{ Key: GroupSearchFilter, Description: `search filter for groups e.g. "(&(objectclass=groupOfNames)(memberUid=%s))"` + defaultHelpPostfix(GroupSearchFilter), diff --git a/internal/config/identity/ldap/ldap.go b/internal/config/identity/ldap/ldap.go index 4c805aee703b3..1c1c704c38e01 100644 --- a/internal/config/identity/ldap/ldap.go +++ b/internal/config/identity/ldap/ldap.go @@ -27,196 +27,246 @@ import ( ldap "github.com/go-ldap/ldap/v3" "github.com/minio/minio-go/v7/pkg/set" "github.com/minio/minio/internal/auth" + xldap "github.com/minio/pkg/v3/ldap" ) -// LookupUserDN searches for the full DN and groups of a given username -func (l *Config) LookupUserDN(username string) (string, []string, error) { +// LookupUserDN searches for the full DN and groups of a given short/login +// username. 
+func (l *Config) LookupUserDN(username string) (*xldap.DNSearchResult, []string, error) { conn, err := l.LDAP.Connect() if err != nil { - return "", nil, err + return nil, nil, err } defer conn.Close() // Bind to the lookup user account if err = l.LDAP.LookupBind(conn); err != nil { - return "", nil, err + return nil, nil, err } // Lookup user DN - bindDN, err := l.LDAP.LookupUserDN(conn, username) + lookupRes, err := l.LDAP.LookupUsername(conn, username) if err != nil { errRet := fmt.Errorf("Unable to find user DN: %w", err) - return "", nil, errRet + return nil, nil, errRet } - groups, err := l.LDAP.SearchForUserGroups(conn, username, bindDN) + groups, err := l.LDAP.SearchForUserGroups(conn, username, lookupRes.ActualDN) if err != nil { - return "", nil, err + return nil, nil, err } - return bindDN, groups, nil + return lookupRes, groups, nil } -// DoesUsernameExist checks if the given username exists in the LDAP directory. +// GetValidatedDNForUsername checks if the given username exists in the LDAP directory. // The given username could be just the short "login" username or the full DN. -// When the username is found, the full DN is returned, otherwise the returned -// string is empty. If the user is not found, err = nil, otherwise, err != nil. -func (l *Config) DoesUsernameExist(username string) (string, error) { +// +// When the username/DN is found, the full DN returned by the **server** is +// returned, otherwise the returned string is empty. The value returned here is +// the value sent by the LDAP server and is used in minio as the server performs +// LDAP specific normalization (including Unicode normalization). +// +// If the user is not found, err = nil, otherwise, err != nil. +func (l *Config) GetValidatedDNForUsername(username string) (*xldap.DNSearchResult, error) { conn, err := l.LDAP.Connect() if err != nil { - return "", err + return nil, err } defer conn.Close() // Bind to the lookup user account if err = l.LDAP.LookupBind(conn); err != nil { - return "", err + return nil, err } // Check if the passed in username is a valid DN. - parsedUsernameDN, err := ldap.ParseDN(username) - if err != nil { - // Since the passed in username was not a DN, we consider it as a login - // username and attempt to check it exists in the directory. - bindDN, err := l.LDAP.LookupUserDN(conn, username) + if !l.ParsesAsDN(username) { + // We consider it as a login username and attempt to check it exists in + // the directory. + bindDN, err := l.LDAP.LookupUsername(conn, username) if err != nil { - if strings.Contains(err.Error(), "not found") { - return "", nil + if strings.Contains(err.Error(), "User DN not found for") { + return nil, nil } - return "", fmt.Errorf("Unable to find user DN: %w", err) + return nil, fmt.Errorf("Unable to find user DN: %w", err) } return bindDN, nil } - // Since the username is a valid DN, check that it is under a configured - // base DN in the LDAP directory. - var foundDistName []string - for _, baseDN := range l.LDAP.UserDNSearchBaseDistNames { - // BaseDN should not fail to parse. - baseDNParsed, _ := ldap.ParseDN(baseDN) - if baseDNParsed.AncestorOf(parsedUsernameDN) { - searchRequest := ldap.NewSearchRequest(username, ldap.ScopeBaseObject, ldap.NeverDerefAliases, - 0, 0, false, "(objectClass=*)", nil, nil) - searchResult, err := conn.Search(searchRequest) - if err != nil { - // Check if there is no matching result. 
- // Ref: https://ldap.com/ldap-result-code-reference/ - if ldap.IsErrorWithCode(err, 32) { - continue - } - return "", err - } - for _, entry := range searchResult.Entries { - foundDistName = append(foundDistName, entry.DN) - } - } + // Since the username parses as a valid DN, check that it exists and is + // under a configured base DN in the LDAP directory. + validDN, isUnderBaseDN, err := l.GetValidatedUserDN(conn, username) + if err == nil && !isUnderBaseDN { + // Not under any configured base DN, so treat as not found. + return nil, nil } + return validDN, err +} - if len(foundDistName) == 1 { - return foundDistName[0], nil - } else if len(foundDistName) > 1 { - // FIXME: This error would happen if the multiple base DNs are given and - // some base DNs are subtrees of other base DNs - we should validate - // and error out in such cases. - return "", fmt.Errorf("found multiple DNs for the given username") +// GetValidatedUserDN validates the given user DN. Will error out if conn is nil. The returned +// boolean is true iff the user DN is found under one of the LDAP user base DNs. +func (l *Config) GetValidatedUserDN(conn *ldap.Conn, userDN string) (*xldap.DNSearchResult, bool, error) { + return l.GetValidatedDNUnderBaseDN(conn, userDN, + l.LDAP.GetUserDNSearchBaseDistNames(), l.LDAP.GetUserDNAttributesList()) +} + +// GetValidatedGroupDN validates the given group DN. If conn is nil, creates a +// connection. The returned boolean is true iff the group DN is found under one +// of the configured LDAP base DNs. +func (l *Config) GetValidatedGroupDN(conn *ldap.Conn, groupDN string) (*xldap.DNSearchResult, bool, error) { + if conn == nil { + var err error + conn, err = l.LDAP.Connect() + if err != nil { + return nil, false, err + } + defer conn.Close() + + // Bind to the lookup user account + if err = l.LDAP.LookupBind(conn); err != nil { + return nil, false, err + } } - return "", nil + + return l.GetValidatedDNUnderBaseDN(conn, groupDN, + l.LDAP.GetGroupSearchBaseDistNames(), nil) } -// DoesGroupDNExist checks if the given group DN exists in the LDAP directory. -func (l *Config) DoesGroupDNExist(groupDN string) (bool, error) { - if len(l.LDAP.GroupSearchBaseDistNames) == 0 { - return false, errors.New("no group search Base DNs given") +// GetValidatedDNUnderBaseDN checks if the given DN exists in the LDAP +// directory. +// +// The `NormDN` value returned here in the search result may not be equal to the +// input DN, as LDAP equality is not a simple Golang string equality. However, +// we assume the value returned by the LDAP server is canonical. Additionally, +// the attribute type names in the DN are lower-cased. +// +// Return values: +// +// If the DN is found, the normalized (string) value and any requested +// attributes are returned and error is nil. +// +// If the DN is not found, a nil result and error are returned. +// +// The returned boolean is true iff the DN is found under one of the LDAP +// subtrees listed in `baseDNList`. +func (l *Config) GetValidatedDNUnderBaseDN(conn *ldap.Conn, dn string, baseDNList []xldap.BaseDNInfo, attrs []string) (*xldap.DNSearchResult, bool, error) { + if len(baseDNList) == 0 { + return nil, false, errors.New("no Base DNs given") } - gdn, err := ldap.ParseDN(groupDN) + // Check that DN exists in the LDAP directory. 
+ searchRes, err := xldap.LookupDN(conn, dn, attrs) if err != nil { - return false, fmt.Errorf("Given group DN could not be parsed: %s", err) + return nil, false, fmt.Errorf("Error looking up DN %s: %w", dn, err) + } + if searchRes == nil { + return nil, false, nil + } + + // This will not return an error as the argument is validated to be a DN. + pdn, _ := ldap.ParseDN(searchRes.NormDN) + + // Check that the DN is under a configured base DN in the LDAP + // directory. + for _, baseDN := range baseDNList { + if baseDN.Parsed.AncestorOf(pdn) { + return searchRes, true, nil + } } + // Not under any configured base DN so return false. + return searchRes, false, nil +} + +// GetValidatedDNWithGroups - Gets validated DN from given DN or short username +// and returns the DN and the groups the user is a member of. +// +// If username is required in group search but a DN is passed, no groups are +// returned. +func (l *Config) GetValidatedDNWithGroups(username string) (*xldap.DNSearchResult, []string, error) { conn, err := l.LDAP.Connect() if err != nil { - return false, err + return nil, nil, err } defer conn.Close() // Bind to the lookup user account if err = l.LDAP.LookupBind(conn); err != nil { - return false, err + return nil, nil, err } - var foundDistName []string - for _, baseDN := range l.LDAP.GroupSearchBaseDistNames { - // BaseDN should not fail to parse. - baseDNParsed, _ := ldap.ParseDN(baseDN) - if baseDNParsed.AncestorOf(gdn) { - searchRequest := ldap.NewSearchRequest(groupDN, ldap.ScopeBaseObject, ldap.NeverDerefAliases, 0, 0, false, "(objectClass=*)", nil, nil) - searchResult, err := conn.Search(searchRequest) - if err != nil { - // Check if there is no matching result. - // Ref: https://ldap.com/ldap-result-code-reference/ - if ldap.IsErrorWithCode(err, 32) { - continue - } - return false, err - } - for _, entry := range searchResult.Entries { - foundDistName = append(foundDistName, entry.DN) + var lookupRes *xldap.DNSearchResult + shortUsername := "" + // Check if the passed in username is a valid DN. + if !l.ParsesAsDN(username) { + // We consider it as a login username and attempt to check it exists in + // the directory. + lookupRes, err = l.LDAP.LookupUsername(conn, username) + if err != nil { + if strings.Contains(err.Error(), "User DN not found for") { + return nil, nil, nil } + return nil, nil, fmt.Errorf("Unable to find user DN: %w", err) + } + shortUsername = username + } else { + // Since the username parses as a valid DN, check that it exists and is + // under a configured base DN in the LDAP directory. + var isUnderBaseDN bool + lookupRes, isUnderBaseDN, err = l.GetValidatedUserDN(conn, username) + if err == nil && !isUnderBaseDN { + return nil, nil, fmt.Errorf("Unable to find user DN: %w", err) } } - if len(foundDistName) == 1 { - return true, nil - } else if len(foundDistName) > 1 { - // FIXME: This error would happen if the multiple base DNs are given and - // some base DNs are subtrees of other base DNs - we should validate - // and error out in such cases. - return false, fmt.Errorf("found multiple DNs for the given group DN") + + groups, err := l.LDAP.SearchForUserGroups(conn, shortUsername, lookupRes.ActualDN) + if err != nil { + return nil, nil, err } - return false, nil + return lookupRes, groups, nil } // Bind - binds to ldap, searches LDAP and returns the distinguished name of the // user and the list of groups. 
-func (l *Config) Bind(username, password string) (string, []string, error) { +func (l *Config) Bind(username, password string) (*xldap.DNSearchResult, []string, error) { conn, err := l.LDAP.Connect() if err != nil { - return "", nil, err + return nil, nil, err } defer conn.Close() - var bindDN string // Bind to the lookup user account if err = l.LDAP.LookupBind(conn); err != nil { - return "", nil, err + return nil, nil, err } // Lookup user DN - bindDN, err = l.LDAP.LookupUserDN(conn, username) + lookupResult, err := l.LDAP.LookupUsername(conn, username) if err != nil { errRet := fmt.Errorf("Unable to find user DN: %w", err) - return "", nil, errRet + return nil, nil, errRet } // Authenticate the user credentials. - err = conn.Bind(bindDN, password) + err = conn.Bind(lookupResult.ActualDN, password) if err != nil { - errRet := fmt.Errorf("LDAP auth failed for DN %s: %w", bindDN, err) - return "", nil, errRet + errRet := fmt.Errorf("LDAP auth failed for DN %s: %w", lookupResult.ActualDN, err) + return nil, nil, errRet } // Bind to the lookup user account again to perform group search. if err = l.LDAP.LookupBind(conn); err != nil { - return "", nil, err + return nil, nil, err } // User groups lookup. - groups, err := l.LDAP.SearchForUserGroups(conn, username, bindDN) + groups, err := l.LDAP.SearchForUserGroups(conn, username, lookupResult.ActualDN) if err != nil { - return "", nil, err + return nil, nil, err } - return bindDN, groups, nil + return lookupResult, groups, nil } // GetExpiryDuration - return parsed expiry duration. @@ -238,10 +288,21 @@ func (l Config) GetExpiryDuration(dsecs string) (time.Duration, error) { return dur, nil } +// ParsesAsDN determines if the given string could be a valid DN based on +// parsing alone. +func (l Config) ParsesAsDN(dn string) bool { + _, err := ldap.ParseDN(dn) + return err == nil +} + // IsLDAPUserDN determines if the given string could be a user DN from LDAP. func (l Config) IsLDAPUserDN(user string) bool { - for _, baseDN := range l.LDAP.UserDNSearchBaseDistNames { - if strings.HasSuffix(user, ","+baseDN) { + udn, err := ldap.ParseDN(user) + if err != nil { + return false + } + for _, baseDN := range l.LDAP.GetUserDNSearchBaseDistNames() { + if baseDN.Parsed.AncestorOf(udn) { return true } } @@ -249,9 +310,13 @@ func (l Config) IsLDAPUserDN(user string) bool { } // IsLDAPGroupDN determines if the given string could be a group DN from LDAP. -func (l Config) IsLDAPGroupDN(user string) bool { - for _, baseDN := range l.LDAP.GroupSearchBaseDistNames { - if strings.HasSuffix(user, ","+baseDN) { +func (l Config) IsLDAPGroupDN(group string) bool { + gdn, err := ldap.ParseDN(group) + if err != nil { + return false + } + for _, baseDN := range l.LDAP.GetGroupSearchBaseDistNames() { + if baseDN.Parsed.AncestorOf(gdn) { return true } } @@ -272,7 +337,7 @@ func (l *Config) GetNonEligibleUserDistNames(userDistNames []string) ([]string, return nil, err } - // Evaluate the filter again with generic wildcard instead of specific values + // Evaluate the filter again with generic wildcard instead of specific values filter := strings.ReplaceAll(l.LDAP.UserDNSearchFilter, "%s", "*") nonExistentUsers := []string{} @@ -289,7 +354,11 @@ func (l *Config) GetNonEligibleUserDistNames(userDistNames []string) ([]string, if err != nil { // Object does not exist error? 
if ldap.IsErrorWithCode(err, 32) { - nonExistentUsers = append(nonExistentUsers, dn) + ndn, err := ldap.ParseDN(dn) + if err != nil { + return nil, err + } + nonExistentUsers = append(nonExistentUsers, ndn.String()) continue } return nil, err @@ -297,7 +366,11 @@ func (l *Config) GetNonEligibleUserDistNames(userDistNames []string) ([]string, if len(searchResult.Entries) == 0 { // DN was not found - this means this user account is // expired. - nonExistentUsers = append(nonExistentUsers, dn) + ndn, err := ldap.ParseDN(dn) + if err != nil { + return nil, err + } + nonExistentUsers = append(nonExistentUsers, ndn.String()) } } return nonExistentUsers, nil @@ -329,3 +402,21 @@ func (l *Config) LookupGroupMemberships(userDistNames []string, userDNToUsername return res, nil } + +// QuickNormalizeDN - normalizes the given DN without checking if it is valid or +// exists in the LDAP directory. Returns input if error +func (l Config) QuickNormalizeDN(dn string) string { + if normDN, err := xldap.NormalizeDN(dn); err == nil { + return normDN + } + return dn +} + +// DecodeDN - denormalizes the given DN by unescaping any escaped characters. +// Returns input if error +func (l Config) DecodeDN(dn string) string { + if decodedDN, err := xldap.DecodeDN(dn); err == nil { + return decodedDN + } + return dn +} diff --git a/internal/config/identity/openid/ecdsa-sha3_contrib.go b/internal/config/identity/openid/ecdsa-sha3_contrib.go index 7a820b87082ba..11d7acb7962ef 100644 --- a/internal/config/identity/openid/ecdsa-sha3_contrib.go +++ b/internal/config/identity/openid/ecdsa-sha3_contrib.go @@ -11,9 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !fips -// +build !fips - package openid import ( @@ -22,7 +19,7 @@ import ( "github.com/golang-jwt/jwt/v4" // Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288 - _ "golang.org/x/crypto/sha3" // There is no SHA-3 FIPS-140 2 compliant implementation + _ "golang.org/x/crypto/sha3" ) // Specific instances for EC256 and company diff --git a/internal/config/identity/openid/jwks.go b/internal/config/identity/openid/jwks.go index 025850ddc84b9..e1c00532dc58c 100644 --- a/internal/config/identity/openid/jwks.go +++ b/internal/config/identity/openid/jwks.go @@ -20,6 +20,7 @@ package openid import ( "crypto" "crypto/ecdsa" + "crypto/ed25519" "crypto/elliptic" "crypto/rsa" "encoding/base64" @@ -117,6 +118,13 @@ func (key *JWKS) DecodePublicKey() (crypto.PublicKey, error) { Y: &y, }, nil default: + if key.Alg == "EdDSA" && key.Crv == "Ed25519" && key.X != "" { + pb, err := base64.RawURLEncoding.DecodeString(key.X) + if err != nil { + return nil, errMalformedJWKECKey + } + return ed25519.PublicKey(pb), nil + } return nil, fmt.Errorf("Unknown JWK key type %s", key.Kty) } } diff --git a/internal/config/identity/openid/jwt.go b/internal/config/identity/openid/jwt.go index 0eb3b5eed6d28..2a422010f842d 100644 --- a/internal/config/identity/openid/jwt.go +++ b/internal/config/identity/openid/jwt.go @@ -30,15 +30,15 @@ import ( jwtgo "github.com/golang-jwt/jwt/v4" "github.com/minio/minio/internal/arn" "github.com/minio/minio/internal/auth" - xnet "github.com/minio/pkg/v2/net" - "github.com/minio/pkg/v2/policy" + xnet "github.com/minio/pkg/v3/net" + "github.com/minio/pkg/v3/policy" ) type publicKeys struct { *sync.RWMutex // map of kid to public key - pkMap map[string]interface{} + pkMap map[string]any } func (pk *publicKeys) parseAndAdd(b io.Reader) error { @@ -59,14 +59,14 @@ 
func (pk *publicKeys) parseAndAdd(b io.Reader) error { return nil } -func (pk *publicKeys) add(keyID string, key interface{}) { +func (pk *publicKeys) add(keyID string, key any) { pk.Lock() defer pk.Unlock() pk.pkMap[keyID] = key } -func (pk *publicKeys) get(kid string) interface{} { +func (pk *publicKeys) get(kid string) any { pk.RLock() defer pk.RUnlock() return pk.pkMap[kid] @@ -103,7 +103,7 @@ var ( ErrTokenExpired = errors.New("token expired") ) -func updateClaimsExpiry(dsecs string, claims map[string]interface{}) error { +func updateClaimsExpiry(dsecs string, claims map[string]any) error { expStr := claims["exp"] if expStr == "" { return ErrTokenExpired @@ -133,7 +133,7 @@ const ( ) // Validate - validates the id_token. -func (r *Config) Validate(ctx context.Context, arn arn.ARN, token, accessToken, dsecs string, claims jwtgo.MapClaims) error { +func (r *Config) Validate(ctx context.Context, arn arn.ARN, token, accessToken, dsecs string, claims map[string]any) error { jp := new(jwtgo.Parser) jp.ValidMethods = []string{ "RS256", "RS384", "RS512", @@ -143,12 +143,16 @@ func (r *Config) Validate(ctx context.Context, arn arn.ARN, token, accessToken, "ES3256", "ES3384", "ES3512", } - keyFuncCallback := func(jwtToken *jwtgo.Token) (interface{}, error) { + keyFuncCallback := func(jwtToken *jwtgo.Token) (any, error) { kid, ok := jwtToken.Header["kid"].(string) if !ok { return nil, fmt.Errorf("Invalid kid value %v", jwtToken.Header["kid"]) } - return r.pubKeys.get(kid), nil + pubkey := r.pubKeys.get(kid) + if pubkey == nil { + return nil, fmt.Errorf("No public key found for kid %s", kid) + } + return pubkey, nil } pCfg, ok := r.arnProviderCfgsMap[arn] @@ -156,14 +160,15 @@ func (r *Config) Validate(ctx context.Context, arn arn.ARN, token, accessToken, return fmt.Errorf("Role %s does not exist", arn) } - jwtToken, err := jp.ParseWithClaims(token, &claims, keyFuncCallback) + mclaims := jwtgo.MapClaims(claims) + jwtToken, err := jp.ParseWithClaims(token, &mclaims, keyFuncCallback) if err != nil { // Re-populate the public key in-case the JWKS // pubkeys are refreshed if err = r.PopulatePublicKey(arn); err != nil { return err } - jwtToken, err = jwtgo.ParseWithClaims(token, &claims, keyFuncCallback) + jwtToken, err = jwtgo.ParseWithClaims(token, &mclaims, keyFuncCallback) if err != nil { return err } @@ -173,11 +178,11 @@ func (r *Config) Validate(ctx context.Context, arn arn.ARN, token, accessToken, return ErrTokenExpired } - if err = updateClaimsExpiry(dsecs, claims); err != nil { + if err = updateClaimsExpiry(dsecs, mclaims); err != nil { return err } - if err = r.updateUserinfoClaims(ctx, arn, accessToken, claims); err != nil { + if err = r.updateUserinfoClaims(ctx, arn, accessToken, mclaims); err != nil { return err } @@ -190,7 +195,7 @@ func (r *Config) Validate(ctx context.Context, arn arn.ARN, token, accessToken, // array of case sensitive strings. In the common special case // when there is one audience, the aud value MAY be a single // case sensitive - audValues, ok := policy.GetValuesFromClaims(claims, audClaim) + audValues, ok := policy.GetValuesFromClaims(mclaims, audClaim) if !ok { return errors.New("STS JWT Token has `aud` claim invalid, `aud` must match configured OpenID Client ID") } @@ -204,7 +209,7 @@ func (r *Config) Validate(ctx context.Context, arn arn.ARN, token, accessToken, // be included even when the authorized party is the same // as the sole audience. 
The azp value is a case sensitive // string containing a StringOrURI value - azpValues, ok := policy.GetValuesFromClaims(claims, azpClaim) + azpValues, ok := policy.GetValuesFromClaims(mclaims, azpClaim) if !ok { return errors.New("STS JWT Token has `azp` claim invalid, `azp` must match configured OpenID Client ID") } @@ -216,7 +221,7 @@ func (r *Config) Validate(ctx context.Context, arn arn.ARN, token, accessToken, return nil } -func (r *Config) updateUserinfoClaims(ctx context.Context, arn arn.ARN, accessToken string, claims map[string]interface{}) error { +func (r *Config) updateUserinfoClaims(ctx context.Context, arn arn.ARN, accessToken string, claims map[string]any) error { pCfg, ok := r.arnProviderCfgsMap[arn] // If claim user info is enabled, get claims from userInfo // and overwrite them with the claims from JWT. diff --git a/internal/config/identity/openid/jwt_test.go b/internal/config/identity/openid/jwt_test.go index f6e258ca36846..cb54faff95741 100644 --- a/internal/config/identity/openid/jwt_test.go +++ b/internal/config/identity/openid/jwt_test.go @@ -19,7 +19,6 @@ package openid import ( "bytes" - "context" "encoding/base64" "encoding/json" "fmt" @@ -35,12 +34,12 @@ import ( "github.com/minio/minio/internal/arn" "github.com/minio/minio/internal/config" jwtm "github.com/minio/minio/internal/jwt" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" ) func TestUpdateClaimsExpiry(t *testing.T) { testCases := []struct { - exp interface{} + exp any dsecs string expectedFailure bool }{ @@ -59,9 +58,8 @@ func TestUpdateClaimsExpiry(t *testing.T) { } for _, testCase := range testCases { - testCase := testCase t.Run("", func(t *testing.T) { - claims := map[string]interface{}{} + claims := map[string]any{} claims["exp"] = testCase.exp err := updateClaimsExpiry(testCase.dsecs, claims) if err != nil && !testCase.expectedFailure { @@ -100,7 +98,7 @@ func TestJWTHMACType(t *testing.T) { ExpiresAt: 253428928061, Audience: "76b95ae5-33ef-4283-97b7-d2a85dc2d8f4", }, - Header: map[string]interface{}{ + Header: map[string]any{ "typ": "JWT", "alg": jwtgo.SigningMethodHS256.Alg(), "kid": "76b95ae5-33ef-4283-97b7-d2a85dc2d8f4", @@ -120,7 +118,7 @@ func TestJWTHMACType(t *testing.T) { pubKeys := publicKeys{ RWMutex: &sync.RWMutex{}, - pkMap: map[string]interface{}{}, + pkMap: map[string]any{}, } pubKeys.add("76b95ae5-33ef-4283-97b7-d2a85dc2d8f4", []byte("WNGvKVyyNmXq0TraSvjaDN9CtpFgx35IXtGEffMCPR0")) @@ -148,7 +146,7 @@ func TestJWTHMACType(t *testing.T) { } var claims jwtgo.MapClaims - if err = cfg.Validate(context.Background(), DummyRoleARN, token, "", "", claims); err != nil { + if err = cfg.Validate(t.Context(), DummyRoleARN, token, "", "", claims); err != nil { t.Fatal(err) } } @@ -166,7 +164,7 @@ func TestJWT(t *testing.T) { pubKeys := publicKeys{ RWMutex: &sync.RWMutex{}, - pkMap: map[string]interface{}{}, + pkMap: map[string]any{}, } err := pubKeys.parseAndAdd(bytes.NewBuffer([]byte(jsonkey))) if err != nil { @@ -200,7 +198,7 @@ func TestJWT(t *testing.T) { } var claims jwtgo.MapClaims - if err = cfg.Validate(context.Background(), DummyRoleARN, u.Query().Get("Token"), "", "", claims); err == nil { + if err = cfg.Validate(t.Context(), DummyRoleARN, u.Query().Get("Token"), "", "", claims); err == nil { t.Fatal(err) } } diff --git a/internal/config/identity/openid/openid.go b/internal/config/identity/openid/openid.go index 523891fd8fb52..003ede923a43b 100644 --- a/internal/config/identity/openid/openid.go +++ b/internal/config/identity/openid/openid.go @@ -22,7 +22,9 @@ 
import ( "encoding/base64" "errors" "io" + "maps" "net/http" + "slices" "sort" "strconv" "strings" @@ -36,20 +38,22 @@ import ( "github.com/minio/minio/internal/config" "github.com/minio/minio/internal/config/identity/openid/provider" "github.com/minio/minio/internal/hash/sha256" - "github.com/minio/pkg/v2/env" - xnet "github.com/minio/pkg/v2/net" - "github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/env" + xnet "github.com/minio/pkg/v3/net" + "github.com/minio/pkg/v3/policy" ) // OpenID keys and envs. const ( - ClientID = "client_id" - ClientSecret = "client_secret" - ConfigURL = "config_url" - ClaimName = "claim_name" - ClaimUserinfo = "claim_userinfo" - RolePolicy = "role_policy" - DisplayName = "display_name" + ClientID = "client_id" + ClientSecret = "client_secret" + ConfigURL = "config_url" + ClaimName = "claim_name" + ClaimUserinfo = "claim_userinfo" + RolePolicy = "role_policy" + DisplayName = "display_name" + UserReadableClaim = "user_readable_claim" + UserIDClaim = "user_id_claim" Scopes = "scopes" RedirectURI = "redirect_uri" @@ -101,12 +105,14 @@ var ( Value: "", }, config.KV{ - Key: ClaimPrefix, - Value: "", + Key: ClaimPrefix, + Value: "", + HiddenIfEmpty: true, }, config.KV{ - Key: RedirectURI, - Value: "", + Key: RedirectURI, + Value: "", + HiddenIfEmpty: true, }, config.KV{ Key: RedirectURIDynamic, @@ -128,6 +134,14 @@ var ( Key: KeyCloakAdminURL, Value: "", }, + config.KV{ + Key: UserReadableClaim, + Value: "", + }, + config.KV{ + Key: UserIDClaim, + Value: "", + }, } ) @@ -174,15 +188,9 @@ func (r *Config) Clone() Config { transport: r.transport, closeRespFn: r.closeRespFn, } - for k, v := range r.arnProviderCfgsMap { - cfg.arnProviderCfgsMap[k] = v - } - for k, v := range r.ProviderCfgs { - cfg.ProviderCfgs[k] = v - } - for k, v := range r.roleArnPolicyMap { - cfg.roleArnPolicyMap[k] = v - } + maps.Copy(cfg.arnProviderCfgsMap, r.arnProviderCfgsMap) + maps.Copy(cfg.ProviderCfgs, r.ProviderCfgs) + maps.Copy(cfg.roleArnPolicyMap, r.roleArnPolicyMap) return cfg } @@ -198,7 +206,7 @@ func LookupConfig(s config.Config, transport http.RoundTripper, closeRespFn func ProviderCfgs: map[string]*providerCfg{}, pubKeys: publicKeys{ RWMutex: &sync.RWMutex{}, - pkMap: map[string]interface{}{}, + pkMap: map[string]any{}, }, roleArnPolicyMap: map[arn.ARN]string{}, transport: openIDClientTransport, @@ -296,7 +304,7 @@ func LookupConfig(s config.Config, transport http.RoundTripper, closeRespFn func if scopeList := getCfgVal(Scopes); scopeList != "" { var scopes []string - for _, scope := range strings.Split(scopeList, ",") { + for scope := range strings.SplitSeq(scopeList, ",") { scope = strings.TrimSpace(scope) if scope == "" { return c, config.Errorf("empty scope value is not allowed '%s', please refer to our documentation", scopeList) @@ -402,13 +410,7 @@ func (r *Config) GetConfigInfo(s config.Config, cfgName string) ([]madmin.IDPCfg return nil, err } - present := false - for _, cfg := range openIDConfigs { - if cfg == cfgName { - present = true - break - } - } + present := slices.Contains(openIDConfigs, cfgName) if !present { return nil, ErrProviderConfigNotFound @@ -509,13 +511,11 @@ func (r *Config) GetSettings() madmin.OpenIDSettings { return res } h := sha256.New() + hashedSecret := "" for arn, provCfg := range r.arnProviderCfgsMap { - hashedSecret := "" - { - h.Reset() - h.Write([]byte(provCfg.ClientSecret)) - hashedSecret = base64.RawURLEncoding.EncodeToString(h.Sum(nil)) - } + h.Write([]byte(provCfg.ClientSecret)) + hashedSecret = 
base64.RawURLEncoding.EncodeToString(h.Sum(nil)) + h.Reset() if arn != DummyRoleARN { if res.Roles == nil { res.Roles = make(map[string]madmin.OpenIDProviderSettings) @@ -534,7 +534,6 @@ func (r *Config) GetSettings() madmin.OpenIDSettings { HashedClientSecret: hashedSecret, } } - } return res @@ -629,3 +628,25 @@ func GetDefaultExpiration(dsecs string) (time.Duration, error) { return defaultExpiryDuration, nil } + +// GetUserReadableClaim returns the human readable claim name for the given +// configuration name. +func (r Config) GetUserReadableClaim(cfgName string) string { + pCfg, ok := r.ProviderCfgs[cfgName] + if ok { + return pCfg.UserReadableClaim + } + return "" +} + +// GetUserIDClaim returns the user ID claim for the given configuration name, or "sub" if not set. +func (r Config) GetUserIDClaim(cfgName string) string { + pCfg, ok := r.ProviderCfgs[cfgName] + if ok { + if pCfg.UserIDClaim != "" { + return pCfg.UserIDClaim + } + return "sub" + } + return "" // an incorrect config should be handled outside this function +} diff --git a/internal/config/identity/openid/provider/keycloak.go b/internal/config/identity/openid/provider/keycloak.go index 11f54ef5247bf..3e9648d6c7beb 100644 --- a/internal/config/identity/openid/provider/keycloak.go +++ b/internal/config/identity/openid/provider/keycloak.go @@ -117,7 +117,7 @@ func (k *KeycloakProvider) LookupUser(userid string) (User, error) { case http.StatusUnauthorized: return User{}, ErrAccessTokenExpired } - return User{}, fmt.Errorf("Unable to lookup %s - keycloak user lookup returned %v", userid, resp.Status) + return User{}, fmt.Errorf("Unable to lookup - keycloak user lookup returned %v", resp.Status) } // Option is a function type that accepts a pointer Target diff --git a/internal/config/identity/openid/providercfg.go b/internal/config/identity/openid/providercfg.go index 8dc2b509c4a80..1ccc230cc73b0 100644 --- a/internal/config/identity/openid/providercfg.go +++ b/internal/config/identity/openid/providercfg.go @@ -28,7 +28,7 @@ import ( "github.com/minio/minio/internal/config" "github.com/minio/minio/internal/config/identity/openid/provider" xhttp "github.com/minio/minio/internal/http" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" ) type providerCfg struct { @@ -48,6 +48,8 @@ type providerCfg struct { ClientID string ClientSecret string RolePolicy string + UserReadableClaim string + UserIDClaim string roleArn arn.ARN provider provider.Provider @@ -64,6 +66,8 @@ func newProviderCfgFromConfig(getCfgVal func(cfgName string) string) providerCfg ClientID: getCfgVal(ClientID), ClientSecret: getCfgVal(ClientSecret), RolePolicy: getCfgVal(RolePolicy), + UserReadableClaim: getCfgVal(UserReadableClaim), + UserIDClaim: getCfgVal(UserIDClaim), } } @@ -109,7 +113,7 @@ func (p *providerCfg) GetRoleArn() string { // claims as part of the normal oauth2 flow, instead rely // on service providers making calls to IDP to fetch additional // claims available from the UserInfo endpoint -func (p *providerCfg) UserInfo(ctx context.Context, accessToken string, transport http.RoundTripper) (map[string]interface{}, error) { +func (p *providerCfg) UserInfo(ctx context.Context, accessToken string, transport http.RoundTripper) (map[string]any, error) { if p.JWKS.URL == nil || p.JWKS.URL.String() == "" { return nil, errors.New("openid not configured") } @@ -143,7 +147,7 @@ func (p *providerCfg) UserInfo(ctx context.Context, accessToken string, transpor return nil, errors.New(resp.Status) } - claims := map[string]interface{}{} + 
claims := map[string]any{} if err = json.NewDecoder(resp.Body).Decode(&claims); err != nil { // uncomment this for debugging when needed. // reqBytes, _ := httputil.DumpRequest(req, false) diff --git a/internal/config/identity/openid/rsa-sha3_contrib.go b/internal/config/identity/openid/rsa-sha3_contrib.go index 2481abf9946a0..826074735b5c8 100644 --- a/internal/config/identity/openid/rsa-sha3_contrib.go +++ b/internal/config/identity/openid/rsa-sha3_contrib.go @@ -12,9 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !fips -// +build !fips - package openid import ( @@ -23,7 +20,7 @@ import ( "github.com/golang-jwt/jwt/v4" // Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288 - _ "golang.org/x/crypto/sha3" // There is no SHA-3 FIPS-140 2 compliant implementation + _ "golang.org/x/crypto/sha3" ) // Specific instances for RS256 and company diff --git a/internal/config/identity/plugin/config.go b/internal/config/identity/plugin/config.go index 93c66c9b59795..3b0ac830f39c0 100644 --- a/internal/config/identity/plugin/config.go +++ b/internal/config/identity/plugin/config.go @@ -34,10 +34,14 @@ import ( "github.com/minio/minio/internal/arn" "github.com/minio/minio/internal/config" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/env" - xnet "github.com/minio/pkg/v2/net" + "github.com/minio/pkg/v3/env" + xnet "github.com/minio/pkg/v3/net" ) +func authNLogIf(ctx context.Context, err error) { + logger.LogIf(ctx, "authN", err) +} + // Authentication Plugin config and env variables const ( URL = "url" @@ -329,9 +333,9 @@ func New(shutdownCtx context.Context, args Args) *AuthNPlugin { // AuthNSuccessResponse - represents the response from the authentication plugin // service. type AuthNSuccessResponse struct { - User string `json:"user"` - MaxValiditySeconds int `json:"maxValiditySeconds"` - Claims map[string]interface{} `json:"claims"` + User string `json:"user"` + MaxValiditySeconds int `json:"maxValiditySeconds"` + Claims map[string]any `json:"claims"` } // AuthNErrorResponse - represents an error response from the authN plugin. 
@@ -434,7 +438,7 @@ func (o *AuthNPlugin) checkConnectivity(ctx context.Context) bool { req, err := http.NewRequestWithContext(ctx, http.MethodHead, u.String(), nil) if err != nil { - logger.LogIf(ctx, err) + authNLogIf(ctx, err) return false } diff --git a/internal/config/identity/tls/config.go b/internal/config/identity/tls/config.go index a35926a98ada3..b002aab75ab7e 100644 --- a/internal/config/identity/tls/config.go +++ b/internal/config/identity/tls/config.go @@ -23,7 +23,7 @@ import ( "github.com/minio/minio/internal/auth" "github.com/minio/minio/internal/config" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) const ( diff --git a/internal/config/ilm/help.go b/internal/config/ilm/help.go index d0037d8dcac9f..9cbf30b157679 100644 --- a/internal/config/ilm/help.go +++ b/internal/config/ilm/help.go @@ -33,9 +33,9 @@ var ( return config.DefaultHelpPostfix(DefaultKVS, key) } - // HelpILM holds configuration keys and their default values for the ILM + // Help holds configuration keys and their default values for the ILM // subsystem - HelpILM = config.HelpKVS{ + Help = config.HelpKVS{ config.HelpKV{ Key: transitionWorkers, Type: "number", diff --git a/internal/config/ilm/ilm.go b/internal/config/ilm/ilm.go index b677647d5bb1d..3ecf68fae63e4 100644 --- a/internal/config/ilm/ilm.go +++ b/internal/config/ilm/ilm.go @@ -21,7 +21,7 @@ import ( "strconv" "github.com/minio/minio/internal/config" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) // DefaultKVS default configuration values for ILM subsystem @@ -44,6 +44,15 @@ type Config struct { // LookupConfig - lookup ilm config and override with valid environment settings if any. func LookupConfig(kvs config.KVS) (cfg Config, err error) { + cfg = Config{ + TransitionWorkers: 100, + ExpirationWorkers: 100, + } + + if err = config.CheckValidKeys(config.ILMSubSys, kvs, DefaultKVS); err != nil { + return cfg, err + } + tw, err := strconv.Atoi(env.Get(EnvILMTransitionWorkers, kvs.GetWithDefault(transitionWorkers, DefaultKVS))) if err != nil { return cfg, err diff --git a/internal/config/lambda/event/arn.go b/internal/config/lambda/event/arn.go index 79dc284f4f9d2..65a9644d422ef 100644 --- a/internal/config/lambda/event/arn.go +++ b/internal/config/lambda/event/arn.go @@ -29,7 +29,7 @@ type ARN struct { // String - returns string representation. func (arn ARN) String() string { - if arn.TargetID.ID == "" && arn.TargetID.Name == "" && arn.region == "" { + if arn.ID == "" && arn.Name == "" && arn.region == "" { return "" } diff --git a/internal/config/lambda/event/targetidset.go b/internal/config/lambda/event/targetidset.go index e77affff9b799..eea4810266d72 100644 --- a/internal/config/lambda/event/targetidset.go +++ b/internal/config/lambda/event/targetidset.go @@ -17,6 +17,8 @@ package event +import "maps" + // TargetIDSet - Set representation of TargetIDs. type TargetIDSet map[TargetID]struct{} @@ -28,9 +30,7 @@ func (set TargetIDSet) IsEmpty() bool { // Clone - returns copy of this set. 
func (set TargetIDSet) Clone() TargetIDSet { setCopy := NewTargetIDSet() - for k, v := range set { - setCopy[k] = v - } + maps.Copy(setCopy, set) return setCopy } diff --git a/internal/config/lambda/event/targetlist.go b/internal/config/lambda/event/targetlist.go index 1bf29541401ef..343503180e514 100644 --- a/internal/config/lambda/event/targetlist.go +++ b/internal/config/lambda/event/targetlist.go @@ -19,6 +19,7 @@ package event import ( "fmt" + "maps" "net/http" "strings" "sync" @@ -151,9 +152,7 @@ func (list *TargetList) TargetMap() map[TargetID]Target { defer list.RUnlock() ntargets := make(map[TargetID]Target, len(list.targets)) - for k, v := range list.targets { - ntargets[k] = v - } + maps.Copy(ntargets, list.targets) return ntargets } diff --git a/internal/config/lambda/parse.go b/internal/config/lambda/parse.go index 24bc2055a0d17..eac6a5def073c 100644 --- a/internal/config/lambda/parse.go +++ b/internal/config/lambda/parse.go @@ -27,10 +27,18 @@ import ( "github.com/minio/minio/internal/config/lambda/event" "github.com/minio/minio/internal/config/lambda/target" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/env" - xnet "github.com/minio/pkg/v2/net" + "github.com/minio/pkg/v3/env" + xnet "github.com/minio/pkg/v3/net" ) +const ( + logSubsys = "notify" +) + +func logOnceIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceIf(ctx, logSubsys, err, id, errKind...) +} + // ErrTargetsOffline - Indicates single/multiple target failures. var ErrTargetsOffline = errors.New("one or more targets are offline. Please use `mc admin info --json` to check the offline targets") @@ -76,7 +84,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t if !args.Enable { continue } - t, err := target.NewWebhookTarget(ctx, id, args, logger.LogOnceIf, transport) + t, err := target.NewWebhookTarget(ctx, id, args, logOnceIf, transport) if err != nil { return nil, err } diff --git a/internal/config/lambda/target/webhook.go b/internal/config/lambda/target/webhook.go index f69ea7e03872d..20149f026d0a4 100644 --- a/internal/config/lambda/target/webhook.go +++ b/internal/config/lambda/target/webhook.go @@ -32,8 +32,8 @@ import ( "github.com/minio/minio/internal/config/lambda/event" xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/certs" - xnet "github.com/minio/pkg/v2/net" + "github.com/minio/pkg/v3/certs" + xnet "github.com/minio/pkg/v3/net" ) // Webhook constants @@ -138,7 +138,7 @@ func (target *WebhookTarget) isActive() (bool, error) { return true, nil } -// Stat - returns lamdba webhook target statistics such as +// Stat - returns lambda webhook target statistics such as // current calls in progress, successfully completed functions // failed functions. 
func (target *WebhookTarget) Stat() event.TargetStat { diff --git a/internal/config/notify/help.go b/internal/config/notify/help.go index c38674ef5c8d7..343f46c7bd649 100644 --- a/internal/config/notify/help.go +++ b/internal/config/notify/help.go @@ -274,6 +274,18 @@ var ( Optional: true, Type: "number", }, + config.HelpKV{ + Key: target.KafkaBatchSize, + Description: "batch size of the events; used only when queue_dir is set", + Optional: true, + Type: "number", + }, + config.HelpKV{ + Key: target.KafkaBatchCommitTimeout, + Description: "commit timeout set for the batch; used only when batch_size > 1", + Optional: true, + Type: "duration", + }, } HelpMQTT = config.HelpKVS{ @@ -674,6 +686,12 @@ var ( Sensitive: true, Secret: true, }, + config.HelpKV{ + Key: target.RedisUser, + Description: "Redis server user for the auth", + Optional: true, + Type: "string", + }, config.HelpKV{ Key: target.RedisQueueDir, Description: queueDirComment, diff --git a/internal/config/notify/legacy.go b/internal/config/notify/legacy.go index b742a36561519..c72aff1264f77 100644 --- a/internal/config/notify/legacy.go +++ b/internal/config/notify/legacy.go @@ -250,6 +250,10 @@ func SetNotifyRedis(s config.Config, redisName string, cfg target.RedisArgs) err Key: target.RedisPassword, Value: cfg.Password, }, + config.KV{ + Key: target.RedisUser, + Value: cfg.User, + }, config.KV{ Key: target.RedisKey, Value: cfg.Key, @@ -458,6 +462,10 @@ func SetNotifyNATS(s config.Config, natsName string, cfg target.NATSArgs) error Key: target.NATSToken, Value: cfg.Token, }, + config.KV{ + Key: target.NATSNKeySeed, + Value: cfg.NKeySeed, + }, config.KV{ Key: target.NATSCertAuthority, Value: cfg.CertAuthority, @@ -478,6 +486,10 @@ func SetNotifyNATS(s config.Config, natsName string, cfg target.NATSArgs) error Key: target.NATSTLSSkipVerify, Value: config.FormatBool(cfg.Secure), }, + config.KV{ + Key: target.NATSTLSHandshakeFirst, + Value: config.FormatBool(cfg.TLSHandshakeFirst), + }, config.KV{ Key: target.NATSPingInterval, Value: strconv.FormatInt(cfg.PingInterval, 10), diff --git a/internal/config/notify/parse.go b/internal/config/notify/parse.go index ac80e2e340e57..b479d0d4d1db3 100644 --- a/internal/config/notify/parse.go +++ b/internal/config/notify/parse.go @@ -32,14 +32,23 @@ import ( "github.com/minio/minio/internal/event" "github.com/minio/minio/internal/event/target" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/env" - xnet "github.com/minio/pkg/v2/net" + "github.com/minio/pkg/v3/env" + xnet "github.com/minio/pkg/v3/net" + "github.com/rabbitmq/amqp091-go" ) const ( formatNamespace = "namespace" ) +const ( + logSubsys = "notify" +) + +func logOnceIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceIf(ctx, logSubsys, err, id, errKind...) +} + // ErrTargetsOffline - Indicates single/multiple target failures. var ErrTargetsOffline = errors.New("one or more targets are offline. 
Please use `mc admin info --json` to check the offline targets") @@ -97,7 +106,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t if !args.Enable { continue } - t, err := target.NewAMQPTarget(id, args, logger.LogOnceIf) + t, err := target.NewAMQPTarget(id, args, logOnceIf) if err != nil { return nil, err } @@ -112,12 +121,11 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t if !args.Enable { continue } - t, err := target.NewElasticsearchTarget(id, args, logger.LogOnceIf) + t, err := target.NewElasticsearchTarget(id, args, logOnceIf) if err != nil { return nil, err } targets = append(targets, t) - } case config.NotifyKafkaSubSys: kafkaTargets, err := GetNotifyKafka(cfg[config.NotifyKafkaSubSys]) @@ -129,12 +137,11 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t continue } args.TLS.RootCAs = transport.TLSClientConfig.RootCAs - t, err := target.NewKafkaTarget(id, args, logger.LogOnceIf) + t, err := target.NewKafkaTarget(id, args, logOnceIf) if err != nil { return nil, err } targets = append(targets, t) - } case config.NotifyMQTTSubSys: @@ -147,7 +154,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t continue } args.RootCAs = transport.TLSClientConfig.RootCAs - t, err := target.NewMQTTTarget(id, args, logger.LogOnceIf) + t, err := target.NewMQTTTarget(id, args, logOnceIf) if err != nil { return nil, err } @@ -162,7 +169,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t if !args.Enable { continue } - t, err := target.NewMySQLTarget(id, args, logger.LogOnceIf) + t, err := target.NewMySQLTarget(id, args, logOnceIf) if err != nil { return nil, err } @@ -177,7 +184,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t if !args.Enable { continue } - t, err := target.NewNATSTarget(id, args, logger.LogOnceIf) + t, err := target.NewNATSTarget(id, args, logOnceIf) if err != nil { return nil, err } @@ -192,7 +199,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t if !args.Enable { continue } - t, err := target.NewNSQTarget(id, args, logger.LogOnceIf) + t, err := target.NewNSQTarget(id, args, logOnceIf) if err != nil { return nil, err } @@ -207,7 +214,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t if !args.Enable { continue } - t, err := target.NewPostgreSQLTarget(id, args, logger.LogOnceIf) + t, err := target.NewPostgreSQLTarget(id, args, logOnceIf) if err != nil { return nil, err } @@ -222,7 +229,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t if !args.Enable { continue } - t, err := target.NewRedisTarget(id, args, logger.LogOnceIf) + t, err := target.NewRedisTarget(id, args, logOnceIf) if err != nil { return nil, err } @@ -237,7 +244,7 @@ func fetchSubSysTargets(ctx context.Context, cfg config.Config, subSys string, t if !args.Enable { continue } - t, err := target.NewWebhookTarget(ctx, id, args, logger.LogOnceIf, transport) + t, err := target.NewWebhookTarget(ctx, id, args, logOnceIf, transport) if err != nil { return nil, err } @@ -366,6 +373,10 @@ var ( Key: target.KafkaBatchSize, Value: "0", }, + config.KV{ + Key: target.KafkaBatchCommitTimeout, + Value: "0s", + }, config.KV{ Key: target.KafkaCompressionCodec, Value: "", @@ -401,7 +412,7 @@ func GetNotifyKafka(kafkaKVS map[string]config.KVS) (map[string]target.KafkaArgs if len(kafkaBrokers) == 0 { return nil, config.Errorf("kafka 
'brokers' cannot be empty") } - for _, s := range strings.Split(kafkaBrokers, config.ValueSeparator) { + for s := range strings.SplitSeq(kafkaBrokers, config.ValueSeparator) { var host *xnet.Host host, err = xnet.ParseHost(s) if err != nil { @@ -455,14 +466,23 @@ func GetNotifyKafka(kafkaKVS map[string]config.KVS) (map[string]target.KafkaArgs return nil, err } + batchCommitTimeoutEnv := target.EnvKafkaBatchCommitTimeout + if k != config.Default { + batchCommitTimeoutEnv = batchCommitTimeoutEnv + config.Default + k + } + batchCommitTimeout, err := time.ParseDuration(env.Get(batchCommitTimeoutEnv, kv.Get(target.KafkaBatchCommitTimeout))) + if err != nil { + return nil, err + } kafkaArgs := target.KafkaArgs{ - Enable: enabled, - Brokers: brokers, - Topic: env.Get(topicEnv, kv.Get(target.KafkaTopic)), - QueueDir: env.Get(queueDirEnv, kv.Get(target.KafkaQueueDir)), - QueueLimit: queueLimit, - Version: env.Get(versionEnv, kv.Get(target.KafkaVersion)), - BatchSize: uint32(batchSize), + Enable: enabled, + Brokers: brokers, + Topic: env.Get(topicEnv, kv.Get(target.KafkaTopic)), + QueueDir: env.Get(queueDirEnv, kv.Get(target.KafkaQueueDir)), + QueueLimit: queueLimit, + Version: env.Get(versionEnv, kv.Get(target.KafkaVersion)), + BatchSize: uint32(batchSize), + BatchCommitTimeout: batchCommitTimeout, } tlsEnableEnv := target.EnvKafkaTLS @@ -853,20 +873,24 @@ var ( Value: config.EnableOff, }, config.KV{ - Key: target.NATSStreaming, - Value: config.EnableOff, + Key: target.NATSStreaming, + Value: config.EnableOff, + HiddenIfEmpty: true, }, config.KV{ - Key: target.NATSStreamingAsync, - Value: config.EnableOff, + Key: target.NATSStreamingAsync, + Value: config.EnableOff, + HiddenIfEmpty: true, }, config.KV{ - Key: target.NATSStreamingMaxPubAcksInFlight, - Value: "0", + Key: target.NATSStreamingMaxPubAcksInFlight, + Value: "0", + HiddenIfEmpty: true, }, config.KV{ - Key: target.NATSStreamingClusterID, - Value: "", + Key: target.NATSStreamingClusterID, + Value: "", + HiddenIfEmpty: true, }, config.KV{ Key: target.NATSQueueDir, @@ -936,6 +960,11 @@ func GetNotifyNATS(natsKVS map[string]config.KVS, rootCAs *x509.CertPool) (map[s tlsSkipVerifyEnv = tlsSkipVerifyEnv + config.Default + k } + tlsHandshakeFirstEnv := target.EnvNatsTLSHandshakeFirst + if k != config.Default { + tlsHandshakeFirstEnv = tlsHandshakeFirstEnv + config.Default + k + } + subjectEnv := target.EnvNATSSubject if k != config.Default { subjectEnv = subjectEnv + config.Default + k @@ -961,6 +990,11 @@ func GetNotifyNATS(natsKVS map[string]config.KVS, rootCAs *x509.CertPool) (map[s tokenEnv = tokenEnv + config.Default + k } + nKeySeedEnv := target.EnvNATSNKeySeed + if k != config.Default { + nKeySeedEnv = nKeySeedEnv + config.Default + k + } + queueDirEnv := target.EnvNATSQueueDir if k != config.Default { queueDirEnv = queueDirEnv + config.Default + k @@ -987,22 +1021,24 @@ func GetNotifyNATS(natsKVS map[string]config.KVS, rootCAs *x509.CertPool) (map[s } natsArgs := target.NATSArgs{ - Enable: true, - Address: *address, - Subject: env.Get(subjectEnv, kv.Get(target.NATSSubject)), - Username: env.Get(usernameEnv, kv.Get(target.NATSUsername)), - UserCredentials: env.Get(userCredentialsEnv, kv.Get(target.NATSUserCredentials)), - Password: env.Get(passwordEnv, kv.Get(target.NATSPassword)), - CertAuthority: env.Get(certAuthorityEnv, kv.Get(target.NATSCertAuthority)), - ClientCert: env.Get(clientCertEnv, kv.Get(target.NATSClientCert)), - ClientKey: env.Get(clientKeyEnv, kv.Get(target.NATSClientKey)), - Token: env.Get(tokenEnv, 
kv.Get(target.NATSToken)), - TLS: env.Get(tlsEnv, kv.Get(target.NATSTLS)) == config.EnableOn, - TLSSkipVerify: env.Get(tlsSkipVerifyEnv, kv.Get(target.NATSTLSSkipVerify)) == config.EnableOn, - PingInterval: pingInterval, - QueueDir: env.Get(queueDirEnv, kv.Get(target.NATSQueueDir)), - QueueLimit: queueLimit, - RootCAs: rootCAs, + Enable: true, + Address: *address, + Subject: env.Get(subjectEnv, kv.Get(target.NATSSubject)), + Username: env.Get(usernameEnv, kv.Get(target.NATSUsername)), + UserCredentials: env.Get(userCredentialsEnv, kv.Get(target.NATSUserCredentials)), + Password: env.Get(passwordEnv, kv.Get(target.NATSPassword)), + CertAuthority: env.Get(certAuthorityEnv, kv.Get(target.NATSCertAuthority)), + ClientCert: env.Get(clientCertEnv, kv.Get(target.NATSClientCert)), + ClientKey: env.Get(clientKeyEnv, kv.Get(target.NATSClientKey)), + Token: env.Get(tokenEnv, kv.Get(target.NATSToken)), + NKeySeed: env.Get(nKeySeedEnv, kv.Get(target.NATSNKeySeed)), + TLS: env.Get(tlsEnv, kv.Get(target.NATSTLS)) == config.EnableOn, + TLSSkipVerify: env.Get(tlsSkipVerifyEnv, kv.Get(target.NATSTLSSkipVerify)) == config.EnableOn, + TLSHandshakeFirst: env.Get(tlsHandshakeFirstEnv, kv.Get(target.NATSTLSHandshakeFirst)) == config.EnableOn, + PingInterval: pingInterval, + QueueDir: env.Get(queueDirEnv, kv.Get(target.NATSQueueDir)), + QueueLimit: queueLimit, + RootCAs: rootCAs, } natsArgs.JetStream.Enable = env.Get(jetStreamEnableEnv, kv.Get(target.NATSJetStream)) == config.EnableOn @@ -1282,6 +1318,10 @@ var ( Key: target.RedisPassword, Value: "", }, + config.KV{ + Key: target.RedisUser, + Value: "", + }, config.KV{ Key: target.RedisQueueDir, Value: "", @@ -1334,6 +1374,10 @@ func GetNotifyRedis(redisKVS map[string]config.KVS) (map[string]target.RedisArgs if k != config.Default { passwordEnv = passwordEnv + config.Default + k } + userEnv := target.EnvRedisUser + if k != config.Default { + userEnv = userEnv + config.Default + k + } keyEnv := target.EnvRedisKey if k != config.Default { keyEnv = keyEnv + config.Default + k @@ -1347,6 +1391,7 @@ func GetNotifyRedis(redisKVS map[string]config.KVS) (map[string]target.RedisArgs Format: env.Get(formatEnv, kv.Get(target.RedisFormat)), Addr: *addr, Password: env.Get(passwordEnv, kv.Get(target.RedisPassword)), + User: env.Get(userEnv, kv.Get(target.RedisUser)), Key: env.Get(keyEnv, kv.Get(target.RedisKey)), QueueDir: env.Get(queueDirEnv, kv.Get(target.RedisQueueDir)), QueueLimit: uint64(queueLimit), @@ -1661,7 +1706,7 @@ func GetNotifyAMQP(amqpKVS map[string]config.KVS) (map[string]target.AMQPArgs, e if k != config.Default { urlEnv = urlEnv + config.Default + k } - url, err := xnet.ParseURL(env.Get(urlEnv, kv.Get(target.AmqpURL))) + url, err := amqp091.ParseURI(env.Get(urlEnv, kv.Get(target.AmqpURL))) if err != nil { return nil, err } @@ -1727,7 +1772,7 @@ func GetNotifyAMQP(amqpKVS map[string]config.KVS) (map[string]target.AMQPArgs, e } amqpArgs := target.AMQPArgs{ Enable: enabled, - URL: *url, + URL: url, Exchange: env.Get(exchangeEnv, kv.Get(target.AmqpExchange)), RoutingKey: env.Get(routingKeyEnv, kv.Get(target.AmqpRoutingKey)), ExchangeType: env.Get(exchangeTypeEnv, kv.Get(target.AmqpExchangeType)), diff --git a/internal/config/policy/opa/config.go b/internal/config/policy/opa/config.go index bfde1f2a68967..47185fb8761ce 100644 --- a/internal/config/policy/opa/config.go +++ b/internal/config/policy/opa/config.go @@ -24,9 +24,9 @@ import ( "net/http" "github.com/minio/minio/internal/config" - "github.com/minio/pkg/v2/env" - xnet "github.com/minio/pkg/v2/net" - 
"github.com/minio/pkg/v2/policy" + "github.com/minio/pkg/v3/env" + xnet "github.com/minio/pkg/v3/net" + "github.com/minio/pkg/v3/policy" ) // Env IAM OPA URL @@ -42,12 +42,14 @@ const ( var ( DefaultKVS = config.KVS{ config.KV{ - Key: URL, - Value: "", + Key: URL, + Value: "", + HiddenIfEmpty: true, }, config.KV{ - Key: AuthToken, - Value: "", + Key: AuthToken, + Value: "", + HiddenIfEmpty: true, }, } ) @@ -168,7 +170,7 @@ func (o *Opa) IsAllowed(args policy.Args) (bool, error) { } // OPA input - body := make(map[string]interface{}) + body := make(map[string]any) body["input"] = args inputBytes, err := json.Marshal(body) diff --git a/internal/config/policy/plugin/config.go b/internal/config/policy/plugin/config.go index da168b530bf4b..6e651c5ad422a 100644 --- a/internal/config/policy/plugin/config.go +++ b/internal/config/policy/plugin/config.go @@ -26,8 +26,8 @@ import ( "github.com/minio/minio/internal/config" xhttp "github.com/minio/minio/internal/http" - xnet "github.com/minio/pkg/v2/net" - "github.com/minio/pkg/v2/policy" + xnet "github.com/minio/pkg/v3/net" + "github.com/minio/pkg/v3/policy" ) // Authorization Plugin config and env variables @@ -185,7 +185,7 @@ func (o *AuthZPlugin) IsAllowed(args policy.Args) (bool, error) { } // Access Management Plugin Input - body := make(map[string]interface{}) + body := make(map[string]any) body["input"] = args inputBytes, err := json.Marshal(body) diff --git a/internal/config/scanner/scanner.go b/internal/config/scanner/scanner.go index 8fca3644daedf..7d17141398f72 100644 --- a/internal/config/scanner/scanner.go +++ b/internal/config/scanner/scanner.go @@ -23,7 +23,7 @@ import ( "time" "github.com/minio/minio/internal/config" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) // Compression environment variables @@ -114,15 +114,44 @@ var DefaultKVS = config.KVS{ // LookupConfig - lookup config and override with valid environment settings if any. func LookupConfig(kvs config.KVS) (cfg Config, err error) { + cfg = Config{ + ExcessVersions: 100, + ExcessFolders: 50000, + IdleMode: 0, // Default is on + } + if err = config.CheckValidKeys(config.ScannerSubSys, kvs, DefaultKVS); err != nil { return cfg, err } + excessVersions, err := strconv.ParseInt(env.Get(EnvExcessVersions, kvs.GetWithDefault(ExcessVersions, DefaultKVS)), 10, 64) + if err != nil { + return cfg, err + } + cfg.ExcessVersions = excessVersions + + excessFolders, err := strconv.ParseInt(env.Get(EnvExcessFolders, kvs.GetWithDefault(ExcessFolders, DefaultKVS)), 10, 64) + if err != nil { + return cfg, err + } + cfg.ExcessFolders = excessFolders + + switch idleSpeed := env.Get(EnvIdleSpeed, kvs.GetWithDefault(IdleSpeed, DefaultKVS)); idleSpeed { + case "", config.EnableOn: + cfg.IdleMode = 0 + case config.EnableOff: + cfg.IdleMode = 1 + default: + return cfg, fmt.Errorf("unknown value: '%s'", idleSpeed) + } + // Stick to loading deprecated config/env if they are already set, and the Speed value // has not been changed from its "default" value, if it has been changed honor new settings. 
if kvs.GetWithDefault(Speed, DefaultKVS) == "default" { if kvs.Get(Delay) != "" && kvs.Get(MaxWait) != "" { - return lookupDeprecatedScannerConfig(kvs) + if err = lookupDeprecatedScannerConfig(kvs, &cfg); err != nil { + return cfg, err + } } } @@ -141,38 +170,17 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) { return cfg, fmt.Errorf("unknown '%s' value", speed) } - switch idleSpeed := env.Get(EnvIdleSpeed, kvs.GetWithDefault(IdleSpeed, DefaultKVS)); idleSpeed { - case "", config.EnableOn: - cfg.IdleMode = 0 - case config.EnableOff: - cfg.IdleMode = 1 - default: - return cfg, fmt.Errorf("unknown value: '%s'", idleSpeed) - } - - excessVersions, err := strconv.ParseInt(env.Get(EnvExcessVersions, kvs.GetWithDefault(ExcessVersions, DefaultKVS)), 10, 64) - if err != nil { - return cfg, err - } - cfg.ExcessVersions = excessVersions - - excessFolders, err := strconv.ParseInt(env.Get(EnvExcessFolders, kvs.GetWithDefault(ExcessFolders, DefaultKVS)), 10, 64) - if err != nil { - return cfg, err - } - cfg.ExcessFolders = excessFolders - return cfg, nil } -func lookupDeprecatedScannerConfig(kvs config.KVS) (cfg Config, err error) { +func lookupDeprecatedScannerConfig(kvs config.KVS, cfg *Config) (err error) { delay := env.Get(EnvDelayLegacy, "") if delay == "" { delay = env.Get(EnvDelay, kvs.GetWithDefault(Delay, DefaultKVS)) } cfg.Delay, err = strconv.ParseFloat(delay, 64) if err != nil { - return cfg, err + return err } maxWait := env.Get(EnvMaxWaitLegacy, "") if maxWait == "" { @@ -180,7 +188,7 @@ func lookupDeprecatedScannerConfig(kvs config.KVS) (cfg Config, err error) { } cfg.MaxWait, err = time.ParseDuration(maxWait) if err != nil { - return cfg, err + return err } cycle := env.Get(EnvCycle, kvs.GetWithDefault(Cycle, DefaultKVS)) if cycle == "" { @@ -188,7 +196,7 @@ func lookupDeprecatedScannerConfig(kvs config.KVS) (cfg Config, err error) { } cfg.Cycle, err = time.ParseDuration(cycle) if err != nil { - return cfg, err + return err } - return cfg, nil + return nil } diff --git a/internal/config/server.go b/internal/config/server.go index 7346a289c8c9e..e9f750f401f37 100644 --- a/internal/config/server.go +++ b/internal/config/server.go @@ -29,14 +29,34 @@ type Opts struct { } `yaml:"sftp"` } +// ServerConfigVersion struct is used to extract the version +type ServerConfigVersion struct { + Version string `yaml:"version"` +} + +// ServerConfigCommon struct for server config common options +type ServerConfigCommon struct { + RootUser string `yaml:"rootUser"` + RootPwd string `yaml:"rootPassword"` + Addr string `yaml:"address"` + ConsoleAddr string `yaml:"console-address"` + CertsDir string `yaml:"certs-dir"` + Options Opts `yaml:"options"` +} + +// ServerConfigV1 represents a MinIO configuration file v1 +type ServerConfigV1 struct { + ServerConfigVersion + ServerConfigCommon + Pools [][]string `yaml:"pools"` +} + // ServerConfig represents a MinIO configuration file type ServerConfig struct { - Version string `yaml:"version"` - RootUser string `yaml:"rootUser"` - RootPwd string `yaml:"rootPassword"` - Addr string `yaml:"address"` - ConsoleAddr string `yaml:"console-address"` - CertsDir string `yaml:"certs-dir"` - Pools [][]string `yaml:"pools"` - Options Opts `yaml:"options"` + ServerConfigVersion + ServerConfigCommon + Pools []struct { + Args []string `yaml:"args"` + SetDriveCount uint64 `yaml:"set-drive-count"` + } `yaml:"pools"` } diff --git a/internal/config/storageclass/help.go b/internal/config/storageclass/help.go index 72afafb77020d..0b57085e1a91b 100644 --- 
a/internal/config/storageclass/help.go +++ b/internal/config/storageclass/help.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2021 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. // // This file is part of MinIO Object Storage stack // @@ -38,6 +38,12 @@ var ( Optional: true, Type: "string", }, + config.HelpKV{ + Key: Optimize, + Description: `optimize parity calculation for standard storage class, set 'capacity' for capacity optimized (no additional parity)` + defaultHelpPostfix(Optimize), + Optional: true, + Type: "string", + }, config.HelpKV{ Key: config.Comment, Description: config.DefaultComment, diff --git a/internal/config/storageclass/storage-class.go b/internal/config/storageclass/storage-class.go index 15580a6aaa425..18121141a6b98 100644 --- a/internal/config/storageclass/storage-class.go +++ b/internal/config/storageclass/storage-class.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2021 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. // // This file is part of MinIO Object Storage stack // @@ -18,14 +18,17 @@ package storageclass import ( + "context" "encoding/json" "fmt" "strconv" "strings" "sync" + "github.com/dustin/go-humanize" "github.com/minio/minio/internal/config" - "github.com/minio/pkg/v2/env" + "github.com/minio/minio/internal/logger" + "github.com/minio/pkg/v3/env" ) // Standard constants for all storage class @@ -36,15 +39,27 @@ const ( STANDARD = "STANDARD" ) -// Standard constats for config info storage class +// Standard constants for config info storage class const ( ClassStandard = "standard" ClassRRS = "rrs" + Optimize = "optimize" + InlineBlock = "inline_block" // Reduced redundancy storage class environment variable RRSEnv = "MINIO_STORAGE_CLASS_RRS" // Standard storage class environment variable StandardEnv = "MINIO_STORAGE_CLASS_STANDARD" + // Optimize storage class environment variable + OptimizeEnv = "MINIO_STORAGE_CLASS_OPTIMIZE" + // Inline block indicates the size of the shard + // that is considered for inlining, remember this + // shard value is the value per drive shard it + // will vary based on the parity that is configured + // for the STANDARD storage_class. + // inlining means data and metadata are written + // together in a single file i.e xl.meta + InlineBlockEnv = "MINIO_STORAGE_CLASS_INLINE_BLOCK" // Supported storage class scheme is EC schemePrefix = "EC" @@ -67,6 +82,15 @@ var ( Key: ClassRRS, Value: "EC:1", }, + config.KV{ + Key: Optimize, + Value: "availability", + }, + config.KV{ + Key: InlineBlock, + Value: "", + HiddenIfEmpty: true, + }, } ) @@ -82,6 +106,9 @@ var ConfigLock sync.RWMutex type Config struct { Standard StorageClass `json:"standard"` RRS StorageClass `json:"rrs"` + Optimize string `json:"optimize"` + inlineBlock int64 + initialized bool } @@ -120,7 +147,7 @@ func (sc *StorageClass) UnmarshalText(b []byte) error { // MarshalText - marshals storage class string. func (sc *StorageClass) MarshalText() ([]byte, error) { if sc.Parity != 0 { - return []byte(fmt.Sprintf("%s:%d", schemePrefix, sc.Parity)), nil + return fmt.Appendf(nil, "%s:%d", schemePrefix, sc.Parity), nil } return []byte{}, nil } @@ -245,12 +272,75 @@ func (sCfg *Config) GetParityForSC(sc string) (parity int) { } } +// ShouldInline returns true if the shardSize is worthy of inline +// if versioned is true then we chosen 1/8th inline block size +// to satisfy the same constraints. 
+func (sCfg *Config) ShouldInline(shardSize int64, versioned bool) bool { + if shardSize < 0 { + return false + } + + ConfigLock.RLock() + inlineBlock := int64(128 * humanize.KiByte) + if sCfg.initialized { + inlineBlock = sCfg.inlineBlock + } + ConfigLock.RUnlock() + + if versioned { + return shardSize <= inlineBlock/8 + } + return shardSize <= inlineBlock +} + +// InlineBlock indicates the size of the block which will be used to inline +// an erasure shard and written along with xl.meta on the drive, on a versioned +// bucket this value is automatically chosen to 1/8th of the this value, make +// sure to put this into consideration when choosing this value. +func (sCfg *Config) InlineBlock() int64 { + ConfigLock.RLock() + defer ConfigLock.RUnlock() + if !sCfg.initialized { + return 128 * humanize.KiByte + } + return sCfg.inlineBlock +} + +// CapacityOptimized - returns true if the storage-class is capacity optimized +// meaning we will not use additional parities when drives are offline. +// +// Default is "availability" optimized, unless this is configured. +func (sCfg *Config) CapacityOptimized() bool { + ConfigLock.RLock() + defer ConfigLock.RUnlock() + if !sCfg.initialized { + return false + } + return sCfg.Optimize == "capacity" +} + +// AvailabilityOptimized - returns true if the storage-class is availability +// optimized, meaning we will use additional parities when drives are offline +// to retain parity SLA. +// +// Default is "availability" optimized. +func (sCfg *Config) AvailabilityOptimized() bool { + ConfigLock.RLock() + defer ConfigLock.RUnlock() + if !sCfg.initialized { + return true + } + return sCfg.Optimize == "availability" || sCfg.Optimize == "" +} + // Update update storage-class with new config func (sCfg *Config) Update(newCfg Config) { ConfigLock.Lock() defer ConfigLock.Unlock() sCfg.RRS = newCfg.RRS sCfg.Standard = newCfg.Standard + sCfg.Optimize = newCfg.Optimize + sCfg.inlineBlock = newCfg.inlineBlock sCfg.initialized = true } @@ -319,6 +409,27 @@ func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) { return Config{}, err } + cfg.Optimize = env.Get(OptimizeEnv, kvs.Get(Optimize)) + + inlineBlockStr := env.Get(InlineBlockEnv, kvs.Get(InlineBlock)) + if inlineBlockStr != "" { + inlineBlock, err := humanize.ParseBytes(inlineBlockStr) + if err != nil { + return cfg, err + } + if inlineBlock > 128*humanize.KiByte { + configLogOnceIf(context.Background(), fmt.Errorf("inline block value bigger than recommended max of 128KiB -> %s, performance may degrade for PUT please benchmark the changes", inlineBlockStr), inlineBlockStr) + } + cfg.inlineBlock = int64(inlineBlock) + } else { + cfg.inlineBlock = 128 * humanize.KiByte + } + cfg.initialized = true + return cfg, nil } + +func configLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceIf(ctx, "config", err, id, errKind...) 
+} diff --git a/internal/config/subnet/config.go b/internal/config/subnet/config.go index 55e0a9bb7c33f..9e2420a647cdd 100644 --- a/internal/config/subnet/config.go +++ b/internal/config/subnet/config.go @@ -25,8 +25,8 @@ import ( "sync" "github.com/minio/minio/internal/config" - "github.com/minio/pkg/v2/env" - xnet "github.com/minio/pkg/v2/net" + "github.com/minio/pkg/v3/env" + xnet "github.com/minio/pkg/v3/net" ) const ( @@ -127,7 +127,6 @@ func LookupConfig(kvs config.KVS, transport http.RoundTripper) (cfg Config, err if err != nil { return cfg, err } - } cfg.License = strings.TrimSpace(env.Get(config.EnvMinIOSubnetLicense, kvs.Get(config.License))) @@ -142,9 +141,11 @@ func LookupConfig(kvs config.KVS, transport http.RoundTripper) (cfg Config, err // Make sure to clone the transport before editing the ProxyURL if proxyURL != nil { - ctransport := transport.(*http.Transport).Clone() - ctransport.Proxy = http.ProxyURL((*url.URL)(proxyURL)) - cfg.transport = ctransport + if tr, ok := transport.(*http.Transport); ok { + ctransport := tr.Clone() + ctransport.Proxy = http.ProxyURL((*url.URL)(proxyURL)) + cfg.transport = ctransport + } } else { cfg.transport = transport } diff --git a/internal/config/subnet/help.go b/internal/config/subnet/help.go index f16713f33803e..da4451d5d60ed 100644 --- a/internal/config/subnet/help.go +++ b/internal/config/subnet/help.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2021 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. // // This file is part of MinIO Object Storage stack // @@ -27,23 +27,23 @@ var ( // HelpSubnet - provides help for subnet api key config HelpSubnet = config.HelpKVS{ config.HelpKV{ - Key: config.License, // Deprecated Dec 2021 + Key: config.License, Type: "string", - Description: "[DEPRECATED use api_key] Subnet license token for the cluster" + defaultHelpPostfix(config.License), + Description: "Enterprise license for the cluster" + defaultHelpPostfix(config.License), Optional: true, Sensitive: true, }, config.HelpKV{ Key: config.APIKey, Type: "string", - Description: "Subnet api key for the cluster" + defaultHelpPostfix(config.APIKey), + Description: "Enterprise license API key for the cluster" + defaultHelpPostfix(config.APIKey), Optional: true, Sensitive: true, }, config.HelpKV{ Key: config.Proxy, Type: "string", - Description: "HTTP(S) proxy URL to use for connecting to SUBNET" + defaultHelpPostfix(config.Proxy), + Description: "HTTP(s) proxy URL to use for connecting to SUBNET" + defaultHelpPostfix(config.Proxy), Optional: true, Sensitive: true, }, diff --git a/internal/config/subnet/subnet.go b/internal/config/subnet/subnet.go index 783dcdd0420e7..c4ba019824bc5 100644 --- a/internal/config/subnet/subnet.go +++ b/internal/config/subnet/subnet.go @@ -95,7 +95,7 @@ func (c Config) submitPost(r *http.Request) (string, error) { } // Post submit 'payload' to specified URL -func (c Config) Post(reqURL string, payload interface{}) (string, error) { +func (c Config) Post(reqURL string, payload any) (string, error) { if !c.Registered() { return "", errors.New("Deployment is not registered with SUBNET. 
Please register the deployment via 'mc license register ALIAS'") } diff --git a/internal/crypto/auto-encryption.go b/internal/crypto/auto-encryption.go index 90a55b465ded3..f2cdcc5c5a804 100644 --- a/internal/crypto/auto-encryption.go +++ b/internal/crypto/auto-encryption.go @@ -19,7 +19,7 @@ package crypto import ( "github.com/minio/minio/internal/config" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" ) const ( diff --git a/internal/fips/api.go b/internal/crypto/crypto.go similarity index 50% rename from internal/fips/api.go rename to internal/crypto/crypto.go index debcc1b10dca4..f4a9b943c3db7 100644 --- a/internal/fips/api.go +++ b/internal/crypto/crypto.go @@ -15,22 +15,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -// Package fips provides functionality to configure cryptographic -// implementations compliant with FIPS 140. -// -// FIPS 140 [1] is a US standard for data processing that specifies -// requirements for cryptographic modules. Software that is "FIPS 140 -// compliant" must use approved cryptographic primitives only and that -// are implemented by a FIPS 140 certified cryptographic module. -// -// So, FIPS 140 requires that a certified implementation of e.g. AES -// is used to implement more high-level cryptographic protocols. -// It does not require any specific security criteria for those -// high-level protocols. FIPS 140 focuses only on the implementation -// and usage of the most low-level cryptographic building blocks. -// -// [1]: https://en.wikipedia.org/wiki/FIPS_140 -package fips +package crypto import ( "crypto/tls" @@ -38,40 +23,13 @@ import ( "github.com/minio/sio" ) -// Enabled indicates whether cryptographic primitives, -// like AES or SHA-256, are implemented using a FIPS 140 -// certified module. -// -// If FIPS-140 is enabled no non-NIST/FIPS approved -// primitives must be used. -const Enabled = enabled - // DARECiphers returns a list of supported cipher suites // for the DARE object encryption. -func DARECiphers() []byte { - if Enabled { - return []byte{sio.AES_256_GCM} - } - return []byte{sio.AES_256_GCM, sio.CHACHA20_POLY1305} -} +func DARECiphers() []byte { return []byte{sio.AES_256_GCM, sio.CHACHA20_POLY1305} } // TLSCiphers returns a list of supported TLS transport // cipher suite IDs. -// -// The list contains only ciphers that use AES-GCM or -// (non-FIPS) CHACHA20-POLY1305 and ellitpic curve key -// exchange. func TLSCiphers() []uint16 { - if Enabled { - return []uint16{ - tls.TLS_AES_128_GCM_SHA256, // TLS 1.3 - tls.TLS_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // TLS 1.2 - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - } - } return []uint16{ tls.TLS_CHACHA20_POLY1305_SHA256, // TLS 1.3 tls.TLS_AES_128_GCM_SHA256, @@ -92,24 +50,6 @@ func TLSCiphers() []uint16 { // ciphers for backward compatibility. In particular, AES-CBC // and non-ECDHE ciphers. 
func TLSCiphersBackwardCompatible() []uint16 { - if Enabled { - return []uint16{ - tls.TLS_AES_128_GCM_SHA256, // TLS 1.3 - tls.TLS_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // TLS 1.2 ECDHE GCM - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // TLS 1.2 ECDHE CBC - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_GCM_SHA256, // TLS 1.2 non-ECDHE - tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - } - } return []uint16{ tls.TLS_CHACHA20_POLY1305_SHA256, // TLS 1.3 tls.TLS_AES_128_GCM_SHA256, @@ -134,14 +74,5 @@ func TLSCiphersBackwardCompatible() []uint16 { // TLSCurveIDs returns a list of supported elliptic curve IDs // in preference order. func TLSCurveIDs() []tls.CurveID { - var curves []tls.CurveID - if !Enabled { - curves = append(curves, tls.X25519) // Only enable X25519 in non-FIPS mode - } - curves = append(curves, tls.CurveP256) - if go19 { - // With go1.19 enable P384, P521 newer constant time implementations. - curves = append(curves, tls.CurveP384, tls.CurveP521) - } - return curves + return []tls.CurveID{tls.X25519MLKEM768, tls.CurveP256, tls.X25519, tls.CurveP384, tls.CurveP521} } diff --git a/internal/crypto/error.go b/internal/crypto/error.go index eca05a9f92883..4d21b936e878a 100644 --- a/internal/crypto/error.go +++ b/internal/crypto/error.go @@ -32,7 +32,7 @@ type Error struct { // Errorf - formats according to a format specifier and returns // the string as a value that satisfies error of type crypto.Error -func Errorf(format string, a ...interface{}) error { +func Errorf(format string, a ...any) error { e := fmt.Errorf(format, a...) 
ee := Error{} ee.msg = e.Error() @@ -82,6 +82,8 @@ var ( // ErrIncompatibleEncryptionMethod indicates that both SSE-C headers and SSE-S3 headers were specified, and are incompatible // The client needs to remove the SSE-S3 header or the SSE-C headers ErrIncompatibleEncryptionMethod = Errorf("Server side encryption specified with both SSE-C and SSE-S3 headers") + // ErrIncompatibleEncryptionWithCompression indicates that both data compression and SSE-C not allowed at the same time + ErrIncompatibleEncryptionWithCompression = Errorf("Server side encryption specified with SSE-C with compression not allowed") // ErrInvalidEncryptionKeyID returns error when KMS key id contains invalid characters ErrInvalidEncryptionKeyID = Errorf("KMS KeyID contains unsupported characters") diff --git a/internal/crypto/key.go b/internal/crypto/key.go index 5e253c0ba2fa6..7c59ecd3459e0 100644 --- a/internal/crypto/key.go +++ b/internal/crypto/key.go @@ -27,7 +27,6 @@ import ( "io" "path" - "github.com/minio/minio/internal/fips" "github.com/minio/minio/internal/hash/sha256" "github.com/minio/minio/internal/logger" "github.com/minio/sio" @@ -51,10 +50,12 @@ func GenerateKey(extKey []byte, random io.Reader) (key ObjectKey) { if _, err := io.ReadFull(random, nonce[:]); err != nil { logger.CriticalIf(context.Background(), errOutOfEntropy) } - sha := sha256.New() - sha.Write(extKey) - sha.Write(nonce[:]) - sha.Sum(key[:0]) + + const Context = "object-encryption-key generation" + mac := hmac.New(sha256.New, extKey) + mac.Write([]byte(Context)) + mac.Write(nonce[:]) + mac.Sum(key[:0]) return key } @@ -96,7 +97,7 @@ func (key ObjectKey) Seal(extKey []byte, iv [32]byte, domain, bucket, object str mac.Write([]byte(SealAlgorithm)) mac.Write([]byte(path.Join(bucket, object))) // use path.Join for canonical 'bucket/object' mac.Sum(sealingKey[:0]) - if n, err := sio.Encrypt(&encryptedKey, bytes.NewReader(key[:]), sio.Config{Key: sealingKey[:], CipherSuites: fips.DARECiphers()}); n != 64 || err != nil { + if n, err := sio.Encrypt(&encryptedKey, bytes.NewReader(key[:]), sio.Config{Key: sealingKey[:]}); n != 64 || err != nil { logger.CriticalIf(context.Background(), errors.New("Unable to generate sealed key")) } sealedKey := SealedKey{ @@ -121,12 +122,12 @@ func (key *ObjectKey) Unseal(extKey []byte, sealedKey SealedKey, domain, bucket, mac.Write([]byte(domain)) mac.Write([]byte(SealAlgorithm)) mac.Write([]byte(path.Join(bucket, object))) // use path.Join for canonical 'bucket/object' - unsealConfig = sio.Config{MinVersion: sio.Version20, Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()} + unsealConfig = sio.Config{MinVersion: sio.Version20, Key: mac.Sum(nil)} case InsecureSealAlgorithm: sha := sha256.New() sha.Write(extKey) sha.Write(sealedKey.IV[:]) - unsealConfig = sio.Config{MinVersion: sio.Version10, Key: sha.Sum(nil), CipherSuites: fips.DARECiphers()} + unsealConfig = sio.Config{MinVersion: sio.Version10, Key: sha.Sum(nil)} } if out, err := sio.DecryptBuffer(key[:0], sealedKey.Key[:], unsealConfig); len(out) != 32 || err != nil { @@ -157,7 +158,7 @@ func (key ObjectKey) SealETag(etag []byte) []byte { var buffer bytes.Buffer mac := hmac.New(sha256.New, key[:]) mac.Write([]byte("SSE-etag")) - if _, err := sio.Encrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()}); err != nil { + if _, err := sio.Encrypt(&buffer, bytes.NewReader(etag), sio.Config{Key: mac.Sum(nil)}); err != nil { logger.CriticalIf(context.Background(), errors.New("Unable to encrypt ETag using object key")) } 
return buffer.Bytes() @@ -173,5 +174,5 @@ func (key ObjectKey) UnsealETag(etag []byte) ([]byte, error) { } mac := hmac.New(sha256.New, key[:]) mac.Write([]byte("SSE-etag")) - return sio.DecryptBuffer(make([]byte, 0, len(etag)), etag, sio.Config{Key: mac.Sum(nil), CipherSuites: fips.DARECiphers()}) + return sio.DecryptBuffer(make([]byte, 0, len(etag)), etag, sio.Config{Key: mac.Sum(nil)}) } diff --git a/internal/crypto/key_test.go b/internal/crypto/key_test.go index cdc56d9533d48..bf15fd888226a 100644 --- a/internal/crypto/key_test.go +++ b/internal/crypto/key_test.go @@ -49,8 +49,8 @@ var generateKeyTests = []struct { } func TestGenerateKey(t *testing.T) { - defer func(l bool) { logger.DisableErrorLog = l }(logger.DisableErrorLog) - logger.DisableErrorLog = true + defer func(l bool) { logger.DisableLog = l }(logger.DisableLog) + logger.DisableLog = true for i, test := range generateKeyTests { i, test := i, test @@ -75,8 +75,8 @@ var generateIVTests = []struct { } func TestGenerateIV(t *testing.T) { - defer func(l bool) { logger.DisableErrorLog = l }(logger.DisableErrorLog) - logger.DisableErrorLog = true + defer func(l bool) { logger.DisableLog = l }(logger.DisableLog) + logger.DisableLog = true for i, test := range generateIVTests { i, test := i, test diff --git a/internal/crypto/metadata.go b/internal/crypto/metadata.go index 900f4ad5f05c6..43e8cfe4fc84a 100644 --- a/internal/crypto/metadata.go +++ b/internal/crypto/metadata.go @@ -48,6 +48,9 @@ const ( // the KMS. MetaDataEncryptionKey = "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key" + // MetaSsecCRC is the encrypted checksum of the SSE-C encrypted object. + MetaSsecCRC = "X-Minio-Replication-Ssec-Crc" + // MetaContext is the KMS context provided by a client when encrypting an // object with SSE-KMS. A client may not send a context in which case the // MetaContext will not be present. 
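Note on the internal/crypto/key.go hunk above: GenerateKey now derives the object key as an HMAC keyed by the external key over a fixed context string plus a random nonce, instead of a plain SHA-256 hash. The sketch below reproduces that derivation using the standard library crypto/hmac and crypto/sha256 in place of MinIO's internal sha256 wrapper; the function name and the zero-filled example key are assumptions for illustration only.

package main

import (
	"crypto/hmac"
	"crypto/rand"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// deriveObjectKey sketches the derivation from the hunk above:
// key = HMAC-SHA256(extKey, "object-encryption-key generation" || nonce).
func deriveObjectKey(extKey []byte) (key [32]byte, nonce [32]byte, err error) {
	// Random per-object nonce, as read from the entropy source in the hunk.
	if _, err = rand.Read(nonce[:]); err != nil {
		return key, nonce, err
	}
	mac := hmac.New(sha256.New, extKey)
	mac.Write([]byte("object-encryption-key generation"))
	mac.Write(nonce[:])
	mac.Sum(key[:0]) // append the 32-byte MAC into the key array
	return key, nonce, nil
}

func main() {
	extKey := make([]byte, 32) // stand-in for a KMS- or client-provided root key
	key, nonce, err := deriveObjectKey(extKey)
	if err != nil {
		panic(err)
	}
	fmt.Println("nonce:", hex.EncodeToString(nonce[:]))
	fmt.Println("key:  ", hex.EncodeToString(key[:]))
}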
@@ -106,6 +109,7 @@ func RemoveInternalEntries(metadata map[string]string) { delete(metadata, MetaSealedKeyKMS) delete(metadata, MetaKeyID) delete(metadata, MetaDataEncryptionKey) + delete(metadata, MetaSsecCRC) } // IsSourceEncrypted returns true if the source is encrypted diff --git a/internal/crypto/metadata_test.go b/internal/crypto/metadata_test.go index df2ed47646671..612cf19c2ffc1 100644 --- a/internal/crypto/metadata_test.go +++ b/internal/crypto/metadata_test.go @@ -313,8 +313,8 @@ var s3CreateMetadataTests = []struct { } func TestS3CreateMetadata(t *testing.T) { - defer func(l bool) { logger.DisableErrorLog = l }(logger.DisableErrorLog) - logger.DisableErrorLog = true + defer func(l bool) { logger.DisableLog = l }(logger.DisableLog) + logger.DisableLog = true for i, test := range s3CreateMetadataTests { metadata := S3.CreateMetadata(nil, test.KeyID, test.SealedDataKey, test.SealedKey) keyID, kmsKey, sealedKey, err := S3.ParseMetadata(metadata) @@ -358,8 +358,8 @@ var ssecCreateMetadataTests = []struct { } func TestSSECCreateMetadata(t *testing.T) { - defer func(l bool) { logger.DisableErrorLog = l }(logger.DisableErrorLog) - logger.DisableErrorLog = true + defer func(l bool) { logger.DisableLog = l }(logger.DisableLog) + logger.DisableLog = true for i, test := range ssecCreateMetadataTests { metadata := SSEC.CreateMetadata(nil, test.SealedKey) sealedKey, err := SSEC.ParseMetadata(metadata) diff --git a/internal/crypto/sse-c.go b/internal/crypto/sse-c.go index 2b5a0d3bb44fd..2b96fc02ccc4d 100644 --- a/internal/crypto/sse-c.go +++ b/internal/crypto/sse-c.go @@ -98,7 +98,7 @@ func (ssec) ParseHTTP(h http.Header) (key [32]byte, err error) { func (s3 ssec) UnsealObjectKey(h http.Header, metadata map[string]string, bucket, object string) (key ObjectKey, err error) { clientKey, err := s3.ParseHTTP(h) if err != nil { - return + return key, err } return unsealObjectKey(clientKey[:], metadata, bucket, object) } diff --git a/internal/crypto/sse-kms.go b/internal/crypto/sse-kms.go index 594262546649b..dd0aa46a27c25 100644 --- a/internal/crypto/sse-kms.go +++ b/internal/crypto/sse-kms.go @@ -106,7 +106,7 @@ func (ssekms) IsEncrypted(metadata map[string]string) bool { // UnsealObjectKey extracts and decrypts the sealed object key // from the metadata using KMS and returns the decrypted object // key. -func (s3 ssekms) UnsealObjectKey(k kms.KMS, metadata map[string]string, bucket, object string) (key ObjectKey, err error) { +func (s3 ssekms) UnsealObjectKey(k *kms.KMS, metadata map[string]string, bucket, object string) (key ObjectKey, err error) { if k == nil { return key, Errorf("KMS not configured") } @@ -120,7 +120,11 @@ func (s3 ssekms) UnsealObjectKey(k kms.KMS, metadata map[string]string, bucket, } else if _, ok := ctx[bucket]; !ok { ctx[bucket] = path.Join(bucket, object) } - unsealKey, err := k.DecryptKey(keyID, kmsKey, ctx) + unsealKey, err := k.Decrypt(context.TODO(), &kms.DecryptRequest{ + Name: keyID, + Ciphertext: kmsKey, + AssociatedData: ctx, + }) if err != nil { return key, err } diff --git a/internal/crypto/sse-s3.go b/internal/crypto/sse-s3.go index bda3449c0dc6b..ce34d5a4fc432 100644 --- a/internal/crypto/sse-s3.go +++ b/internal/crypto/sse-s3.go @@ -71,7 +71,7 @@ func (sses3) IsEncrypted(metadata map[string]string) bool { // UnsealObjectKey extracts and decrypts the sealed object key // from the metadata using KMS and returns the decrypted object // key. 
-func (s3 sses3) UnsealObjectKey(k kms.KMS, metadata map[string]string, bucket, object string) (key ObjectKey, err error) { +func (s3 sses3) UnsealObjectKey(k *kms.KMS, metadata map[string]string, bucket, object string) (key ObjectKey, err error) { if k == nil { return key, Errorf("KMS not configured") } @@ -79,7 +79,11 @@ func (s3 sses3) UnsealObjectKey(k kms.KMS, metadata map[string]string, bucket, o if err != nil { return key, err } - unsealKey, err := k.DecryptKey(keyID, kmsKey, kms.Context{bucket: path.Join(bucket, object)}) + unsealKey, err := k.Decrypt(context.TODO(), &kms.DecryptRequest{ + Name: keyID, + Ciphertext: kmsKey, + AssociatedData: kms.Context{bucket: path.Join(bucket, object)}, + }) if err != nil { return key, err } @@ -92,7 +96,7 @@ func (s3 sses3) UnsealObjectKey(k kms.KMS, metadata map[string]string, bucket, o // keys. // // The metadata, buckets and objects slices must have the same length. -func (s3 sses3) UnsealObjectKeys(ctx context.Context, k kms.KMS, metadata []map[string]string, buckets, objects []string) ([]ObjectKey, error) { +func (s3 sses3) UnsealObjectKeys(ctx context.Context, k *kms.KMS, metadata []map[string]string, buckets, objects []string) ([]ObjectKey, error) { if k == nil { return nil, Errorf("KMS not configured") } @@ -100,45 +104,8 @@ func (s3 sses3) UnsealObjectKeys(ctx context.Context, k kms.KMS, metadata []map[ if len(metadata) != len(buckets) || len(metadata) != len(objects) { return nil, Errorf("invalid metadata/object count: %d != %d != %d", len(metadata), len(buckets), len(objects)) } - - keyIDs := make([]string, 0, len(metadata)) - kmsKeys := make([][]byte, 0, len(metadata)) - sealedKeys := make([]SealedKey, 0, len(metadata)) - - sameKeyID := true + keys := make([]ObjectKey, 0, len(metadata)) for i := range metadata { - keyID, kmsKey, sealedKey, err := s3.ParseMetadata(metadata[i]) - if err != nil { - return nil, err - } - keyIDs = append(keyIDs, keyID) - kmsKeys = append(kmsKeys, kmsKey) - sealedKeys = append(sealedKeys, sealedKey) - - if i > 0 && keyID != keyIDs[i-1] { - sameKeyID = false - } - } - if sameKeyID { - contexts := make([]kms.Context, 0, len(keyIDs)) - for i := range buckets { - contexts = append(contexts, kms.Context{buckets[i]: path.Join(buckets[i], objects[i])}) - } - unsealKeys, err := k.DecryptAll(ctx, keyIDs[0], kmsKeys, contexts) - if err != nil { - return nil, err - } - keys := make([]ObjectKey, len(unsealKeys)) - for i := range keys { - if err := keys[i].Unseal(unsealKeys[i], sealedKeys[i], s3.String(), buckets[i], objects[i]); err != nil { - return nil, err - } - } - return keys, nil - } - - keys := make([]ObjectKey, 0, len(keyIDs)) - for i := range keyIDs { key, err := s3.UnsealObjectKey(k, metadata[i], buckets[i], objects[i]) if err != nil { return nil, err diff --git a/internal/crypto/sse.go b/internal/crypto/sse.go index 422ff1488d268..e0daf750cd166 100644 --- a/internal/crypto/sse.go +++ b/internal/crypto/sse.go @@ -24,7 +24,6 @@ import ( "io" "net/http" - "github.com/minio/minio/internal/fips" "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/logger" "github.com/minio/sio" @@ -82,7 +81,7 @@ func Requested(h http.Header) bool { func (sse ssecCopy) UnsealObjectKey(h http.Header, metadata map[string]string, bucket, object string) (key ObjectKey, err error) { clientKey, err := sse.ParseHTTP(h) if err != nil { - return + return key, err } return unsealObjectKey(clientKey[:], metadata, bucket, object) } @@ -92,16 +91,16 @@ func (sse ssecCopy) UnsealObjectKey(h http.Header, metadata 
map[string]string, b func unsealObjectKey(clientKey []byte, metadata map[string]string, bucket, object string) (key ObjectKey, err error) { sealedKey, err := SSEC.ParseMetadata(metadata) if err != nil { - return + return key, err } err = key.Unseal(clientKey, sealedKey, SSEC.String(), bucket, object) - return + return key, err } // EncryptSinglePart encrypts an io.Reader which must be the // body of a single-part PUT request. func EncryptSinglePart(r io.Reader, key ObjectKey) io.Reader { - r, err := sio.EncryptReader(r, sio.Config{MinVersion: sio.Version20, Key: key[:], CipherSuites: fips.DARECiphers()}) + r, err := sio.EncryptReader(r, sio.Config{MinVersion: sio.Version20, Key: key[:]}) if err != nil { logger.CriticalIf(context.Background(), errors.New("Unable to encrypt io.Reader using object key")) } @@ -123,7 +122,7 @@ func DecryptSinglePart(w io.Writer, offset, length int64, key ObjectKey) io.Writ const PayloadSize = 1 << 16 // DARE 2.0 w = ioutil.LimitedWriter(w, offset%PayloadSize, length) - decWriter, err := sio.DecryptWriter(w, sio.Config{Key: key[:], CipherSuites: fips.DARECiphers()}) + decWriter, err := sio.DecryptWriter(w, sio.Config{Key: key[:]}) if err != nil { logger.CriticalIf(context.Background(), errors.New("Unable to decrypt io.Writer using object key")) } diff --git a/internal/deadlineconn/deadlineconn.go b/internal/deadlineconn/deadlineconn.go index 25b479ea3b6fb..95bb43efff772 100644 --- a/internal/deadlineconn/deadlineconn.go +++ b/internal/deadlineconn/deadlineconn.go @@ -19,32 +19,80 @@ package deadlineconn import ( + "context" "net" + "sync" + "sync/atomic" "time" ) +// updateInterval is the minimum time between deadline updates. +const updateInterval = 250 * time.Millisecond + // DeadlineConn - is a generic stream-oriented network connection supporting buffered reader and read/write timeout. type DeadlineConn struct { net.Conn - readDeadline time.Duration // sets the read deadline on a connection. - writeDeadline time.Duration // sets the write deadline on a connection. + readDeadline time.Duration // sets the read deadline on a connection. + readSetAt time.Time + writeDeadline time.Duration // sets the write deadline on a connection. + writeSetAt time.Time + abortReads, abortWrites atomic.Bool // A deadline was set to indicate caller wanted the conn to time out. + infReads, infWrites atomic.Bool + mu sync.Mutex +} + +// Unwrap will unwrap the connection and remove the deadline if applied. +// If not a *DeadlineConn, the unmodified net.Conn is returned. +func Unwrap(c net.Conn) net.Conn { + if dc, ok := c.(*DeadlineConn); ok { + return dc.Conn + } + return c } // Sets read deadline func (c *DeadlineConn) setReadDeadline() { - if c.readDeadline > 0 { - c.SetReadDeadline(time.Now().UTC().Add(c.readDeadline)) + // Do not set a Read deadline, if upstream wants to cancel all reads. + if c.readDeadline <= 0 || c.abortReads.Load() || c.infReads.Load() { + return + } + + c.mu.Lock() + defer c.mu.Unlock() + if c.abortReads.Load() { + return + } + + now := time.Now() + if now.Sub(c.readSetAt) > updateInterval { + c.Conn.SetReadDeadline(now.Add(c.readDeadline + updateInterval)) + c.readSetAt = now } } func (c *DeadlineConn) setWriteDeadline() { - if c.writeDeadline > 0 { - c.SetWriteDeadline(time.Now().UTC().Add(c.writeDeadline)) + // Do not set a Write deadline, if upstream wants to cancel all reads. 
+ if c.writeDeadline <= 0 || c.abortWrites.Load() || c.infWrites.Load() { + return + } + + c.mu.Lock() + defer c.mu.Unlock() + if c.abortWrites.Load() { + return + } + now := time.Now() + if now.Sub(c.writeSetAt) > updateInterval { + c.Conn.SetWriteDeadline(now.Add(c.writeDeadline + updateInterval)) + c.writeSetAt = now } } // Read - reads data from the connection using wrapped buffered reader. func (c *DeadlineConn) Read(b []byte) (n int, err error) { + if c.abortReads.Load() { + return 0, context.DeadlineExceeded + } c.setReadDeadline() n, err = c.Conn.Read(b) return n, err @@ -52,11 +100,62 @@ func (c *DeadlineConn) Read(b []byte) (n int, err error) { // Write - writes data to the connection. func (c *DeadlineConn) Write(b []byte) (n int, err error) { + if c.abortWrites.Load() { + return 0, context.DeadlineExceeded + } c.setWriteDeadline() n, err = c.Conn.Write(b) return n, err } +// SetDeadline will set the deadline for reads and writes. +// A zero value for t means I/O operations will not time out. +func (c *DeadlineConn) SetDeadline(t time.Time) error { + c.mu.Lock() + defer c.mu.Unlock() + + c.readSetAt = time.Time{} + c.writeSetAt = time.Time{} + c.abortReads.Store(!t.IsZero() && time.Until(t) < 0) + c.abortWrites.Store(!t.IsZero() && time.Until(t) < 0) + c.infReads.Store(t.IsZero()) + c.infWrites.Store(t.IsZero()) + return c.Conn.SetDeadline(t) +} + +// SetReadDeadline sets the deadline for future Read calls +// and any currently-blocked Read call. +// A zero value for t means Read will not time out. +func (c *DeadlineConn) SetReadDeadline(t time.Time) error { + c.mu.Lock() + defer c.mu.Unlock() + c.abortReads.Store(!t.IsZero() && time.Until(t) < 0) + c.infReads.Store(t.IsZero()) + c.readSetAt = time.Time{} + return c.Conn.SetReadDeadline(t) +} + +// SetWriteDeadline sets the deadline for future Write calls +// and any currently-blocked Write call. +// Even if write times out, it may return n > 0, indicating that +// some of the data was successfully written. +// A zero value for t means Write will not time out. +func (c *DeadlineConn) SetWriteDeadline(t time.Time) error { + c.mu.Lock() + defer c.mu.Unlock() + c.abortWrites.Store(!t.IsZero() && time.Until(t) < 0) + c.infWrites.Store(t.IsZero()) + c.writeSetAt = time.Time{} + return c.Conn.SetWriteDeadline(t) +} + +// Close wraps conn.Close and stops sending deadline updates. +func (c *DeadlineConn) Close() error { + c.abortReads.Store(true) + c.abortWrites.Store(true) + return c.Conn.Close() +} + // WithReadDeadline sets a new read side net.Conn deadline. func (c *DeadlineConn) WithReadDeadline(d time.Duration) *DeadlineConn { c.readDeadline = d diff --git a/internal/deadlineconn/deadlineconn_test.go b/internal/deadlineconn/deadlineconn_test.go index c8269f97b1b06..6921e47b1e476 100644 --- a/internal/deadlineconn/deadlineconn_test.go +++ b/internal/deadlineconn/deadlineconn_test.go @@ -19,6 +19,7 @@ package deadlineconn import ( "bufio" + "fmt" "io" "net" "sync" @@ -115,3 +116,77 @@ func TestBuffConnReadTimeout(t *testing.T) { wg.Wait() } + +// Test deadlineconn handles read timeout properly by reading two messages beyond deadline. +func TestBuffConnReadCheckTimeout(t *testing.T) { + l, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("unable to create listener. 
%v", err) + } + defer l.Close() + serverAddr := l.Addr().String() + + tcpListener, ok := l.(*net.TCPListener) + if !ok { + t.Fatalf("failed to assert to net.TCPListener") + } + var cerr error + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + + tcpConn, terr := tcpListener.AcceptTCP() + if terr != nil { + cerr = fmt.Errorf("failed to accept new connection. %v", terr) + return + } + deadlineconn := New(tcpConn) + deadlineconn.WithReadDeadline(time.Second) + deadlineconn.WithWriteDeadline(time.Second) + defer deadlineconn.Close() + + // Read a line + b := make([]byte, 12) + _, terr = deadlineconn.Read(b) + if terr != nil { + cerr = fmt.Errorf("failed to read from client. %v", terr) + return + } + received := string(b) + if received != "message one\n" { + cerr = fmt.Errorf(`server: expected: "message one\n", got: %v`, received) + return + } + + // Set a deadline in the past to indicate we want the next read to fail. + // Ensure we don't override it on read. + deadlineconn.SetReadDeadline(time.Unix(1, 0)) + + // Be sure to exceed update interval + time.Sleep(updateInterval * 2) + + _, terr = deadlineconn.Read(b) + if terr == nil { + cerr = fmt.Errorf("could read from client, expected error, got %v", terr) + return + } + }() + + c, err := net.Dial("tcp", serverAddr) + if err != nil { + t.Fatalf("unable to connect to server. %v", err) + } + defer c.Close() + + _, err = io.WriteString(c, "message one\n") + if err != nil { + t.Fatalf("failed to write to server. %v", err) + } + _, _ = io.WriteString(c, "message two\n") + + wg.Wait() + if cerr != nil { + t.Fatal(cerr) + } +} diff --git a/internal/disk/stat_linux.go b/internal/disk/stat_linux.go index 33f7570226157..390cdcede45af 100644 --- a/internal/disk/stat_linux.go +++ b/internal/disk/stat_linux.go @@ -146,7 +146,7 @@ func readDriveStats(statsFile string) (iostats IOStats, err error) { iostats.DiscardSectors = stats[13] iostats.DiscardTicks = stats[14] } - return + return iostats, err } func readStat(fileName string) (stats []uint64, err error) { diff --git a/internal/disk/stat_test.go b/internal/disk/stat_test.go index a33d2e68fc13c..73ca017f513e1 100644 --- a/internal/disk/stat_test.go +++ b/internal/disk/stat_test.go @@ -23,14 +23,10 @@ package disk import ( "os" "reflect" - "runtime" "testing" ) func TestReadDriveStats(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("skipping this test in windows") - } testCases := []struct { stat string expectedIOStats IOStats @@ -107,7 +103,7 @@ func TestReadDriveStats(t *testing.T) { for _, testCase := range testCases { testCase := testCase t.Run("", func(t *testing.T) { - tmpfile, err := os.CreateTemp("", "testfile") + tmpfile, err := os.CreateTemp(t.TempDir(), "testfile") if err != nil { t.Error(err) } diff --git a/internal/dsync/drwmutex.go b/internal/dsync/drwmutex.go index 682eb12188275..7d6506eaeefbb 100644 --- a/internal/dsync/drwmutex.go +++ b/internal/dsync/drwmutex.go @@ -21,6 +21,7 @@ import ( "context" "errors" "math/rand" + "slices" "sort" "strconv" "sync" @@ -28,8 +29,8 @@ import ( xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/mcontext" - "github.com/minio/pkg/v2/console" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/console" + "github.com/minio/pkg/v3/env" ) // Indicator if logging is enabled. @@ -60,7 +61,7 @@ func init() { ) } -func log(format string, data ...interface{}) { +func log(format string, data ...any) { if dsyncLog { console.Printf(format, data...) 
} @@ -381,7 +382,7 @@ func refreshLock(ctx context.Context, ds *Dsync, id, source string, quorum int) lockNotFound, lockRefreshed := 0, 0 done := false - for i := 0; i < len(restClnts); i++ { + for range len(restClnts) { select { case refreshResult := <-ch: if refreshResult.offline { @@ -433,7 +434,7 @@ func lock(ctx context.Context, ds *Dsync, locks *[]string, id, source string, is UID: id, Resources: names, Source: source, - Quorum: quorum, + Quorum: &quorum, } // Combined timeout for the lock attempt. @@ -443,6 +444,7 @@ func lock(ctx context.Context, ds *Dsync, locks *[]string, id, source string, is // Special context for NetLockers - do not use timeouts. // Also, pass the trace context info if found for debugging netLockCtx := context.Background() + tc, ok := ctx.Value(mcontext.ContextTraceKey).(*mcontext.TraceCtxt) if ok { netLockCtx = context.WithValue(netLockCtx, mcontext.ContextTraceKey, tc) @@ -620,13 +622,7 @@ func (dm *DRWMutex) Unlock(ctx context.Context) { defer dm.m.Unlock() // Check if minimally a single bool is set in the writeLocks array - lockFound := false - for _, uid := range dm.writeLocks { - if isLocked(uid) { - lockFound = true - break - } - } + lockFound := slices.ContainsFunc(dm.writeLocks, isLocked) if !lockFound { panic("Trying to Unlock() while no Lock() is active") } @@ -639,9 +635,19 @@ func (dm *DRWMutex) Unlock(ctx context.Context) { tolerance := len(restClnts) / 2 isReadLock := false - for !releaseAll(ctx, dm.clnt, tolerance, owner, &locks, isReadLock, restClnts, dm.Names...) { - time.Sleep(time.Duration(dm.rng.Float64() * float64(dm.lockRetryMinInterval))) - } + started := time.Now() + // Do async unlocking. + // This means unlock will no longer block on the network or missing quorum. + go func() { + ctx, done := context.WithTimeout(ctx, drwMutexUnlockCallTimeout) + defer done() + for !releaseAll(ctx, dm.clnt, tolerance, owner, &locks, isReadLock, restClnts, dm.Names...) { + time.Sleep(time.Duration(dm.rng.Float64() * float64(dm.lockRetryMinInterval))) + if time.Since(started) > dm.clnt.Timeouts.UnlockCall { + return + } + } + }() } // RUnlock releases a read lock held on dm. @@ -661,13 +667,7 @@ func (dm *DRWMutex) RUnlock(ctx context.Context) { defer dm.m.Unlock() // Check if minimally a single bool is set in the writeLocks array - lockFound := false - for _, uid := range dm.readLocks { - if isLocked(uid) { - lockFound = true - break - } - } + lockFound := slices.ContainsFunc(dm.readLocks, isLocked) if !lockFound { panic("Trying to RUnlock() while no RLock() is active") } @@ -678,11 +678,20 @@ func (dm *DRWMutex) RUnlock(ctx context.Context) { // Tolerance is not set, defaults to half of the locker clients. tolerance := len(restClnts) / 2 - isReadLock := true - for !releaseAll(ctx, dm.clnt, tolerance, owner, &locks, isReadLock, restClnts, dm.Names...) { - time.Sleep(time.Duration(dm.rng.Float64() * float64(dm.lockRetryMinInterval))) - } + started := time.Now() + // Do async unlocking. + // This means unlock will no longer block on the network or missing quorum. + go func() { + for !releaseAll(ctx, dm.clnt, tolerance, owner, &locks, isReadLock, restClnts, dm.Names...) { + time.Sleep(time.Duration(dm.rng.Float64() * float64(dm.lockRetryMinInterval))) + // If we have been waiting for more than the force unlock timeout, return + // Remotes will have canceled due to the missing refreshes anyway. 
+ if time.Since(started) > dm.clnt.Timeouts.UnlockCall { + return + } + } + }() } // sendRelease sends a release message to a node that previously granted a lock diff --git a/internal/dsync/drwmutex_test.go b/internal/dsync/drwmutex_test.go index 526e1717d539b..78d37b4e81f08 100644 --- a/internal/dsync/drwmutex_test.go +++ b/internal/dsync/drwmutex_test.go @@ -33,14 +33,14 @@ const ( func testSimpleWriteLock(t *testing.T, duration time.Duration) (locked bool) { drwm1 := NewDRWMutex(ds, "simplelock") - ctx1, cancel1 := context.WithCancel(context.Background()) + ctx1, cancel1 := context.WithCancel(t.Context()) if !drwm1.GetRLock(ctx1, cancel1, id, source, Options{Timeout: time.Second}) { panic("Failed to acquire read lock") } // fmt.Println("1st read lock acquired, waiting...") drwm2 := NewDRWMutex(ds, "simplelock") - ctx2, cancel2 := context.WithCancel(context.Background()) + ctx2, cancel2 := context.WithCancel(t.Context()) if !drwm2.GetRLock(ctx2, cancel2, id, source, Options{Timeout: time.Second}) { panic("Failed to acquire read lock") } @@ -48,28 +48,28 @@ func testSimpleWriteLock(t *testing.T, duration time.Duration) (locked bool) { go func() { time.Sleep(2 * testDrwMutexAcquireTimeout) - drwm1.RUnlock(context.Background()) + drwm1.RUnlock(t.Context()) // fmt.Println("1st read lock released, waiting...") }() go func() { time.Sleep(3 * testDrwMutexAcquireTimeout) - drwm2.RUnlock(context.Background()) + drwm2.RUnlock(t.Context()) // fmt.Println("2nd read lock released, waiting...") }() drwm3 := NewDRWMutex(ds, "simplelock") // fmt.Println("Trying to acquire write lock, waiting...") - ctx3, cancel3 := context.WithCancel(context.Background()) + ctx3, cancel3 := context.WithCancel(t.Context()) locked = drwm3.GetLock(ctx3, cancel3, id, source, Options{Timeout: duration}) if locked { // fmt.Println("Write lock acquired, waiting...") time.Sleep(testDrwMutexAcquireTimeout) - drwm3.Unlock(context.Background()) + drwm3.Unlock(t.Context()) } // fmt.Println("Write lock failed due to timeout") - return + return locked } func TestSimpleWriteLockAcquired(t *testing.T) { @@ -94,29 +94,29 @@ func testDualWriteLock(t *testing.T, duration time.Duration) (locked bool) { drwm1 := NewDRWMutex(ds, "duallock") // fmt.Println("Getting initial write lock") - ctx1, cancel1 := context.WithCancel(context.Background()) + ctx1, cancel1 := context.WithCancel(t.Context()) if !drwm1.GetLock(ctx1, cancel1, id, source, Options{Timeout: time.Second}) { panic("Failed to acquire initial write lock") } go func() { time.Sleep(3 * testDrwMutexAcquireTimeout) - drwm1.Unlock(context.Background()) + drwm1.Unlock(t.Context()) // fmt.Println("Initial write lock released, waiting...") }() // fmt.Println("Trying to acquire 2nd write lock, waiting...") drwm2 := NewDRWMutex(ds, "duallock") - ctx2, cancel2 := context.WithCancel(context.Background()) + ctx2, cancel2 := context.WithCancel(t.Context()) locked = drwm2.GetLock(ctx2, cancel2, id, source, Options{Timeout: duration}) if locked { // fmt.Println("2nd write lock acquired, waiting...") time.Sleep(testDrwMutexAcquireTimeout) - drwm2.Unlock(context.Background()) + drwm2.Unlock(t.Context()) } // fmt.Println("2nd write lock failed due to timeout") - return + return locked } func TestDualWriteLockAcquired(t *testing.T) { @@ -157,18 +157,18 @@ func doTestParallelReaders(numReaders, gomaxprocs int) { clocked := make(chan bool) cunlock := make(chan bool) cdone := make(chan bool) - for i := 0; i < numReaders; i++ { + for range numReaders { go parallelReader(context.Background(), m, clocked, 
cunlock, cdone) } // Wait for all parallel RLock()s to succeed. - for i := 0; i < numReaders; i++ { + for range numReaders { <-clocked } - for i := 0; i < numReaders; i++ { + for range numReaders { cunlock <- true } // Wait for the goroutines to finish. - for i := 0; i < numReaders; i++ { + for range numReaders { <-cdone } } @@ -184,13 +184,13 @@ func TestParallelReaders(t *testing.T) { // Borrowed from rwmutex_test.go func reader(resource string, numIterations int, activity *int32, cdone chan bool) { rwm := NewDRWMutex(ds, resource) - for i := 0; i < numIterations; i++ { + for range numIterations { if rwm.GetRLock(context.Background(), nil, id, source, Options{Timeout: time.Second}) { n := atomic.AddInt32(activity, 1) if n < 1 || n >= 10000 { panic(fmt.Sprintf("wlock(%d)\n", n)) } - for i := 0; i < 100; i++ { + for range 100 { } atomic.AddInt32(activity, -1) rwm.RUnlock(context.Background()) @@ -202,13 +202,13 @@ func reader(resource string, numIterations int, activity *int32, cdone chan bool // Borrowed from rwmutex_test.go func writer(resource string, numIterations int, activity *int32, cdone chan bool) { rwm := NewDRWMutex(ds, resource) - for i := 0; i < numIterations; i++ { + for range numIterations { if rwm.GetLock(context.Background(), nil, id, source, Options{Timeout: time.Second}) { n := atomic.AddInt32(activity, 10000) if n != 10000 { panic(fmt.Sprintf("wlock(%d)\n", n)) } - for i := 0; i < 100; i++ { + for range 100 { } atomic.AddInt32(activity, -10000) rwm.Unlock(context.Background()) @@ -268,7 +268,7 @@ func TestUnlockPanic(t *testing.T) { } }() mu := NewDRWMutex(ds, "test") - mu.Unlock(context.Background()) + mu.Unlock(t.Context()) } // Borrowed from rwmutex_test.go @@ -278,10 +278,10 @@ func TestUnlockPanic2(t *testing.T) { if recover() == nil { t.Fatalf("unlock of unlocked RWMutex did not panic") } - mu.RUnlock(context.Background()) // Unlock, so -test.count > 1 works + mu.RUnlock(t.Context()) // Unlock, so -test.count > 1 works }() mu.RLock(id, source) - mu.Unlock(context.Background()) + mu.Unlock(t.Context()) } // Borrowed from rwmutex_test.go @@ -292,7 +292,7 @@ func TestRUnlockPanic(t *testing.T) { } }() mu := NewDRWMutex(ds, "test") - mu.RUnlock(context.Background()) + mu.RUnlock(t.Context()) } // Borrowed from rwmutex_test.go @@ -302,10 +302,10 @@ func TestRUnlockPanic2(t *testing.T) { if recover() == nil { t.Fatalf("read unlock of unlocked RWMutex did not panic") } - mu.Unlock(context.Background()) // Unlock, so -test.count > 1 works + mu.Unlock(t.Context()) // Unlock, so -test.count > 1 works }() mu.Lock(id, source) - mu.RUnlock(context.Background()) + mu.RUnlock(t.Context()) } // Borrowed from rwmutex_test.go @@ -320,14 +320,14 @@ func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) { foo++ if foo%writeRatio == 0 { rwm.Lock(id, source) - rwm.Unlock(context.Background()) + rwm.Unlock(b.Context()) } else { rwm.RLock(id, source) for i := 0; i != localWork; i++ { foo *= 2 foo /= 2 } - rwm.RUnlock(context.Background()) + rwm.RUnlock(b.Context()) } } _ = foo diff --git a/internal/dsync/dsync-server_test.go b/internal/dsync/dsync-server_test.go index 0fa48f4d1928e..bdf7ac7bb6e13 100644 --- a/internal/dsync/dsync-server_test.go +++ b/internal/dsync/dsync-server_test.go @@ -149,13 +149,13 @@ func (lh *lockServerHandler) RLockHandler(w http.ResponseWriter, r *http.Request } func stopLockServers() { - for i := 0; i < numberOfNodes; i++ { + for i := range numberOfNodes { nodes[i].Close() } } func startLockServers() { - for i := 0; i < numberOfNodes; i++ { + for i := 
range numberOfNodes { lsrv := &lockServer{ mutex: sync.Mutex{}, lockMap: make(map[string]int64), diff --git a/internal/dsync/dsync_test.go b/internal/dsync/dsync_test.go index b4556bbe7c4d0..18b6c0ba87cf0 100644 --- a/internal/dsync/dsync_test.go +++ b/internal/dsync/dsync_test.go @@ -42,7 +42,7 @@ func TestMain(m *testing.M) { // Initialize locker clients for dsync. var clnts []NetLocker - for i := 0; i < len(nodes); i++ { + for i := range nodes { clnts = append(clnts, newClient(nodes[i].URL)) } @@ -69,7 +69,7 @@ func TestSimpleLock(t *testing.T) { // fmt.Println("Lock acquired, waiting...") time.Sleep(testDrwMutexRefreshCallTimeout) - dm.Unlock(context.Background()) + dm.Unlock(t.Context()) } func TestSimpleLockUnlockMultipleTimes(t *testing.T) { @@ -77,23 +77,23 @@ func TestSimpleLockUnlockMultipleTimes(t *testing.T) { dm.Lock(id, source) time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond) - dm.Unlock(context.Background()) + dm.Unlock(t.Context()) dm.Lock(id, source) time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond) - dm.Unlock(context.Background()) + dm.Unlock(t.Context()) dm.Lock(id, source) time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond) - dm.Unlock(context.Background()) + dm.Unlock(t.Context()) dm.Lock(id, source) time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond) - dm.Unlock(context.Background()) + dm.Unlock(t.Context()) dm.Lock(id, source) time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond) - dm.Unlock(context.Background()) + dm.Unlock(t.Context()) } // Test two locks for same resource, one succeeds, one fails (after timeout) @@ -108,7 +108,7 @@ func TestTwoSimultaneousLocksForSameResource(t *testing.T) { time.Sleep(5 * testDrwMutexAcquireTimeout) // fmt.Println("Unlocking dm1") - dm1st.Unlock(context.Background()) + dm1st.Unlock(t.Context()) }() dm2nd.Lock(id, source) @@ -116,7 +116,7 @@ func TestTwoSimultaneousLocksForSameResource(t *testing.T) { // fmt.Printf("2nd lock obtained after 1st lock is released\n") time.Sleep(testDrwMutexRefreshCallTimeout * 2) - dm2nd.Unlock(context.Background()) + dm2nd.Unlock(t.Context()) } // Test three locks for same resource, one succeeds, one fails (after timeout) @@ -134,7 +134,7 @@ func TestThreeSimultaneousLocksForSameResource(t *testing.T) { time.Sleep(2 * testDrwMutexAcquireTimeout) // fmt.Println("Unlocking dm1") - dm1st.Unlock(context.Background()) + dm1st.Unlock(t.Context()) }() expect += 2 * testDrwMutexAcquireTimeout @@ -151,7 +151,7 @@ func TestThreeSimultaneousLocksForSameResource(t *testing.T) { time.Sleep(2 * testDrwMutexAcquireTimeout) // fmt.Println("Unlocking dm2") - dm2nd.Unlock(context.Background()) + dm2nd.Unlock(t.Context()) }() dm3rd.Lock(id, source) @@ -159,7 +159,7 @@ func TestThreeSimultaneousLocksForSameResource(t *testing.T) { // fmt.Printf("3rd lock obtained after 1st & 2nd locks are released\n") time.Sleep(testDrwMutexRefreshCallTimeout) - dm3rd.Unlock(context.Background()) + dm3rd.Unlock(t.Context()) }() expect += 2*testDrwMutexAcquireTimeout + testDrwMutexRefreshCallTimeout @@ -173,7 +173,7 @@ func TestThreeSimultaneousLocksForSameResource(t *testing.T) { time.Sleep(2 * testDrwMutexAcquireTimeout) // fmt.Println("Unlocking dm3") - dm3rd.Unlock(context.Background()) + dm3rd.Unlock(t.Context()) }() dm2nd.Lock(id, source) @@ -181,7 +181,7 @@ func TestThreeSimultaneousLocksForSameResource(t *testing.T) { // fmt.Printf("2nd lock obtained after 1st & 3rd locks are released\n") time.Sleep(testDrwMutexRefreshCallTimeout) - 
dm2nd.Unlock(context.Background()) + dm2nd.Unlock(t.Context()) }() expect += 2*testDrwMutexAcquireTimeout + testDrwMutexRefreshCallTimeout @@ -201,8 +201,8 @@ func TestTwoSimultaneousLocksForDifferentResources(t *testing.T) { dm1.Lock(id, source) dm2.Lock(id, source) - dm1.Unlock(context.Background()) - dm2.Unlock(context.Background()) + dm1.Unlock(t.Context()) + dm2.Unlock(t.Context()) } // Test refreshing lock - refresh should always return true @@ -214,7 +214,7 @@ func TestSuccessfulLockRefresh(t *testing.T) { dm := NewDRWMutex(ds, "aap") dm.refreshInterval = testDrwMutexRefreshInterval - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) if !dm.GetLock(ctx, cancel, id, source, Options{Timeout: 5 * time.Minute}) { t.Fatal("GetLock() should be successful") @@ -230,7 +230,7 @@ func TestSuccessfulLockRefresh(t *testing.T) { } // Should be safe operation in all cases - dm.Unlock(context.Background()) + dm.Unlock(t.Context()) } // Test canceling context while quorum servers report lock not found @@ -250,7 +250,7 @@ func TestFailedRefreshLock(t *testing.T) { var wg sync.WaitGroup wg.Add(1) - ctx, cl := context.WithCancel(context.Background()) + ctx, cl := context.WithCancel(t.Context()) cancel := func() { cl() wg.Done() @@ -267,7 +267,7 @@ func TestFailedRefreshLock(t *testing.T) { } // Should be safe operation in all cases - dm.Unlock(context.Background()) + dm.Unlock(t.Context()) } // Test Unlock should not timeout @@ -278,7 +278,7 @@ func TestUnlockShouldNotTimeout(t *testing.T) { dm := NewDRWMutex(ds, "aap") dm.refreshInterval = testDrwMutexUnlockCallTimeout - if !dm.GetLock(context.Background(), nil, id, source, Options{Timeout: 5 * time.Minute}) { + if !dm.GetLock(t.Context(), nil, id, source, Options{Timeout: 5 * time.Minute}) { t.Fatal("GetLock() should be successful") } @@ -290,9 +290,11 @@ func TestUnlockShouldNotTimeout(t *testing.T) { unlockReturned := make(chan struct{}, 1) go func() { - ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond) + ctx, cancel := context.WithTimeout(t.Context(), 500*time.Millisecond) defer cancel() dm.Unlock(ctx) + // Unlock is not blocking. Try to get a new lock. 
+ dm.GetLock(ctx, nil, id, source, Options{Timeout: 5 * time.Minute}) unlockReturned <- struct{}{} }() @@ -308,7 +310,7 @@ func TestUnlockShouldNotTimeout(t *testing.T) { // Borrowed from mutex_test.go func HammerMutex(m *DRWMutex, loops int, cdone chan bool) { - for i := 0; i < loops; i++ { + for range loops { m.Lock(id, source) m.Unlock(context.Background()) } @@ -323,10 +325,10 @@ func TestMutex(t *testing.T) { } c := make(chan bool) m := NewDRWMutex(ds, "test") - for i := 0; i < 10; i++ { + for range 10 { go HammerMutex(m, loops, c) } - for i := 0; i < 10; i++ { + for range 10 { <-c } } @@ -342,7 +344,7 @@ func BenchmarkMutexUncontended(b *testing.B) { mu := PaddedMutex{NewDRWMutex(ds, "")} for pb.Next() { mu.Lock(id, source) - mu.Unlock(context.Background()) + mu.Unlock(b.Context()) } }) } @@ -359,9 +361,9 @@ func benchmarkMutex(b *testing.B, slack, work bool) { foo := 0 for pb.Next() { mu.Lock(id, source) - mu.Unlock(context.Background()) + mu.Unlock(b.Context()) if work { - for i := 0; i < 100; i++ { + for range 100 { foo *= 2 foo /= 2 } @@ -408,7 +410,7 @@ func BenchmarkMutexNoSpin(b *testing.B) { m.Lock(id, source) acc0 -= 100 acc1 += 100 - m.Unlock(context.Background()) + m.Unlock(b.Context()) } else { for i := 0; i < len(data); i += 4 { data[i]++ @@ -440,7 +442,7 @@ func BenchmarkMutexSpin(b *testing.B) { m.Lock(id, source) acc0 -= 100 acc1 += 100 - m.Unlock(context.Background()) + m.Unlock(b.Context()) for i := 0; i < len(data); i += 4 { data[i]++ } diff --git a/internal/dsync/lock-args.go b/internal/dsync/lock-args.go index 8d7c2e7aee4b8..6fba08f972b46 100644 --- a/internal/dsync/lock-args.go +++ b/internal/dsync/lock-args.go @@ -27,16 +27,16 @@ type LockArgs struct { // Resources contains single or multiple entries to be locked/unlocked. Resources []string - // Source contains the line number, function and file name of the code - // on the client node that requested the lock. - Source string - // Owner represents unique ID for this instance, an owner who originally requested // the locked resource, useful primarily in figuring out stale locks. Owner string + // Source contains the line number, function and file name of the code + // on the client node that requested the lock. + Source string `msgp:"omitempty"` + // Quorum represents the expected quorum for this lock type. - Quorum int + Quorum *int `msgp:"omitempty"` } // ResponseCode is the response code for a locking request. diff --git a/internal/dsync/lock-args_gen.go b/internal/dsync/lock-args_gen.go index 1ac930ab6c068..98a5fe3e36ed4 100644 --- a/internal/dsync/lock-args_gen.go +++ b/internal/dsync/lock-args_gen.go @@ -1,7 +1,7 @@ -package dsync - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package dsync + import ( "github.com/tinylib/msgp/msgp" ) @@ -49,24 +49,36 @@ func (z *LockArgs) DecodeMsg(dc *msgp.Reader) (err error) { return } } - case "Source": - z.Source, err = dc.ReadString() - if err != nil { - err = msgp.WrapError(err, "Source") - return - } case "Owner": z.Owner, err = dc.ReadString() if err != nil { err = msgp.WrapError(err, "Owner") return } - case "Quorum": - z.Quorum, err = dc.ReadInt() + case "Source": + z.Source, err = dc.ReadString() if err != nil { - err = msgp.WrapError(err, "Quorum") + err = msgp.WrapError(err, "Source") return } + case "Quorum": + if dc.IsNil() { + err = dc.ReadNil() + if err != nil { + err = msgp.WrapError(err, "Quorum") + return + } + z.Quorum = nil + } else { + if z.Quorum == nil { + z.Quorum = new(int) + } + *z.Quorum, err = dc.ReadInt() + if err != nil { + err = msgp.WrapError(err, "Quorum") + return + } + } default: err = dc.Skip() if err != nil { @@ -108,24 +120,24 @@ func (z *LockArgs) EncodeMsg(en *msgp.Writer) (err error) { return } } - // write "Source" - err = en.Append(0xa6, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65) + // write "Owner" + err = en.Append(0xa5, 0x4f, 0x77, 0x6e, 0x65, 0x72) if err != nil { return } - err = en.WriteString(z.Source) + err = en.WriteString(z.Owner) if err != nil { - err = msgp.WrapError(err, "Source") + err = msgp.WrapError(err, "Owner") return } - // write "Owner" - err = en.Append(0xa5, 0x4f, 0x77, 0x6e, 0x65, 0x72) + // write "Source" + err = en.Append(0xa6, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65) if err != nil { return } - err = en.WriteString(z.Owner) + err = en.WriteString(z.Source) if err != nil { - err = msgp.WrapError(err, "Owner") + err = msgp.WrapError(err, "Source") return } // write "Quorum" @@ -133,10 +145,17 @@ func (z *LockArgs) EncodeMsg(en *msgp.Writer) (err error) { if err != nil { return } - err = en.WriteInt(z.Quorum) - if err != nil { - err = msgp.WrapError(err, "Quorum") - return + if z.Quorum == nil { + err = en.WriteNil() + if err != nil { + return + } + } else { + err = en.WriteInt(*z.Quorum) + if err != nil { + err = msgp.WrapError(err, "Quorum") + return + } } return } @@ -154,15 +173,19 @@ func (z *LockArgs) MarshalMsg(b []byte) (o []byte, err error) { for za0001 := range z.Resources { o = msgp.AppendString(o, z.Resources[za0001]) } - // string "Source" - o = append(o, 0xa6, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65) - o = msgp.AppendString(o, z.Source) // string "Owner" o = append(o, 0xa5, 0x4f, 0x77, 0x6e, 0x65, 0x72) o = msgp.AppendString(o, z.Owner) + // string "Source" + o = append(o, 0xa6, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65) + o = msgp.AppendString(o, z.Source) // string "Quorum" o = append(o, 0xa6, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d) - o = msgp.AppendInt(o, z.Quorum) + if z.Quorum == nil { + o = msgp.AppendNil(o) + } else { + o = msgp.AppendInt(o, *z.Quorum) + } return } @@ -209,24 +232,35 @@ func (z *LockArgs) UnmarshalMsg(bts []byte) (o []byte, err error) { return } } - case "Source": - z.Source, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Source") - return - } case "Owner": z.Owner, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Owner") return } - case "Quorum": - z.Quorum, bts, err = msgp.ReadIntBytes(bts) + case "Source": + z.Source, bts, err = msgp.ReadStringBytes(bts) if err != nil { - err = msgp.WrapError(err, "Quorum") + err = msgp.WrapError(err, "Source") return } + case "Quorum": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Quorum = nil + } 
else { + if z.Quorum == nil { + z.Quorum = new(int) + } + *z.Quorum, bts, err = msgp.ReadIntBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Quorum") + return + } + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -245,7 +279,12 @@ func (z *LockArgs) Msgsize() (s int) { for za0001 := range z.Resources { s += msgp.StringPrefixSize + len(z.Resources[za0001]) } - s += 7 + msgp.StringPrefixSize + len(z.Source) + 6 + msgp.StringPrefixSize + len(z.Owner) + 7 + msgp.IntSize + s += 6 + msgp.StringPrefixSize + len(z.Owner) + 7 + msgp.StringPrefixSize + len(z.Source) + 7 + if z.Quorum == nil { + s += msgp.NilSize + } else { + s += msgp.IntSize + } return } diff --git a/internal/dsync/lock-args_gen_test.go b/internal/dsync/lock-args_gen_test.go index d94a51525d2d8..c5de2cc081757 100644 --- a/internal/dsync/lock-args_gen_test.go +++ b/internal/dsync/lock-args_gen_test.go @@ -1,7 +1,7 @@ -package dsync - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package dsync + import ( "bytes" "testing" diff --git a/internal/dsync/locked_rand.go b/internal/dsync/locked_rand.go index 4c728ba439cdc..680b73842f46e 100644 --- a/internal/dsync/locked_rand.go +++ b/internal/dsync/locked_rand.go @@ -33,7 +33,7 @@ func (r *lockedRandSource) Int63() (n int64) { r.lk.Lock() n = r.src.Int63() r.lk.Unlock() - return + return n } // Seed uses the provided seed value to initialize the generator to a diff --git a/internal/dsync/utils.go b/internal/dsync/utils.go index 9debd558fa93d..6a6d2914861d6 100644 --- a/internal/dsync/utils.go +++ b/internal/dsync/utils.go @@ -22,16 +22,16 @@ import ( "time" ) -func backoffWait(min, unit, cap time.Duration) func(*rand.Rand, uint) time.Duration { +func backoffWait(minSleep, unit, maxSleep time.Duration) func(*rand.Rand, uint) time.Duration { if unit > time.Hour { // Protect against integer overflow panic("unit cannot exceed one hour") } return func(r *rand.Rand, attempt uint) time.Duration { - sleep := min + sleep := minSleep sleep += unit * time.Duration(attempt) - if sleep > cap { - sleep = cap + if sleep > maxSleep { + sleep = maxSleep } sleep -= time.Duration(r.Float64() * float64(sleep)) return sleep diff --git a/internal/etag/etag.go b/internal/etag/etag.go index b2b082771e3ea..78d0e5d4d7078 100644 --- a/internal/etag/etag.go +++ b/internal/etag/etag.go @@ -117,7 +117,6 @@ import ( "strconv" "strings" - "github.com/minio/minio/internal/fips" "github.com/minio/minio/internal/hash/sha256" xhttp "github.com/minio/minio/internal/http" "github.com/minio/sio" @@ -346,8 +345,7 @@ func Decrypt(key []byte, etag ETag) (ETag, error) { plaintext := make([]byte, 0, 16) etag, err := sio.DecryptBuffer(plaintext, etag, sio.Config{ - Key: decryptionKey, - CipherSuites: fips.DARECiphers(), + Key: decryptionKey, }) if err != nil { return nil, err @@ -389,7 +387,7 @@ func parse(s string, strict bool) (ETag, error) { // An S3 ETag may be a multipart ETag that // contains a '-' followed by a number. - // If the ETag does not a '-' is is either + // If the ETag does not a '-' is either // a singlepart or encrypted ETag. 
n := strings.IndexRune(s, '-') if n == -1 { diff --git a/internal/etag/etag_test.go b/internal/etag/etag_test.go index 288be862ee071..4d18d4f472f6b 100644 --- a/internal/etag/etag_test.go +++ b/internal/etag/etag_test.go @@ -18,7 +18,6 @@ package etag import ( - "context" "io" "net/http" "strings" @@ -138,7 +137,7 @@ var readerTests = []struct { // Reference values computed by: echo | m func TestReader(t *testing.T) { for i, test := range readerTests { - reader := NewReader(context.Background(), strings.NewReader(test.Content), test.ETag, nil) + reader := NewReader(t.Context(), strings.NewReader(test.Content), test.ETag, nil) if _, err := io.Copy(io.Discard, reader); err != nil { t.Fatalf("Test %d: read failed: %v", i, err) } diff --git a/internal/etag/reader.go b/internal/etag/reader.go index 9bbbd5036120d..56da3da95490a 100644 --- a/internal/etag/reader.go +++ b/internal/etag/reader.go @@ -178,9 +178,7 @@ func (u UUIDHash) Sum(b []byte) []byte { } // Reset - implement hash.Hash Reset -func (u UUIDHash) Reset() { - return -} +func (u UUIDHash) Reset() {} // Size - implement hash.Hash Size func (u UUIDHash) Size() int { diff --git a/internal/event/arn.go b/internal/event/arn.go index 4355338ba745c..6c2635603cf29 100644 --- a/internal/event/arn.go +++ b/internal/event/arn.go @@ -30,7 +30,7 @@ type ARN struct { // String - returns string representation. func (arn ARN) String() string { - if arn.TargetID.ID == "" && arn.TargetID.Name == "" && arn.region == "" { + if arn.ID == "" && arn.Name == "" && arn.region == "" { return "" } diff --git a/internal/event/config.go b/internal/event/config.go index dc3990184b7aa..de21764bfbfc4 100644 --- a/internal/event/config.go +++ b/internal/event/config.go @@ -30,7 +30,7 @@ import ( // ValidateFilterRuleValue - checks if given value is filter rule value or not. func ValidateFilterRuleValue(value string) error { - for _, segment := range strings.Split(value, "/") { + for segment := range strings.SplitSeq(value, "/") { if segment == "." || segment == ".." { return &ErrInvalidFilterValue{value} } @@ -139,7 +139,7 @@ func (ruleList FilterRuleList) Pattern() string { // S3Key - represents elements inside ... type S3Key struct { - RuleList FilterRuleList `xml:"S3Key,omitempty" json:"S3Key,omitempty"` + RuleList FilterRuleList `xml:"S3Key,omitempty" json:"S3Key"` } // MarshalXML implements a custom marshaller to support `omitempty` feature. 
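The event/config.go hunk above switches ValidateFilterRuleValue to strings.SplitSeq (added in Go 1.24), which yields the "/"-separated segments lazily instead of allocating a []string. A minimal stand-alone sketch of the same pattern; the hasDotSegment helper name below is illustrative and not part of the patch.

package main

import (
	"fmt"
	"strings"
)

// hasDotSegment walks the "/"-separated segments of value lazily via
// strings.SplitSeq (Go 1.24+), stopping at the first "." or ".." segment
// without building an intermediate slice.
func hasDotSegment(value string) bool {
	for segment := range strings.SplitSeq(value, "/") {
		if segment == "." || segment == ".." {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hasDotSegment("a/b/c"))  // false
	fmt.Println(hasDotSegment("a/../c")) // true
}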
diff --git a/internal/event/config_test.go b/internal/event/config_test.go index df0b2d9ba09f5..5190de38fca54 100644 --- a/internal/event/config_test.go +++ b/internal/event/config_test.go @@ -18,7 +18,6 @@ package event import ( - "context" "encoding/xml" "reflect" "strings" @@ -252,9 +251,9 @@ func TestQueueValidate(t *testing.T) { panic(err) } - targetList1 := NewTargetList(context.Background()) + targetList1 := NewTargetList(t.Context()) - targetList2 := NewTargetList(context.Background()) + targetList2 := NewTargetList(t.Context()) if err := targetList2.Add(&ExampleTarget{TargetID{"1", "webhook"}, false, false}); err != nil { panic(err) } @@ -596,9 +595,9 @@ func TestConfigValidate(t *testing.T) { panic(err) } - targetList1 := NewTargetList(context.Background()) + targetList1 := NewTargetList(t.Context()) - targetList2 := NewTargetList(context.Background()) + targetList2 := NewTargetList(t.Context()) if err := targetList2.Add(&ExampleTarget{TargetID{"1", "webhook"}, false, false}); err != nil { panic(err) } @@ -928,9 +927,9 @@ func TestParseConfig(t *testing.T) { `) - targetList1 := NewTargetList(context.Background()) + targetList1 := NewTargetList(t.Context()) - targetList2 := NewTargetList(context.Background()) + targetList2 := NewTargetList(t.Context()) if err := targetList2.Add(&ExampleTarget{TargetID{"1", "webhook"}, false, false}); err != nil { panic(err) } diff --git a/internal/event/name.go b/internal/event/name.go index 3f13cad821522..0dee9cc3b761c 100644 --- a/internal/event/name.go +++ b/internal/event/name.go @@ -47,6 +47,7 @@ const ( ObjectCreatedDeleteTagging ObjectRemovedDelete ObjectRemovedDeleteMarkerCreated + ObjectRemovedDeleteAllVersions ObjectRemovedNoOP BucketCreated BucketRemoved @@ -60,7 +61,9 @@ const ( ObjectTransitionFailed ObjectTransitionComplete ObjectManyVersions + ObjectLargeVersions PrefixManyFolders + ILMDelMarkerExpirationDelete objectSingleTypesEnd // Start Compound types that require expansion: @@ -82,7 +85,6 @@ var _ = uint64(1 << objectSingleTypesEnd) // Expand - returns expanded values of abbreviated event type. 
func (name Name) Expand() []Name { switch name { - case ObjectAccessedAll: return []Name{ ObjectAccessedGet, ObjectAccessedHead, @@ -100,6 +102,7 @@ func (name Name) Expand() []Name { ObjectRemovedDelete, ObjectRemovedDeleteMarkerCreated, ObjectRemovedNoOP, + ObjectRemovedDeleteAllVersions, } case ObjectReplicationAll: return []Name{ @@ -122,6 +125,7 @@ func (name Name) Expand() []Name { case ObjectScannerAll: return []Name{ ObjectManyVersions, + ObjectLargeVersions, PrefixManyFolders, } case Everything: @@ -193,6 +197,10 @@ func (name Name) String() string { return "s3:ObjectRemoved:DeleteMarkerCreated" case ObjectRemovedNoOP: return "s3:ObjectRemoved:NoOP" + case ObjectRemovedDeleteAllVersions: + return "s3:ObjectRemoved:DeleteAllVersions" + case ILMDelMarkerExpirationDelete: + return "s3:LifecycleDelMarkerExpiration:Delete" case ObjectReplicationAll: return "s3:Replication:*" case ObjectReplicationFailed: @@ -219,6 +227,9 @@ func (name Name) String() string { return "s3:ObjectTransition:Complete" case ObjectManyVersions: return "s3:Scanner:ManyVersions" + case ObjectLargeVersions: + return "s3:Scanner:LargeVersions" + case PrefixManyFolders: return "s3:Scanner:BigPrefix" } @@ -313,6 +324,10 @@ func ParseName(s string) (Name, error) { return ObjectRemovedDeleteMarkerCreated, nil case "s3:ObjectRemoved:NoOP": return ObjectRemovedNoOP, nil + case "s3:ObjectRemoved:DeleteAllVersions": + return ObjectRemovedDeleteAllVersions, nil + case "s3:LifecycleDelMarkerExpiration:Delete": + return ILMDelMarkerExpirationDelete, nil case "s3:Replication:*": return ObjectReplicationAll, nil case "s3:Replication:OperationFailedReplication": @@ -339,6 +354,8 @@ func ParseName(s string) (Name, error) { return ObjectTransitionAll, nil case "s3:Scanner:ManyVersions": return ObjectManyVersions, nil + case "s3:Scanner:LargeVersions": + return ObjectLargeVersions, nil case "s3:Scanner:BigPrefix": return PrefixManyFolders, nil default: diff --git a/internal/event/name_test.go b/internal/event/name_test.go index 130ff70368115..7bafa2ee95ccc 100644 --- a/internal/event/name_test.go +++ b/internal/event/name_test.go @@ -36,7 +36,7 @@ func TestNameExpand(t *testing.T) { ObjectCreatedCompleteMultipartUpload, ObjectCreatedCopy, ObjectCreatedPost, ObjectCreatedPut, ObjectCreatedPutRetention, ObjectCreatedPutLegalHold, ObjectCreatedPutTagging, ObjectCreatedDeleteTagging, }}, - {ObjectRemovedAll, []Name{ObjectRemovedDelete, ObjectRemovedDeleteMarkerCreated, ObjectRemovedNoOP}}, + {ObjectRemovedAll, []Name{ObjectRemovedDelete, ObjectRemovedDeleteMarkerCreated, ObjectRemovedNoOP, ObjectRemovedDeleteAllVersions}}, {ObjectAccessedHead, []Name{ObjectAccessedHead}}, } @@ -68,6 +68,8 @@ func TestNameString(t *testing.T) { {ObjectCreatedPut, "s3:ObjectCreated:Put"}, {ObjectRemovedAll, "s3:ObjectRemoved:*"}, {ObjectRemovedDelete, "s3:ObjectRemoved:Delete"}, + {ObjectRemovedDeleteAllVersions, "s3:ObjectRemoved:DeleteAllVersions"}, + {ILMDelMarkerExpirationDelete, "s3:LifecycleDelMarkerExpiration:Delete"}, {ObjectRemovedNoOP, "s3:ObjectRemoved:NoOP"}, {ObjectCreatedPutRetention, "s3:ObjectCreated:PutRetention"}, {ObjectCreatedPutLegalHold, "s3:ObjectCreated:PutLegalHold"}, @@ -219,6 +221,7 @@ func TestParseName(t *testing.T) { {"s3:ObjectAccessed:*", ObjectAccessedAll, false}, {"s3:ObjectRemoved:Delete", ObjectRemovedDelete, false}, {"s3:ObjectRemoved:NoOP", ObjectRemovedNoOP, false}, + {"s3:LifecycleDelMarkerExpiration:Delete", ILMDelMarkerExpirationDelete, false}, {"", blankName, true}, } diff --git a/internal/event/rules.go 
b/internal/event/rules.go index ccab53f41c325..0218aabc66cbe 100644 --- a/internal/event/rules.go +++ b/internal/event/rules.go @@ -20,7 +20,7 @@ package event import ( "strings" - "github.com/minio/pkg/v2/wildcard" + "github.com/minio/pkg/v3/wildcard" ) // NewPattern - create new pattern for prefix/suffix. diff --git a/internal/event/target/amqp.go b/internal/event/target/amqp.go index 3f828f6561576..8b001f5dbbea0 100644 --- a/internal/event/target/amqp.go +++ b/internal/event/target/amqp.go @@ -32,32 +32,34 @@ import ( "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/once" "github.com/minio/minio/internal/store" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" "github.com/rabbitmq/amqp091-go" ) // AMQPArgs - AMQP target arguments. type AMQPArgs struct { - Enable bool `json:"enable"` - URL xnet.URL `json:"url"` - Exchange string `json:"exchange"` - RoutingKey string `json:"routingKey"` - ExchangeType string `json:"exchangeType"` - DeliveryMode uint8 `json:"deliveryMode"` - Mandatory bool `json:"mandatory"` - Immediate bool `json:"immediate"` - Durable bool `json:"durable"` - Internal bool `json:"internal"` - NoWait bool `json:"noWait"` - AutoDeleted bool `json:"autoDeleted"` - PublisherConfirms bool `json:"publisherConfirms"` - QueueDir string `json:"queueDir"` - QueueLimit uint64 `json:"queueLimit"` + Enable bool `json:"enable"` + URL amqp091.URI `json:"url"` + Exchange string `json:"exchange"` + RoutingKey string `json:"routingKey"` + ExchangeType string `json:"exchangeType"` + DeliveryMode uint8 `json:"deliveryMode"` + Mandatory bool `json:"mandatory"` + Immediate bool `json:"immediate"` + Durable bool `json:"durable"` + Internal bool `json:"internal"` + NoWait bool `json:"noWait"` + AutoDeleted bool `json:"autoDeleted"` + PublisherConfirms bool `json:"publisherConfirms"` + QueueDir string `json:"queueDir"` + QueueLimit uint64 `json:"queueLimit"` } -//lint:file-ignore ST1003 We cannot change these exported names. - // AMQP input constants. +// +// ST1003 We cannot change these exported names. +// +//nolint:staticcheck const ( AmqpQueueDir = "queue_dir" AmqpQueueLimit = "queue_limit" @@ -276,7 +278,8 @@ func (target *AMQPTarget) send(eventData event.Event, ch *amqp091.Channel, confi // Save - saves the events to the store which will be replayed when the amqp connection is active. func (target *AMQPTarget) Save(eventData event.Event) error { if target.store != nil { - return target.store.Put(eventData) + _, err := target.store.Put(eventData) + return err } if err := target.init(); err != nil { return err @@ -302,7 +305,7 @@ func (target *AMQPTarget) SendFromStore(key store.Key) error { } defer ch.Close() - eventData, eErr := target.store.Get(key.Name) + eventData, eErr := target.store.Get(key) if eErr != nil { // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() // Such events will not exist and wouldve been already been sent successfully. @@ -317,7 +320,7 @@ func (target *AMQPTarget) SendFromStore(key store.Key) error { } // Delete the event from store. - return target.store.Del(key.Name) + return target.store.Del(key) } // Close - does nothing and available for interface compatibility. 
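The amqp.go changes above, and the matching edits in the elasticsearch, kafka, mqtt, mysql, nats, and nsq targets further down, move the queue store from key-name strings to store.Key values, with Put now returning the key it created. The store interface itself is not shown in this diff; the sketch below only illustrates the calling pattern under assumed type shapes (the Key fields and method signatures are approximations, not the real internal/store definitions).

package store

// Key and Store are a hypothetical sketch of the queue-store API these targets
// now call; the real definitions live in internal/store and may differ.
type Key struct {
	Name      string
	ItemCount int
}

type Store[I any] interface {
	Put(item I) (Key, error) // Put returns the key it stored the item under
	Get(key Key) (I, error)  // Get and Del take the whole Key, not key.Name
	Del(key Key) error
}

// saveOrSend shows the Save() pattern repeated across the targets in this
// patch: queue the event when a store is configured, otherwise send inline.
func saveOrSend[I any](s Store[I], item I, send func(I) error) error {
	if s != nil {
		_, err := s.Put(item)
		return err
	}
	return send(item)
}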
diff --git a/internal/event/target/elasticsearch.go b/internal/event/target/elasticsearch.go index f80a48a954125..9cdd861bb18e4 100644 --- a/internal/event/target/elasticsearch.go +++ b/internal/event/target/elasticsearch.go @@ -38,7 +38,7 @@ import ( "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/once" "github.com/minio/minio/internal/store" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" "github.com/pkg/errors" ) @@ -81,13 +81,13 @@ func getESVersionSupportStatus(version string) (res ESSupportStatus, err error) parts := strings.Split(version, ".") if len(parts) < 1 { err = fmt.Errorf("bad ES version string: %s", version) - return + return res, err } majorVersion, err := strconv.Atoi(parts[0]) if err != nil { err = fmt.Errorf("bad ES version string: %s", version) - return + return res, err } switch { @@ -96,7 +96,7 @@ func getESVersionSupportStatus(version string) (res ESSupportStatus, err error) default: res = ESSSupported } - return + return res, err } // magic HH-256 key as HH-256 hash of the first 100 decimals of π as utf-8 string with a zero key. @@ -202,7 +202,8 @@ func (target *ElasticsearchTarget) isActive() (bool, error) { // Save - saves the events to the store if queuestore is configured, which will be replayed when the elasticsearch connection is active. func (target *ElasticsearchTarget) Save(eventData event.Event) error { if target.store != nil { - return target.store.Put(eventData) + _, err := target.store.Put(eventData) + return err } if err := target.init(); err != nil { return err @@ -278,7 +279,7 @@ func (target *ElasticsearchTarget) SendFromStore(key store.Key) error { return err } - eventData, eErr := target.store.Get(key.Name) + eventData, eErr := target.store.Get(key) if eErr != nil { // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() // Such events will not exist and wouldve been already been sent successfully. @@ -296,7 +297,7 @@ func (target *ElasticsearchTarget) SendFromStore(key store.Key) error { } // Delete the event from store. - return target.store.Del(key.Name) + return target.store.Del(key) } // Close - does nothing and available for interface compatibility. 
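In the next elasticsearch.go hunk, createIndex swaps an unchecked type assertion for the comma-ok form, so an unexpectedly shaped entry in the resolve-index response is skipped instead of panicking. A small self-contained illustration of that pattern; the JSON payload and index name here are made up.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Made-up response shaped roughly like an Elasticsearch resolve-index reply,
	// with one entry that is not an object.
	payload := []byte(`{"indices":[{"name":"minio-events"},"unexpected-string"]}`)

	var v map[string]any
	if err := json.Unmarshal(payload, &v); err != nil {
		panic(err)
	}

	found := false
	if indices, ok := v["indices"].([]any); ok {
		for _, index := range indices {
			// Comma-ok assertion: a non-object entry is skipped, not a panic.
			if m, ok := index.(map[string]any); ok && m["name"] == "minio-events" {
				found = true
				break
			}
		}
	}
	fmt.Println("index found:", found)
}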
@@ -426,13 +427,13 @@ func (c *esClientV7) getServerSupportStatus(ctx context.Context) (ESSupportStatu defer resp.Body.Close() - m := make(map[string]interface{}) + m := make(map[string]any) err = json.NewDecoder(resp.Body).Decode(&m) if err != nil { return ESSUnknown, "", fmt.Errorf("unable to get ES Server version - json parse error: %v", err) } - if v, ok := m["version"].(map[string]interface{}); ok { + if v, ok := m["version"].(map[string]any); ok { if ver, ok := v["number"].(string); ok { status, err := getESVersionSupportStatus(ver) return status, ver, err @@ -453,17 +454,16 @@ func (c *esClientV7) createIndex(args ElasticsearchArgs) error { } defer res.Body.Close() - var v map[string]interface{} + var v map[string]any found := false if err := json.NewDecoder(res.Body).Decode(&v); err != nil { return fmt.Errorf("Error parsing response body: %v", err) } - indices, ok := v["indices"].([]interface{}) + indices, ok := v["indices"].([]any) if ok { for _, index := range indices { - name := index.(map[string]interface{})["name"] - if name == args.Index { + if name, ok := index.(map[string]any); ok && name["name"] == args.Index { found = true break } @@ -529,7 +529,7 @@ func (c *esClientV7) removeEntry(ctx context.Context, index string, key string) } func (c *esClientV7) updateEntry(ctx context.Context, index string, key string, eventData event.Event) error { - doc := map[string]interface{}{ + doc := map[string]any{ "Records": []event.Event{eventData}, } var buf bytes.Buffer @@ -556,7 +556,7 @@ func (c *esClientV7) updateEntry(ctx context.Context, index string, key string, } func (c *esClientV7) addEntry(ctx context.Context, index string, eventData event.Event) error { - doc := map[string]interface{}{ + doc := map[string]any{ "Records": []event.Event{eventData}, } var buf bytes.Buffer diff --git a/internal/event/target/kafka.go b/internal/event/target/kafka.go index c57837593e29c..d6af69b8bca82 100644 --- a/internal/event/target/kafka.go +++ b/internal/event/target/kafka.go @@ -24,6 +24,7 @@ import ( "encoding/json" "errors" "fmt" + "log" "net/url" "os" "path/filepath" @@ -34,7 +35,7 @@ import ( "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/once" "github.com/minio/minio/internal/store" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" "github.com/IBM/sarama" saramatls "github.com/IBM/sarama/tools/tls" @@ -42,23 +43,24 @@ import ( // Kafka input constants const ( - KafkaBrokers = "brokers" - KafkaTopic = "topic" - KafkaQueueDir = "queue_dir" - KafkaQueueLimit = "queue_limit" - KafkaTLS = "tls" - KafkaTLSSkipVerify = "tls_skip_verify" - KafkaTLSClientAuth = "tls_client_auth" - KafkaSASL = "sasl" - KafkaSASLUsername = "sasl_username" - KafkaSASLPassword = "sasl_password" - KafkaSASLMechanism = "sasl_mechanism" - KafkaClientTLSCert = "client_tls_cert" - KafkaClientTLSKey = "client_tls_key" - KafkaVersion = "version" - KafkaBatchSize = "batch_size" - KafkaCompressionCodec = "compression_codec" - KafkaCompressionLevel = "compression_level" + KafkaBrokers = "brokers" + KafkaTopic = "topic" + KafkaQueueDir = "queue_dir" + KafkaQueueLimit = "queue_limit" + KafkaTLS = "tls" + KafkaTLSSkipVerify = "tls_skip_verify" + KafkaTLSClientAuth = "tls_client_auth" + KafkaSASL = "sasl" + KafkaSASLUsername = "sasl_username" + KafkaSASLPassword = "sasl_password" + KafkaSASLMechanism = "sasl_mechanism" + KafkaClientTLSCert = "client_tls_cert" + KafkaClientTLSKey = "client_tls_key" + KafkaVersion = "version" + KafkaBatchSize = "batch_size" + KafkaBatchCommitTimeout 
= "batch_commit_timeout" + KafkaCompressionCodec = "compression_codec" + KafkaCompressionLevel = "compression_level" EnvKafkaEnable = "MINIO_NOTIFY_KAFKA_ENABLE" EnvKafkaBrokers = "MINIO_NOTIFY_KAFKA_BROKERS" @@ -76,6 +78,7 @@ const ( EnvKafkaClientTLSKey = "MINIO_NOTIFY_KAFKA_CLIENT_TLS_KEY" EnvKafkaVersion = "MINIO_NOTIFY_KAFKA_VERSION" EnvKafkaBatchSize = "MINIO_NOTIFY_KAFKA_BATCH_SIZE" + EnvKafkaBatchCommitTimeout = "MINIO_NOTIFY_KAFKA_BATCH_COMMIT_TIMEOUT" EnvKafkaProducerCompressionCodec = "MINIO_NOTIFY_KAFKA_PRODUCER_COMPRESSION_CODEC" EnvKafkaProducerCompressionLevel = "MINIO_NOTIFY_KAFKA_PRODUCER_COMPRESSION_LEVEL" ) @@ -90,14 +93,15 @@ var codecs = map[string]sarama.CompressionCodec{ // KafkaArgs - Kafka target arguments. type KafkaArgs struct { - Enable bool `json:"enable"` - Brokers []xnet.Host `json:"brokers"` - Topic string `json:"topic"` - QueueDir string `json:"queueDir"` - QueueLimit uint64 `json:"queueLimit"` - Version string `json:"version"` - BatchSize uint32 `json:"batchSize"` - TLS struct { + Enable bool `json:"enable"` + Brokers []xnet.Host `json:"brokers"` + Topic string `json:"topic"` + QueueDir string `json:"queueDir"` + QueueLimit uint64 `json:"queueLimit"` + Version string `json:"version"` + BatchSize uint32 `json:"batchSize"` + BatchCommitTimeout time.Duration `json:"batchCommitTimeout"` + TLS struct { Enable bool `json:"enable"` RootCAs *x509.CertPool `json:"-"` SkipVerify bool `json:"skipVerify"` @@ -145,6 +149,11 @@ func (k KafkaArgs) Validate() error { return errors.New("batch should be enabled only if queue dir is enabled") } } + if k.BatchCommitTimeout > 0 { + if k.QueueDir == "" || k.BatchSize <= 1 { + return errors.New("batch commit timeout should be set only if queue dir is enabled and batch size > 1") + } + } return nil } @@ -158,7 +167,7 @@ type KafkaTarget struct { producer sarama.SyncProducer config *sarama.Config store store.Store[event.Event] - batch *store.Batch[string, *sarama.ProducerMessage] + batch *store.Batch[event.Event] loggerOnce logger.LogOnce quitCh chan struct{} } @@ -198,7 +207,11 @@ func (target *KafkaTarget) isActive() (bool, error) { // Save - saves the events to the store which will be replayed when the Kafka connection is active. func (target *KafkaTarget) Save(eventData event.Event) error { if target.store != nil { - return target.store.Put(eventData) + if target.batch != nil { + return target.batch.Add(eventData) + } + _, err := target.store.Put(eventData) + return err } if err := target.init(); err != nil { return err @@ -219,80 +232,59 @@ func (target *KafkaTarget) send(eventData event.Event) error { return err } -// SendFromStore - reads an event from store and sends it to Kafka. -func (target *KafkaTarget) SendFromStore(key store.Key) error { - if err := target.init(); err != nil { - return err - } - - // If batch is enabled, the event will be batched in memory - // and will be committed once the batch is full. - if target.batch != nil { - return target.addToBatch(key) - } - - eventData, eErr := target.store.Get(key.Name) - if eErr != nil { - // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() - // Such events will not exist and wouldve been already been sent successfully. - if os.IsNotExist(eErr) { - return nil - } - return eErr +// sendMultiple sends multiple messages to the kafka. 
+func (target *KafkaTarget) sendMultiple(events []event.Event) error { + if target.producer == nil { + return store.ErrNotConnected } - - if err := target.send(eventData); err != nil { - if isKafkaConnErr(err) { - return store.ErrNotConnected + var msgs []*sarama.ProducerMessage + for _, event := range events { + msg, err := target.toProducerMessage(event) + if err != nil { + return err } - return err + msgs = append(msgs, msg) } - - // Delete the event from store. - return target.store.Del(key.Name) + return target.producer.SendMessages(msgs) } -func (target *KafkaTarget) addToBatch(key store.Key) error { - if target.batch.IsFull() { - if err := target.commitBatch(); err != nil { - return err - } +// SendFromStore - reads an event from store and sends it to Kafka. +func (target *KafkaTarget) SendFromStore(key store.Key) (err error) { + if err = target.init(); err != nil { + return err } - if _, ok := target.batch.GetByKey(key.Name); !ok { - eventData, err := target.store.Get(key.Name) + switch { + case key.ItemCount == 1: + var event event.Event + event, err = target.store.Get(key) if err != nil { + // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() + // Such events will not exist and wouldve been already been sent successfully. if os.IsNotExist(err) { return nil } return err } - msg, err := target.toProducerMessage(eventData) + err = target.send(event) + case key.ItemCount > 1: + var events []event.Event + events, err = target.store.GetMultiple(key) if err != nil { + if os.IsNotExist(err) { + return nil + } return err } - if err = target.batch.Add(key.Name, msg); err != nil { - return err - } + err = target.sendMultiple(events) } - // commit the batch if the key is the last one present in the store. - if key.IsLast || target.batch.IsFull() { - return target.commitBatch() - } - return nil -} - -func (target *KafkaTarget) commitBatch() error { - keys, msgs, err := target.batch.GetAll() if err != nil { - return err - } - if err = target.producer.SendMessages(msgs); err != nil { if isKafkaConnErr(err) { return store.ErrNotConnected } return err } - return target.store.DelList(keys) + // Delete the event from store. 
+ return target.store.Del(key) } func (target *KafkaTarget) toProducerMessage(eventData event.Event) (*sarama.ProducerMessage, error) { @@ -318,7 +310,18 @@ func (target *KafkaTarget) toProducerMessage(eventData event.Event) (*sarama.Pro func (target *KafkaTarget) Close() error { close(target.quitCh) + if target.batch != nil { + target.batch.Close() + } + if target.producer != nil { + if target.store != nil { + // It is safe to abort the current transaction if + // queue_dir is configured + target.producer.AbortTxn() + } else { + target.producer.CommitTxn() + } target.producer.Close() return target.client.Close() } @@ -331,6 +334,10 @@ func (target *KafkaTarget) init() error { } func (target *KafkaTarget) initKafka() error { + if os.Getenv("_MINIO_KAFKA_DEBUG") != "" { + sarama.DebugLogger = log.Default() + } + args := target.args config := sarama.NewConfig() @@ -437,10 +444,14 @@ func NewKafkaTarget(id string, args KafkaArgs, loggerOnce logger.LogOnce) (*Kafk loggerOnce: loggerOnce, quitCh: make(chan struct{}), } - if target.store != nil { if args.BatchSize > 1 { - target.batch = store.NewBatch[string, *sarama.ProducerMessage](args.BatchSize) + target.batch = store.NewBatch[event.Event](store.BatchConfig[event.Event]{ + Limit: args.BatchSize, + Log: loggerOnce, + Store: queueStore, + CommitTimeout: args.BatchCommitTimeout, + }) } store.StreamItems(target.store, target, target.quitCh, target.loggerOnce) } diff --git a/internal/event/target/kafka_scram_client_contrib.go b/internal/event/target/kafka_scram_client_contrib.go index 6bb02ed4174bc..a602720833edc 100644 --- a/internal/event/target/kafka_scram_client_contrib.go +++ b/internal/event/target/kafka_scram_client_contrib.go @@ -62,11 +62,11 @@ type XDGSCRAMClient struct { // and authzID via the SASLprep algorithm, as recommended by RFC-5802. If // SASLprep fails, the method returns an error. func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { - x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) + x.Client, err = x.NewClient(userName, password, authzID) if err != nil { return err } - x.ClientConversation = x.Client.NewConversation() + x.ClientConversation = x.NewConversation() return nil } @@ -77,7 +77,7 @@ func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { // completes is also an error. func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { response, err = x.ClientConversation.Step(challenge) - return + return response, err } // Done returns true if the conversation is completed or has errored. diff --git a/internal/event/target/mqtt.go b/internal/event/target/mqtt.go index 8a4b2a389b820..8f568cd3a91d2 100644 --- a/internal/event/target/mqtt.go +++ b/internal/event/target/mqtt.go @@ -33,7 +33,7 @@ import ( "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/once" "github.com/minio/minio/internal/store" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" ) const ( @@ -180,7 +180,7 @@ func (target *MQTTTarget) SendFromStore(key store.Key) error { return err } - eventData, err := target.store.Get(key.Name) + eventData, err := target.store.Get(key) if err != nil { // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() // Such events will not exist and wouldve been already been sent successfully. @@ -195,14 +195,15 @@ func (target *MQTTTarget) SendFromStore(key store.Key) error { } // Delete the event from store. 
- return target.store.Del(key.Name) + return target.store.Del(key) } // Save - saves the events to the store if queuestore is configured, which will // be replayed when the mqtt connection is active. func (target *MQTTTarget) Save(eventData event.Event) error { if target.store != nil { - return target.store.Put(eventData) + _, err := target.store.Put(eventData) + return err } if err := target.init(); err != nil { return err diff --git a/internal/event/target/mysql.go b/internal/event/target/mysql.go index 2b6f93183814e..0f311232a59fc 100644 --- a/internal/event/target/mysql.go +++ b/internal/event/target/mysql.go @@ -35,7 +35,7 @@ import ( "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/once" "github.com/minio/minio/internal/store" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" ) const ( @@ -198,7 +198,8 @@ func (target *MySQLTarget) isActive() (bool, error) { // Save - saves the events to the store which will be replayed when the SQL connection is active. func (target *MySQLTarget) Save(eventData event.Event) error { if target.store != nil { - return target.store.Put(eventData) + _, err := target.store.Put(eventData) + return err } if err := target.init(); err != nil { return err @@ -273,7 +274,7 @@ func (target *MySQLTarget) SendFromStore(key store.Key) error { } } - eventData, eErr := target.store.Get(key.Name) + eventData, eErr := target.store.Get(key) if eErr != nil { // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() // Such events will not exist and wouldve been already been sent successfully. @@ -291,7 +292,7 @@ func (target *MySQLTarget) SendFromStore(key store.Key) error { } // Delete the event from store. - return target.store.Del(key.Name) + return target.store.Del(key) } // Close - closes underneath connections to MySQL database. @@ -374,7 +375,7 @@ func (target *MySQLTarget) initMySQL() error { err = target.db.Ping() if err != nil { - if !(xnet.IsConnRefusedErr(err) || xnet.IsConnResetErr(err)) { + if !xnet.IsConnRefusedErr(err) && !xnet.IsConnResetErr(err) { target.loggerOnce(context.Background(), err, target.ID().String()) } } else { diff --git a/internal/event/target/mysql_test.go b/internal/event/target/mysql_test.go index 9a59b28d52b90..d8799dc7362fb 100644 --- a/internal/event/target/mysql_test.go +++ b/internal/event/target/mysql_test.go @@ -19,6 +19,7 @@ package target import ( "database/sql" + "slices" "testing" ) @@ -26,11 +27,8 @@ import ( // is registered and fails otherwise. 
func TestMySQLRegistration(t *testing.T) { var found bool - for _, drv := range sql.Drivers() { - if drv == "mysql" { - found = true - break - } + if slices.Contains(sql.Drivers(), "mysql") { + found = true } if !found { t.Fatal("mysql driver not registered") diff --git a/internal/event/target/nats.go b/internal/event/target/nats.go index b67ac36b77405..c96833bc45b88 100644 --- a/internal/event/target/nats.go +++ b/internal/event/target/nats.go @@ -33,26 +33,28 @@ import ( "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/once" "github.com/minio/minio/internal/store" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" "github.com/nats-io/nats.go" "github.com/nats-io/stan.go" ) // NATS related constants const ( - NATSAddress = "address" - NATSSubject = "subject" - NATSUsername = "username" - NATSPassword = "password" - NATSToken = "token" - NATSTLS = "tls" - NATSTLSSkipVerify = "tls_skip_verify" - NATSPingInterval = "ping_interval" - NATSQueueDir = "queue_dir" - NATSQueueLimit = "queue_limit" - NATSCertAuthority = "cert_authority" - NATSClientCert = "client_cert" - NATSClientKey = "client_key" + NATSAddress = "address" + NATSSubject = "subject" + NATSUsername = "username" + NATSPassword = "password" + NATSToken = "token" + NATSNKeySeed = "nkey_seed" + NATSTLS = "tls" + NATSTLSSkipVerify = "tls_skip_verify" + NATSTLSHandshakeFirst = "tls_handshake_first" + NATSPingInterval = "ping_interval" + NATSQueueDir = "queue_dir" + NATSQueueLimit = "queue_limit" + NATSCertAuthority = "cert_authority" + NATSClientCert = "client_cert" + NATSClientKey = "client_key" // Streaming constants - deprecated NATSStreaming = "streaming" @@ -63,21 +65,23 @@ const ( // JetStream constants NATSJetStream = "jetstream" - EnvNATSEnable = "MINIO_NOTIFY_NATS_ENABLE" - EnvNATSAddress = "MINIO_NOTIFY_NATS_ADDRESS" - EnvNATSSubject = "MINIO_NOTIFY_NATS_SUBJECT" - EnvNATSUsername = "MINIO_NOTIFY_NATS_USERNAME" - NATSUserCredentials = "MINIO_NOTIFY_NATS_USER_CREDENTIALS" - EnvNATSPassword = "MINIO_NOTIFY_NATS_PASSWORD" - EnvNATSToken = "MINIO_NOTIFY_NATS_TOKEN" - EnvNATSTLS = "MINIO_NOTIFY_NATS_TLS" - EnvNATSTLSSkipVerify = "MINIO_NOTIFY_NATS_TLS_SKIP_VERIFY" - EnvNATSPingInterval = "MINIO_NOTIFY_NATS_PING_INTERVAL" - EnvNATSQueueDir = "MINIO_NOTIFY_NATS_QUEUE_DIR" - EnvNATSQueueLimit = "MINIO_NOTIFY_NATS_QUEUE_LIMIT" - EnvNATSCertAuthority = "MINIO_NOTIFY_NATS_CERT_AUTHORITY" - EnvNATSClientCert = "MINIO_NOTIFY_NATS_CLIENT_CERT" - EnvNATSClientKey = "MINIO_NOTIFY_NATS_CLIENT_KEY" + EnvNATSEnable = "MINIO_NOTIFY_NATS_ENABLE" + EnvNATSAddress = "MINIO_NOTIFY_NATS_ADDRESS" + EnvNATSSubject = "MINIO_NOTIFY_NATS_SUBJECT" + EnvNATSUsername = "MINIO_NOTIFY_NATS_USERNAME" + NATSUserCredentials = "MINIO_NOTIFY_NATS_USER_CREDENTIALS" + EnvNATSPassword = "MINIO_NOTIFY_NATS_PASSWORD" + EnvNATSToken = "MINIO_NOTIFY_NATS_TOKEN" + EnvNATSNKeySeed = "MINIO_NOTIFY_NATS_NKEY_SEED" + EnvNATSTLS = "MINIO_NOTIFY_NATS_TLS" + EnvNATSTLSSkipVerify = "MINIO_NOTIFY_NATS_TLS_SKIP_VERIFY" + EnvNatsTLSHandshakeFirst = "MINIO_NOTIFY_NATS_TLS_HANDSHAKE_FIRST" + EnvNATSPingInterval = "MINIO_NOTIFY_NATS_PING_INTERVAL" + EnvNATSQueueDir = "MINIO_NOTIFY_NATS_QUEUE_DIR" + EnvNATSQueueLimit = "MINIO_NOTIFY_NATS_QUEUE_LIMIT" + EnvNATSCertAuthority = "MINIO_NOTIFY_NATS_CERT_AUTHORITY" + EnvNATSClientCert = "MINIO_NOTIFY_NATS_CLIENT_CERT" + EnvNATSClientKey = "MINIO_NOTIFY_NATS_CLIENT_KEY" // Streaming constants - deprecated EnvNATSStreaming = "MINIO_NOTIFY_NATS_STREAMING" @@ -91,23 +95,25 @@ const ( // NATSArgs - NATS 
target arguments. type NATSArgs struct { - Enable bool `json:"enable"` - Address xnet.Host `json:"address"` - Subject string `json:"subject"` - Username string `json:"username"` - UserCredentials string `json:"userCredentials"` - Password string `json:"password"` - Token string `json:"token"` - TLS bool `json:"tls"` - TLSSkipVerify bool `json:"tlsSkipVerify"` - Secure bool `json:"secure"` - CertAuthority string `json:"certAuthority"` - ClientCert string `json:"clientCert"` - ClientKey string `json:"clientKey"` - PingInterval int64 `json:"pingInterval"` - QueueDir string `json:"queueDir"` - QueueLimit uint64 `json:"queueLimit"` - JetStream struct { + Enable bool `json:"enable"` + Address xnet.Host `json:"address"` + Subject string `json:"subject"` + Username string `json:"username"` + UserCredentials string `json:"userCredentials"` + Password string `json:"password"` + Token string `json:"token"` + NKeySeed string `json:"nKeySeed"` + TLS bool `json:"tls"` + TLSSkipVerify bool `json:"tlsSkipVerify"` + TLSHandshakeFirst bool `json:"tlsHandshakeFirst"` + Secure bool `json:"secure"` + CertAuthority string `json:"certAuthority"` + ClientCert string `json:"clientCert"` + ClientKey string `json:"clientKey"` + PingInterval int64 `json:"pingInterval"` + QueueDir string `json:"queueDir"` + QueueLimit uint64 `json:"queueLimit"` + JetStream struct { Enable bool `json:"enable"` } `json:"jetStream"` Streaming struct { @@ -175,11 +181,21 @@ func (n NATSArgs) connectNats() (*nats.Conn, error) { if n.Token != "" { connOpts = append(connOpts, nats.Token(n.Token)) } + if n.NKeySeed != "" { + nkeyOpt, err := nats.NkeyOptionFromSeed(n.NKeySeed) + if err != nil { + return nil, err + } + connOpts = append(connOpts, nkeyOpt) + } if n.Secure || n.TLS && n.TLSSkipVerify { connOpts = append(connOpts, nats.Secure(nil)) } else if n.TLS { connOpts = append(connOpts, nats.Secure(&tls.Config{RootCAs: n.RootCAs})) } + if n.TLSHandshakeFirst { + connOpts = append(connOpts, nats.TLSHandshakeFirst()) + } if n.CertAuthority != "" { connOpts = append(connOpts, nats.RootCAs(n.CertAuthority)) } @@ -299,7 +315,8 @@ func (target *NATSTarget) isActive() (bool, error) { // Save - saves the events to the store which will be replayed when the Nats connection is active. func (target *NATSTarget) Save(eventData event.Event) error { if target.store != nil { - return target.store.Put(eventData) + _, err := target.store.Put(eventData) + return err } if err := target.init(); err != nil { @@ -353,7 +370,7 @@ func (target *NATSTarget) SendFromStore(key store.Key) error { return err } - eventData, eErr := target.store.Get(key.Name) + eventData, eErr := target.store.Get(key) if eErr != nil { // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() // Such events will not exist and wouldve been already been sent successfully. @@ -367,7 +384,7 @@ func (target *NATSTarget) SendFromStore(key store.Key) error { return err } - return target.store.Del(key.Name) + return target.store.Del(key) } // Close - closes underneath connections to NATS server. 
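For reference, the two new NATS options added above (`nkey_seed` and `tls_handshake_first`) map directly onto nats.go connect options, as `connectNats` now does. Below is a minimal standalone sketch (not part of this patch) showing the equivalent client-side calls; the seed-file path, server URL, and subject are placeholders.

```go
package main

import (
	"crypto/tls"
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	var opts []nats.Option

	// nkey_seed: load the NKey seed from a file; the client signs the server
	// nonce with it during the handshake. Path below is a placeholder.
	nkeyOpt, err := nats.NkeyOptionFromSeed("/etc/minio/nats.nkey")
	if err != nil {
		log.Fatal(err)
	}
	opts = append(opts, nkeyOpt)

	// tls: enable TLS; an empty config falls back to the system root CAs.
	opts = append(opts, nats.Secure(&tls.Config{}))

	// tls_handshake_first: perform the TLS handshake before the NATS INFO
	// exchange, matching a server configured with `handshake_first: true`.
	opts = append(opts, nats.TLSHandshakeFirst())

	nc, err := nats.Connect("nats://localhost:4222", opts...)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	// Publish a sample event payload to a placeholder subject.
	if err := nc.Publish("minio-events", []byte(`{"EventName":"s3:ObjectCreated:Put"}`)); err != nil {
		log.Fatal(err)
	}
}
```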
diff --git a/internal/event/target/nats_contrib_test.go b/internal/event/target/nats_contrib_test.go index 9d2cb9501a821..42a5f0609905e 100644 --- a/internal/event/target/nats_contrib_test.go +++ b/internal/event/target/nats_contrib_test.go @@ -19,7 +19,9 @@ package target import ( "testing" - xnet "github.com/minio/pkg/v2/net" + "github.com/nats-io/nats-server/v2/server" + + xnet "github.com/minio/pkg/v3/net" natsserver "github.com/nats-io/nats-server/v2/test" ) @@ -96,3 +98,34 @@ func TestNatsConnToken(t *testing.T) { } defer con.Close() } + +func TestNatsConnNKeySeed(t *testing.T) { + opts := natsserver.DefaultTestOptions + opts.Port = 14223 + opts.Nkeys = []*server.NkeyUser{ + { + // Not a real NKey + // Taken from https://docs.nats.io/running-a-nats-service/configuration/securing_nats/auth_intro/nkey_auth + Nkey: "UDXU4RCSJNZOIQHZNWXHXORDPRTGNJAHAHFRGZNEEJCPQTT2M7NLCNF4", + }, + } + s := natsserver.RunServer(&opts) + defer s.Shutdown() + + clientConfig := &NATSArgs{ + Enable: true, + Address: xnet.Host{ + Name: "localhost", + Port: (xnet.Port(opts.Port)), + IsPortSet: true, + }, + Subject: "test", + NKeySeed: "testdata/contrib/test.nkey", + } + + con, err := clientConfig.connectNats() + if err != nil { + t.Errorf("Could not connect to nats: %v", err) + } + defer con.Close() +} diff --git a/internal/event/target/nats_tls_contrib_test.go b/internal/event/target/nats_tls_contrib_test.go index a89ecb2222084..30cf5b46b9b23 100644 --- a/internal/event/target/nats_tls_contrib_test.go +++ b/internal/event/target/nats_tls_contrib_test.go @@ -21,7 +21,7 @@ import ( "path/filepath" "testing" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" natsserver "github.com/nats-io/nats-server/v2/test" ) @@ -48,6 +48,30 @@ func TestNatsConnTLSCustomCA(t *testing.T) { defer con.Close() } +func TestNatsConnTLSCustomCAHandshakeFirst(t *testing.T) { + s, opts := natsserver.RunServerWithConfig(filepath.Join("testdata", "contrib", "nats_tls_handshake_first.conf")) + defer s.Shutdown() + + clientConfig := &NATSArgs{ + Enable: true, + Address: xnet.Host{ + Name: "localhost", + Port: (xnet.Port(opts.Port)), + IsPortSet: true, + }, + Subject: "test", + Secure: true, + CertAuthority: path.Join("testdata", "contrib", "certs", "root_ca_cert.pem"), + TLSHandshakeFirst: true, + } + + con, err := clientConfig.connectNats() + if err != nil { + t.Errorf("Could not connect to nats: %v", err) + } + defer con.Close() +} + func TestNatsConnTLSClientAuthorization(t *testing.T) { s, opts := natsserver.RunServerWithConfig(filepath.Join("testdata", "contrib", "nats_tls_client_cert.conf")) defer s.Shutdown() diff --git a/internal/event/target/nsq.go b/internal/event/target/nsq.go index e8f68e2c10d4c..ec04d9937470b 100644 --- a/internal/event/target/nsq.go +++ b/internal/event/target/nsq.go @@ -33,7 +33,7 @@ import ( "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/once" "github.com/minio/minio/internal/store" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" ) // NSQ constants @@ -147,7 +147,8 @@ func (target *NSQTarget) isActive() (bool, error) { // Save - saves the events to the store which will be replayed when the nsq connection is active. 
func (target *NSQTarget) Save(eventData event.Event) error { if target.store != nil { - return target.store.Put(eventData) + _, err := target.store.Put(eventData) + return err } if err := target.init(); err != nil { @@ -188,7 +189,7 @@ func (target *NSQTarget) SendFromStore(key store.Key) error { return err } - eventData, eErr := target.store.Get(key.Name) + eventData, eErr := target.store.Get(key) if eErr != nil { // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() // Such events will not exist and wouldve been already been sent successfully. @@ -203,7 +204,7 @@ func (target *NSQTarget) SendFromStore(key store.Key) error { } // Delete the event from store. - return target.store.Del(key.Name) + return target.store.Del(key) } // Close - closes underneath connections to NSQD server. @@ -242,7 +243,7 @@ func (target *NSQTarget) initNSQ() error { err = target.producer.Ping() if err != nil { // To treat "connection refused" errors as errNotConnected. - if !(xnet.IsConnRefusedErr(err) || xnet.IsConnResetErr(err)) { + if !xnet.IsConnRefusedErr(err) && !xnet.IsConnResetErr(err) { target.loggerOnce(context.Background(), err, target.ID().String()) } target.producer.Stop() diff --git a/internal/event/target/nsq_test.go b/internal/event/target/nsq_test.go index 0b225ac72b70b..32926ab58899a 100644 --- a/internal/event/target/nsq_test.go +++ b/internal/event/target/nsq_test.go @@ -20,7 +20,7 @@ package target import ( "testing" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" ) func TestNSQArgs_Validate(t *testing.T) { diff --git a/internal/event/target/postgresql.go b/internal/event/target/postgresql.go index bb10fdaf60c24..9bd9a886f2f95 100644 --- a/internal/event/target/postgresql.go +++ b/internal/event/target/postgresql.go @@ -26,9 +26,11 @@ import ( "net/url" "os" "path/filepath" + "regexp" "strconv" "strings" "time" + "unicode" _ "github.com/lib/pq" // Register postgres driver @@ -36,7 +38,7 @@ import ( "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/once" "github.com/minio/minio/internal/store" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" ) const ( @@ -101,6 +103,10 @@ func (p PostgreSQLArgs) Validate() error { if p.Table == "" { return fmt.Errorf("empty table name") } + if err := validatePsqlTableName(p.Table); err != nil { + return err + } + if p.Format != "" { f := strings.ToLower(p.Format) if f != event.NamespaceFormat && f != event.AccessFormat { @@ -190,7 +196,8 @@ func (target *PostgreSQLTarget) isActive() (bool, error) { // Save - saves the events to the store if questore is configured, which will be replayed when the PostgreSQL connection is active. func (target *PostgreSQLTarget) Save(eventData event.Event) error { if target.store != nil { - return target.store.Put(eventData) + _, err := target.store.Put(eventData) + return err } if err := target.init(); err != nil { @@ -269,7 +276,7 @@ func (target *PostgreSQLTarget) SendFromStore(key store.Key) error { } } - eventData, eErr := target.store.Get(key.Name) + eventData, eErr := target.store.Get(key) if eErr != nil { // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() // Such events will not exist and wouldve been already been sent successfully. @@ -287,7 +294,7 @@ func (target *PostgreSQLTarget) SendFromStore(key store.Key) error { } // Delete the event from store. 
- return target.store.Del(key.Name) + return target.store.Del(key) } // Close - closes underneath connections to PostgreSQL database. @@ -369,7 +376,7 @@ func (target *PostgreSQLTarget) initPostgreSQL() error { err = target.db.Ping() if err != nil { - if !(xnet.IsConnRefusedErr(err) || xnet.IsConnResetErr(err)) { + if !xnet.IsConnRefusedErr(err) && !xnet.IsConnResetErr(err) { target.loggerOnce(context.Background(), err, target.ID().String()) } } else { @@ -444,3 +451,43 @@ func NewPostgreSQLTarget(id string, args PostgreSQLArgs, loggerOnce logger.LogOn return target, nil } + +var errInvalidPsqlTablename = errors.New("invalid PostgreSQL table") + +func validatePsqlTableName(name string) error { + // check for quoted string (string may not contain a quote) + if match, err := regexp.MatchString("^\"[^\"]+\"$", name); err != nil { + return err + } else if match { + return nil + } + + // normalize the name to letters, digits, _ or $ + valid := true + cleaned := strings.Map(func(r rune) rune { + switch { + case unicode.IsLetter(r): + return 'a' + case unicode.IsDigit(r): + return '0' + case r == '_', r == '$': + return r + default: + valid = false + return -1 + } + }, name) + + if valid { + // check for simple name or quoted name + // - letter/underscore followed by one or more letter/digit/underscore + // - any text between quotes (text cannot contain a quote itself) + if match, err := regexp.MatchString("^[a_][a0_$]*$", cleaned); err != nil { + return err + } else if match { + return nil + } + } + + return errInvalidPsqlTablename +} diff --git a/internal/event/target/postgresql_test.go b/internal/event/target/postgresql_test.go index 0ec94f6f18dd1..9b5130e2ebba7 100644 --- a/internal/event/target/postgresql_test.go +++ b/internal/event/target/postgresql_test.go @@ -19,6 +19,7 @@ package target import ( "database/sql" + "slices" "testing" ) @@ -26,13 +27,26 @@ import ( // is registered and fails otherwise. 
func TestPostgreSQLRegistration(t *testing.T) { var found bool - for _, drv := range sql.Drivers() { - if drv == "postgres" { - found = true - break - } + if slices.Contains(sql.Drivers(), "postgres") { + found = true } if !found { t.Fatal("postgres driver not registered") } } + +func TestPsqlTableNameValidation(t *testing.T) { + validTables := []string{"táblë", "table", "TableName", "\"Table name\"", "\"✅✅\"", "table$one", "\"táblë\""} + invalidTables := []string{"table name", "table \"name\"", "✅✅", "$table$"} + + for _, name := range validTables { + if err := validatePsqlTableName(name); err != nil { + t.Errorf("Should be valid: %s - %s", name, err) + } + } + for _, name := range invalidTables { + if err := validatePsqlTableName(name); err != errInvalidPsqlTablename { + t.Errorf("Should be invalid: %s - %s", name, err) + } + } +} diff --git a/internal/event/target/redis.go b/internal/event/target/redis.go index a52bb09f7cb00..8f7d4272971aa 100644 --- a/internal/event/target/redis.go +++ b/internal/event/target/redis.go @@ -33,7 +33,7 @@ import ( "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/once" "github.com/minio/minio/internal/store" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" ) // Redis constants @@ -41,6 +41,7 @@ const ( RedisFormat = "format" RedisAddress = "address" RedisPassword = "password" + RedisUser = "user" RedisKey = "key" RedisQueueDir = "queue_dir" RedisQueueLimit = "queue_limit" @@ -49,6 +50,7 @@ const ( EnvRedisFormat = "MINIO_NOTIFY_REDIS_FORMAT" EnvRedisAddress = "MINIO_NOTIFY_REDIS_ADDRESS" EnvRedisPassword = "MINIO_NOTIFY_REDIS_PASSWORD" + EnvRedisUser = "MINIO_NOTIFY_REDIS_USER" EnvRedisKey = "MINIO_NOTIFY_REDIS_KEY" EnvRedisQueueDir = "MINIO_NOTIFY_REDIS_QUEUE_DIR" EnvRedisQueueLimit = "MINIO_NOTIFY_REDIS_QUEUE_LIMIT" @@ -60,6 +62,7 @@ type RedisArgs struct { Format string `json:"format"` Addr xnet.Host `json:"address"` Password string `json:"password"` + User string `json:"user"` Key string `json:"key"` QueueDir string `json:"queueDir"` QueueLimit uint64 `json:"queueLimit"` @@ -170,7 +173,8 @@ func (target *RedisTarget) isActive() (bool, error) { // Save - saves the events to the store if questore is configured, which will be replayed when the redis connection is active. func (target *RedisTarget) Save(eventData event.Event) error { if target.store != nil { - return target.store.Put(eventData) + _, err := target.store.Put(eventData) + return err } if err := target.init(); err != nil { return err @@ -249,7 +253,7 @@ func (target *RedisTarget) SendFromStore(key store.Key) error { target.firstPing = true } - eventData, eErr := target.store.Get(key.Name) + eventData, eErr := target.store.Get(key) if eErr != nil { // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() // Such events will not exist and would've been already been sent successfully. @@ -267,7 +271,7 @@ func (target *RedisTarget) SendFromStore(key store.Key) error { } // Delete the event from store. - return target.store.Del(key.Name) + return target.store.Del(key) } // Close - releases the resources used by the pool. 
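Stepping back to the table-name validation added to `postgresql.go` above: the key idea is that every Unicode letter is collapsed to `a` and every digit to `0`, so one simple ASCII regexp can accept international identifiers while still rejecting whitespace and punctuation. The following standalone sketch (not MinIO code, quoted identifiers omitted) re-implements that normalization purely for illustration.

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
	"unicode"
)

// isSimpleIdentifier mirrors the unquoted branch of validatePsqlTableName:
// letters map to 'a', digits to '0', '_' and '$' pass through, and anything
// else marks the name invalid before the regexp is ever consulted.
func isSimpleIdentifier(name string) bool {
	valid := true
	cleaned := strings.Map(func(r rune) rune {
		switch {
		case unicode.IsLetter(r):
			return 'a'
		case unicode.IsDigit(r):
			return '0'
		case r == '_', r == '$':
			return r
		default:
			valid = false
			return -1
		}
	}, name)
	return valid && regexp.MustCompile(`^[a_][a0_$]*$`).MatchString(cleaned)
}

func main() {
	fmt.Println(isSimpleIdentifier("táblë"))      // true:  normalizes to "aaaaa"
	fmt.Println(isSimpleIdentifier("table$one"))  // true:  normalizes to "aaaaa$aaa"
	fmt.Println(isSimpleIdentifier("table name")) // false: space is rejected
	fmt.Println(isSimpleIdentifier("$table"))     // false: must start with a letter or '_'
}
```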
@@ -289,7 +293,7 @@ func (target *RedisTarget) initRedis() error { _, pingErr := conn.Do("PING") if pingErr != nil { - if !(xnet.IsConnRefusedErr(pingErr) || xnet.IsConnResetErr(pingErr)) { + if !xnet.IsConnRefusedErr(pingErr) && !xnet.IsConnResetErr(pingErr) { target.loggerOnce(context.Background(), pingErr, target.ID().String()) } return pingErr @@ -334,9 +338,16 @@ func NewRedisTarget(id string, args RedisArgs, loggerOnce logger.LogOnce) (*Redi } if args.Password != "" { - if _, err = conn.Do("AUTH", args.Password); err != nil { - conn.Close() - return nil, err + if args.User != "" { + if _, err = conn.Do("AUTH", args.User, args.Password); err != nil { + conn.Close() + return nil, err + } + } else { + if _, err = conn.Do("AUTH", args.Password); err != nil { + conn.Close() + return nil, err + } } } diff --git a/internal/event/target/testdata/contrib/nats_tls_handshake_first.conf b/internal/event/target/testdata/contrib/nats_tls_handshake_first.conf new file mode 100644 index 0000000000000..069eac489a121 --- /dev/null +++ b/internal/event/target/testdata/contrib/nats_tls_handshake_first.conf @@ -0,0 +1,8 @@ +port: 14227 +net: localhost + +tls { + cert_file: "./testdata/contrib/certs/nats_server_cert.pem" + key_file: "./testdata/contrib/certs/nats_server_key.pem" + handshake_first: true +} diff --git a/internal/event/target/testdata/contrib/test.nkey b/internal/event/target/testdata/contrib/test.nkey new file mode 100644 index 0000000000000..e75f2719bb8f2 --- /dev/null +++ b/internal/event/target/testdata/contrib/test.nkey @@ -0,0 +1 @@ +SUACSSL3UAHUDXKFSNVUZRF5UHPMWZ6BFDTJ7M6USDXIEDNPPQYYYCU3VY \ No newline at end of file diff --git a/internal/event/target/webhook.go b/internal/event/target/webhook.go index 9a723eb7d5079..e5dc4f6996a86 100644 --- a/internal/event/target/webhook.go +++ b/internal/event/target/webhook.go @@ -38,8 +38,8 @@ import ( "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/once" "github.com/minio/minio/internal/store" - "github.com/minio/pkg/v2/certs" - xnet "github.com/minio/pkg/v2/net" + "github.com/minio/pkg/v3/certs" + xnet "github.com/minio/pkg/v3/net" ) // Webhook constants @@ -146,7 +146,8 @@ func (target *WebhookTarget) isActive() (bool, error) { // which will be replayed when the webhook connection is active. func (target *WebhookTarget) Save(eventData event.Event) error { if target.store != nil { - return target.store.Put(eventData) + _, err := target.store.Put(eventData) + return err } if err := target.init(); err != nil { return err @@ -196,13 +197,15 @@ func (target *WebhookTarget) send(eventData event.Event) error { if err != nil { return err } - defer xhttp.DrainBody(resp.Body) + xhttp.DrainBody(resp.Body) - if resp.StatusCode < 200 || resp.StatusCode > 299 { - return fmt.Errorf("sending event failed with %v", resp.Status) + if resp.StatusCode >= 200 && resp.StatusCode <= 299 { + // accepted HTTP status codes. + return nil + } else if resp.StatusCode == http.StatusForbidden { + return fmt.Errorf("%s returned '%s', please check if your auth token is correctly set", target.args.Endpoint, resp.Status) } - - return nil + return fmt.Errorf("%s returned '%s', please check your endpoint configuration", target.args.Endpoint, resp.Status) } // SendFromStore - reads an event from store and sends it to webhook. 
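The new `user` option for the Redis target above switches authentication to the two-argument `AUTH <user> <password>` form used by Redis 6 ACLs, keeping the legacy single-argument `AUTH <password>` when no user is configured. A minimal redigo sketch of that branch follows (not MinIO code; address and credentials are placeholders).

```go
package main

import (
	"log"

	"github.com/gomodule/redigo/redis"
)

// authenticate mirrors the AUTH branch added to NewRedisTarget: prefer the
// ACL form when a user is set, otherwise fall back to password-only AUTH.
func authenticate(conn redis.Conn, user, password string) error {
	if password == "" {
		return nil
	}
	if user != "" {
		_, err := conn.Do("AUTH", user, password)
		return err
	}
	_, err := conn.Do("AUTH", password)
	return err
}

func main() {
	conn, err := redis.Dial("tcp", "localhost:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if err := authenticate(conn, "notify", "secret"); err != nil {
		log.Fatal(err)
	}
	// Verify the authenticated connection is usable.
	if _, err := conn.Do("PING"); err != nil {
		log.Fatal(err)
	}
}
```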
@@ -211,7 +214,7 @@ func (target *WebhookTarget) SendFromStore(key store.Key) error { return err } - eventData, eErr := target.store.Get(key.Name) + eventData, eErr := target.store.Get(key) if eErr != nil { // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() // Such events will not exist and would've been already been sent successfully. @@ -229,7 +232,7 @@ func (target *WebhookTarget) SendFromStore(key store.Key) error { } // Delete the event from store. - return target.store.Del(key.Name) + return target.store.Del(key) } // Close - does nothing and available for interface compatibility. diff --git a/internal/event/targetidset.go b/internal/event/targetidset.go index 34cee4ddc3caf..88fa917708d5f 100644 --- a/internal/event/targetidset.go +++ b/internal/event/targetidset.go @@ -17,20 +17,15 @@ package event +import "maps" + // TargetIDSet - Set representation of TargetIDs. type TargetIDSet map[TargetID]struct{} -// IsEmpty returns true if the set is empty. -func (set TargetIDSet) IsEmpty() bool { - return len(set) != 0 -} - // Clone - returns copy of this set. func (set TargetIDSet) Clone() TargetIDSet { setCopy := NewTargetIDSet() - for k, v := range set { - setCopy[k] = v - } + maps.Copy(setCopy, set) return setCopy } diff --git a/internal/event/targetlist.go b/internal/event/targetlist.go index 5d261cd6cfd35..3aeee5d26197a 100644 --- a/internal/event/targetlist.go +++ b/internal/event/targetlist.go @@ -20,16 +20,19 @@ package event import ( "context" "fmt" + "maps" "runtime" "sync" "sync/atomic" "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/store" - "github.com/minio/pkg/v2/workers" + "github.com/minio/pkg/v3/workers" ) const ( + logSubsys = "notify" + // The maximum allowed number of concurrent Send() calls to all configured notifications targets maxConcurrentAsyncSend = 50000 ) @@ -112,7 +115,6 @@ func (list *TargetList) incCurrentSendCalls(id TargetID) { stats.currentSendCalls++ list.targetStats[id] = stats - return } func (list *TargetList) decCurrentSendCalls(id TargetID) { @@ -127,7 +129,6 @@ func (list *TargetList) decCurrentSendCalls(id TargetID) { stats.currentSendCalls-- list.targetStats[id] = stats - return } func (list *TargetList) incFailedEvents(id TargetID) { @@ -141,7 +142,6 @@ func (list *TargetList) incFailedEvents(id TargetID) { stats.failedEvents++ list.targetStats[id] = stats - return } func (list *TargetList) incTotalEvents(id TargetID) { @@ -155,7 +155,6 @@ func (list *TargetList) incTotalEvents(id TargetID) { stats.totalEvents++ list.targetStats[id] = stats - return } type asyncEvent struct { @@ -254,9 +253,7 @@ func (list *TargetList) TargetMap() map[TargetID]Target { defer list.RUnlock() ntargets := make(map[TargetID]Target, len(list.targets)) - for k, v := range list.targets { - ntargets[k] = v - } + maps.Copy(ntargets, list.targets) return ntargets } @@ -290,7 +287,7 @@ func (list *TargetList) sendSync(event Event, targetIDset TargetIDSet) { list.incFailedEvents(id) reqInfo := &logger.ReqInfo{} reqInfo.AppendTags("targetID", id.String()) - logger.LogOnceIf(logger.SetReqInfo(context.Background(), reqInfo), err, id.String()) + logger.LogOnceIf(logger.SetReqInfo(context.Background(), reqInfo), logSubsys, err, id.String()) } }(id, target) } @@ -309,11 +306,11 @@ func (list *TargetList) sendAsync(event Event, targetIDset TargetIDSet) { return default: list.eventsSkipped.Add(1) - err := fmt.Errorf("concurrent target notifications exceeded %d, notification endpoint is too slow to accept 
events on incoming requests", maxConcurrentAsyncSend) + err := fmt.Errorf("concurrent target notifications exceeded %d, configured notification target is too slow to accept events for the incoming request rate", maxConcurrentAsyncSend) for id := range targetIDset { reqInfo := &logger.ReqInfo{} reqInfo.AppendTags("targetID", id.String()) - logger.LogOnceIf(logger.SetReqInfo(context.Background(), reqInfo), err, id.String()) + logger.LogOnceIf(logger.SetReqInfo(context.Background(), reqInfo), logSubsys, err, id.String()) } return } @@ -359,7 +356,7 @@ func (list *TargetList) startSendWorkers(workerCount int) { if err != nil { panic(err) } - for i := 0; i < workerCount; i++ { + for range workerCount { wk.Take() go func() { defer wk.Give() diff --git a/internal/event/targetlist_test.go b/internal/event/targetlist_test.go index 51b9678b5acd5..f6aed6584b46d 100644 --- a/internal/event/targetlist_test.go +++ b/internal/event/targetlist_test.go @@ -18,7 +18,6 @@ package event import ( - "context" "crypto/rand" "errors" "reflect" @@ -86,14 +85,14 @@ func (target ExampleTarget) FlushQueueStore() error { } func TestTargetListAdd(t *testing.T) { - targetListCase1 := NewTargetList(context.Background()) + targetListCase1 := NewTargetList(t.Context()) - targetListCase2 := NewTargetList(context.Background()) + targetListCase2 := NewTargetList(t.Context()) if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil { panic(err) } - targetListCase3 := NewTargetList(context.Background()) + targetListCase3 := NewTargetList(t.Context()) if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, false}); err != nil { panic(err) } @@ -141,14 +140,14 @@ func TestTargetListAdd(t *testing.T) { } func TestTargetListExists(t *testing.T) { - targetListCase1 := NewTargetList(context.Background()) + targetListCase1 := NewTargetList(t.Context()) - targetListCase2 := NewTargetList(context.Background()) + targetListCase2 := NewTargetList(t.Context()) if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil { panic(err) } - targetListCase3 := NewTargetList(context.Background()) + targetListCase3 := NewTargetList(t.Context()) if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, false}); err != nil { panic(err) } @@ -173,14 +172,14 @@ func TestTargetListExists(t *testing.T) { } func TestTargetListList(t *testing.T) { - targetListCase1 := NewTargetList(context.Background()) + targetListCase1 := NewTargetList(t.Context()) - targetListCase2 := NewTargetList(context.Background()) + targetListCase2 := NewTargetList(t.Context()) if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil { panic(err) } - targetListCase3 := NewTargetList(context.Background()) + targetListCase3 := NewTargetList(t.Context()) if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, false}); err != nil { panic(err) } @@ -220,7 +219,7 @@ func TestTargetListList(t *testing.T) { } func TestNewTargetList(t *testing.T) { - if result := NewTargetList(context.Background()); result == nil { + if result := NewTargetList(t.Context()); result == nil { t.Fatalf("test: result: expected: , got: ") } } diff --git a/internal/fips/fips.go b/internal/fips/fips.go deleted file mode 100644 index 94b3ed00c52d9..0000000000000 --- a/internal/fips/fips.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2015-2021 MinIO, Inc. 
-// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -//go:build fips && linux && amd64 -// +build fips,linux,amd64 - -package fips - -const enabled = true diff --git a/internal/fips/go19.go b/internal/fips/go19.go deleted file mode 100644 index 2f61bcab8fccb..0000000000000 --- a/internal/fips/go19.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2015-2022 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -//go:build go1.19 -// +build go1.19 - -package fips - -const go19 = true diff --git a/internal/fips/no_fips.go b/internal/fips/no_fips.go deleted file mode 100644 index 96cfd3aa87f0f..0000000000000 --- a/internal/fips/no_fips.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2015-2021 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -//go:build !fips -// +build !fips - -package fips - -const enabled = false diff --git a/internal/fips/no_go19.go b/internal/fips/no_go19.go deleted file mode 100644 index 5879bf9d7130c..0000000000000 --- a/internal/fips/no_go19.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright (c) 2015-2022 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -//go:build !go1.19 -// +build !go1.19 - -package fips - -const go19 = false diff --git a/internal/grid/README.md b/internal/grid/README.md index e43d44aa5aafc..d098b24fe1a1d 100644 --- a/internal/grid/README.md +++ b/internal/grid/README.md @@ -23,7 +23,7 @@ and other connections will be blocked while the large payload is being sent. ## Handlers & Routes -Handlers have a predefined Handler ID. +Handlers have a predefined Handler ID. In addition, there can be several *static* subroutes used to differentiate between different handlers of the same ID. A subroute on a client must match a subroute on the server. So routes cannot be used for dynamic routing, unlike HTTP. @@ -33,14 +33,14 @@ Handlers should remain backwards compatible. If a breaking API change is require A **Manager** is used to manage all incoming and outgoing connections to a server. -On startup all remote servers must be specified. -From that individual connections will be spawned to each remote server, +On startup all remote servers must be specified. +From that individual connections will be spawned to each remote server, or incoming requests will be hooked up to the appropriate connection. To get a connection to a specific server, use `Manager.Connection(host)` to get a connection to the specified host. From this connection individual requests can be made. -Each handler, with optional subroutes can be registered with the manager using +Each handler, with optional subroutes can be registered with the manager using `Manager.RegisterXHandler(handlerID, handler, subroutes...)`. A `Handler()` function provides an HTTP handler, which should be hooked up to the appropriate route on the server. @@ -75,7 +75,7 @@ Sample call: ```go // Get a connection to the remote host conn := manager.Connection(host) - + payload := []byte("request") response, err := conn.SingleRequest(ctx, grid.HandlerDiskInfo, payload) ``` @@ -85,7 +85,7 @@ If the error type is `*RemoteErr`, then the error was returned by the remote ser Context timeouts are propagated, and a default timeout of 1 minute is added if none is specified. There is no cancellation propagation for single payload requests. -When the context is canceled, the request will return at once with an appropriate error. +When the context is canceled, the request will return at once with an appropriate error. However, the remote call will not see the cancellation - as can be seen from the 'missing' context on the handler. The result will be discarded. @@ -102,14 +102,14 @@ In the examples we use a `MSS` type, which is a `map[string]string` that is `msg // Do something with payload return NewMSSWith(map[string]string{"result": "ok"}), nil } - + // Create a typed handler. // Due to current generics limitations, a constructor of the empty type must be provided. 
instance := grid.NewSingleHandler[*grid.MSS, *grid.MSS](h, grid.NewMSS, grid.NewMSS) - + // Register the handler on the manager instance.Register(manager, handler) - + // The typed instance is also used for calls conn := manager.Connection("host") resp, err := instance.Call(ctx, conn, grid.NewMSSWith(map[string]string{"myfield": "myvalue"})) @@ -118,7 +118,7 @@ In the examples we use a `MSS` type, which is a `map[string]string` that is `msg } ``` -The wrapper will handle all serialization and de-seralization of the request and response, +The wrapper will handle all serialization and de-serialization of the request and response, and furthermore provides reuse of the structs used for the request and response. Note that Responses sent for serialization are automatically reused for similar requests. @@ -143,7 +143,7 @@ Sample handler: case req, ok := <-in: if !ok { break - } + } // Do something with payload out <- []byte("response") @@ -167,7 +167,7 @@ Sample call: ```go // Get a connection to the remote host conn := manager.Connection(host).Subroute("asubroute") - + payload := []byte("request") stream, err := conn.NewStream(ctx, grid.HandlerDiskInfo, payload) if err != nil { @@ -183,12 +183,12 @@ Sample call: }) ``` -Context cancellation and timeouts are propagated to the handler. +Context cancellation and timeouts are propagated to the handler. The client does not wait for the remote handler to finish before returning. Returning any error will also cancel the stream remotely. CAREFUL: When utilizing two-way communication, it is important to ensure that the remote handler is not blocked on a send. -If the remote handler is blocked on a send, and the client is trying to send without the remote receiving, +If the remote handler is blocked on a send, and the client is trying to send without the remote receiving, the operation would become deadlocked if the channels are full. ### Typed handlers @@ -215,24 +215,24 @@ Typed handlers are handlers that have a specific type for the request and respon // out is closed by the caller and should never be closed by the handler. return nil } - + // Create a typed handler. // Due to current generics limitations, a constructor of the empty type must be provided. instance := grid.NewStream[*Payload, *Req, *Resp](h, newPayload, newReq, newResp) - + // Tweakable options instance.WithPayload = true // default true when newPayload != nil instance.OutCapacity = 1 // default instance.InCapacity = 1 // default true when newReq != nil - + // Register the handler on the manager instance.Register(manager, handler, "asubroute") - + // The typed instance is also used for calls conn := manager.Connection("host").Subroute("asubroute") stream, err := instance.Call(ctx, conn, &Payload{"request payload"}) if err != nil { ... 
} - + // Read results from the stream err = stream.Results(func(resp *Resp) error { fmt.Println("Got result", resp) diff --git a/internal/grid/benchmark_test.go b/internal/grid/benchmark_test.go index 54feb9aa206fa..9bd17d1a9a4ba 100644 --- a/internal/grid/benchmark_test.go +++ b/internal/grid/benchmark_test.go @@ -78,7 +78,7 @@ func benchmarkGridRequests(b *testing.B, n int) { for par := 1; par <= 32; par *= 2 { b.Run("par="+strconv.Itoa(par*runtime.GOMAXPROCS(0)), func(b *testing.B) { defer timeout(60 * time.Second)() - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(b.Context(), 30*time.Second) defer cancel() b.ReportAllocs() b.SetBytes(int64(len(payload) * 2)) @@ -135,7 +135,7 @@ func benchmarkGridRequests(b *testing.B, n int) { for par := 1; par <= 32; par *= 2 { b.Run("par="+strconv.Itoa(par*runtime.GOMAXPROCS(0)), func(b *testing.B) { defer timeout(60 * time.Second)() - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(b.Context(), 30*time.Second) defer cancel() b.ReportAllocs() b.ResetTimer() @@ -200,6 +200,7 @@ func BenchmarkStream(b *testing.B) { }{ {name: "request", fn: benchmarkGridStreamReqOnly}, {name: "responses", fn: benchmarkGridStreamRespOnly}, + {name: "twoway", fn: benchmarkGridStreamTwoway}, } for _, test := range tests { b.Run(test.name, func(b *testing.B) { @@ -230,7 +231,7 @@ func benchmarkGridStreamRespOnly(b *testing.B, n int) { errFatal(remote.RegisterStreamingHandler(handlerTest, StreamHandler{ // Send 10x response. Handle: func(ctx context.Context, payload []byte, _ <-chan []byte, out chan<- []byte) *RemoteErr { - for i := 0; i < responses; i++ { + for i := range responses { toSend := GetByteBuffer()[:0] toSend = append(toSend, byte(i)) toSend = append(toSend, payload...) @@ -284,7 +285,7 @@ func benchmarkGridStreamRespOnly(b *testing.B, n int) { if conn == nil { b.Fatal("No connection") } - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(b.Context(), 30*time.Second) // Send the payload. t := time.Now() st, err := conn.NewStream(ctx, handlerTest, payload) @@ -395,7 +396,7 @@ func benchmarkGridStreamReqOnly(b *testing.B, n int) { if conn == nil { b.Fatal("No connection") } - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + ctx, cancel := context.WithTimeout(b.Context(), 30*time.Second) // Send the payload. t := time.Now() st, err := conn.NewStream(ctx, handlerTest, payload) @@ -406,7 +407,7 @@ func benchmarkGridStreamReqOnly(b *testing.B, n int) { b.Fatal(err.Error()) } got := 0 - for i := 0; i < requests; i++ { + for range requests { got++ st.Requests <- append(GetByteBuffer()[:0], payload...) } @@ -438,3 +439,129 @@ func benchmarkGridStreamReqOnly(b *testing.B, n int) { }) } } + +func benchmarkGridStreamTwoway(b *testing.B, n int) { + defer testlogger.T.SetErrorTB(b)() + + errFatal := func(err error) { + b.Helper() + if err != nil { + b.Fatal(err) + } + } + grid, err := SetupTestGrid(n) + errFatal(err) + b.Cleanup(grid.Cleanup) + const messages = 10 + // Create n managers. + const payloadSize = 512 + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + payload := make([]byte, payloadSize) + _, err = rng.Read(payload) + errFatal(err) + + for _, remote := range grid.Managers { + // Register a single handler which echos the payload. + errFatal(remote.RegisterStreamingHandler(handlerTest, StreamHandler{ + // Send 10x requests. 
+ Handle: func(ctx context.Context, payload []byte, in <-chan []byte, out chan<- []byte) *RemoteErr { + got := 0 + for b := range in { + out <- b + got++ + } + if got != messages { + return NewRemoteErrf("wrong number of requests. want %d, got %d", messages, got) + } + return nil + }, + + Subroute: "some-subroute", + OutCapacity: 1, + InCapacity: 1, // Only one message buffered. + })) + errFatal(err) + } + + // Wait for all to connect + // Parallel writes per server. + for par := 1; par <= 32; par *= 2 { + b.Run("par="+strconv.Itoa(par*runtime.GOMAXPROCS(0)), func(b *testing.B) { + defer timeout(30 * time.Second)() + b.ReportAllocs() + b.SetBytes(int64(len(payload) * (2*messages + 1))) + b.ResetTimer() + t := time.Now() + var ops int64 + var lat int64 + b.SetParallelism(par) + b.RunParallel(func(pb *testing.PB) { + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + n := 0 + var latency int64 + managers := grid.Managers + hosts := grid.Hosts + for pb.Next() { + // Pick a random manager. + src, dst := rng.Intn(len(managers)), rng.Intn(len(managers)) + if src == dst { + dst = (dst + 1) % len(managers) + } + local := managers[src] + conn := local.Connection(hosts[dst]).Subroute("some-subroute") + if conn == nil { + b.Fatal("No connection") + } + ctx, cancel := context.WithTimeout(b.Context(), 30*time.Second) + // Send the payload. + t := time.Now() + st, err := conn.NewStream(ctx, handlerTest, payload) + if err != nil { + if debugReqs { + fmt.Println(err.Error()) + } + b.Fatal(err.Error()) + } + got := 0 + sent := 0 + go func() { + for range messages { + st.Requests <- append(GetByteBuffer()[:0], payload...) + if sent++; sent == messages { + close(st.Requests) + return + } + } + }() + err = st.Results(func(b []byte) error { + got++ + PutByteBuffer(b) + return nil + }) + if err != nil { + if debugReqs { + fmt.Println(err.Error()) + } + b.Fatal(err.Error()) + } + if got != messages { + b.Fatalf("wrong number of responses. want %d, got %d", messages, got) + } + latency += time.Since(t).Nanoseconds() + cancel() + n += got + } + atomic.AddInt64(&ops, int64(n*2)) + atomic.AddInt64(&lat, latency) + }) + spent := time.Since(t) + if spent > 0 && n > 0 { + // Since we are benchmarking n parallel servers we need to multiply by n. + // This will give an estimate of the total ops/s. + latency := float64(atomic.LoadInt64(&lat)) / float64(time.Millisecond) + b.ReportMetric(float64(n)*float64(ops)/spent.Seconds(), "vops/s") + b.ReportMetric(latency/float64(ops), "ms/op") + } + }) + } +} diff --git a/internal/grid/connection.go b/internal/grid/connection.go index 6a6979c74a737..576f4229af1ca 100644 --- a/internal/grid/connection.go +++ b/internal/grid/connection.go @@ -20,7 +20,6 @@ package grid import ( "bytes" "context" - "crypto/tls" "encoding/binary" "errors" "fmt" @@ -28,7 +27,7 @@ import ( "math" "math/rand" "net" - "net/http" + "runtime" "runtime/debug" "strings" "sync" @@ -42,10 +41,24 @@ import ( xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/pubsub" + xnet "github.com/minio/pkg/v3/net" + "github.com/puzpuzpuz/xsync/v3" "github.com/tinylib/msgp/msgp" "github.com/zeebo/xxh3" ) +func gridLogIf(ctx context.Context, err error, errKind ...any) { + logger.LogIf(ctx, "grid", err, errKind...) +} + +func gridLogIfNot(ctx context.Context, err error, ignored ...error) { + logger.LogIfNot(ctx, "grid", err, ignored...) 
+} + +func gridLogOnceIf(ctx context.Context, err error, id string, errKind ...any) { + logger.LogOnceIf(ctx, "grid", err, id, errKind...) +} + // A Connection is a remote connection. // There is no distinction externally whether the connection was initiated from // this server or from the remote. @@ -75,10 +88,10 @@ type Connection struct { ctx context.Context // Active mux connections. - outgoing *lockedClientMap + outgoing *xsync.MapOf[uint64, *muxClient] // Incoming streams - inStream *lockedServerMap + inStream *xsync.MapOf[uint64, *muxServer] // outQueue is the output queue outQueue chan []byte @@ -86,9 +99,9 @@ type Connection struct { // Client or serverside. side ws.State - // Transport for outgoing connections. - dialer ContextDialer - header http.Header + // Dialer for outgoing connections. + dial ConnDialer + authFn AuthFn handleMsgWg sync.WaitGroup @@ -98,22 +111,28 @@ type Connection struct { handlers *handlers remote *RemoteClient - auth AuthFn clientPingInterval time.Duration connPingInterval time.Duration - tlsConfig *tls.Config blockConnect chan struct{} incomingBytes func(n int64) // Record incoming bytes. outgoingBytes func(n int64) // Record outgoing bytes. trace *tracer // tracer for this connection. baseFlags Flags + outBytes atomic.Int64 + inBytes atomic.Int64 + inMessages atomic.Int64 + outMessages atomic.Int64 + reconnects atomic.Int64 + lastConnect atomic.Pointer[time.Time] + lastPingDur atomic.Int64 // For testing only - debugInConn net.Conn - debugOutConn net.Conn - addDeadline time.Duration - connMu sync.Mutex + debugInConn net.Conn + debugOutConn net.Conn + blockMessages atomic.Pointer[<-chan struct{}] + addDeadline time.Duration + connMu sync.Mutex } // Subroute is a connection subroute that can be used to route to a specific handler with the same handler ID. @@ -186,13 +205,12 @@ type connectionParams struct { ctx context.Context id uuid.UUID local, remote string - dial ContextDialer handlers *handlers - auth AuthFn - tlsConfig *tls.Config incomingBytes func(n int64) // Record incoming bytes. outgoingBytes func(n int64) // Record outgoing bytes. publisher *pubsub.PubSub[madmin.TraceInfo, madmin.TraceType] + dialer ConnDialer + authFn AuthFn blockConnect chan struct{} } @@ -205,26 +223,36 @@ func newConnection(o connectionParams) *Connection { Local: o.local, id: o.id, ctx: o.ctx, - outgoing: &lockedClientMap{m: make(map[uint64]*muxClient, 1000)}, - inStream: &lockedServerMap{m: make(map[uint64]*muxServer, 1000)}, + outgoing: xsync.NewMapOfPresized[uint64, *muxClient](1000), + inStream: xsync.NewMapOfPresized[uint64, *muxServer](1000), outQueue: make(chan []byte, defaultOutQueue), - dialer: o.dial, side: ws.StateServerSide, connChange: &sync.Cond{L: &sync.Mutex{}}, handlers: o.handlers, - auth: o.auth, - header: make(http.Header, 1), remote: &RemoteClient{Name: o.remote}, clientPingInterval: clientPingInterval, connPingInterval: connPingInterval, - tlsConfig: o.tlsConfig, - incomingBytes: o.incomingBytes, - outgoingBytes: o.outgoingBytes, + dial: o.dialer, + authFn: o.authFn, } if debugPrint { // Random Mux ID c.NextID = rand.Uint64() } + + // Record per connection stats. 
+ c.outgoingBytes = func(n int64) { + if o.outgoingBytes != nil { + o.outgoingBytes(n) + } + c.outBytes.Add(n) + } + c.incomingBytes = func(n int64) { + if o.incomingBytes != nil { + o.incomingBytes(n) + } + c.inBytes.Add(n) + } if !strings.HasPrefix(o.remote, "https://") && !strings.HasPrefix(o.remote, "wss://") { c.baseFlags |= FlagCRCxxh3 } @@ -549,10 +577,9 @@ func (c *Connection) queueMsg(msg message, payload sender) error { // This cannot encode subroute. msg.Flags.Clear(FlagSubroute) if payload != nil { - if cap(msg.Payload) < payload.Msgsize() { - old := msg.Payload - msg.Payload = GetByteBuffer()[:0] - PutByteBuffer(old) + if sz := payload.Msgsize(); cap(msg.Payload) < sz { + PutByteBuffer(msg.Payload) + msg.Payload = GetByteBufferCap(sz) } var err error msg.Payload, err = payload.MarshalMsg(msg.Payload[:0]) @@ -562,7 +589,7 @@ func (c *Connection) queueMsg(msg message, payload sender) error { } } defer PutByteBuffer(msg.Payload) - dst := GetByteBuffer()[:0] + dst := GetByteBufferCap(msg.Msgsize()) dst, err := msg.MarshalMsg(dst) if err != nil { return err @@ -577,9 +604,9 @@ func (c *Connection) queueMsg(msg message, payload sender) error { // sendMsg will send func (c *Connection) sendMsg(conn net.Conn, msg message, payload msgp.MarshalSizer) error { if payload != nil { - if cap(msg.Payload) < payload.Msgsize() { + if sz := payload.Msgsize(); cap(msg.Payload) < sz { PutByteBuffer(msg.Payload) - msg.Payload = GetByteBuffer()[:0] + msg.Payload = GetByteBufferCap(sz)[:0] } var err error msg.Payload, err = payload.MarshalMsg(msg.Payload) @@ -588,7 +615,7 @@ func (c *Connection) sendMsg(conn net.Conn, msg message, payload msgp.MarshalSiz } defer PutByteBuffer(msg.Payload) } - dst := GetByteBuffer()[:0] + dst := GetByteBufferCap(msg.Msgsize()) dst, err := msg.MarshalMsg(dst) if err != nil { return err @@ -618,56 +645,28 @@ func (c *Connection) connect() { if c.State() == StateShutdown { return } - toDial := strings.Replace(c.Remote, "http://", "ws://", 1) - toDial = strings.Replace(toDial, "https://", "wss://", 1) - toDial += RoutePath - - dialer := ws.DefaultDialer - dialer.ReadBufferSize = readBufferSize - dialer.WriteBufferSize = writeBufferSize - dialer.Timeout = defaultDialTimeout - if c.dialer != nil { - dialer.NetDial = c.dialer.DialContext - } - if c.header == nil { - c.header = make(http.Header, 2) - } - c.header.Set("Authorization", "Bearer "+c.auth("")) - c.header.Set("X-Minio-Time", time.Now().UTC().Format(time.RFC3339)) - - if len(c.header) > 0 { - dialer.Header = ws.HandshakeHeaderHTTP(c.header) - } - dialer.TLSConfig = c.tlsConfig dialStarted := time.Now() if debugPrint { - fmt.Println(c.Local, "Connecting to ", toDial) - } - conn, br, _, err := dialer.Dial(c.ctx, toDial) - if br != nil { - ws.PutReader(br) + fmt.Println(c.Local, "Connecting to ", c.Remote) } + conn, err := c.dial(c.ctx, c.Remote) c.connMu.Lock() c.debugOutConn = conn c.connMu.Unlock() retry := func(err error) { if debugPrint { - fmt.Printf("%v Connecting to %v: %v. Retrying.\n", c.Local, toDial, err) + fmt.Printf("%v Connecting to %v: %v. 
Retrying.\n", c.Local, c.Remote, err) } sleep := defaultDialTimeout + time.Duration(rng.Int63n(int64(defaultDialTimeout))) next := dialStarted.Add(sleep / 2) - sleep = time.Until(next).Round(time.Millisecond) - if sleep < 0 { - sleep = 0 - } + sleep = max(time.Until(next).Round(time.Millisecond), 0) gotState := c.State() if gotState == StateShutdown { return } if gotState != StateConnecting { - // Don't print error on first attempt, - // and after that only once per hour. - logger.LogOnceIf(c.ctx, fmt.Errorf("grid: %s connecting to %s: %w (%T) Sleeping %v (%v)", c.Local, toDial, err, err, sleep, gotState), toDial) + // Don't print error on first attempt, and after that only once per hour. + gridLogOnceIf(c.ctx, fmt.Errorf("grid: %s re-connecting to %s: %w (%T) Sleeping %v (%v)", c.Local, c.Remote, err, err, sleep, gotState), c.Remote) } c.updateState(StateConnectionError) time.Sleep(sleep) @@ -683,7 +682,9 @@ func (c *Connection) connect() { req := connectReq{ Host: c.Local, ID: c.id, + Time: time.Now(), } + req.addToken(c.authFn) err = c.sendMsg(conn, m, &req) if err != nil { retry(err) @@ -706,6 +707,8 @@ func (c *Connection) connect() { retry(fmt.Errorf("connection rejected: %s", r.RejectedReason)) continue } + t := time.Now().UTC() + c.lastConnect.Store(&t) c.reconnectMu.Lock() remoteUUID := uuid.UUID(r.ID) if c.remoteID != nil { @@ -806,6 +809,8 @@ func (c *Connection) handleIncoming(ctx context.Context, conn net.Conn, req conn if err != nil { return err } + t := time.Now().UTC() + c.lastConnect.Store(&t) // Signal that we are reconnected, update state and handle messages. // Prevent other connections from connecting while we process. c.reconnectMu.Lock() @@ -825,6 +830,38 @@ func (c *Connection) handleIncoming(ctx context.Context, conn net.Conn, req conn // caller *must* hold reconnectMu. func (c *Connection) reconnected() { c.updateState(StateConnectionError) + c.reconnects.Add(1) + + // Drain the outQueue, so any blocked messages can be sent. + // We keep the queue, but start draining it, if it gets full. + stopDraining := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(1) + defer func() { + close(stopDraining) + wg.Wait() + }() + go func() { + defer wg.Done() + for { + select { + case <-stopDraining: + return + default: + if cap(c.outQueue)-len(c.outQueue) > 100 { + // Queue is not full, wait a bit. + time.Sleep(1 * time.Millisecond) + continue + } + select { + case v := <-c.outQueue: + PutByteBuffer(v) + case <-stopDraining: + return + } + } + } + }() // Close all active requests. if debugReqs { fmt.Println(c.String(), "Reconnected. 
Clearing outgoing.") @@ -894,125 +931,147 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) { c.handleMsgWg.Add(2) c.reconnectMu.Unlock() - // Read goroutine - go func() { - defer func() { - if rec := recover(); rec != nil { - logger.LogIf(ctx, fmt.Errorf("handleMessages: panic recovered: %v", rec)) - debug.PrintStack() - } - c.connChange.L.Lock() - if atomic.CompareAndSwapUint32((*uint32)(&c.state), StateConnected, StateConnectionError) { - c.connChange.Broadcast() - } - c.connChange.L.Unlock() - conn.Close() - c.handleMsgWg.Done() - }() + // Start reader and writer + go c.readStream(ctx, conn, cancel) + c.writeStream(ctx, conn, cancel) +} - controlHandler := wsutil.ControlFrameHandler(conn, c.side) - wsReader := wsutil.Reader{ - Source: conn, - State: c.side, - CheckUTF8: true, - SkipHeaderCheck: false, - OnIntermediate: controlHandler, - } - readDataInto := func(dst []byte, rw io.ReadWriter, s ws.State, want ws.OpCode) ([]byte, error) { - dst = dst[:0] - for { - hdr, err := wsReader.NextFrame() - if err != nil { +// readStream handles the read side of the connection. +// It will read messages and send them to c.handleMsg. +// If an error occurs the cancel function will be called and conn be closed. +// The function will block until the connection is closed or an error occurs. +func (c *Connection) readStream(ctx context.Context, conn net.Conn, cancel context.CancelCauseFunc) { + defer func() { + if rec := recover(); rec != nil { + gridLogIf(ctx, fmt.Errorf("handleMessages: panic recovered: %v", rec)) + debug.PrintStack() + } + cancel(ErrDisconnected) + c.connChange.L.Lock() + if atomic.CompareAndSwapUint32((*uint32)(&c.state), StateConnected, StateConnectionError) { + c.connChange.Broadcast() + } + c.connChange.L.Unlock() + conn.Close() + c.handleMsgWg.Done() + }() + + controlHandler := wsutil.ControlFrameHandler(conn, c.side) + wsReader := wsutil.Reader{ + Source: conn, + State: c.side, + CheckUTF8: true, + SkipHeaderCheck: false, + OnIntermediate: controlHandler, + } + readDataInto := func(dst []byte, s ws.State, want ws.OpCode) ([]byte, error) { + dst = dst[:0] + for { + hdr, err := wsReader.NextFrame() + if err != nil { + return nil, err + } + if hdr.OpCode.IsControl() { + if err := controlHandler(hdr, &wsReader); err != nil { return nil, err } - if hdr.OpCode.IsControl() { - if err := controlHandler(hdr, &wsReader); err != nil { - return nil, err - } - continue - } - if hdr.OpCode&want == 0 { - if err := wsReader.Discard(); err != nil { - return nil, err - } - continue - } - if int64(cap(dst)) < hdr.Length+1 { - dst = make([]byte, 0, hdr.Length+hdr.Length>>3) + continue + } + if hdr.OpCode&want == 0 { + if err := wsReader.Discard(); err != nil { + return nil, err } - return readAllInto(dst[:0], &wsReader) + continue + } + if int64(cap(dst)) < hdr.Length+1 { + dst = make([]byte, 0, hdr.Length+hdr.Length>>3) } + if !hdr.Fin { + hdr.Length = -1 + } + return readAllInto(dst[:0], &wsReader, hdr.Length) } + } - // Keep reusing the same buffer. - var msg []byte - for { - if atomic.LoadUint32((*uint32)(&c.state)) != StateConnected { - cancel(ErrDisconnected) - return - } - if cap(msg) > readBufferSize*4 { - // Don't keep too much memory around. - msg = nil + // Keep reusing the same buffer. + var msg []byte + for atomic.LoadUint32((*uint32)(&c.state)) == StateConnected { + if cap(msg) > readBufferSize*4 { + // Don't keep too much memory around. 
+ msg = nil + } + + var err error + msg, err = readDataInto(msg, c.side, ws.OpBinary) + if err != nil { + if !xnet.IsNetworkOrHostDown(err, true) { + gridLogIfNot(ctx, fmt.Errorf("ws read: %w", err), net.ErrClosed, io.EOF) } + return + } + block := c.blockMessages.Load() + if block != nil && *block != nil { + <-*block + } - var err error - msg, err = readDataInto(msg, conn, c.side, ws.OpBinary) - if err != nil { - cancel(ErrDisconnected) - logger.LogIfNot(ctx, fmt.Errorf("ws read: %w", err), net.ErrClosed, io.EOF) - return + if c.incomingBytes != nil { + c.incomingBytes(int64(len(msg))) + } + + // Parse the received message + var m message + subID, remain, err := m.parse(msg) + if err != nil { + if !xnet.IsNetworkOrHostDown(err, true) { + gridLogIf(ctx, fmt.Errorf("ws parse package: %w", err)) } - if c.incomingBytes != nil { - c.incomingBytes(int64(len(msg))) + return + } + if debugPrint { + fmt.Printf("%s Got msg: %v\n", c.Local, m) + } + if m.Op != OpMerged { + c.inMessages.Add(1) + c.handleMsg(ctx, m, subID) + continue + } + // Handle merged messages. + messages := int(m.Seq) + c.inMessages.Add(int64(messages)) + for range messages { + if atomic.LoadUint32((*uint32)(&c.state)) != StateConnected { + return } - - // Parse the received message - var m message - subID, remain, err := m.parse(msg) + var next []byte + next, remain, err = msgp.ReadBytesZC(remain) if err != nil { - logger.LogIf(ctx, fmt.Errorf("ws parse package: %w", err)) - cancel(ErrDisconnected) + if !xnet.IsNetworkOrHostDown(err, true) { + gridLogIf(ctx, fmt.Errorf("ws read merged: %w", err)) + } return } - if debugPrint { - fmt.Printf("%s Got msg: %v\n", c.Local, m) - } - if m.Op != OpMerged { - c.handleMsg(ctx, m, subID) - continue - } - // Handle merged messages. - messages := int(m.Seq) - for i := 0; i < messages; i++ { - if atomic.LoadUint32((*uint32)(&c.state)) != StateConnected { - cancel(ErrDisconnected) - return - } - var next []byte - next, remain, err = msgp.ReadBytesZC(remain) - if err != nil { - logger.LogIf(ctx, fmt.Errorf("ws read merged: %w", err)) - cancel(ErrDisconnected) - return - } - m.Payload = nil - subID, _, err = m.parse(next) - if err != nil { - logger.LogIf(ctx, fmt.Errorf("ws parse merged: %w", err)) - cancel(ErrDisconnected) - return + m.Payload = nil + subID, _, err = m.parse(next) + if err != nil { + if !xnet.IsNetworkOrHostDown(err, true) { + gridLogIf(ctx, fmt.Errorf("ws parse merged: %w", err)) } - c.handleMsg(ctx, m, subID) + return } + c.handleMsg(ctx, m, subID) } - }() + } +} - // Write function. +// writeStream handles the read side of the connection. +// It will grab messages from c.outQueue and write them to the connection. +// If an error occurs the cancel function will be called and conn be closed. +// The function will block until the connection is closed or an error occurs. 
+func (c *Connection) writeStream(ctx context.Context, conn net.Conn, cancel context.CancelCauseFunc) { defer func() { if rec := recover(); rec != nil { - logger.LogIf(ctx, fmt.Errorf("handleMessages: panic recovered: %v", rec)) + gridLogIf(ctx, fmt.Errorf("handleMessages: panic recovered: %v", rec)) debug.PrintStack() } if debugPrint { @@ -1036,15 +1095,49 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) { ping := time.NewTicker(connPingInterval) pingFrame := message{ Op: OpPing, - DeadlineMS: 5000, + DeadlineMS: uint32(connPingInterval.Milliseconds()), + Payload: make([]byte, pingMsg{}.Msgsize()), } defer ping.Stop() queue := make([][]byte, 0, maxMergeMessages) - merged := make([]byte, 0, writeBufferSize) var queueSize int var buf bytes.Buffer var wsw wsWriter + var lastSetDeadline time.Time + + // Helper to write everything in buf. + // Return false if an error occurred and the connection is unusable. + // Buffer will be reset empty when returning successfully. + writeBuffer := func() (ok bool) { + now := time.Now() + // Only set write deadline once every second + if now.Sub(lastSetDeadline) > time.Second { + err := conn.SetWriteDeadline(now.Add(connWriteTimeout + time.Second)) + if err != nil { + gridLogIf(ctx, fmt.Errorf("conn.SetWriteDeadline: %w", err)) + return false + } + lastSetDeadline = now + } + + _, err := buf.WriteTo(conn) + if err != nil { + if !xnet.IsNetworkOrHostDown(err, true) { + gridLogIf(ctx, fmt.Errorf("ws write: %w", err)) + } + return false + } + if buf.Cap() > writeBufferSize*8 { + // Reset buffer if it gets too big, so we don't keep it around. + buf = bytes.Buffer{} + } + buf.Reset() + return true + } + + // Merge buffer to keep between calls + merged := make([]byte, 0, writeBufferSize) for { var toSend []byte select { @@ -1056,18 +1149,25 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) { } lastPong := atomic.LoadInt64(&c.LastPong) if lastPong > 0 { - lastPongTime := time.Unix(lastPong, 0) + lastPongTime := time.Unix(0, lastPong) if d := time.Since(lastPongTime); d > connPingInterval*2 { - logger.LogIf(ctx, fmt.Errorf("host %s last pong too old (%v); disconnecting", c.Remote, d.Round(time.Millisecond))) + gridLogIf(ctx, fmt.Errorf("host %s last pong too old (%v); disconnecting", c.Remote, d.Round(time.Millisecond))) return } } + ping := pingMsg{ + T: time.Now(), + } var err error + if pingFrame.Payload, err = ping.MarshalMsg(pingFrame.Payload[:0]); err != nil { + gridLogIf(ctx, err) // Fake it... Though this should never fail. + atomic.StoreInt64(&c.LastPong, time.Now().UnixNano()) + continue + } toSend, err = pingFrame.MarshalMsg(GetByteBuffer()[:0]) if err != nil { - logger.LogIf(ctx, err) - // Fake it... - atomic.StoreInt64(&c.LastPong, time.Now().Unix()) + gridLogIf(ctx, err) // Fake it... Though this should never fail. + atomic.StoreInt64(&c.LastPong, time.Now().UnixNano()) continue } case toSend = <-c.outQueue: @@ -1075,11 +1175,22 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) { continue } } - if len(queue) < maxMergeMessages && queueSize+len(toSend) < writeBufferSize-1024 && len(c.outQueue) > 0 { - queue = append(queue, toSend) - queueSize += len(toSend) - continue + if len(queue) < maxMergeMessages && queueSize+len(toSend) < writeBufferSize-1024 { + if len(c.outQueue) == 0 { + // Yield to allow more messages to fill. 
+ runtime.Gosched() + } + if len(c.outQueue) > 0 { + queue = append(queue, toSend) + queueSize += len(toSend) + continue + } + } + c.outMessages.Add(int64(len(queue) + 1)) + if c.outgoingBytes != nil { + c.outgoingBytes(int64(len(toSend) + queueSize)) } + c.connChange.L.Lock() for { state := c.State() @@ -1103,22 +1214,17 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) { } c.connChange.L.Unlock() if len(queue) == 0 { - // Combine writes. - buf.Reset() + // Send single message without merging. err := wsw.writeMessage(&buf, c.side, ws.OpBinary, toSend) if err != nil { - logger.LogIf(ctx, fmt.Errorf("ws writeMessage: %w", err)) + if !xnet.IsNetworkOrHostDown(err, true) { + gridLogIf(ctx, fmt.Errorf("ws writeMessage: %w", err)) + } return } PutByteBuffer(toSend) - err = conn.SetWriteDeadline(time.Now().Add(connWriteTimeout)) - if err != nil { - logger.LogIf(ctx, fmt.Errorf("conn.SetWriteDeadline: %w", err)) - return - } - _, err = buf.WriteTo(conn) - if err != nil { - logger.LogIf(ctx, fmt.Errorf("ws write: %w", err)) + + if !writeBuffer() { return } continue @@ -1130,17 +1236,17 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) { fmt.Println("Merging", len(queue), "messages") } - toSend = merged[:0] + merged = merged[:0] m := message{Op: OpMerged, Seq: uint32(len(queue))} var err error - toSend, err = m.MarshalMsg(toSend) + merged, err = m.MarshalMsg(merged) if err != nil { - logger.LogIf(ctx, fmt.Errorf("msg.MarshalMsg: %w", err)) + gridLogIf(ctx, fmt.Errorf("msg.MarshalMsg: %w", err)) return } // Append as byte slices. for _, q := range queue { - toSend = msgp.AppendBytes(toSend, q) + merged = msgp.AppendBytes(merged, q) PutByteBuffer(q) } queue = queue[:0] @@ -1148,28 +1254,20 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) { // Combine writes. // Consider avoiding buffer copy. - buf.Reset() - err = wsw.writeMessage(&buf, c.side, ws.OpBinary, toSend) + err = wsw.writeMessage(&buf, c.side, ws.OpBinary, merged) if err != nil { - logger.LogIf(ctx, fmt.Errorf("ws writeMessage: %w", err)) + if !xnet.IsNetworkOrHostDown(err, true) { + gridLogIf(ctx, fmt.Errorf("ws writeMessage: %w", err)) + } return } - // buf is our local buffer, so we can reuse it. - err = conn.SetWriteDeadline(time.Now().Add(connWriteTimeout)) - if err != nil { - logger.LogIf(ctx, fmt.Errorf("conn.SetWriteDeadline: %w", err)) - return + if cap(merged) > writeBufferSize*8 { + // If we had to send an excessively large package, reset size. + merged = make([]byte, 0, writeBufferSize) } - _, err = buf.WriteTo(conn) - if err != nil { - logger.LogIf(ctx, fmt.Errorf("ws write: %w", err)) + if !writeBuffer() { return } - - if buf.Cap() > writeBufferSize*4 { - // Reset buffer if it gets too big, so we don't keep it around. - buf = bytes.Buffer{} - } } } @@ -1202,7 +1300,7 @@ func (c *Connection) handleMsg(ctx context.Context, m message, subID *subHandler case OpMuxConnectError: c.handleConnectMuxError(ctx, m) default: - logger.LogIf(ctx, fmt.Errorf("unknown message type: %v", m.Op)) + gridLogIf(ctx, fmt.Errorf("unknown message type: %v", m.Op)) } } @@ -1211,7 +1309,7 @@ func (c *Connection) handleConnectMux(ctx context.Context, m message, subID *sub if m.Flags&FlagStateless != 0 { // Reject for now, so we can safely add it later. 
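// A standalone Go sketch of the OpMerged batching used in the read/write
// loops above: the sender appends every queued message as a msgp byte blob
// behind a header whose Seq field carries the batch size, and the receiver
// unpacks the blobs with zero-copy reads. Only github.com/tinylib/msgp is
// assumed here; the grid message header and buffer pooling are left out.
package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	queue := [][]byte{[]byte("msg-0"), []byte("msg-1"), []byte("msg-2")}

	// Sender side: append each queued message as a length-prefixed blob.
	merged := make([]byte, 0, 64)
	for _, q := range queue {
		merged = msgp.AppendBytes(merged, q)
	}

	// Receiver side: the batch size (Seq in the real header) tells us how
	// many blobs to read back; ReadBytesZC avoids copying the payloads.
	remain := merged
	for i := 0; i < len(queue); i++ {
		next, rest, err := msgp.ReadBytesZC(remain)
		if err != nil {
			panic(err)
		}
		remain = rest
		fmt.Printf("unpacked %d: %s\n", i, next)
	}
}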
if true { - logger.LogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Stateless streams not supported"})) + gridLogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Stateless streams not supported"})) return } @@ -1222,7 +1320,11 @@ func (c *Connection) handleConnectMux(ctx context.Context, m message, subID *sub handler = c.handlers.subStateless[*subID] } if handler == nil { - logger.LogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler for type"})) + msg := fmt.Sprintf("Invalid Handler for type: %v", m.Handler) + if subID != nil { + msg = fmt.Sprintf("Invalid Handler for type: %v", *subID) + } + gridLogIf(ctx, c.queueMsg(m, muxConnectError{Error: msg})) return } _, _ = c.inStream.LoadOrCompute(m.MuxID, func() *muxServer { @@ -1233,7 +1335,7 @@ func (c *Connection) handleConnectMux(ctx context.Context, m message, subID *sub var handler *StreamHandler if subID == nil { if !m.Handler.valid() { - logger.LogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler"})) + gridLogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler"})) return } handler = c.handlers.streams[m.Handler] @@ -1241,7 +1343,11 @@ func (c *Connection) handleConnectMux(ctx context.Context, m message, subID *sub handler = c.handlers.subStreams[*subID] } if handler == nil { - logger.LogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler for type"})) + msg := fmt.Sprintf("Invalid Handler for type: %v", m.Handler) + if subID != nil { + msg = fmt.Sprintf("Invalid Handler for type: %v", *subID) + } + gridLogIf(ctx, c.queueMsg(m, muxConnectError{Error: msg})) return } @@ -1257,7 +1363,7 @@ func (c *Connection) handleConnectMuxError(ctx context.Context, m message) { if v, ok := c.outgoing.Load(m.MuxID); ok { var cErr muxConnectError _, err := cErr.UnmarshalMsg(m.Payload) - logger.LogIf(ctx, err) + gridLogIf(ctx, err) v.error(RemoteErr(cErr.Error)) return } @@ -1269,7 +1375,7 @@ func (c *Connection) handleAckMux(ctx context.Context, m message) { v, ok := c.outgoing.Load(m.MuxID) if !ok { if m.Flags&FlagEOF == 0 { - logger.LogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil)) + gridLogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil)) } return } @@ -1281,7 +1387,7 @@ func (c *Connection) handleAckMux(ctx context.Context, m message) { func (c *Connection) handleRequest(ctx context.Context, m message, subID *subHandlerID) { if !m.Handler.valid() { - logger.LogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler"})) + gridLogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler"})) return } if debugReqs { @@ -1295,7 +1401,11 @@ func (c *Connection) handleRequest(ctx context.Context, m message, subID *subHan handler = c.handlers.subSingle[*subID] } if handler == nil { - logger.LogIf(ctx, c.queueMsg(m, muxConnectError{Error: "Invalid Handler for type"})) + msg := fmt.Sprintf("Invalid Handler for type: %v", m.Handler) + if subID != nil { + msg = fmt.Sprintf("Invalid Handler for type: %v", *subID) + } + gridLogIf(ctx, c.queueMsg(m, muxConnectError{Error: msg})) return } @@ -1313,7 +1423,7 @@ func (c *Connection) handleRequest(ctx context.Context, m message, subID *subHan if rec := recover(); rec != nil { err = NewRemoteErrString(fmt.Sprintf("handleMessages: panic recovered: %v", rec)) debug.PrintStack() - logger.LogIf(ctx, err) + gridLogIf(ctx, err) } }() b, err = handler(m.Payload) @@ -1346,7 +1456,7 @@ func (c *Connection) handleRequest(ctx context.Context, m message, subID *subHan m.Payload = b m.setZeroPayloadFlag() } - logger.LogIf(ctx, 
c.queueMsg(m, nil)) + gridLogIf(ctx, c.queueMsg(m, nil)) }(m) } @@ -1354,9 +1464,16 @@ func (c *Connection) handlePong(ctx context.Context, m message) { var pong pongMsg _, err := pong.UnmarshalMsg(m.Payload) PutByteBuffer(m.Payload) - logger.LogIf(ctx, err) + m.Payload = nil + + if m.MuxID == 0 { + atomic.StoreInt64(&c.LastPong, time.Now().UnixNano()) + c.lastPingDur.Store(int64(time.Since(pong.T))) + return + } + gridLogIf(ctx, err) if m.MuxID == 0 { - atomic.StoreInt64(&c.LastPong, time.Now().Unix()) + atomic.StoreInt64(&c.LastPong, time.Now().UnixNano()) return } if v, ok := c.outgoing.Load(m.MuxID); ok { @@ -1364,24 +1481,33 @@ func (c *Connection) handlePong(ctx context.Context, m message) { } else { // We don't care if the client was removed in the meantime, // but we send a disconnect message to the server just in case. - logger.LogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil)) + gridLogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil)) } } func (c *Connection) handlePing(ctx context.Context, m message) { + var ping pingMsg + if len(m.Payload) > 0 { + _, err := ping.UnmarshalMsg(m.Payload) + if err != nil { + gridLogIf(ctx, err) + } + } + // c.queueMsg will reuse m.Payload + if m.MuxID == 0 { - logger.LogIf(ctx, c.queueMsg(m, &pongMsg{})) + gridLogIf(ctx, c.queueMsg(m, &pongMsg{T: ping.T})) return } // Single calls do not support pinging. if v, ok := c.inStream.Load(m.MuxID); ok { pong := v.ping(m.Seq) - logger.LogIf(ctx, c.queueMsg(m, &pong)) + pong.T = ping.T + gridLogIf(ctx, c.queueMsg(m, &pong)) } else { - pong := pongMsg{NotFound: true} - logger.LogIf(ctx, c.queueMsg(m, &pong)) + pong := pongMsg{NotFound: true, T: ping.T} + gridLogIf(ctx, c.queueMsg(m, &pong)) } - return } func (c *Connection) handleDisconnectClientMux(m message) { @@ -1442,7 +1568,7 @@ func (c *Connection) handleMuxClientMsg(ctx context.Context, m message) { if debugPrint { fmt.Println(c.Local, "OpMuxClientMsg: Unknown Mux:", m.MuxID) } - logger.LogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil)) + gridLogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil)) PutByteBuffer(m.Payload) return } @@ -1486,7 +1612,7 @@ func (c *Connection) handleMuxServerMsg(ctx context.Context, m message) { v, ok := c.outgoing.Load(m.MuxID) if !ok { if m.Flags&FlagEOF == 0 { - logger.LogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil)) + gridLogIf(ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: m.MuxID}, nil)) } PutByteBuffer(m.Payload) return @@ -1496,18 +1622,28 @@ func (c *Connection) handleMuxServerMsg(ctx context.Context, m message) { Msg: nil, Err: RemoteErr(m.Payload), }) + if v.cancelFn != nil { + v.cancelFn(RemoteErr(m.Payload)) + } PutByteBuffer(m.Payload) - } else if m.Payload != nil { + v.close() + c.outgoing.Delete(m.MuxID) + return + } + // Return payload. + if m.Payload != nil { v.response(m.Seq, Response{ Msg: m.Payload, Err: nil, }) } + // Close when EOF. if m.Flags&FlagEOF != 0 { - if v.cancelFn != nil && m.Flags&FlagPayloadIsErr == 0 { - v.cancelFn(errStreamEOF) - } - v.close() + // We must obtain the lock before closing + // Otherwise others may pick up the error before close is called. 
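// A standalone Go sketch of the ping bookkeeping introduced above: pings now
// carry a time.Now() stamp (pingMsg.T), the pong echoes it back so the
// receiver can record a round-trip time, and LastPong is kept as UnixNano so
// the write loop can drop the connection after roughly two missed ping
// intervals. Names below (pinger, interval) are stand-ins, not grid APIs.
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type pinger struct {
	lastPong    atomic.Int64 // UnixNano of the most recent pong
	lastPingDur atomic.Int64 // last measured round trip in nanoseconds
}

// onPong records when the pong arrived and the RTT from the echoed stamp.
func (p *pinger) onPong(sentAt time.Time) {
	p.lastPong.Store(time.Now().UnixNano())
	p.lastPingDur.Store(int64(time.Since(sentAt)))
}

// stale reports whether the peer has missed roughly two ping intervals.
func (p *pinger) stale(interval time.Duration) bool {
	return time.Since(time.Unix(0, p.lastPong.Load())) > 2*interval
}

func main() {
	var p pinger
	sentAt := time.Now()             // stamp carried in the ping payload
	time.Sleep(2 * time.Millisecond) // stand-in for the network round trip
	p.onPong(sentAt)                 // peer echoed the stamp back

	fmt.Println("rtt:", time.Duration(p.lastPingDur.Load()))
	fmt.Println("stale:", p.stale(100*time.Millisecond)) // false right after a pong
}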
+ v.respMu.Lock() + v.closeLocked() + v.respMu.Unlock() if debugReqs { fmt.Println(m.MuxID, c.String(), "handleMuxServerMsg: DELETING MUX") } @@ -1522,7 +1658,7 @@ func (c *Connection) deleteMux(incoming bool, muxID uint64) { } v, loaded := c.inStream.LoadAndDelete(muxID) if loaded && v != nil { - logger.LogIf(c.ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: muxID}, nil)) + gridLogIf(c.ctx, c.queueMsg(message{Op: OpDisconnectClientMux, MuxID: muxID}, nil)) v.close() } } else { @@ -1535,7 +1671,7 @@ func (c *Connection) deleteMux(incoming bool, muxID uint64) { fmt.Println(muxID, c.String(), "deleteMux: DELETING MUX") } v.close() - logger.LogIf(c.ctx, c.queueMsg(message{Op: OpDisconnectServerMux, MuxID: muxID}, nil)) + gridLogIf(c.ctx, c.queueMsg(message{Op: OpDisconnectServerMux, MuxID: muxID}, nil)) } } } @@ -1546,11 +1682,37 @@ func (c *Connection) State() State { } // Stats returns the current connection stats. -func (c *Connection) Stats() ConnectionStats { - return ConnectionStats{ - IncomingStreams: c.inStream.Size(), - OutgoingStreams: c.outgoing.Size(), - } +func (c *Connection) Stats() madmin.RPCMetrics { + conn := 0 + if c.State() == StateConnected { + conn++ + } + var lastConn time.Time + if t := c.lastConnect.Load(); t != nil { + lastConn = *t + } + pingMS := float64(c.lastPingDur.Load()) / float64(time.Millisecond) + m := madmin.RPCMetrics{ + CollectedAt: time.Now(), + Connected: conn, + Disconnected: 1 - conn, + IncomingStreams: c.inStream.Size(), + OutgoingStreams: c.outgoing.Size(), + IncomingBytes: c.inBytes.Load(), + OutgoingBytes: c.outBytes.Load(), + IncomingMessages: c.inMessages.Load(), + OutgoingMessages: c.outMessages.Load(), + OutQueue: len(c.outQueue), + LastPongTime: time.Unix(0, c.LastPong).UTC(), + LastConnectTime: lastConn, + ReconnectCount: int(c.reconnects.Load()), + LastPingMS: pingMS, + MaxPingDurMS: pingMS, + } + m.ByDestination = map[string]madmin.RPCMetrics{ + c.Remote: m, + } + return m } func (c *Connection) debugMsg(d debugMsg, args ...any) { @@ -1586,15 +1748,20 @@ func (c *Connection) debugMsg(d debugMsg, args ...any) { case debugSetConnPingDuration: c.connMu.Lock() defer c.connMu.Unlock() - c.connPingInterval = args[0].(time.Duration) + c.connPingInterval, _ = args[0].(time.Duration) + if c.connPingInterval < time.Second { + panic("CONN ping interval too low") + } case debugSetClientPingDuration: - c.clientPingInterval = args[0].(time.Duration) + c.connMu.Lock() + defer c.connMu.Unlock() + c.clientPingInterval, _ = args[0].(time.Duration) case debugAddToDeadline: - c.addDeadline = args[0].(time.Duration) + c.addDeadline, _ = args[0].(time.Duration) case debugIsOutgoingClosed: // params: muxID uint64, isClosed func(bool) - muxID := args[0].(uint64) - resp := args[1].(func(b bool)) + muxID, _ := args[0].(uint64) + resp, _ := args[1].(func(b bool)) mid, ok := c.outgoing.Load(muxID) if !ok || mid == nil { resp(true) @@ -1603,6 +1770,12 @@ func (c *Connection) debugMsg(d debugMsg, args ...any) { mid.respMu.Lock() resp(mid.closed) mid.respMu.Unlock() + case debugBlockInboundMessages: + c.connMu.Lock() + a, _ := args[0].(chan struct{}) + block := (<-chan struct{})(a) + c.blockMessages.Store(&block) + c.connMu.Unlock() } } diff --git a/internal/grid/connection_test.go b/internal/grid/connection_test.go index f95b122e1bb63..b81e48601aeaa 100644 --- a/internal/grid/connection_test.go +++ b/internal/grid/connection_test.go @@ -51,12 +51,14 @@ func TestDisconnect(t *testing.T) { // We fake a local and remote server. 
localHost := hosts[0] remoteHost := hosts[1] - local, err := NewManager(context.Background(), ManagerOptions{ - Dialer: dialer.DialContext, + local, err := NewManager(t.Context(), ManagerOptions{ + Dialer: ConnectWS(dialer.DialContext, + dummyNewToken, + nil), Local: localHost, Hosts: hosts, - AddAuth: func(aud string) string { return aud }, - AuthRequest: dummyRequestValidate, + AuthFn: dummyNewToken, + AuthToken: dummyTokenValidate, BlockConnect: connReady, }) errFatal(err) @@ -73,18 +75,20 @@ func TestDisconnect(t *testing.T) { return nil, &err })) - remote, err := NewManager(context.Background(), ManagerOptions{ - Dialer: dialer.DialContext, + remote, err := NewManager(t.Context(), ManagerOptions{ + Dialer: ConnectWS(dialer.DialContext, + dummyNewToken, + nil), Local: remoteHost, Hosts: hosts, - AddAuth: func(aud string) string { return aud }, - AuthRequest: dummyRequestValidate, + AuthFn: dummyNewToken, + AuthToken: dummyTokenValidate, BlockConnect: connReady, }) errFatal(err) - localServer := startServer(t, listeners[0], wrapServer(local.Handler())) - remoteServer := startServer(t, listeners[1], wrapServer(remote.Handler())) + localServer := startServer(t, listeners[0], wrapServer(local.Handler(dummyRequestValidate))) + remoteServer := startServer(t, listeners[1], wrapServer(remote.Handler(dummyRequestValidate))) close(connReady) defer func() { @@ -127,14 +131,14 @@ func TestDisconnect(t *testing.T) { // local to remote remoteConn := local.Connection(remoteHost) - errFatal(remoteConn.WaitForConnect(context.Background())) + errFatal(remoteConn.WaitForConnect(t.Context())) const testPayload = "Hello Grid World!" gotResp := make(chan struct{}) go func() { start := time.Now() t.Log("Roundtrip: sending request") - resp, err := remoteConn.Request(context.Background(), handlerTest, []byte(testPayload)) + resp, err := remoteConn.Request(t.Context(), handlerTest, []byte(testPayload)) t.Log("Roundtrip:", time.Since(start), resp, err) gotResp <- struct{}{} }() @@ -144,9 +148,9 @@ func TestDisconnect(t *testing.T) { <-gotResp // Must reconnect - errFatal(remoteConn.WaitForConnect(context.Background())) + errFatal(remoteConn.WaitForConnect(t.Context())) - stream, err := remoteConn.NewStream(context.Background(), handlerTest2, []byte(testPayload)) + stream, err := remoteConn.NewStream(t.Context(), handlerTest2, []byte(testPayload)) errFatal(err) go func() { for resp := range stream.responses { @@ -158,17 +162,13 @@ func TestDisconnect(t *testing.T) { <-gotCall remote.debugMsg(debugKillOutbound) local.debugMsg(debugKillOutbound) - errFatal(remoteConn.WaitForConnect(context.Background())) + errFatal(remoteConn.WaitForConnect(t.Context())) <-gotResp // Killing should cancel the context on the request. <-gotCall } -func dummyRequestValidate(r *http.Request) error { - return nil -} - func TestShouldConnect(t *testing.T) { var c Connection var cReverse Connection diff --git a/internal/grid/debug.go b/internal/grid/debug.go index 0172f87e26dc2..f80559f3fd82d 100644 --- a/internal/grid/debug.go +++ b/internal/grid/debug.go @@ -26,7 +26,6 @@ import ( "sync" "time" - xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/mux" ) @@ -50,6 +49,7 @@ const ( debugSetClientPingDuration debugAddToDeadline debugIsOutgoingClosed + debugBlockInboundMessages ) // TestGrid contains a grid of servers for testing purposes. 
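// A standalone Go sketch of the new manager wiring exercised by the tests
// above: the old AddAuth/AuthRequest options are replaced by a websocket
// ConnDialer (ConnectWS), a token signer (AuthFn), a token validator
// (AuthToken), and a per-request validator passed to Handler. The static
// "debug" token and the no-op validators are placeholders, and the grid
// import path is internal to the MinIO module.
package main

import (
	"context"
	"net"
	"net/http"
	"time"

	"github.com/minio/minio/internal/grid"
	"github.com/minio/mux"
)

func main() {
	hosts := []string{"http://127.0.0.1:9001", "http://127.0.0.1:9002"}
	dialer := &net.Dialer{Timeout: 2 * time.Second}

	newToken := func() string { return "debug" }               // AuthFn: token attached to outgoing dials
	checkToken := func(token string) error { return nil }      // AuthToken: validates that token on accept
	checkRequest := func(r *http.Request) error { return nil } // request-level checks for the HTTP handler

	mgr, err := grid.NewManager(context.Background(), grid.ManagerOptions{
		Dialer:    grid.ConnectWS(dialer.DialContext, newToken, nil),
		Local:     hosts[0],
		Hosts:     hosts,
		AuthFn:    newToken,
		AuthToken: checkToken,
		RoutePath: grid.RoutePath,
	})
	if err != nil {
		panic(err)
	}

	// Incoming side: the request validator is now handed to Handler directly
	// instead of living in ManagerOptions.
	router := mux.NewRouter()
	router.Handle(grid.RoutePath, mgr.Handler(checkRequest))
	_ = http.ListenAndServe(":9001", router)
}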
@@ -81,26 +81,27 @@ func SetupTestGrid(n int) (*TestGrid, error) { res.cancel = cancel for i, host := range hosts { manager, err := NewManager(ctx, ManagerOptions{ - Dialer: dialer.DialContext, - Local: host, - Hosts: hosts, - AuthRequest: func(r *http.Request) error { - return nil - }, - AddAuth: func(aud string) string { return aud }, + Dialer: ConnectWS(dialer.DialContext, + dummyNewToken, + nil), + Local: host, + Hosts: hosts, + AuthFn: dummyNewToken, + AuthToken: dummyTokenValidate, BlockConnect: ready, + RoutePath: RoutePath, }) if err != nil { return nil, err } m := mux.NewRouter() - m.Handle(RoutePath, manager.Handler()) + m.Handle(RoutePath, manager.Handler(dummyRequestValidate)) res.Managers = append(res.Managers, manager) res.Servers = append(res.Servers, startHTTPServer(listeners[i], m)) res.Listeners = append(res.Listeners, listeners[i]) res.Mux = append(res.Mux, m) } - xioutil.SafeClose(ready) + close(ready) for _, m := range res.Managers { for _, remote := range m.Targets() { if err := m.Connection(remote).WaitForConnect(ctx); err != nil { @@ -142,7 +143,7 @@ func (t *TestGrid) WaitAllConnect(ctx context.Context) { } func getHosts(n int) (hosts []string, listeners []net.Listener, err error) { - for i := 0; i < n; i++ { + for range n { l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { if l, err = net.Listen("tcp6", "[::1]:0"); err != nil { @@ -153,7 +154,7 @@ func getHosts(n int) (hosts []string, listeners []net.Listener, err error) { hosts = append(hosts, "http://"+addr.String()) listeners = append(listeners, l) } - return + return hosts, listeners, err } func startHTTPServer(listener net.Listener, handler http.Handler) (server *httptest.Server) { @@ -163,3 +164,18 @@ func startHTTPServer(listener net.Listener, handler http.Handler) (server *httpt server.Start() return server } + +func dummyRequestValidate(r *http.Request) error { + return nil +} + +func dummyTokenValidate(token string) error { + if token == "debug" { + return nil + } + return fmt.Errorf("invalid token. 
want empty, got %s", token) +} + +func dummyNewToken() string { + return "debug" +} diff --git a/internal/grid/debugmsg_string.go b/internal/grid/debugmsg_string.go index a84f811b5624a..52c92cb4b28a8 100644 --- a/internal/grid/debugmsg_string.go +++ b/internal/grid/debugmsg_string.go @@ -16,11 +16,12 @@ func _() { _ = x[debugSetClientPingDuration-5] _ = x[debugAddToDeadline-6] _ = x[debugIsOutgoingClosed-7] + _ = x[debugBlockInboundMessages-8] } -const _debugMsg_name = "debugShutdowndebugKillInbounddebugKillOutbounddebugWaitForExitdebugSetConnPingDurationdebugSetClientPingDurationdebugAddToDeadlinedebugIsOutgoingClosed" +const _debugMsg_name = "debugShutdowndebugKillInbounddebugKillOutbounddebugWaitForExitdebugSetConnPingDurationdebugSetClientPingDurationdebugAddToDeadlinedebugIsOutgoingCloseddebugBlockInboundMessages" -var _debugMsg_index = [...]uint8{0, 13, 29, 46, 62, 86, 112, 130, 151} +var _debugMsg_index = [...]uint8{0, 13, 29, 46, 62, 86, 112, 130, 151, 176} func (i debugMsg) String() string { if i < 0 || i >= debugMsg(len(_debugMsg_index)-1) { diff --git a/internal/grid/grid.go b/internal/grid/grid.go index 5034e1a8e4294..42872b06365e8 100644 --- a/internal/grid/grid.go +++ b/internal/grid/grid.go @@ -20,13 +20,19 @@ package grid import ( "context" + "crypto/tls" "errors" "fmt" "io" - "sync" + "net" + "net/http" + "strconv" + "strings" "time" + "github.com/gobwas/ws" "github.com/gobwas/ws/wsutil" + "github.com/minio/minio/internal/bpool" ) // ErrDisconnected is returned when the connection to the remote has been lost during the call. @@ -42,10 +48,16 @@ const ( // maxBufferSize is the maximum buffer size. // Buffers larger than this is not reused. - maxBufferSize = 64 << 10 + maxBufferSize = 96 << 10 + + // This is the assumed size of bigger buffers and allocation size. + biggerBufMin = 32 << 10 + + // This is the maximum size of bigger buffers. + biggerBufMax = maxBufferSize // If there is a queue, merge up to this many messages. - maxMergeMessages = 30 + maxMergeMessages = 50 // clientPingInterval will ping the remote handler every 15 seconds. // Clients disconnect when we exceed 2 intervals. @@ -56,26 +68,57 @@ const ( defaultSingleRequestTimeout = time.Minute ) -var internalByteBuffer = sync.Pool{ - New: func() any { +var internalByteBuffer = bpool.Pool[*[]byte]{ + New: func() *[]byte { m := make([]byte, 0, defaultBufferSize) return &m }, } +var internal32KByteBuffer = bpool.Pool[*[]byte]{ + New: func() *[]byte { + m := make([]byte, 0, biggerBufMin) + return &m + }, +} + // GetByteBuffer can be replaced with a function that returns a small // byte buffer. // When replacing PutByteBuffer should also be replaced // There is no minimum size. var GetByteBuffer = func() []byte { - b := *internalByteBuffer.Get().(*[]byte) + b := *internalByteBuffer.Get() return b[:0] } +// GetByteBufferCap returns a length 0 byte buffer with at least the given capacity. +func GetByteBufferCap(wantSz int) []byte { + if wantSz < defaultBufferSize { + b := GetByteBuffer()[:0] + if cap(b) >= wantSz { + return b + } + PutByteBuffer(b) + } + if wantSz <= maxBufferSize { + b := *internal32KByteBuffer.Get() + if cap(b) >= wantSz { + return b[:0] + } + internal32KByteBuffer.Put(&b) + } + return make([]byte, 0, wantSz) +} + // PutByteBuffer is for returning byte buffers. 
var PutByteBuffer = func(b []byte) { - if cap(b) >= minBufferSize && cap(b) < maxBufferSize { + if cap(b) >= biggerBufMin && cap(b) < biggerBufMax { + internal32KByteBuffer.Put(&b) + return + } + if cap(b) >= minBufferSize && cap(b) < biggerBufMin { internalByteBuffer.Put(&b) + return } } @@ -83,7 +126,8 @@ var PutByteBuffer = func(b []byte) { // A successful call returns err == nil, not err == EOF. Because readAllInto is // defined to read from src until EOF, it does not treat an EOF from Read // as an error to be reported. -func readAllInto(b []byte, r *wsutil.Reader) ([]byte, error) { +func readAllInto(b []byte, r *wsutil.Reader, want int64) ([]byte, error) { + read := int64(0) for { if len(b) == cap(b) { // Add more capacity (let append pick how much). @@ -93,10 +137,18 @@ func readAllInto(b []byte, r *wsutil.Reader) ([]byte, error) { b = b[:len(b)+n] if err != nil { if errors.Is(err, io.EOF) { + if want >= 0 && read+int64(n) != want { + return nil, io.ErrUnexpectedEOF + } err = nil } return b, err } + read += int64(n) + if want >= 0 && read == want { + // No need to read more... + return b, nil + } } } @@ -117,11 +169,7 @@ type writerWrapper struct { } func (w *writerWrapper) Write(p []byte) (n int, err error) { - buf := GetByteBuffer() - if cap(buf) < len(p) { - PutByteBuffer(buf) - buf = make([]byte, len(p)) - } + buf := GetByteBufferCap(len(p)) buf = buf[:len(p)] copy(buf, p) select { @@ -144,144 +192,52 @@ func bytesOrLength(b []byte) string { if len(b) > 100 { return fmt.Sprintf("%d bytes", len(b)) } - return fmt.Sprint(b) -} - -type lockedClientMap struct { - m map[uint64]*muxClient - mu sync.Mutex -} - -func (m *lockedClientMap) Load(id uint64) (*muxClient, bool) { - m.mu.Lock() - v, ok := m.m[id] - m.mu.Unlock() - return v, ok -} - -func (m *lockedClientMap) LoadAndDelete(id uint64) (*muxClient, bool) { - m.mu.Lock() - v, ok := m.m[id] - if ok { - delete(m.m, id) - } - m.mu.Unlock() - return v, ok -} - -func (m *lockedClientMap) Size() int { - m.mu.Lock() - v := len(m.m) - m.mu.Unlock() - return v -} - -func (m *lockedClientMap) Delete(id uint64) { - m.mu.Lock() - delete(m.m, id) - m.mu.Unlock() -} - -func (m *lockedClientMap) Range(fn func(key uint64, value *muxClient) bool) { - m.mu.Lock() - defer m.mu.Unlock() - for k, v := range m.m { - if !fn(k, v) { - break + return fmt.Sprint(string(b)) +} + +// ConnDialer is a function that dials a connection to the given address. +// There should be no retries in this function, +// and should have a timeout of something like 2 seconds. +// The returned net.Conn should also have quick disconnect on errors. +// The net.Conn must support all features as described by the net.Conn interface. +type ConnDialer func(ctx context.Context, address string) (net.Conn, error) + +// ConnectWSWithRoutePath is like ConnectWS but with a custom grid route path. 
+func ConnectWSWithRoutePath(dial ContextDialer, auth AuthFn, tls *tls.Config, routePath string) func(ctx context.Context, remote string) (net.Conn, error) { + return func(ctx context.Context, remote string) (net.Conn, error) { + toDial := strings.Replace(remote, "http://", "ws://", 1) + toDial = strings.Replace(toDial, "https://", "wss://", 1) + toDial += routePath + + dialer := ws.DefaultDialer + dialer.ReadBufferSize = readBufferSize + dialer.WriteBufferSize = writeBufferSize + dialer.Timeout = defaultDialTimeout + if dial != nil { + dialer.NetDial = dial } - } -} - -func (m *lockedClientMap) Clear() { - m.mu.Lock() - m.m = map[uint64]*muxClient{} - m.mu.Unlock() -} - -func (m *lockedClientMap) LoadOrStore(id uint64, v *muxClient) (*muxClient, bool) { - m.mu.Lock() - v2, ok := m.m[id] - if ok { - m.mu.Unlock() - return v2, true - } - m.m[id] = v - m.mu.Unlock() - return v, false -} - -type lockedServerMap struct { - m map[uint64]*muxServer - mu sync.Mutex -} - -func (m *lockedServerMap) Load(id uint64) (*muxServer, bool) { - m.mu.Lock() - v, ok := m.m[id] - m.mu.Unlock() - return v, ok -} + header := make(http.Header, 2) + header.Set("Authorization", "Bearer "+auth()) + header.Set("X-Minio-Time", strconv.FormatInt(time.Now().UnixNano(), 10)) -func (m *lockedServerMap) LoadAndDelete(id uint64) (*muxServer, bool) { - m.mu.Lock() - v, ok := m.m[id] - if ok { - delete(m.m, id) - } - m.mu.Unlock() - return v, ok -} - -func (m *lockedServerMap) Size() int { - m.mu.Lock() - v := len(m.m) - m.mu.Unlock() - return v -} - -func (m *lockedServerMap) Delete(id uint64) { - m.mu.Lock() - delete(m.m, id) - m.mu.Unlock() -} + if len(header) > 0 { + dialer.Header = ws.HandshakeHeaderHTTP(header) + } + dialer.TLSConfig = tls -func (m *lockedServerMap) Range(fn func(key uint64, value *muxServer) bool) { - m.mu.Lock() - for k, v := range m.m { - if !fn(k, v) { - break + conn, br, _, err := dialer.Dial(ctx, toDial) + if br != nil { + ws.PutReader(br) } + return conn, err } - m.mu.Unlock() -} - -func (m *lockedServerMap) Clear() { - m.mu.Lock() - m.m = map[uint64]*muxServer{} - m.mu.Unlock() } -func (m *lockedServerMap) LoadOrStore(id uint64, v *muxServer) (*muxServer, bool) { - m.mu.Lock() - v2, ok := m.m[id] - if ok { - m.mu.Unlock() - return v2, true - } - m.m[id] = v - m.mu.Unlock() - return v, false +// ConnectWS returns a function that dials a websocket connection to the given address. +// Route and auth are added to the connection. +func ConnectWS(dial ContextDialer, auth AuthFn, tls *tls.Config) func(ctx context.Context, remote string) (net.Conn, error) { + return ConnectWSWithRoutePath(dial, auth, tls, RoutePath) } -func (m *lockedServerMap) LoadOrCompute(id uint64, fn func() *muxServer) (*muxServer, bool) { - m.mu.Lock() - v2, ok := m.m[id] - if ok { - m.mu.Unlock() - return v2, true - } - v := fn() - m.m[id] = v - m.mu.Unlock() - return v, false -} +// ValidateTokenFn must validate the token and return an error if it is invalid. 
+type ValidateTokenFn func(token string) error diff --git a/internal/grid/grid_test.go b/internal/grid/grid_test.go index 75ce9d35d9fec..3d1156e6807ef 100644 --- a/internal/grid/grid_test.go +++ b/internal/grid/grid_test.go @@ -22,10 +22,12 @@ import ( "context" "errors" "fmt" + "maps" "os" "runtime" "strconv" "strings" + "sync" "testing" "time" @@ -73,14 +75,14 @@ func TestSingleRoundtrip(t *testing.T) { // local to remote remoteConn := local.Connection(remoteHost) - remoteConn.WaitForConnect(context.Background()) + remoteConn.WaitForConnect(t.Context()) defer testlogger.T.SetErrorTB(t)() t.Run("localToRemote", func(t *testing.T) { const testPayload = "Hello Grid World!" start := time.Now() - resp, err := remoteConn.Request(context.Background(), handlerTest, []byte(testPayload)) + resp, err := remoteConn.Request(t.Context(), handlerTest, []byte(testPayload)) errFatal(err) if string(resp) != testPayload { t.Errorf("want %q, got %q", testPayload, string(resp)) @@ -91,7 +93,7 @@ func TestSingleRoundtrip(t *testing.T) { t.Run("localToRemoteErr", func(t *testing.T) { const testPayload = "Hello Grid World!" start := time.Now() - resp, err := remoteConn.Request(context.Background(), handlerTest2, []byte(testPayload)) + resp, err := remoteConn.Request(t.Context(), handlerTest2, []byte(testPayload)) t.Log("Roundtrip:", time.Since(start)) if len(resp) != 0 { t.Errorf("want nil, got %q", string(resp)) @@ -106,7 +108,7 @@ func TestSingleRoundtrip(t *testing.T) { testPayload := bytes.Repeat([]byte("?"), 1<<20) start := time.Now() - resp, err := remoteConn.Request(context.Background(), handlerTest, testPayload) + resp, err := remoteConn.Request(t.Context(), handlerTest, testPayload) errFatal(err) if string(resp) != string(testPayload) { t.Errorf("want %q, got %q", testPayload, string(resp)) @@ -118,7 +120,7 @@ func TestSingleRoundtrip(t *testing.T) { testPayload := bytes.Repeat([]byte("!"), 1<<10) start := time.Now() - resp, err := remoteConn.Request(context.Background(), handlerTest2, testPayload) + resp, err := remoteConn.Request(t.Context(), handlerTest2, testPayload) if len(resp) != 0 { t.Errorf("want nil, got %q", string(resp)) } @@ -158,19 +160,19 @@ func TestSingleRoundtripNotReady(t *testing.T) { // local to remote remoteConn := local.Connection(remoteHost) - remoteConn.WaitForConnect(context.Background()) + remoteConn.WaitForConnect(t.Context()) defer testlogger.T.SetErrorTB(t)() t.Run("localToRemote", func(t *testing.T) { const testPayload = "Hello Grid World!" // Single requests should have remote errors. - _, err := remoteConn.Request(context.Background(), handlerTest, []byte(testPayload)) - if v, ok := err.(*RemoteErr); !ok || v.Error() != "Invalid Handler for type" { + _, err := remoteConn.Request(t.Context(), handlerTest, []byte(testPayload)) + if _, ok := err.(*RemoteErr); !ok { t.Fatalf("Unexpected error: %v, %T", err, err) } // Streams should not be able to set up until registered. // Thus, the error is a local error. 
- _, err = remoteConn.NewStream(context.Background(), handlerTest, []byte(testPayload)) + _, err = remoteConn.NewStream(t.Context(), handlerTest, []byte(testPayload)) if !errors.Is(err, ErrUnknownHandler) { t.Fatalf("Unexpected error: %v, %T", err, err) } @@ -225,7 +227,7 @@ func TestSingleRoundtripGenerics(t *testing.T) { start := time.Now() req := testRequest{Num: 1, String: testPayload} - resp, err := h1.Call(context.Background(), remoteConn, &req) + resp, err := h1.Call(t.Context(), remoteConn, &req) errFatal(err) if resp.OrgString != testPayload { t.Errorf("want %q, got %q", testPayload, resp.OrgString) @@ -234,7 +236,7 @@ func TestSingleRoundtripGenerics(t *testing.T) { h1.PutResponse(resp) start = time.Now() - resp, err = h2.Call(context.Background(), remoteConn, &testRequest{Num: 1, String: testPayload}) + resp, err = h2.Call(t.Context(), remoteConn, &testRequest{Num: 1, String: testPayload}) t.Log("Roundtrip:", time.Since(start)) if err != RemoteErr(testPayload) { t.Errorf("want error %v(%T), got %v(%T)", RemoteErr(testPayload), RemoteErr(testPayload), err, err) @@ -265,9 +267,7 @@ func TestSingleRoundtripGenericsRecycle(t *testing.T) { // Handles incoming requests, returns a response handler1 := func(req *MSS) (resp *MSS, err *RemoteErr) { resp = h1.NewResponse() - for k, v := range *req { - (*resp)[k] = v - } + maps.Copy((*resp), *req) return resp, nil } // Return error @@ -289,7 +289,7 @@ func TestSingleRoundtripGenericsRecycle(t *testing.T) { start := time.Now() req := NewMSSWith(map[string]string{"test": testPayload}) - resp, err := h1.Call(context.Background(), remoteConn, req) + resp, err := h1.Call(t.Context(), remoteConn, req) errFatal(err) if resp.Get("test") != testPayload { t.Errorf("want %q, got %q", testPayload, resp.Get("test")) @@ -298,7 +298,7 @@ func TestSingleRoundtripGenericsRecycle(t *testing.T) { h1.PutResponse(resp) start = time.Now() - resp, err = h2.Call(context.Background(), remoteConn, NewMSSWith(map[string]string{"err": testPayload})) + resp, err = h2.Call(t.Context(), remoteConn, NewMSSWith(map[string]string{"err": testPayload})) t.Log("Roundtrip:", time.Since(start)) if err != RemoteErr(testPayload) { t.Errorf("want error %v(%T), got %v(%T)", RemoteErr(testPayload), RemoteErr(testPayload), err, err) @@ -378,6 +378,54 @@ func TestStreamSuite(t *testing.T) { assertNoActive(t, connRemoteLocal) assertNoActive(t, connLocalToRemote) }) + t.Run("testServerStreamOnewayNoPing", func(t *testing.T) { + defer timeout(1 * time.Minute)() + testServerStreamNoPing(t, local, remote, 0) + assertNoActive(t, connRemoteLocal) + assertNoActive(t, connLocalToRemote) + }) + t.Run("testServerStreamTwowayNoPing", func(t *testing.T) { + defer timeout(1 * time.Minute)() + testServerStreamNoPing(t, local, remote, 1) + assertNoActive(t, connRemoteLocal) + assertNoActive(t, connLocalToRemote) + }) + t.Run("testServerStreamTwowayPing", func(t *testing.T) { + defer timeout(1 * time.Minute)() + testServerStreamPingRunning(t, local, remote, 1, false, false) + assertNoActive(t, connRemoteLocal) + assertNoActive(t, connLocalToRemote) + }) + t.Run("testServerStreamTwowayPingReq", func(t *testing.T) { + defer timeout(1 * time.Minute)() + testServerStreamPingRunning(t, local, remote, 1, false, true) + assertNoActive(t, connRemoteLocal) + assertNoActive(t, connLocalToRemote) + }) + t.Run("testServerStreamTwowayPingResp", func(t *testing.T) { + defer timeout(1 * time.Minute)() + testServerStreamPingRunning(t, local, remote, 1, true, false) + assertNoActive(t, connRemoteLocal) + 
assertNoActive(t, connLocalToRemote) + }) + t.Run("testServerStreamTwowayPingReqResp", func(t *testing.T) { + defer timeout(1 * time.Minute)() + testServerStreamPingRunning(t, local, remote, 1, true, true) + assertNoActive(t, connRemoteLocal) + assertNoActive(t, connLocalToRemote) + }) + t.Run("testServerStreamOnewayPing", func(t *testing.T) { + defer timeout(1 * time.Minute)() + testServerStreamPingRunning(t, local, remote, 0, false, true) + assertNoActive(t, connRemoteLocal) + assertNoActive(t, connLocalToRemote) + }) + t.Run("testServerStreamOnewayPingUnblocked", func(t *testing.T) { + defer timeout(1 * time.Minute)() + testServerStreamPingRunning(t, local, remote, 0, false, false) + assertNoActive(t, connRemoteLocal) + assertNoActive(t, connLocalToRemote) + }) } func testStreamRoundtrip(t *testing.T, local, remote *Manager) { @@ -430,7 +478,7 @@ func testStreamRoundtrip(t *testing.T, local, remote *Manager) { const testPayload = "Hello Grid World!" start := time.Now() - stream, err := remoteConn.NewStream(context.Background(), handlerTest, []byte(testPayload)) + stream, err := remoteConn.NewStream(t.Context(), handlerTest, []byte(testPayload)) errFatal(err) var n int stream.Requests <- []byte(strconv.Itoa(n)) @@ -470,7 +518,7 @@ func testStreamCancel(t *testing.T, local, remote *Manager) { Handle: func(ctx context.Context, payload []byte, request <-chan []byte, resp chan<- []byte) *RemoteErr { <-ctx.Done() serverCanceled <- struct{}{} - t.Log(GetCaller(ctx).Name, "Server Context canceled") + fmt.Println(GetCaller(ctx).Name, "Server Context canceled") return nil }, OutCapacity: 1, @@ -480,7 +528,7 @@ func testStreamCancel(t *testing.T, local, remote *Manager) { Handle: func(ctx context.Context, payload []byte, request <-chan []byte, resp chan<- []byte) *RemoteErr { <-ctx.Done() serverCanceled <- struct{}{} - t.Log(GetCaller(ctx).Name, "Server Context canceled") + fmt.Println(GetCaller(ctx).Name, "Server Context canceled") return nil }, OutCapacity: 1, @@ -491,12 +539,12 @@ func testStreamCancel(t *testing.T, local, remote *Manager) { register(remote) // local to remote - testHandler := func(t *testing.T, handler HandlerID) { + testHandler := func(t *testing.T, handler HandlerID, sendReq bool) { remoteConn := local.Connection(remoteHost) const testPayload = "Hello Grid World!" - ctx, cancel := context.WithCancel(context.Background()) - st, err := remoteConn.NewStream(ctx, handlerTest, []byte(testPayload)) + ctx, cancel := context.WithCancel(t.Context()) + st, err := remoteConn.NewStream(ctx, handler, []byte(testPayload)) errFatal(err) clientCanceled := make(chan time.Time, 1) err = nil @@ -513,6 +561,18 @@ func testStreamCancel(t *testing.T, local, remote *Manager) { clientCanceled <- time.Now() }(t) start := time.Now() + if st.Requests != nil { + defer close(st.Requests) + } + // Fill up queue. 
+ for sendReq { + select { + case st.Requests <- []byte("Hello"): + time.Sleep(10 * time.Millisecond) + default: + sendReq = false + } + } cancel() <-serverCanceled t.Log("server cancel time:", time.Since(start)) @@ -524,11 +584,13 @@ func testStreamCancel(t *testing.T, local, remote *Manager) { } // local to remote, unbuffered t.Run("unbuffered", func(t *testing.T) { - testHandler(t, handlerTest) + testHandler(t, handlerTest, false) }) - t.Run("buffered", func(t *testing.T) { - testHandler(t, handlerTest2) + testHandler(t, handlerTest2, false) + }) + t.Run("buffered", func(t *testing.T) { + testHandler(t, handlerTest2, true) }) } @@ -596,7 +658,7 @@ func testStreamDeadline(t *testing.T, local, remote *Manager) { remoteConn := local.Connection(remoteHost) const testPayload = "Hello Grid World!" - ctx, cancel := context.WithTimeout(context.Background(), wantDL) + ctx, cancel := context.WithTimeout(t.Context(), wantDL) defer cancel() st, err := remoteConn.NewStream(ctx, handler, []byte(testPayload)) errFatal(err) @@ -645,7 +707,7 @@ func testServerOutCongestion(t *testing.T, local, remote *Manager) { Handle: func(ctx context.Context, payload []byte, request <-chan []byte, resp chan<- []byte) *RemoteErr { // Send many responses. // Test that this doesn't block. - for i := byte(0); i < 100; i++ { + for i := range byte(100) { select { case resp <- []byte{i}: // ok @@ -672,7 +734,7 @@ func testServerOutCongestion(t *testing.T, local, remote *Manager) { remoteConn := local.Connection(remoteHost) const testPayload = "Hello Grid World!" - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + ctx, cancel := context.WithTimeout(t.Context(), time.Minute) defer cancel() st, err := remoteConn.NewStream(ctx, handlerTest, []byte(testPayload)) errFatal(err) @@ -681,7 +743,7 @@ func testServerOutCongestion(t *testing.T, local, remote *Manager) { <-serverSent // Now do 100 other requests to ensure that the server doesn't block. - for i := 0; i < 100; i++ { + for range 100 { _, err := remoteConn.Request(ctx, handlerTest2, []byte(testPayload)) errFatal(err) } @@ -750,20 +812,20 @@ func testServerInCongestion(t *testing.T, local, remote *Manager) { remoteConn := local.Connection(remoteHost) const testPayload = "Hello Grid World!" - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + ctx, cancel := context.WithTimeout(t.Context(), time.Minute) defer cancel() st, err := remoteConn.NewStream(ctx, handlerTest, []byte(testPayload)) errFatal(err) // Start sending requests. go func() { - for i := byte(0); i < 100; i++ { + for i := range byte(100) { st.Requests <- []byte{i} } close(st.Requests) }() // Now do 100 other requests to ensure that the server doesn't block. - for i := 0; i < 100; i++ { + for range 100 { _, err := remoteConn.Request(ctx, handlerTest2, []byte(testPayload)) errFatal(err) } @@ -830,11 +892,11 @@ func testGenericsStreamRoundtrip(t *testing.T, local, remote *Manager) { const testPayload = "Hello Grid World!" 
start := time.Now() - stream, err := handler.Call(context.Background(), remoteConn, &testRequest{Num: 1, String: testPayload}) + stream, err := handler.Call(t.Context(), remoteConn, &testRequest{Num: 1, String: testPayload}) errFatal(err) go func() { defer close(stream.Requests) - for i := 0; i < payloads; i++ { + for i := range payloads { // t.Log("sending new client request") stream.Requests <- &testRequest{Num: i, String: testPayload} } @@ -907,11 +969,11 @@ func testGenericsStreamRoundtripSubroute(t *testing.T, local, remote *Manager) { remoteSub := remoteConn.Subroute(strings.Join([]string{"subroute", "1"}, "/")) start := time.Now() - stream, err := handler.Call(context.Background(), remoteSub, &testRequest{Num: 1, String: testPayload}) + stream, err := handler.Call(t.Context(), remoteSub, &testRequest{Num: 1, String: testPayload}) errFatal(err) go func() { defer close(stream.Requests) - for i := 0; i < payloads; i++ { + for i := range payloads { // t.Log("sending new client request") stream.Requests <- &testRequest{Num: i, String: testPayload} } @@ -956,7 +1018,7 @@ func testServerStreamResponseBlocked(t *testing.T, local, remote *Manager) { Handle: func(ctx context.Context, payload []byte, _ <-chan []byte, resp chan<- []byte) *RemoteErr { // Send many responses. // Test that this doesn't block. - for i := byte(0); i < 100; i++ { + for i := range byte(100) { select { case resp <- []byte{i}: // ok @@ -980,7 +1042,7 @@ func testServerStreamResponseBlocked(t *testing.T, local, remote *Manager) { remoteConn := local.Connection(remoteHost) const testPayload = "Hello Grid World!" - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 5*time.Second) st, err := remoteConn.NewStream(ctx, handlerTest, []byte(testPayload)) errFatal(err) @@ -1025,6 +1087,170 @@ func testServerStreamResponseBlocked(t *testing.T, local, remote *Manager) { } } +// testServerStreamNoPing will test if server and client handle no pings. +func testServerStreamNoPing(t *testing.T, local, remote *Manager, inCap int) { + defer testlogger.T.SetErrorTB(t)() + errFatal := func(err error) { + t.Helper() + if err != nil { + t.Fatal(err) + } + } + + // We fake a local and remote server. + remoteHost := remote.HostName() + + // 1: Echo + reqStarted := make(chan struct{}) + serverCanceled := make(chan struct{}) + register := func(manager *Manager) { + errFatal(manager.RegisterStreamingHandler(handlerTest, StreamHandler{ + Handle: func(ctx context.Context, payload []byte, _ <-chan []byte, resp chan<- []byte) *RemoteErr { + close(reqStarted) + // Just wait for it to cancel. + <-ctx.Done() + close(serverCanceled) + return NewRemoteErr(ctx.Err()) + }, + OutCapacity: 1, + InCapacity: inCap, + })) + } + register(local) + register(remote) + + remoteConn := local.Connection(remoteHost) + const testPayload = "Hello Grid World!" + remoteConn.debugMsg(debugSetClientPingDuration, 100*time.Millisecond) + defer remoteConn.debugMsg(debugSetClientPingDuration, clientPingInterval) + + ctx, cancel := context.WithTimeout(t.Context(), time.Minute) + defer cancel() + st, err := remoteConn.NewStream(ctx, handlerTest, []byte(testPayload)) + errFatal(err) + + // Wait for the server start the request. + <-reqStarted + + // Stop processing requests + nowBlocking := make(chan struct{}) + remoteConn.debugMsg(debugBlockInboundMessages, nowBlocking) + + // Check that local returned. 
+ err = st.Results(func(b []byte) error { + return nil + }) + if err == nil { + t.Fatal("expected error, got nil") + } + t.Logf("response: %v", err) + + // Check that remote is canceled. + <-serverCanceled + close(nowBlocking) +} + +// testServerStreamPingRunning will test if server and client handle ping even when blocked. +func testServerStreamPingRunning(t *testing.T, local, remote *Manager, inCap int, blockResp, blockReq bool) { + defer testlogger.T.SetErrorTB(t)() + errFatal := func(err error) { + t.Helper() + if err != nil { + t.Fatal(err) + } + } + + // We fake a local and remote server. + remoteHost := remote.HostName() + + // 1: Echo + reqStarted := make(chan struct{}) + serverCanceled := make(chan struct{}) + register := func(manager *Manager) { + errFatal(manager.RegisterStreamingHandler(handlerTest, StreamHandler{ + Handle: func(ctx context.Context, payload []byte, req <-chan []byte, resp chan<- []byte) *RemoteErr { + close(reqStarted) + // Just wait for it to cancel. + for blockResp { + select { + case <-ctx.Done(): + close(serverCanceled) + return NewRemoteErr(ctx.Err()) + case resp <- []byte{1}: + time.Sleep(10 * time.Millisecond) + } + } + // Just wait for it to cancel. + <-ctx.Done() + close(serverCanceled) + return NewRemoteErr(ctx.Err()) + }, + OutCapacity: 1, + InCapacity: inCap, + })) + } + register(local) + register(remote) + + remoteConn := local.Connection(remoteHost) + const testPayload = "Hello Grid World!" + remoteConn.debugMsg(debugSetClientPingDuration, 100*time.Millisecond) + defer remoteConn.debugMsg(debugSetClientPingDuration, clientPingInterval) + + ctx, cancel := context.WithTimeout(t.Context(), time.Minute) + defer cancel() + st, err := remoteConn.NewStream(ctx, handlerTest, []byte(testPayload)) + errFatal(err) + + // Wait for the server start the request. + <-reqStarted + + // Block until we have exceeded the deadline several times over. + nowBlocking := make(chan struct{}) + var mu sync.Mutex + time.AfterFunc(time.Second, func() { + mu.Lock() + cancel() + close(nowBlocking) + mu.Unlock() + }) + if inCap > 0 { + go func() { + defer close(st.Requests) + if !blockReq { + <-nowBlocking + return + } + for { + select { + case <-nowBlocking: + return + case <-st.Done(): + case st.Requests <- []byte{1}: + time.Sleep(10 * time.Millisecond) + } + } + }() + } + // Check that local returned. + err = st.Results(func(b []byte) error { + <-st.Done() + return ctx.Err() + }) + mu.Lock() + select { + case <-nowBlocking: + default: + t.Fatal("expected to be blocked. got err", err) + } + if err == nil { + t.Fatal("expected error, got nil") + } + t.Logf("response: %v", err) + // Check that remote is canceled. + <-serverCanceled +} + func timeout(after time.Duration) (cancel func()) { c := time.After(after) cc := make(chan struct{}) diff --git a/internal/grid/grid_types_msgp_test.go b/internal/grid/grid_types_msgp_test.go index 7252ae3606a89..6ac8a6ddde70c 100644 --- a/internal/grid/grid_types_msgp_test.go +++ b/internal/grid/grid_types_msgp_test.go @@ -1,7 +1,7 @@ -package grid - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package grid + import ( "github.com/tinylib/msgp/msgp" ) diff --git a/internal/grid/handlers.go b/internal/grid/handlers.go index 59b03ba6959d1..1d20fa9b94a61 100644 --- a/internal/grid/handlers.go +++ b/internal/grid/handlers.go @@ -23,11 +23,10 @@ import ( "errors" "fmt" "strings" - "sync" + "github.com/minio/minio/internal/bpool" "github.com/minio/minio/internal/hash/sha256" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/tinylib/msgp/msgp" ) @@ -111,6 +110,12 @@ const ( HandlerGetBandwidth HandlerWriteAll HandlerListBuckets + HandlerRenameDataInline + HandlerRenameData2 + HandlerCheckParts2 + HandlerRenamePart + HandlerClearUploadID + HandlerCheckParts3 // Add more above here ^^^ // If all handlers are used, the type of Handler can be changed. @@ -189,6 +194,12 @@ var handlerPrefixes = [handlerLast]string{ HandlerConsoleLog: peerPrefix, HandlerListDir: storagePrefix, HandlerListBuckets: peerPrefixS3, + HandlerRenameDataInline: storagePrefix, + HandlerRenameData2: storagePrefix, + HandlerCheckParts2: storagePrefix, + HandlerCheckParts3: storagePrefix, + HandlerRenamePart: storagePrefix, + HandlerClearUploadID: peerPrefix, } const ( @@ -203,7 +214,7 @@ const ( func init() { // Static check if we exceed 255 handler ids. // Extend the type to uint16 when hit. - if handlerLast > 255 { + if uint32(handlerLast) > 255 { panic(fmt.Sprintf("out of handler IDs. %d > %d", handlerLast, 255)) } } @@ -420,14 +431,15 @@ func recycleFunc[RT RoundTripper](newRT func() RT) (newFn func() RT, recycle fun } } } - pool := sync.Pool{ - New: func() interface{} { + pool := bpool.Pool[RT]{ + New: func() RT { return newRT() }, } - return func() RT { return pool.Get().(RT) }, + return pool.Get, func(r RT) { if r != rZero { + //nolint:staticcheck // SA6002 IT IS A GENERIC VALUE! pool.Put(r) } } @@ -465,7 +477,7 @@ func (h *SingleHandler[Req, Resp]) AllowCallRequestPool(b bool) *SingleHandler[R // This may only be set ONCE before use. func (h *SingleHandler[Req, Resp]) IgnoreNilConn() *SingleHandler[Req, Resp] { if h.ignoreNilConn { - logger.LogOnceIf(context.Background(), fmt.Errorf("%s: IgnoreNilConn called twice", h.id.String()), h.id.String()+"IgnoreNilConn") + gridLogOnceIf(context.Background(), fmt.Errorf("%s: IgnoreNilConn called twice", h.id.String()), h.id.String()+"IgnoreNilConn") } h.ignoreNilConn = true return h @@ -546,7 +558,7 @@ func (h *SingleHandler[Req, Resp]) Call(ctx context.Context, c Requester, req Re } return resp, ErrDisconnected } - payload, err := req.MarshalMsg(GetByteBuffer()[:0]) + payload, err := req.MarshalMsg(GetByteBufferCap(req.Msgsize())) if err != nil { return resp, err } @@ -594,15 +606,18 @@ func GetCaller(ctx context.Context) *RemoteClient { // GetSubroute returns caller information from contexts provided to handlers. func GetSubroute(ctx context.Context) string { + //nolint:staticcheck // SA1029 Staticcheck is drunk. val, _ := ctx.Value(ctxSubrouteKey{}).(string) return val } func setCaller(ctx context.Context, cl *RemoteClient) context.Context { + //nolint:staticcheck // SA1029 Staticcheck is drunk. return context.WithValue(ctx, ctxCallerKey{}, cl) } func setSubroute(ctx context.Context, s string) context.Context { + //nolint:staticcheck // SA1029 Staticcheck is drunk. return context.WithValue(ctx, ctxSubrouteKey{}, s) } @@ -617,8 +632,8 @@ type StreamTypeHandler[Payload, Req, Resp RoundTripper] struct { // Will be 0 if newReq is nil. 
InCapacity int - reqPool sync.Pool - respPool sync.Pool + reqPool bpool.Pool[Req] + respPool bpool.Pool[Resp] id HandlerID newPayload func() Payload nilReq Req @@ -638,13 +653,13 @@ func NewStream[Payload, Req, Resp RoundTripper](h HandlerID, newPayload func() P s := newStreamHandler[Payload, Req, Resp](h) if newReq != nil { - s.reqPool.New = func() interface{} { + s.reqPool.New = func() Req { return newReq() } } else { s.InCapacity = 0 } - s.respPool.New = func() interface{} { + s.respPool.New = func() Resp { return newResp() } s.newPayload = newPayload @@ -667,13 +682,14 @@ func (h *StreamTypeHandler[Payload, Req, Resp]) NewPayload() Payload { // NewRequest creates a new request. // The struct may be reused, so caller should clear any fields. func (h *StreamTypeHandler[Payload, Req, Resp]) NewRequest() Req { - return h.reqPool.Get().(Req) + return h.reqPool.Get() } // PutRequest will accept a request for reuse. // These should be returned by the handler. func (h *StreamTypeHandler[Payload, Req, Resp]) PutRequest(r Req) { if r != h.nilReq { + //nolint:staticcheck // SA6002 IT IS A GENERIC VALUE! (and always a pointer) h.reqPool.Put(r) } } @@ -682,6 +698,7 @@ func (h *StreamTypeHandler[Payload, Req, Resp]) PutRequest(r Req) { // These should be returned by the caller. func (h *StreamTypeHandler[Payload, Req, Resp]) PutResponse(r Resp) { if r != h.nilResp { + //nolint:staticcheck // SA6002 IT IS A GENERIC VALUE! (and always a pointer) h.respPool.Put(r) } } @@ -689,7 +706,7 @@ func (h *StreamTypeHandler[Payload, Req, Resp]) PutResponse(r Resp) { // NewResponse creates a new response. // Handlers can use this to create a reusable response. func (h *StreamTypeHandler[Payload, Req, Resp]) NewResponse() Resp { - return h.respPool.Get().(Resp) + return h.respPool.Get() } func newStreamHandler[Payload, Req, Resp RoundTripper](h HandlerID) *StreamTypeHandler[Payload, Req, Resp] { @@ -766,7 +783,7 @@ func (h *StreamTypeHandler[Payload, Req, Resp]) register(m *Manager, handle func input := h.NewRequest() _, err := input.UnmarshalMsg(v) if err != nil { - logger.LogOnceIf(ctx, err, err.Error()) + gridLogOnceIf(ctx, err, err.Error()) } PutByteBuffer(v) // Send input @@ -788,10 +805,9 @@ func (h *StreamTypeHandler[Payload, Req, Resp]) register(m *Manager, handle func if dropOutput { continue } - dst := GetByteBuffer() - dst, err := v.MarshalMsg(dst[:0]) + dst, err := v.MarshalMsg(GetByteBufferCap(v.Msgsize())) if err != nil { - logger.LogOnceIf(ctx, err, err.Error()) + gridLogOnceIf(ctx, err, err.Error()) } if !h.sharedResponse { h.PutResponse(v) @@ -853,7 +869,7 @@ func (h *StreamTypeHandler[Payload, Req, Resp]) Call(ctx context.Context, c Stre var payloadB []byte if h.WithPayload { var err error - payloadB, err = payload.MarshalMsg(GetByteBuffer()[:0]) + payloadB, err = payload.MarshalMsg(GetByteBufferCap(payload.Msgsize())) if err != nil { return nil, err } @@ -875,9 +891,9 @@ func (h *StreamTypeHandler[Payload, Req, Resp]) Call(ctx context.Context, c Stre go func() { defer xioutil.SafeClose(stream.Requests) for req := range reqT { - b, err := req.MarshalMsg(GetByteBuffer()[:0]) + b, err := req.MarshalMsg(GetByteBufferCap(req.Msgsize())) if err != nil { - logger.LogOnceIf(ctx, err, err.Error()) + gridLogOnceIf(ctx, err, err.Error()) } h.PutRequest(req) stream.Requests <- b diff --git a/internal/grid/handlers_string.go b/internal/grid/handlers_string.go index 6474ec2dad0ed..454c90b87f2cb 100644 --- a/internal/grid/handlers_string.go +++ b/internal/grid/handlers_string.go @@ -80,14 +80,20 @@ func _() { 
_ = x[HandlerGetBandwidth-69] _ = x[HandlerWriteAll-70] _ = x[HandlerListBuckets-71] - _ = x[handlerTest-72] - _ = x[handlerTest2-73] - _ = x[handlerLast-74] + _ = x[HandlerRenameDataInline-72] + _ = x[HandlerRenameData2-73] + _ = x[HandlerCheckParts2-74] + _ = x[HandlerRenamePart-75] + _ = x[HandlerClearUploadID-76] + _ = x[HandlerCheckParts3-77] + _ = x[handlerTest-78] + _ = x[handlerTest2-79] + _ = x[handlerLast-80] } -const _HandlerID_name = "handlerInvalidLockLockLockRLockLockUnlockLockRUnlockLockRefreshLockForceUnlockWalkDirStatVolDiskInfoNSScannerReadXLReadVersionDeleteFileDeleteVersionUpdateMetadataWriteMetadataCheckPartsRenameDataRenameFileReadAllServerVerifyTraceListenDeleteBucketMetadataLoadBucketMetadataReloadSiteReplicationConfigReloadPoolMetaStopRebalanceLoadRebalanceMetaLoadTransitionTierConfigDeletePolicyLoadPolicyLoadPolicyMappingDeleteServiceAccountLoadServiceAccountDeleteUserLoadUserLoadGroupHealBucketMakeBucketHeadBucketDeleteBucketGetMetricsGetResourceMetricsGetMemInfoGetProcInfoGetOSInfoGetPartitionsGetNetInfoGetCPUsServerInfoGetSysConfigGetSysServicesGetSysErrorsGetAllBucketStatsGetBucketStatsGetSRMetricsGetPeerMetricsGetMetacacheListingUpdateMetacacheListingGetPeerBucketMetricsStorageInfoConsoleLogListDirGetLocksBackgroundHealStatusGetLastDayTierStatsSignalServiceGetBandwidthWriteAllListBucketshandlerTesthandlerTest2handlerLast" +const _HandlerID_name = "handlerInvalidLockLockLockRLockLockUnlockLockRUnlockLockRefreshLockForceUnlockWalkDirStatVolDiskInfoNSScannerReadXLReadVersionDeleteFileDeleteVersionUpdateMetadataWriteMetadataCheckPartsRenameDataRenameFileReadAllServerVerifyTraceListenDeleteBucketMetadataLoadBucketMetadataReloadSiteReplicationConfigReloadPoolMetaStopRebalanceLoadRebalanceMetaLoadTransitionTierConfigDeletePolicyLoadPolicyLoadPolicyMappingDeleteServiceAccountLoadServiceAccountDeleteUserLoadUserLoadGroupHealBucketMakeBucketHeadBucketDeleteBucketGetMetricsGetResourceMetricsGetMemInfoGetProcInfoGetOSInfoGetPartitionsGetNetInfoGetCPUsServerInfoGetSysConfigGetSysServicesGetSysErrorsGetAllBucketStatsGetBucketStatsGetSRMetricsGetPeerMetricsGetMetacacheListingUpdateMetacacheListingGetPeerBucketMetricsStorageInfoConsoleLogListDirGetLocksBackgroundHealStatusGetLastDayTierStatsSignalServiceGetBandwidthWriteAllListBucketsRenameDataInlineRenameData2CheckParts2RenamePartClearUploadIDCheckParts3handlerTesthandlerTest2handlerLast" -var _HandlerID_index = [...]uint16{0, 14, 22, 31, 41, 52, 63, 78, 85, 92, 100, 109, 115, 126, 136, 149, 163, 176, 186, 196, 206, 213, 225, 230, 236, 256, 274, 301, 315, 328, 345, 369, 381, 391, 408, 428, 446, 456, 464, 473, 483, 493, 503, 515, 525, 543, 553, 564, 573, 586, 596, 603, 613, 625, 639, 651, 668, 682, 694, 708, 727, 749, 769, 780, 790, 797, 805, 825, 844, 857, 869, 877, 888, 899, 911, 922} +var _HandlerID_index = [...]uint16{0, 14, 22, 31, 41, 52, 63, 78, 85, 92, 100, 109, 115, 126, 136, 149, 163, 176, 186, 196, 206, 213, 225, 230, 236, 256, 274, 301, 315, 328, 345, 369, 381, 391, 408, 428, 446, 456, 464, 473, 483, 493, 503, 515, 525, 543, 553, 564, 573, 586, 596, 603, 613, 625, 639, 651, 668, 682, 694, 708, 727, 749, 769, 780, 790, 797, 805, 825, 844, 857, 869, 877, 888, 904, 915, 926, 936, 949, 960, 971, 983, 994} func (i HandlerID) String() string { if i >= HandlerID(len(_HandlerID_index)-1) { diff --git a/internal/grid/manager.go b/internal/grid/manager.go index 66afd3dab7c99..3e4829e3b42e3 100644 --- a/internal/grid/manager.go +++ b/internal/grid/manager.go @@ -19,17 +19,20 @@ package grid import ( "context" - "crypto/tls" 
+ "errors" "fmt" + "io" + "net" "net/http" "runtime/debug" "strings" + "time" "github.com/gobwas/ws" "github.com/gobwas/ws/wsutil" "github.com/google/uuid" "github.com/minio/madmin-go/v3" - "github.com/minio/minio/internal/logger" + "github.com/minio/minio/internal/deadlineconn" "github.com/minio/minio/internal/pubsub" "github.com/minio/mux" ) @@ -43,6 +46,9 @@ const ( // RoutePath is the remote path to connect to. RoutePath = "/minio/grid/" + apiVersion + + // RouteLockPath is the remote lock path to connect to. + RouteLockPath = "/minio/grid/lock/" + apiVersion ) // Manager will contain all the connections to the grid. @@ -61,40 +67,53 @@ type Manager struct { // local host name. local string - // Validate incoming requests. - authRequest func(r *http.Request) error + // authToken is a function that will validate a token. + authToken ValidateTokenFn + + // routePath indicates the dial route path + routePath string } // ManagerOptions are options for creating a new grid manager. type ManagerOptions struct { - Dialer ContextDialer // Outgoing dialer. - Local string // Local host name. - Hosts []string // All hosts, including local in the grid. - AddAuth AuthFn // Add authentication to the given audience. - AuthRequest func(r *http.Request) error // Validate incoming requests. - TLSConfig *tls.Config // TLS to apply to the connections. - Incoming func(n int64) // Record incoming bytes. - Outgoing func(n int64) // Record outgoing bytes. - BlockConnect chan struct{} // If set, incoming and outgoing connections will be blocked until closed. + Local string // Local host name. + Hosts []string // All hosts, including local in the grid. + Incoming func(n int64) // Record incoming bytes. + Outgoing func(n int64) // Record outgoing bytes. + BlockConnect chan struct{} // If set, incoming and outgoing connections will be blocked until closed. + RoutePath string TraceTo *pubsub.PubSub[madmin.TraceInfo, madmin.TraceType] + Dialer ConnDialer + // Sign a token for the given audience. + AuthFn AuthFn + // Callbacks to validate incoming connections. 
+ AuthToken ValidateTokenFn } // NewManager creates a new grid manager func NewManager(ctx context.Context, o ManagerOptions) (*Manager, error) { found := false - if o.AuthRequest == nil { - return nil, fmt.Errorf("grid: AuthRequest must be set") + if o.AuthToken == nil { + return nil, fmt.Errorf("grid: AuthToken not set") + } + if o.Dialer == nil { + return nil, fmt.Errorf("grid: Dialer not set") + } + if o.AuthFn == nil { + return nil, fmt.Errorf("grid: AuthFn not set") } m := &Manager{ - ID: uuid.New(), - targets: make(map[string]*Connection, len(o.Hosts)), - local: o.Local, - authRequest: o.AuthRequest, + ID: uuid.New(), + targets: make(map[string]*Connection, len(o.Hosts)), + local: o.Local, + authToken: o.AuthToken, + routePath: o.RoutePath, } m.handlers.init() if ctx == nil { ctx = context.Background() } + for _, host := range o.Hosts { if host == o.Local { if found { @@ -109,31 +128,30 @@ func NewManager(ctx context.Context, o ManagerOptions) (*Manager, error) { id: m.ID, local: o.Local, remote: host, - dial: o.Dialer, handlers: &m.handlers, - auth: o.AddAuth, blockConnect: o.BlockConnect, - tlsConfig: o.TLSConfig, publisher: o.TraceTo, incomingBytes: o.Incoming, outgoingBytes: o.Outgoing, + dialer: o.Dialer, + authFn: o.AuthFn, }) } if !found { - return nil, fmt.Errorf("grid: local host not found") + return nil, fmt.Errorf("grid: local host (%s) not found in cluster setup", o.Local) } return m, nil } // AddToMux will add the grid manager to the given mux. -func (m *Manager) AddToMux(router *mux.Router) { - router.Handle(RoutePath, m.Handler()) +func (m *Manager) AddToMux(router *mux.Router, authReq func(r *http.Request) error) { + router.Handle(m.routePath, m.Handler(authReq)) } // Handler returns a handler that can be used to serve grid requests. // This should be connected on RoutePath to the main server. -func (m *Manager) Handler() http.HandlerFunc { +func (m *Manager) Handler(authReq func(r *http.Request) error) http.HandlerFunc { return func(w http.ResponseWriter, req *http.Request) { defer func() { if debugPrint { @@ -142,7 +160,7 @@ func (m *Manager) Handler() http.HandlerFunc { if r := recover(); r != nil { debug.PrintStack() err := fmt.Errorf("grid: panic: %v\n", r) - logger.LogIf(context.Background(), err, err.Error()) + gridLogIf(context.Background(), err, err.Error()) w.WriteHeader(http.StatusInternalServerError) } }() @@ -150,8 +168,8 @@ func (m *Manager) Handler() http.HandlerFunc { fmt.Printf("grid: Got a %s request for: %v\n", req.Method, req.URL) } ctx := req.Context() - if err := m.authRequest(req); err != nil { - logger.LogOnceIf(ctx, fmt.Errorf("auth %s: %w", req.RemoteAddr, err), req.RemoteAddr+err.Error()) + if err := authReq(req); err != nil { + gridLogOnceIf(ctx, fmt.Errorf("auth %s: %w", req.RemoteAddr, err), req.RemoteAddr) w.WriteHeader(http.StatusForbidden) return } @@ -163,72 +181,97 @@ func (m *Manager) Handler() http.HandlerFunc { w.WriteHeader(http.StatusUpgradeRequired) return } - // will write an OpConnectResponse message to the remote and log it once locally. 
- writeErr := func(err error) { - if err == nil { - return - } - logger.LogOnceIf(ctx, err, err.Error()) - resp := connectResp{ - ID: m.ID, - Accepted: false, - RejectedReason: err.Error(), - } - if b, err := resp.MarshalMsg(nil); err == nil { - msg := message{ - Op: OpConnectResponse, - Payload: b, - } - if b, err := msg.MarshalMsg(nil); err == nil { - wsutil.WriteMessage(conn, ws.StateServerSide, ws.OpBinary, b) - } - } - } - defer conn.Close() - if debugPrint { - fmt.Printf("grid: Upgraded request: %v\n", req.URL) - } - - msg, _, err := wsutil.ReadClientData(conn) - if err != nil { - writeErr(fmt.Errorf("reading connect: %w", err)) - w.WriteHeader(http.StatusForbidden) - return - } - if debugPrint { - fmt.Printf("%s handler: Got message, length %v\n", m.local, len(msg)) - } + m.IncomingConn(ctx, conn) + } +} - var message message - _, _, err = message.parse(msg) - if err != nil { - writeErr(fmt.Errorf("error parsing grid connect: %w", err)) - return - } - if message.Op != OpConnect { - writeErr(fmt.Errorf("unexpected connect op: %v", message.Op)) +// IncomingConn will handle an incoming connection. +// This should be called with the incoming connection after accept. +// Auth is handled internally, as well as disconnecting any connections from the same host. +func (m *Manager) IncomingConn(ctx context.Context, conn net.Conn) { + // We manage our own deadlines. + conn = deadlineconn.Unwrap(conn) + remoteAddr := conn.RemoteAddr().String() + // will write an OpConnectResponse message to the remote and log it once locally. + defer conn.Close() + writeErr := func(err error) { + if err == nil { return } - var cReq connectReq - _, err = cReq.UnmarshalMsg(message.Payload) - if err != nil { - writeErr(fmt.Errorf("error parsing connectReq: %w", err)) + if errors.Is(err, io.EOF) { return } - remote := m.targets[cReq.Host] - if remote == nil { - writeErr(fmt.Errorf("unknown incoming host: %v", cReq.Host)) - return + gridLogOnceIf(ctx, err, remoteAddr) + resp := connectResp{ + ID: m.ID, + Accepted: false, + RejectedReason: err.Error(), } - if debugPrint { - fmt.Printf("handler: Got Connect Req %+v\n", cReq) + if b, err := resp.MarshalMsg(nil); err == nil { + msg := message{ + Op: OpConnectResponse, + Payload: b, + } + if b, err := msg.MarshalMsg(nil); err == nil { + wsutil.WriteMessage(conn, ws.StateServerSide, ws.OpBinary, b) + } } - writeErr(remote.handleIncoming(ctx, conn, cReq)) } + defer conn.Close() + if debugPrint { + fmt.Printf("grid: Upgraded request: %v\n", remoteAddr) + } + + msg, _, err := wsutil.ReadClientData(conn) + if err != nil { + writeErr(fmt.Errorf("reading connect: %w", err)) + return + } + if debugPrint { + fmt.Printf("%s handler: Got message, length %v\n", m.local, len(msg)) + } + + var message message + _, _, err = message.parse(msg) + if err != nil { + writeErr(fmt.Errorf("error parsing grid connect: %w", err)) + return + } + if message.Op != OpConnect { + writeErr(fmt.Errorf("unexpected connect op: %v", message.Op)) + return + } + var cReq connectReq + _, err = cReq.UnmarshalMsg(message.Payload) + if err != nil { + writeErr(fmt.Errorf("error parsing connectReq: %w", err)) + return + } + remote := m.targets[cReq.Host] + if remote == nil { + writeErr(fmt.Errorf("unknown incoming host: %v", cReq.Host)) + return + } + if time.Since(cReq.Time).Abs() > 5*time.Minute { + writeErr(fmt.Errorf("time difference too large between servers: %v", time.Since(cReq.Time).Abs())) + return + } + if err := m.authToken(cReq.Token); err != nil { + writeErr(fmt.Errorf("auth token: %w", err)) + 
return + } + + if debugPrint { + fmt.Printf("handler: Got Connect Req %+v\n", cReq) + } + writeErr(remote.handleIncoming(ctx, conn, cReq)) } // AuthFn should provide an authentication string for the given aud. -type AuthFn func(aud string) string +type AuthFn func() string + +// ValidateAuthFn should check authentication for the given aud. +type ValidateAuthFn func(auth string) string // Connection will return the connection for the specified host. // If the host does not exist nil will be returned. @@ -330,3 +373,13 @@ func (m *Manager) debugMsg(d debugMsg, args ...any) { c.debugMsg(d, args...) } } + +// ConnStats returns the connection statistics for all connections. +func (m *Manager) ConnStats() madmin.RPCMetrics { + var res madmin.RPCMetrics + for _, c := range m.targets { + t := c.Stats() + res.Merge(&t) + } + return res +} diff --git a/internal/grid/msg.go b/internal/grid/msg.go index f55230f401aec..b72520f08cf22 100644 --- a/internal/grid/msg.go +++ b/internal/grid/msg.go @@ -21,6 +21,7 @@ import ( "encoding/binary" "fmt" "strings" + "time" "github.com/tinylib/msgp/msgp" "github.com/zeebo/xxh3" @@ -255,8 +256,15 @@ type sender interface { } type connectReq struct { - ID [16]byte - Host string + ID [16]byte + Host string + Time time.Time + Token string +} + +// addToken will add the token to the connect request. +func (c *connectReq) addToken(fn AuthFn) { + c.Token = fn() } func (connectReq) Op() Op { @@ -282,10 +290,19 @@ func (muxConnectError) Op() Op { } type pongMsg struct { - NotFound bool `msg:"nf"` - Err *string `msg:"e,allownil"` + NotFound bool `msg:"nf"` + Err *string `msg:"e,allownil"` + T time.Time `msg:"t"` } func (pongMsg) Op() Op { return OpPong } + +type pingMsg struct { + T time.Time `msg:"t"` +} + +func (pingMsg) Op() Op { + return OpPing +} diff --git a/internal/grid/msg_gen.go b/internal/grid/msg_gen.go index 15f2a58f9affe..aff7a2bf8a5e2 100644 --- a/internal/grid/msg_gen.go +++ b/internal/grid/msg_gen.go @@ -1,7 +1,7 @@ -package grid - // Code generated by github.com/tinylib/msgp DO NOT EDIT. 
+package grid + import ( "github.com/tinylib/msgp/msgp" ) @@ -192,6 +192,18 @@ func (z *connectReq) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "Host") return } + case "Time": + z.Time, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "Time") + return + } + case "Token": + z.Token, err = dc.ReadString() + if err != nil { + err = msgp.WrapError(err, "Token") + return + } default: err = dc.Skip() if err != nil { @@ -205,9 +217,9 @@ func (z *connectReq) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z *connectReq) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 + // map header, size 4 // write "ID" - err = en.Append(0x82, 0xa2, 0x49, 0x44) + err = en.Append(0x84, 0xa2, 0x49, 0x44) if err != nil { return } @@ -226,19 +238,45 @@ func (z *connectReq) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "Host") return } + // write "Time" + err = en.Append(0xa4, 0x54, 0x69, 0x6d, 0x65) + if err != nil { + return + } + err = en.WriteTime(z.Time) + if err != nil { + err = msgp.WrapError(err, "Time") + return + } + // write "Token" + err = en.Append(0xa5, 0x54, 0x6f, 0x6b, 0x65, 0x6e) + if err != nil { + return + } + err = en.WriteString(z.Token) + if err != nil { + err = msgp.WrapError(err, "Token") + return + } return } // MarshalMsg implements msgp.Marshaler func (z *connectReq) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 2 + // map header, size 4 // string "ID" - o = append(o, 0x82, 0xa2, 0x49, 0x44) + o = append(o, 0x84, 0xa2, 0x49, 0x44) o = msgp.AppendBytes(o, (z.ID)[:]) // string "Host" o = append(o, 0xa4, 0x48, 0x6f, 0x73, 0x74) o = msgp.AppendString(o, z.Host) + // string "Time" + o = append(o, 0xa4, 0x54, 0x69, 0x6d, 0x65) + o = msgp.AppendTime(o, z.Time) + // string "Token" + o = append(o, 0xa5, 0x54, 0x6f, 0x6b, 0x65, 0x6e) + o = msgp.AppendString(o, z.Token) return } @@ -272,6 +310,18 @@ func (z *connectReq) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "Host") return } + case "Time": + z.Time, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Time") + return + } + case "Token": + z.Token, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Token") + return + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -286,7 +336,7 @@ func (z *connectReq) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *connectReq) Msgsize() (s int) { - s = 1 + 3 + msgp.ArrayHeaderSize + (16 * (msgp.ByteSize)) + 5 + msgp.StringPrefixSize + len(z.Host) + s = 1 + 3 + msgp.ArrayHeaderSize + (16 * (msgp.ByteSize)) + 5 + msgp.StringPrefixSize + len(z.Host) + 5 + msgp.TimeSize + 6 + msgp.StringPrefixSize + len(z.Token) return } @@ -737,6 +787,109 @@ func (z muxConnectError) Msgsize() (s int) { return } +// DecodeMsg implements msgp.Decodable +func (z *pingMsg) DecodeMsg(dc *msgp.Reader) (err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, err = dc.ReadMapHeader() + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, err = dc.ReadMapKeyPtr() + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "t": + z.T, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "T") + return + } + default: + err = dc.Skip() + if err != nil { + err 
= msgp.WrapError(err) + return + } + } + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z pingMsg) EncodeMsg(en *msgp.Writer) (err error) { + // map header, size 1 + // write "t" + err = en.Append(0x81, 0xa1, 0x74) + if err != nil { + return + } + err = en.WriteTime(z.T) + if err != nil { + err = msgp.WrapError(err, "T") + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z pingMsg) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 1 + // string "t" + o = append(o, 0x81, 0xa1, 0x74) + o = msgp.AppendTime(o, z.T) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *pingMsg) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "t": + z.T, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "T") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z pingMsg) Msgsize() (s int) { + s = 1 + 2 + msgp.TimeSize + return +} + // DecodeMsg implements msgp.Decodable func (z *pongMsg) DecodeMsg(dc *msgp.Reader) (err error) { var field []byte @@ -779,6 +932,12 @@ func (z *pongMsg) DecodeMsg(dc *msgp.Reader) (err error) { return } } + case "t": + z.T, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "T") + return + } default: err = dc.Skip() if err != nil { @@ -792,9 +951,9 @@ func (z *pongMsg) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z *pongMsg) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 2 + // map header, size 3 // write "nf" - err = en.Append(0x82, 0xa2, 0x6e, 0x66) + err = en.Append(0x83, 0xa2, 0x6e, 0x66) if err != nil { return } @@ -820,15 +979,25 @@ func (z *pongMsg) EncodeMsg(en *msgp.Writer) (err error) { return } } + // write "t" + err = en.Append(0xa1, 0x74) + if err != nil { + return + } + err = en.WriteTime(z.T) + if err != nil { + err = msgp.WrapError(err, "T") + return + } return } // MarshalMsg implements msgp.Marshaler func (z *pongMsg) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 2 + // map header, size 3 // string "nf" - o = append(o, 0x82, 0xa2, 0x6e, 0x66) + o = append(o, 0x83, 0xa2, 0x6e, 0x66) o = msgp.AppendBool(o, z.NotFound) // string "e" o = append(o, 0xa1, 0x65) @@ -837,6 +1006,9 @@ func (z *pongMsg) MarshalMsg(b []byte) (o []byte, err error) { } else { o = msgp.AppendString(o, *z.Err) } + // string "t" + o = append(o, 0xa1, 0x74) + o = msgp.AppendTime(o, z.T) return } @@ -881,6 +1053,12 @@ func (z *pongMsg) UnmarshalMsg(bts []byte) (o []byte, err error) { return } } + case "t": + z.T, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "T") + return + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -901,5 +1079,6 @@ func (z *pongMsg) Msgsize() (s int) { } else { s += msgp.StringPrefixSize + len(*z.Err) } + s += 2 + msgp.TimeSize return } diff --git a/internal/grid/msg_gen_test.go b/internal/grid/msg_gen_test.go index a3170c81107dd..7e5574af4b17d 100644 
--- a/internal/grid/msg_gen_test.go +++ b/internal/grid/msg_gen_test.go @@ -1,7 +1,7 @@ -package grid - // Code generated by github.com/tinylib/msgp DO NOT EDIT. +package grid + import ( "bytes" "testing" @@ -461,6 +461,119 @@ func BenchmarkDecodemuxConnectError(b *testing.B) { } } +func TestMarshalUnmarshalpingMsg(t *testing.T) { + v := pingMsg{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgpingMsg(b *testing.B) { + v := pingMsg{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgpingMsg(b *testing.B) { + v := pingMsg{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalpingMsg(b *testing.B) { + v := pingMsg{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodepingMsg(t *testing.T) { + v := pingMsg{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Log("WARNING: TestEncodeDecodepingMsg Msgsize() is inaccurate") + } + + vn := pingMsg{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodepingMsg(b *testing.B) { + v := pingMsg{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodepingMsg(b *testing.B) { + v := pingMsg{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + func TestMarshalUnmarshalpongMsg(t *testing.T) { v := pongMsg{} bts, err := v.MarshalMsg(nil) diff --git a/internal/grid/muxclient.go b/internal/grid/muxclient.go index 7fa4ce29afb63..769f0854eab11 100644 --- a/internal/grid/muxclient.go +++ b/internal/grid/muxclient.go @@ -27,30 +27,30 @@ import ( "time" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" "github.com/zeebo/xxh3" ) // muxClient is a stateful connection to a remote. 
type muxClient struct { - MuxID uint64 - SendSeq, RecvSeq uint32 - LastPong int64 - BaseFlags Flags - ctx context.Context - cancelFn context.CancelCauseFunc - parent *Connection - respWait chan<- Response - respMu sync.Mutex - singleResp bool - closed bool - stateless bool - acked bool - init bool - deadline time.Duration - outBlock chan struct{} - subroute *subHandlerID - respErr atomic.Pointer[error] + MuxID uint64 + SendSeq, RecvSeq uint32 + LastPong int64 + BaseFlags Flags + ctx context.Context + cancelFn context.CancelCauseFunc + parent *Connection + respWait chan<- Response + respMu sync.Mutex + singleResp bool + closed bool + stateless bool + acked bool + init bool + deadline time.Duration + outBlock chan struct{} + subroute *subHandlerID + respErr atomic.Pointer[error] + clientPingInterval time.Duration } // Response is a response from the server. @@ -62,12 +62,13 @@ type Response struct { func newMuxClient(ctx context.Context, muxID uint64, parent *Connection) *muxClient { ctx, cancelFn := context.WithCancelCause(ctx) return &muxClient{ - MuxID: muxID, - ctx: ctx, - cancelFn: cancelFn, - parent: parent, - LastPong: time.Now().Unix(), - BaseFlags: parent.baseFlags, + MuxID: muxID, + ctx: ctx, + cancelFn: cancelFn, + parent: parent, + LastPong: time.Now().UnixNano(), + BaseFlags: parent.baseFlags, + clientPingInterval: parent.clientPingInterval, } } @@ -145,7 +146,7 @@ func (m *muxClient) send(msg message) error { // sendLocked the message. msg.Seq and msg.MuxID will be set. // m.respMu must be held. func (m *muxClient) sendLocked(msg message) error { - dst := GetByteBuffer()[:0] + dst := GetByteBufferCap(msg.Msgsize()) msg.Seq = m.SendSeq msg.MuxID = m.MuxID msg.Flags |= m.BaseFlags @@ -289,7 +290,7 @@ func (m *muxClient) addErrorNonBlockingClose(respHandler chan<- Response, err er xioutil.SafeClose(respHandler) }() } - logger.LogIf(m.ctx, m.sendLocked(message{Op: OpDisconnectServerMux, MuxID: m.MuxID})) + gridLogIf(m.ctx, m.sendLocked(message{Op: OpDisconnectServerMux, MuxID: m.MuxID})) m.closed = true } } @@ -310,11 +311,11 @@ func (m *muxClient) handleOneWayStream(respHandler chan<- Response, respServer < } }() var pingTimer <-chan time.Time - if m.deadline == 0 || m.deadline > clientPingInterval { - ticker := time.NewTicker(clientPingInterval) + if m.deadline == 0 || m.deadline > m.clientPingInterval { + ticker := time.NewTicker(m.clientPingInterval) defer ticker.Stop() pingTimer = ticker.C - atomic.StoreInt64(&m.LastPong, time.Now().Unix()) + atomic.StoreInt64(&m.LastPong, time.Now().UnixNano()) } defer m.parent.deleteMux(false, m.MuxID) for { @@ -332,40 +333,77 @@ func (m *muxClient) handleOneWayStream(respHandler chan<- Response, respServer < if !ok { return } + sendResp: select { case respHandler <- resp: m.respMu.Lock() if !m.closed { - logger.LogIf(m.ctx, m.sendLocked(message{Op: OpUnblockSrvMux, MuxID: m.MuxID})) + gridLogIf(m.ctx, m.sendLocked(message{Op: OpUnblockSrvMux, MuxID: m.MuxID})) } m.respMu.Unlock() case <-m.ctx.Done(): // Client canceled. Don't block. // Next loop will catch it. + case <-pingTimer: + if !m.doPing(respHandler) { + return + } + goto sendResp } case <-pingTimer: - if time.Since(time.Unix(atomic.LoadInt64(&m.LastPong), 0)) > clientPingInterval*2 { - m.addErrorNonBlockingClose(respHandler, ErrDisconnected) + if !m.doPing(respHandler) { return } - // Send new ping. - logger.LogIf(m.ctx, m.send(message{Op: OpPing, MuxID: m.MuxID})) } } } +// doPing checks last ping time and sends another ping. 
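+// It reports false when the remote has not answered a ping within twice the
+// client ping interval, or when the ping could not be sent; in both cases an
+// error has already been delivered on respHandler and the mux should stop.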
+func (m *muxClient) doPing(respHandler chan<- Response) (ok bool) { + m.respMu.Lock() + if m.closed { + m.respMu.Unlock() + // Already closed. This is not an error state; + // we may just be delivering the last responses. + return true + } + + // Only check ping when not closed. + if got := time.Since(time.Unix(0, atomic.LoadInt64(&m.LastPong))); got > m.clientPingInterval*2 { + m.respMu.Unlock() + if debugPrint { + fmt.Printf("Mux %d: last pong %v ago, disconnecting\n", m.MuxID, got) + } + m.addErrorNonBlockingClose(respHandler, ErrDisconnected) + return false + } + + // Send new ping + err := m.sendLocked(message{Op: OpPing, MuxID: m.MuxID}) + m.respMu.Unlock() + if err != nil { + m.addErrorNonBlockingClose(respHandler, err) + } + return err == nil +} + // responseCh is the channel to that goes to the requester. // internalResp is the channel that comes from the server. func (m *muxClient) handleTwowayResponses(responseCh chan<- Response, internalResp <-chan Response) { - defer m.parent.deleteMux(false, m.MuxID) - defer xioutil.SafeClose(responseCh) + defer func() { + m.parent.deleteMux(false, m.MuxID) + // addErrorNonBlockingClose will close the response channel. + xioutil.SafeClose(responseCh) + }() + + // Cancelation and errors are handled by handleTwowayRequests below. for resp := range internalResp { - responseCh <- resp m.send(message{Op: OpUnblockSrvMux, MuxID: m.MuxID}) + responseCh <- resp } } -func (m *muxClient) handleTwowayRequests(internalResp chan<- Response, requests <-chan []byte) { +func (m *muxClient) handleTwowayRequests(errResp chan<- Response, requests <-chan []byte) { var errState bool if debugPrint { start := time.Now() @@ -374,24 +412,30 @@ func (m *muxClient) handleTwowayRequests(internalResp chan<- Response, requests }() } + var pingTimer <-chan time.Time + if m.deadline == 0 || m.deadline > m.clientPingInterval { + ticker := time.NewTicker(m.clientPingInterval) + defer ticker.Stop() + pingTimer = ticker.C + atomic.StoreInt64(&m.LastPong, time.Now().UnixNano()) + } + // Listen for client messages. - for { - if errState { - go func() { - // Drain requests. - for range requests { - } - }() - return - } +reqLoop: + for !errState { select { case <-m.ctx.Done(): if debugPrint { fmt.Println("Client sending disconnect to mux", m.MuxID) } - m.addErrorNonBlockingClose(internalResp, context.Cause(m.ctx)) + m.addErrorNonBlockingClose(errResp, context.Cause(m.ctx)) errState = true continue + case <-pingTimer: + if !m.doPing(errResp) { + errState = true + continue + } case req, ok := <-requests: if !ok { // Done send EOF @@ -401,22 +445,28 @@ func (m *muxClient) handleTwowayRequests(internalResp chan<- Response, requests msg := message{ Op: OpMuxClientMsg, MuxID: m.MuxID, - Seq: 1, Flags: FlagEOF, } msg.setZeroPayloadFlag() err := m.send(msg) if err != nil { - m.addErrorNonBlockingClose(internalResp, err) + m.addErrorNonBlockingClose(errResp, err) } - return + break reqLoop } // Grab a send token. 
+ sendReq: select { case <-m.ctx.Done(): - m.addErrorNonBlockingClose(internalResp, context.Cause(m.ctx)) + m.addErrorNonBlockingClose(errResp, context.Cause(m.ctx)) errState = true continue + case <-pingTimer: + if !m.doPing(errResp) { + errState = true + continue + } + goto sendReq case <-m.outBlock: } msg := message{ @@ -429,13 +479,41 @@ func (m *muxClient) handleTwowayRequests(internalResp chan<- Response, requests err := m.send(msg) PutByteBuffer(req) if err != nil { - m.addErrorNonBlockingClose(internalResp, err) + m.addErrorNonBlockingClose(errResp, err) errState = true continue } msg.Seq++ } } + + if errState { + // Drain requests. + for { + select { + case r, ok := <-requests: + if !ok { + return + } + PutByteBuffer(r) + default: + return + } + } + } + + for !errState { + select { + case <-m.ctx.Done(): + if debugPrint { + fmt.Println("Client sending disconnect to mux", m.MuxID) + } + m.addErrorNonBlockingClose(errResp, context.Cause(m.ctx)) + return + case <-pingTimer: + errState = !m.doPing(errResp) + } + } } // checkSeq will check if sequence number is correct and increment it by 1. @@ -471,7 +549,7 @@ func (m *muxClient) response(seq uint32, r Response) { m.addResponse(r) return } - atomic.StoreInt64(&m.LastPong, time.Now().Unix()) + atomic.StoreInt64(&m.LastPong, time.Now().UnixNano()) ok := m.addResponse(r) if !ok { PutByteBuffer(r.Msg) @@ -496,7 +574,7 @@ func (m *muxClient) ack(seq uint32) { return } available := cap(m.outBlock) - for i := 0; i < available; i++ { + for range available { m.outBlock <- struct{}{} } m.acked = true @@ -509,7 +587,7 @@ func (m *muxClient) unblockSend(seq uint32) { select { case m.outBlock <- struct{}{}: default: - logger.LogIf(m.ctx, errors.New("output unblocked overflow")) + gridLogIf(m.ctx, errors.New("output unblocked overflow")) } } @@ -522,7 +600,7 @@ func (m *muxClient) pong(msg pongMsg) { m.addResponse(Response{Err: err}) return } - atomic.StoreInt64(&m.LastPong, time.Now().Unix()) + atomic.StoreInt64(&m.LastPong, time.Now().UnixNano()) } // addResponse will add a response to the response channel. @@ -545,10 +623,10 @@ func (m *muxClient) addResponse(r Response) (ok bool) { default: if m.stateless { // Drop message if not stateful. 
- return + return ok } err := errors.New("INTERNAL ERROR: Response was blocked") - logger.LogIf(m.ctx, err) + gridLogIf(m.ctx, err) m.closeLocked() return false } diff --git a/internal/grid/muxserver.go b/internal/grid/muxserver.go index f4917bafad42d..61707d5e7d214 100644 --- a/internal/grid/muxserver.go +++ b/internal/grid/muxserver.go @@ -21,30 +21,27 @@ import ( "context" "errors" "fmt" - "runtime/debug" "sync" "sync/atomic" "time" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" ) -const lastPingThreshold = 4 * clientPingInterval - type muxServer struct { - ID uint64 - LastPing int64 - SendSeq, RecvSeq uint32 - Resp chan []byte - BaseFlags Flags - ctx context.Context - cancel context.CancelFunc - inbound chan []byte - parent *Connection - sendMu sync.Mutex - recvMu sync.Mutex - outBlock chan struct{} + ID uint64 + LastPing int64 + SendSeq, RecvSeq uint32 + Resp chan []byte + BaseFlags Flags + ctx context.Context + cancel context.CancelFunc + inbound chan []byte + parent *Connection + sendMu sync.Mutex + recvMu sync.Mutex + outBlock chan struct{} + clientPingInterval time.Duration } func newMuxStateless(ctx context.Context, msg message, c *Connection, handler StatelessHandler) *muxServer { @@ -91,16 +88,17 @@ func newMuxStream(ctx context.Context, msg message, c *Connection, handler Strea } m := muxServer{ - ID: msg.MuxID, - RecvSeq: msg.Seq + 1, - SendSeq: msg.Seq, - ctx: ctx, - cancel: cancel, - parent: c, - inbound: nil, - outBlock: make(chan struct{}, outboundCap), - LastPing: time.Now().Unix(), - BaseFlags: c.baseFlags, + ID: msg.MuxID, + RecvSeq: msg.Seq + 1, + SendSeq: msg.Seq, + ctx: ctx, + cancel: cancel, + parent: c, + inbound: nil, + outBlock: make(chan struct{}, outboundCap), + LastPing: time.Now().Unix(), + BaseFlags: c.baseFlags, + clientPingInterval: c.clientPingInterval, } // Acknowledge Mux created. // Send async. @@ -123,107 +121,146 @@ func newMuxStream(ctx context.Context, msg message, c *Connection, handler Strea if inboundCap > 0 { m.inbound = make(chan []byte, inboundCap) handlerIn = make(chan []byte, 1) - go func(inbound <-chan []byte) { + go func(inbound chan []byte) { wg.Wait() defer xioutil.SafeClose(handlerIn) - // Send unblocks when we have delivered the message to the handler. - for in := range inbound { - handlerIn <- in - m.send(message{Op: OpUnblockClMux, MuxID: m.ID, Flags: c.baseFlags}) - } + m.handleInbound(c, inbound, handlerIn) }(m.inbound) } - for i := 0; i < outboundCap; i++ { + // Fill outbound block. + // Each token represents a message that can be sent to the client without blocking. + // The client will refill the tokens as they confirm delivery of the messages. + for range outboundCap { m.outBlock <- struct{}{} } // Handler goroutine. - var handlerErr *RemoteErr + var handlerErr atomic.Pointer[RemoteErr] go func() { wg.Wait() - start := time.Now() - defer func() { - if debugPrint { - fmt.Println("Mux", m.ID, "Handler took", time.Since(start).Round(time.Millisecond)) - } - if r := recover(); r != nil { - logger.LogIf(ctx, fmt.Errorf("grid handler (%v) panic: %v", msg.Handler, r)) - debug.PrintStack() - err := RemoteErr(fmt.Sprintf("remote call panic: %v", r)) - handlerErr = &err - } - if debugPrint { - fmt.Println("muxServer: Mux", m.ID, "Returned with", handlerErr) - } - xioutil.SafeClose(send) - }() - // handlerErr is guarded by 'send' channel. 
- handlerErr = handler.Handle(ctx, msg.Payload, handlerIn, send) + defer xioutil.SafeClose(send) + err := m.handleRequests(ctx, msg, send, handler, handlerIn) + if err != nil { + handlerErr.Store(err) + } }() - // Response sender gorutine... + + // Response sender goroutine... go func(outBlock <-chan struct{}) { wg.Wait() defer m.parent.deleteMux(true, m.ID) - for { - // Process outgoing message. - var payload []byte - var ok bool - select { - case payload, ok = <-send: - case <-ctx.Done(): + m.sendResponses(ctx, send, c, &handlerErr, outBlock) + }(m.outBlock) + + // Remote aliveness check if needed. + if msg.DeadlineMS == 0 || msg.DeadlineMS > uint32(4*c.clientPingInterval/time.Millisecond) { + go func() { + wg.Wait() + m.checkRemoteAlive() + }() + } + return &m +} + +// handleInbound sends unblocks when we have delivered the message to the handler. +func (m *muxServer) handleInbound(c *Connection, inbound <-chan []byte, handlerIn chan<- []byte) { + for { + select { + case <-m.ctx.Done(): + return + case in, ok := <-inbound: + if !ok { return } select { - case <-ctx.Done(): + case <-m.ctx.Done(): return - case <-outBlock: + case handlerIn <- in: + m.send(message{Op: OpUnblockClMux, MuxID: m.ID, Flags: c.baseFlags}) } - msg := message{ - MuxID: m.ID, - Op: OpMuxServerMsg, - Flags: c.baseFlags, + } + } +} + +// sendResponses will send responses to the client. +func (m *muxServer) sendResponses(ctx context.Context, toSend <-chan []byte, c *Connection, handlerErr *atomic.Pointer[RemoteErr], outBlock <-chan struct{}) { + for { + // Process outgoing message. + var payload []byte + var ok bool + select { + case payload, ok = <-toSend: + case <-ctx.Done(): + return + } + select { + case <-ctx.Done(): + return + case <-outBlock: + } + msg := message{ + MuxID: m.ID, + Op: OpMuxServerMsg, + Flags: c.baseFlags, + } + if !ok { + hErr := handlerErr.Load() + if debugPrint { + fmt.Println("muxServer: Mux", m.ID, "send EOF", hErr) } - if !ok { - if debugPrint { - fmt.Println("muxServer: Mux", m.ID, "send EOF", handlerErr) - } - msg.Flags |= FlagEOF - if handlerErr != nil { - msg.Flags |= FlagPayloadIsErr - msg.Payload = []byte(*handlerErr) - } - msg.setZeroPayloadFlag() - m.send(msg) - return + msg.Flags |= FlagEOF + if hErr != nil { + msg.Flags |= FlagPayloadIsErr + msg.Payload = []byte(*hErr) } - msg.Payload = payload msg.setZeroPayloadFlag() m.send(msg) + return } - }(m.outBlock) + msg.Payload = payload + msg.setZeroPayloadFlag() + m.send(msg) + } +} - // Remote aliveness check. - if msg.DeadlineMS == 0 || msg.DeadlineMS > uint32(lastPingThreshold/time.Millisecond) { - go func() { - wg.Wait() - t := time.NewTicker(lastPingThreshold / 4) - defer t.Stop() - for { - select { - case <-m.ctx.Done(): - return - case <-t.C: - last := time.Since(time.Unix(atomic.LoadInt64(&m.LastPing), 0)) - if last > lastPingThreshold { - logger.LogIf(m.ctx, fmt.Errorf("canceling remote connection %s not seen for %v", m.parent, last)) - m.close() - return - } - } +// handleRequests will handle the requests from the client and call the handler function. 
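+// A panic inside the handler is recovered and converted into a RemoteErr, so
+// the failure is reported back to the caller instead of crashing the server.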
+func (m *muxServer) handleRequests(ctx context.Context, msg message, send chan<- []byte, handler StreamHandler, handlerIn <-chan []byte) (handlerErr *RemoteErr) { + start := time.Now() + defer func() { + if debugPrint { + fmt.Println("Mux", m.ID, "Handler took", time.Since(start).Round(time.Millisecond)) + } + if r := recover(); r != nil { + gridLogIf(ctx, fmt.Errorf("grid handler (%v) panic: %v", msg.Handler, r)) + err := RemoteErr(fmt.Sprintf("handler panic: %v", r)) + handlerErr = &err + } + if debugPrint { + fmt.Println("muxServer: Mux", m.ID, "Returned with", handlerErr) + } + }() + // handlerErr is guarded by 'send' channel. + handlerErr = handler.Handle(ctx, msg.Payload, handlerIn, send) + return handlerErr +} + +// checkRemoteAlive will check if the remote is alive. +func (m *muxServer) checkRemoteAlive() { + t := time.NewTicker(m.clientPingInterval) + defer t.Stop() + for { + select { + case <-m.ctx.Done(): + return + case <-t.C: + last := time.Since(time.Unix(atomic.LoadInt64(&m.LastPing), 0)) + if last > 4*m.clientPingInterval { + gridLogIf(m.ctx, fmt.Errorf("canceling remote connection %s not seen for %v", m.parent, last)) + m.close() + return } - }() + } } - return &m } // checkSeq will check if sequence number is correct and increment it by 1. @@ -232,7 +269,7 @@ func (m *muxServer) checkSeq(seq uint32) (ok bool) { if debugPrint { fmt.Printf("expected sequence %d, got %d\n", m.RecvSeq, seq) } - m.disconnect(fmt.Sprintf("receive sequence number mismatch. want %d, got %d", m.RecvSeq, seq)) + m.disconnect(fmt.Sprintf("receive sequence number mismatch. want %d, got %d", m.RecvSeq, seq), false) return false } m.RecvSeq++ @@ -243,19 +280,19 @@ func (m *muxServer) message(msg message) { if debugPrint { fmt.Printf("muxServer: received message %d, length %d\n", msg.Seq, len(msg.Payload)) } + if !m.checkSeq(msg.Seq) { + return + } m.recvMu.Lock() defer m.recvMu.Unlock() if cap(m.inbound) == 0 { - m.disconnect("did not expect inbound message") - return - } - if !m.checkSeq(msg.Seq) { + m.disconnect("did not expect inbound message", true) return } // Note, on EOF no value can be sent. if msg.Flags&FlagEOF != 0 { if len(msg.Payload) > 0 { - logger.LogIf(m.ctx, fmt.Errorf("muxServer: EOF message with payload")) + gridLogIf(m.ctx, fmt.Errorf("muxServer: EOF message with payload")) } if m.inbound != nil { xioutil.SafeClose(m.inbound) @@ -271,7 +308,7 @@ func (m *muxServer) message(msg message) { fmt.Printf("muxServer: Sent seq %d to handler\n", msg.Seq) } default: - m.disconnect("handler blocked") + m.disconnect("handler blocked", true) } } @@ -288,7 +325,7 @@ func (m *muxServer) unblockSend(seq uint32) { select { case m.outBlock <- struct{}{}: default: - logger.LogIf(m.ctx, errors.New("output unblocked overflow")) + gridLogIf(m.ctx, errors.New("output unblocked overflow")) } } @@ -307,7 +344,9 @@ func (m *muxServer) ping(seq uint32) pongMsg { } } -func (m *muxServer) disconnect(msg string) { +// disconnect will disconnect the mux. +// m.recvMu must be locked when calling this function. +func (m *muxServer) disconnect(msg string, locked bool) { if debugPrint { fmt.Println("Mux", m.ID, "disconnecting. Reason:", msg) } @@ -316,6 +355,11 @@ func (m *muxServer) disconnect(msg string) { } else { m.send(message{Op: OpDisconnectClientMux, MuxID: m.ID}) } + // Unlock, since we are calling deleteMux, which will call close - which will lock recvMu. 
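+	// Without this unlock, deleteMux -> close() would block forever trying to
+	// take recvMu again, since Go mutexes are not reentrant.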
+ if locked { + m.recvMu.Unlock() + defer m.recvMu.Lock() + } m.parent.deleteMux(true, m.ID) } @@ -328,7 +372,7 @@ func (m *muxServer) send(msg message) { if debugPrint { fmt.Printf("Mux %d, Sending %+v\n", m.ID, msg) } - logger.LogIf(m.ctx, m.parent.queueMsg(msg, nil)) + gridLogIf(m.ctx, m.parent.queueMsg(msg, nil)) } func (m *muxServer) close() { @@ -344,6 +388,5 @@ func (m *muxServer) close() { if m.outBlock != nil { xioutil.SafeClose(m.outBlock) m.outBlock = nil - } } diff --git a/internal/grid/stream.go b/internal/grid/stream.go index 29e0cebf4db4e..cbc69c1ba1e39 100644 --- a/internal/grid/stream.go +++ b/internal/grid/stream.go @@ -89,12 +89,26 @@ func (s *Stream) Results(next func(b []byte) error) (err error) { return nil } if resp.Err != nil { + s.cancel(resp.Err) return resp.Err } err = next(resp.Msg) if err != nil { + s.cancel(err) return err } } } } + +// Done will return a channel that will be closed when the stream is done. +// This mirrors context.Done(). +func (s *Stream) Done() <-chan struct{} { + return s.ctx.Done() +} + +// Err will return the error that caused the stream to end. +// This mirrors context.Err(). +func (s *Stream) Err() error { + return s.ctx.Err() +} diff --git a/internal/grid/trace.go b/internal/grid/trace.go index a612d7dcec553..f8989390a235a 100644 --- a/internal/grid/trace.go +++ b/internal/grid/trace.go @@ -105,6 +105,7 @@ func (c *muxClient) traceRoundtrip(ctx context.Context, t *tracer, h HandlerID, Duration: end.Sub(start), Path: t.Subroute, Error: errString, + Bytes: int64(len(req) + len(resp)), HTTP: &madmin.TraceHTTPStats{ ReqInfo: madmin.TraceRequestInfo{ Time: start, diff --git a/internal/grid/types.go b/internal/grid/types.go index f60f29d24d3da..a16f475665b02 100644 --- a/internal/grid/types.go +++ b/internal/grid/types.go @@ -27,6 +27,7 @@ import ( "strings" "sync" + "github.com/minio/minio/internal/bpool" "github.com/tinylib/msgp/msgp" ) @@ -53,7 +54,7 @@ func (m *MSS) Get(key string) string { // Set a key, value pair. func (m *MSS) Set(key, value string) { if m == nil { - *m = mssPool.Get().(map[string]string) + *m = mssPool.Get() } (*m)[key] = value } @@ -73,7 +74,7 @@ func (m *MSS) UnmarshalMsg(bts []byte) (o []byte, err error) { zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, "Values") - return + return o, err } dst := *m if dst == nil { @@ -90,12 +91,12 @@ func (m *MSS) UnmarshalMsg(bts []byte) (o []byte, err error) { za0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Values") - return + return o, err } za0002, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, "Values", za0001) - return + return o, err } dst[za0001] = za0002 } @@ -130,10 +131,8 @@ func (m *MSS) Msgsize() int { // NewMSS returns a new MSS. func NewMSS() *MSS { - m := MSS(mssPool.Get().(map[string]string)) - for k := range m { - delete(m, k) - } + m := MSS(mssPool.Get()) + clear(m) return &m } @@ -143,8 +142,8 @@ func NewMSSWith(m map[string]string) *MSS { return &m2 } -var mssPool = sync.Pool{ - New: func() interface{} { +var mssPool = bpool.Pool[map[string]string]{ + New: func() map[string]string { return make(map[string]string, 5) }, } @@ -152,7 +151,7 @@ var mssPool = sync.Pool{ // Recycle the underlying map. func (m *MSS) Recycle() { if m != nil && *m != nil { - mssPool.Put(map[string]string(*m)) + mssPool.Put(*m) *m = nil } } @@ -189,6 +188,12 @@ func NewBytes() *Bytes { return &b } +// NewBytesCap returns an empty Bytes with the given capacity. 
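+// Like NewBytes, the returned slice comes from the internal byte-buffer pool,
+// so callers that own the result can hand it back with Recycle when done.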
+func NewBytesCap(size int) *Bytes { + b := Bytes(GetByteBufferCap(size)) + return &b +} + // NewBytesWith returns a new Bytes with the provided content. // When sent as a parameter, the caller gives up ownership of the byte slice. // When returned as response, the handler also gives up ownership of the byte slice. @@ -203,14 +208,9 @@ func NewBytesWithCopyOf(b []byte) *Bytes { bb := Bytes(nil) return &bb } - if len(b) < maxBufferSize { - bb := NewBytes() - *bb = append(*bb, b...) - return bb - } - bb := Bytes(make([]byte, len(b))) - copy(bb, b) - return &bb + bb := NewBytesCap(len(b)) + *bb = append(*bb, b...) + return bb } // Bytes provides a byte slice that can be serialized. @@ -238,7 +238,7 @@ func (b *Bytes) UnmarshalMsg(bytes []byte) ([]byte, error) { copy(*b, val) } else { if cap(*b) == 0 && len(val) <= maxBufferSize { - *b = GetByteBuffer()[:0] + *b = GetByteBufferCap(len(val)) } else { PutByteBuffer(*b) *b = make([]byte, 0, len(val)) @@ -278,15 +278,15 @@ func (b *Bytes) Recycle() { // URLValues can be used for url.Values. type URLValues map[string][]string -var urlValuesPool = sync.Pool{ - New: func() interface{} { +var urlValuesPool = bpool.Pool[map[string][]string]{ + New: func() map[string][]string { return make(map[string][]string, 10) }, } // NewURLValues returns a new URLValues. func NewURLValues() *URLValues { - u := URLValues(urlValuesPool.Get().(map[string][]string)) + u := URLValues(urlValuesPool.Get()) return &u } @@ -329,7 +329,7 @@ func (u URLValues) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.AppendString(o, zb0007[zb0008]) } } - return + return o, err } // UnmarshalMsg implements msgp.Unmarshaler @@ -338,10 +338,10 @@ func (u *URLValues) UnmarshalMsg(bts []byte) (o []byte, err error) { zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) if err != nil { err = msgp.WrapError(err) - return + return o, err } if *u == nil { - *u = urlValuesPool.Get().(map[string][]string) + *u = urlValuesPool.Get() } if len(*u) > 0 { for key := range *u { @@ -356,13 +356,13 @@ func (u *URLValues) UnmarshalMsg(bts []byte) (o []byte, err error) { zb0001, bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err) - return + return o, err } var zb0005 uint32 zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts) if err != nil { err = msgp.WrapError(err, zb0001) - return + return o, err } if cap(zb0002) >= int(zb0005) { zb0002 = zb0002[:zb0005] @@ -373,28 +373,27 @@ func (u *URLValues) UnmarshalMsg(bts []byte) (o []byte, err error) { zb0002[zb0003], bts, err = msgp.ReadStringBytes(bts) if err != nil { err = msgp.WrapError(err, zb0001, zb0003) - return + return o, err } } (*u)[zb0001] = zb0002 } o = bts - return + return o, err } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (u URLValues) Msgsize() (s int) { s = msgp.MapHeaderSize - if u != nil { - for zb0006, zb0007 := range u { - _ = zb0007 - s += msgp.StringPrefixSize + len(zb0006) + msgp.ArrayHeaderSize - for zb0008 := range zb0007 { - s += msgp.StringPrefixSize + len(zb0007[zb0008]) - } + for zb0006, zb0007 := range u { + _ = zb0007 + s += msgp.StringPrefixSize + len(zb0006) + msgp.ArrayHeaderSize + for zb0008 := range zb0007 { + s += msgp.StringPrefixSize + len(zb0007[zb0008]) } } - return + + return s } // JSONPool is a pool for JSON objects that unmarshal into T. 
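// Editor's note (not part of this patch): the change above swaps sync.Pool for
// a typed bpool.Pool[T], so Get() needs no type assertion and Put() takes the
// concrete type. A minimal sketch of such a generic pool, assuming the real
// internal/bpool package behaves roughly like this (names and layout here are
// illustrative, not the actual implementation), could look like:
//
//	type Pool[T any] struct {
//		New func() T
//		p   sync.Pool
//	}
//
//	func (b *Pool[T]) Get() T {
//		if v, ok := b.p.Get().(T); ok {
//			return v
//		}
//		if b.New != nil {
//			return b.New()
//		}
//		var zero T
//		return zero
//	}
//
//	func (b *Pool[T]) Put(v T) { b.p.Put(v) }
//
// With such a pool, mssPool.Get() returns map[string]string directly, which is
// why the map[string]string(...) assertions in MSS.Set and NewMSS were dropped.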
@@ -412,7 +411,7 @@ func NewJSONPool[T any]() *JSONPool[T] { } return &JSONPool[T]{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { var t T return &t }, @@ -423,9 +422,11 @@ func NewJSONPool[T any]() *JSONPool[T] { func (p *JSONPool[T]) new() *T { var zero T - t := p.pool.Get().(*T) - *t = zero - return t + if t, ok := p.pool.Get().(*T); ok { + *t = zero + return t + } + return &zero } // JSON is a wrapper around a T object that can be serialized. @@ -556,15 +557,15 @@ func (NoPayload) Recycle() {} // ArrayOf wraps an array of Messagepack compatible objects. type ArrayOf[T RoundTripper] struct { - aPool sync.Pool // Arrays - ePool sync.Pool // Elements + aPool sync.Pool // Arrays + ePool bpool.Pool[T] // Elements } // NewArrayOf returns a new ArrayOf. // You must provide a function that returns a new instance of T. func NewArrayOf[T RoundTripper](newFn func() T) *ArrayOf[T] { return &ArrayOf[T]{ - ePool: sync.Pool{New: func() any { + ePool: bpool.Pool[T]{New: func() T { return newFn() }}, } @@ -597,6 +598,7 @@ func (p *ArrayOf[T]) newA(sz uint32) []T { func (p *ArrayOf[T]) putA(v []T) { var zero T // nil for i, t := range v { + //nolint:staticcheck // SA6002 IT IS A GENERIC VALUE! p.ePool.Put(t) v[i] = zero } @@ -607,7 +609,7 @@ func (p *ArrayOf[T]) putA(v []T) { } func (p *ArrayOf[T]) newE() T { - return p.ePool.Get().(T) + return p.ePool.Get() } // Array provides a wrapper for an underlying array of serializable objects. @@ -698,7 +700,7 @@ func (j *Array[T]) UnmarshalMsg(bytes []byte) ([]byte, error) { } else { j.val = j.val[:0] } - for i := uint32(0); i < l; i++ { + for range l { v := j.p.newE() bytes, err = v.UnmarshalMsg(bytes) if err != nil { diff --git a/internal/grid/types_test.go b/internal/grid/types_test.go index 43899de28ddb7..f60fecd171be1 100644 --- a/internal/grid/types_test.go +++ b/internal/grid/types_test.go @@ -81,8 +81,8 @@ func TestMarshalUnmarshalMSSNil(t *testing.T) { func BenchmarkMarshalMsgMSS(b *testing.B) { v := MSS{"abc": "def", "ghi": "jkl"} b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { v.MarshalMsg(nil) } } @@ -93,8 +93,8 @@ func BenchmarkAppendMsgMSS(b *testing.B) { bts, _ = v.MarshalMsg(bts[0:0]) b.SetBytes(int64(len(bts))) b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { bts, _ = v.MarshalMsg(bts[0:0]) } } @@ -104,8 +104,8 @@ func BenchmarkUnmarshalMSS(b *testing.B) { bts, _ := v.MarshalMsg(nil) b.ReportAllocs() b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { + + for b.Loop() { _, err := v.UnmarshalMsg(bts) if err != nil { b.Fatal(err) diff --git a/internal/handlers/forwarder.go b/internal/handlers/forwarder.go index 38bc58b22642f..fd36a6bead8fe 100644 --- a/internal/handlers/forwarder.go +++ b/internal/handlers/forwarder.go @@ -24,8 +24,9 @@ import ( "net/http/httputil" "net/url" "strings" - "sync" "time" + + "github.com/minio/minio/internal/bpool" ) const defaultFlushInterval = time.Duration(100) * time.Millisecond @@ -53,7 +54,7 @@ func NewForwarder(f *Forwarder) *Forwarder { type bufPool struct { sz int - pool sync.Pool + pool bpool.Pool[*[]byte] } func (b *bufPool) Put(buf []byte) { @@ -66,13 +67,16 @@ func (b *bufPool) Put(buf []byte) { } func (b *bufPool) Get() []byte { - bufp := b.pool.Get().(*[]byte) + bufp := b.pool.Get() + if bufp == nil || cap(*bufp) < b.sz { + return make([]byte, 0, b.sz) + } return (*bufp)[:b.sz] } func newBufPool(sz int) httputil.BufferPool { - return &bufPool{sz: sz, pool: sync.Pool{ - New: func() interface{} { + 
return &bufPool{sz: sz, pool: bpool.Pool[*[]byte]{ + New: func() *[]byte { buf := make([]byte, sz) return &buf }, diff --git a/internal/handlers/proxy.go b/internal/handlers/proxy.go index 4e5dc966b1b88..f095b1fdb2bda 100644 --- a/internal/handlers/proxy.go +++ b/internal/handlers/proxy.go @@ -26,6 +26,9 @@ import ( "net/http" "regexp" "strings" + + "github.com/minio/minio/internal/config" + "github.com/minio/pkg/v3/env" ) var ( @@ -51,6 +54,9 @@ var ( protoRegex = regexp.MustCompile(`(?i)^(;|,| )+(?:proto=)(https|http)`) ) +// Used to disable all processing of the X-Forwarded-For header in source IP discovery. +var enableXFFHeader = env.Get("_MINIO_API_XFF_HEADER", config.EnableOn) == config.EnableOn + // GetSourceScheme retrieves the scheme from the X-Forwarded-Proto and RFC7239 // Forwarded headers (in that order). func GetSourceScheme(r *http.Request) string { @@ -84,29 +90,35 @@ func GetSourceScheme(r *http.Request) string { func GetSourceIPFromHeaders(r *http.Request) string { var addr string - if fwd := r.Header.Get(xForwardedFor); fwd != "" { - // Only grab the first (client) address. Note that '192.168.0.1, - // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after - // the first may represent forwarding proxies earlier in the chain. - s := strings.Index(fwd, ", ") - if s == -1 { - s = len(fwd) + if enableXFFHeader { + if fwd := r.Header.Get(xForwardedFor); fwd != "" { + // Only grab the first (client) address. Note that '192.168.0.1, + // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after + // the first may represent forwarding proxies earlier in the chain. + s := strings.Index(fwd, ", ") + if s == -1 { + s = len(fwd) + } + addr = fwd[:s] } - addr = fwd[:s] - } else if fwd := r.Header.Get(xRealIP); fwd != "" { - // X-Real-IP should only contain one IP address (the client making the - // request). - addr = fwd - } else if fwd := r.Header.Get(forwarded); fwd != "" { - // match should contain at least two elements if the protocol was - // specified in the Forwarded header. The first element will always be - // the 'for=' capture, which we ignore. In the case of multiple IP - // addresses (for=8.8.8.8, 8.8.4.4, 172.16.1.20 is valid) we only - // extract the first, which should be the client IP. - if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 { - // IPv6 addresses in Forwarded headers are quoted-strings. We strip - // these quotes. - addr = strings.Trim(match[1], `"`) + } + + if addr == "" { + if fwd := r.Header.Get(xRealIP); fwd != "" { + // X-Real-IP should only contain one IP address (the client making the + // request). + addr = fwd + } else if fwd := r.Header.Get(forwarded); fwd != "" { + // match should contain at least two elements if the protocol was + // specified in the Forwarded header. The first element will always be + // the 'for=' capture, which we ignore. In the case of multiple IP + // addresses (for=8.8.8.8, 8.8.4.4, 172.16.1.20 is valid) we only + // extract the first, which should be the client IP. + if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 { + // IPv6 addresses in Forwarded headers are quoted-strings. We strip + // these quotes. 
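+				// (RFC 7239 allows quoted node identifiers, e.g. for="[2001:db8::1]:4711".)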
+ addr = strings.Trim(match[1], `"`) + } } } diff --git a/internal/handlers/proxy_test.go b/internal/handlers/proxy_test.go index ccd415df2d692..952e757451dc0 100644 --- a/internal/handlers/proxy_test.go +++ b/internal/handlers/proxy_test.go @@ -84,3 +84,27 @@ func TestGetSourceIP(t *testing.T) { } } } + +func TestXFFDisabled(t *testing.T) { + req := &http.Request{ + Header: http.Header{ + xForwardedFor: []string{"8.8.8.8"}, + xRealIP: []string{"1.1.1.1"}, + }, + } + // When X-Forwarded-For and X-Real-IP headers are both present, X-Forwarded-For takes precedence. + res := GetSourceIP(req) + if res != "8.8.8.8" { + t.Errorf("wrong header, xff takes precedence: got %s, want: 8.8.8.8", res) + } + + // When explicitly disabled, the XFF header is ignored and X-Real-IP is used. + enableXFFHeader = false + defer func() { + enableXFFHeader = true + }() + res = GetSourceIP(req) + if res != "1.1.1.1" { + t.Errorf("wrong header, xff is disabled: got %s, want: 1.1.1.1", res) + } +} diff --git a/internal/hash/checker.go b/internal/hash/checker.go new file mode 100644 index 0000000000000..df7a8918978f0 --- /dev/null +++ b/internal/hash/checker.go @@ -0,0 +1,70 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package hash + +import ( + "bytes" + "errors" + "hash" + "io" + + "github.com/minio/minio/internal/ioutil" +) + +// Checker allows to verify the checksum of a reader. +type Checker struct { + c io.Closer + r io.Reader + h hash.Hash + + want []byte +} + +// NewChecker ensures that content with the specified length is read from rc. +// Calling Close on this will close upstream. 
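+// The checksum is verified against wantSum both when the reader reaches EOF
+// and when Close is called, returning ErrInvalidChecksum on a mismatch.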
+func NewChecker(rc io.ReadCloser, h hash.Hash, wantSum []byte, length int64) *Checker { + return &Checker{c: rc, r: ioutil.HardLimitReader(rc, length), h: h, want: wantSum} +} + +// Read satisfies io.Reader +func (c Checker) Read(p []byte) (n int, err error) { + n, err = c.r.Read(p) + if n > 0 { + c.h.Write(p[:n]) + } + if errors.Is(err, io.EOF) { + got := c.h.Sum(nil) + if !bytes.Equal(got, c.want) { + return n, ErrInvalidChecksum + } + return n, err + } + return n, err +} + +// Close satisfies io.Closer +func (c Checker) Close() error { + err := c.c.Close() + if err == nil { + got := c.h.Sum(nil) + if !bytes.Equal(got, c.want) { + return ErrInvalidChecksum + } + } + return err +} diff --git a/internal/hash/checksum.go b/internal/hash/checksum.go index ddf21947ace7f..fbf62fd84ebed 100644 --- a/internal/hash/checksum.go +++ b/internal/hash/checksum.go @@ -26,7 +26,9 @@ import ( "fmt" "hash" "hash/crc32" + "hash/crc64" "net/http" + "strconv" "strings" "github.com/minio/minio/internal/hash/sha256" @@ -34,9 +36,16 @@ import ( "github.com/minio/minio/internal/logger" ) +func hashLogIf(ctx context.Context, err error) { + logger.LogIf(ctx, "hash", err) +} + // MinIOMultipartChecksum is as metadata on multipart uploads to indicate checksum type. const MinIOMultipartChecksum = "x-minio-multipart-checksum" +// MinIOMultipartChecksumType is as metadata on multipart uploads to indicate checksum type. +const MinIOMultipartChecksumType = "x-minio-multipart-checksum-type" + // ChecksumType contains information about the checksum type. type ChecksumType uint32 @@ -60,16 +69,27 @@ const ( ChecksumMultipart // ChecksumIncludesMultipart indicates the checksum also contains part checksums. ChecksumIncludesMultipart + // ChecksumCRC64NVME indicates CRC64 with 0xad93d23594c93659 polynomial. + ChecksumCRC64NVME + // ChecksumFullObject indicates the checksum is of the full object, + // not checksum of checksums. Should only be set on ChecksumMultipart + ChecksumFullObject // ChecksumNone indicates no checksum. ChecksumNone ChecksumType = 0 + + baseTypeMask = ChecksumSHA256 | ChecksumSHA1 | ChecksumCRC32 | ChecksumCRC32C | ChecksumCRC64NVME ) +// BaseChecksumTypes is a list of all the base checksum types. +var BaseChecksumTypes = []ChecksumType{ChecksumSHA256, ChecksumSHA1, ChecksumCRC32, ChecksumCRC64NVME, ChecksumCRC32C} + // Checksum is a type and base 64 encoded value. type Checksum struct { - Type ChecksumType - Encoded string - Raw []byte + Type ChecksumType + Encoded string + Raw []byte + WantParts int } // Is returns if c is all of t. @@ -80,6 +100,11 @@ func (c ChecksumType) Is(t ChecksumType) bool { return c&t == t } +// Base returns the base checksum (if any) +func (c ChecksumType) Base() ChecksumType { + return c & baseTypeMask +} + // Key returns the header key. // returns empty string if invalid or none. func (c ChecksumType) Key() string { @@ -92,6 +117,8 @@ func (c ChecksumType) Key() string { return xhttp.AmzChecksumSHA1 case c.Is(ChecksumSHA256): return xhttp.AmzChecksumSHA256 + case c.Is(ChecksumCRC64NVME): + return xhttp.AmzChecksumCRC64NVME } return "" } @@ -107,17 +134,19 @@ func (c ChecksumType) RawByteLen() int { return sha1.Size case c.Is(ChecksumSHA256): return sha256.Size + case c.Is(ChecksumCRC64NVME): + return crc64.Size } return 0 } // IsSet returns whether the type is valid and known. 
func (c ChecksumType) IsSet() bool { - return !c.Is(ChecksumInvalid) && !c.Is(ChecksumNone) + return !c.Is(ChecksumInvalid) && !c.Base().Is(ChecksumNone) } -// NewChecksumType returns a checksum type based on the algorithm string. -func NewChecksumType(alg string) ChecksumType { +// ChecksumStringToType is like NewChecksumType but without the `mode` +func ChecksumStringToType(alg string) ChecksumType { switch strings.ToUpper(alg) { case "CRC32": return ChecksumCRC32 @@ -127,12 +156,58 @@ func NewChecksumType(alg string) ChecksumType { return ChecksumSHA1 case "SHA256": return ChecksumSHA256 + case "CRC64NVME": + // AWS seems to ignore full value, and just assume it. + return ChecksumCRC64NVME + case "": + return ChecksumNone + } + return ChecksumInvalid +} + +// NewChecksumType returns a checksum type based on the algorithm string and obj type. +func NewChecksumType(alg, objType string) ChecksumType { + full := ChecksumFullObject + switch objType { + case xhttp.AmzChecksumTypeFullObject: + case xhttp.AmzChecksumTypeComposite, "": + full = 0 + default: + return ChecksumInvalid + } + + switch strings.ToUpper(alg) { + case "CRC32": + return ChecksumCRC32 | full + case "CRC32C": + return ChecksumCRC32C | full + case "SHA1": + if full != 0 { + return ChecksumInvalid + } + return ChecksumSHA1 + case "SHA256": + if full != 0 { + return ChecksumInvalid + } + return ChecksumSHA256 + case "CRC64NVME": + // AWS seems to ignore full value, and just assume it. + return ChecksumCRC64NVME case "": + if full != 0 { + return ChecksumInvalid + } return ChecksumNone } return ChecksumInvalid } +// NewChecksumHeader returns a checksum type based on the algorithm string. +func NewChecksumHeader(h http.Header) ChecksumType { + return NewChecksumType(h.Get(xhttp.AmzChecksumAlgo), h.Get(xhttp.AmzChecksumType)) +} + // String returns the type as a string. func (c ChecksumType) String() string { switch { @@ -144,12 +219,64 @@ func (c ChecksumType) String() string { return "SHA1" case c.Is(ChecksumSHA256): return "SHA256" + case c.Is(ChecksumCRC64NVME): + return "CRC64NVME" case c.Is(ChecksumNone): return "" } return "invalid" } +// StringFull returns the type and all flags as a string. +func (c ChecksumType) StringFull() string { + out := []string{c.String()} + if c.Is(ChecksumMultipart) { + out = append(out, "MULTIPART") + } + if c.Is(ChecksumIncludesMultipart) { + out = append(out, "INCLUDESMP") + } + if c.Is(ChecksumTrailing) { + out = append(out, "TRAILING") + } + if c.Is(ChecksumFullObject) { + out = append(out, "FULLOBJ") + } + return strings.Join(out, "|") +} + +// FullObjectRequested will return if the checksum type indicates full object checksum was requested. +func (c ChecksumType) FullObjectRequested() bool { + return c&(ChecksumFullObject) == ChecksumFullObject || c.Is(ChecksumCRC64NVME) +} + +// IsMultipartComposite returns true if the checksum is multipart and full object was not requested. +func (c ChecksumType) IsMultipartComposite() bool { + return c.Is(ChecksumMultipart) && !c.FullObjectRequested() +} + +// ObjType returns a string to return as x-amz-checksum-type. +func (c ChecksumType) ObjType() string { + if c.FullObjectRequested() { + return xhttp.AmzChecksumTypeFullObject + } + if c.IsMultipartComposite() { + return xhttp.AmzChecksumTypeComposite + } + if !c.Is(ChecksumMultipart) { + return xhttp.AmzChecksumTypeFullObject + } + if c.IsSet() { + return xhttp.AmzChecksumTypeComposite + } + return "" +} + +// CanMerge will return if the checksum type indicates that checksums can be merged. 
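With the algorithm/type split introduced above, the checksum kind is now derived from two request headers. A small sketch of how NewChecksumHeader resolves them (header values are illustrative):

package main

import (
	"fmt"
	"net/http"

	"github.com/minio/minio/internal/hash"
	xhttp "github.com/minio/minio/internal/http"
)

func main() {
	h := http.Header{}
	h.Set(xhttp.AmzChecksumAlgo, "CRC32C")
	h.Set(xhttp.AmzChecksumType, xhttp.AmzChecksumTypeFullObject)

	t := hash.NewChecksumHeader(h)
	fmt.Println(t.FullObjectRequested()) // true
	fmt.Println(t.ObjType())             // FULL_OBJECT

	// SHA-1/SHA-256 checksums cannot be merged across parts, so combining
	// them with FULL_OBJECT is rejected as invalid.
	bad := hash.NewChecksumType("SHA256", xhttp.AmzChecksumTypeFullObject)
	fmt.Println(bad.Is(hash.ChecksumInvalid)) // true
}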
+func (c ChecksumType) CanMerge() bool { + return c.Is(ChecksumCRC64NVME) || c.Is(ChecksumCRC32C) || c.Is(ChecksumCRC32) +} + // Hasher returns a hasher corresponding to the checksum type. // Returns nil if no checksum. func (c ChecksumType) Hasher() hash.Hash { @@ -162,6 +289,8 @@ func (c ChecksumType) Hasher() hash.Hash { return sha1.New() case c.Is(ChecksumSHA256): return sha256.New() + case c.Is(ChecksumCRC64NVME): + return crc64.New(crc64Table) } return nil } @@ -171,7 +300,7 @@ func (c ChecksumType) Trailing() bool { return c.Is(ChecksumTrailing) } -// NewChecksumFromData returns a new checksum from specified algorithm and base64 encoded value. +// NewChecksumFromData returns a new Checksum, using specified algorithm type on data. func NewChecksumFromData(t ChecksumType, data []byte) *Checksum { if !t.IsSet() { return nil @@ -187,7 +316,8 @@ func NewChecksumFromData(t ChecksumType, data []byte) *Checksum { } // ReadCheckSums will read checksums from b and return them. -func ReadCheckSums(b []byte, part int) map[string]string { +// Returns whether this is (part of) a multipart checksum. +func ReadCheckSums(b []byte, part int) (cs map[string]string, isMP bool) { res := make(map[string]string, 1) for len(b) > 0 { t, n := binary.Uvarint(b) @@ -201,14 +331,18 @@ func ReadCheckSums(b []byte, part int) map[string]string { if length == 0 || len(b) < length { break } + cs := base64.StdEncoding.EncodeToString(b[:length]) b = b[length:] if typ.Is(ChecksumMultipart) { + isMP = true t, n := binary.Uvarint(b) if n < 0 { break } - cs = fmt.Sprintf("%s-%d", cs, t) + if !typ.FullObjectRequested() { + cs = fmt.Sprintf("%s-%d", cs, t) + } b = b[n:] if part > 0 { cs = "" @@ -232,12 +366,15 @@ func ReadCheckSums(b []byte, part int) map[string]string { } if cs != "" { res[typ.String()] = cs + if ckType := typ.ObjType(); ckType != "" { + res[xhttp.AmzChecksumType] = ckType + } } } if len(res) == 0 { res = nil } - return res + return res, isMP } // ReadPartCheckSums will read all part checksums from b and return them. @@ -256,13 +393,14 @@ func ReadPartCheckSums(b []byte) (res []map[string]string) { } // Skip main checksum b = b[length:] - if !typ.Is(ChecksumIncludesMultipart) { - continue - } parts, n := binary.Uvarint(b) if n <= 0 { break } + if !typ.Is(ChecksumIncludesMultipart) { + continue + } + if len(res) == 0 { res = make([]map[string]string, parts) } @@ -288,11 +426,25 @@ func NewChecksumWithType(alg ChecksumType, value string) *Checksum { if !alg.IsSet() { return nil } + wantParts := 0 + if strings.ContainsRune(value, '-') { + valSplit := strings.Split(value, "-") + if len(valSplit) != 2 { + return nil + } + value = valSplit[0] + nParts, err := strconv.Atoi(valSplit[1]) + if err != nil { + return nil + } + alg |= ChecksumMultipart + wantParts = nParts + } bvalue, err := base64.StdEncoding.DecodeString(value) if err != nil { return nil } - c := Checksum{Type: alg, Encoded: value, Raw: bvalue} + c := Checksum{Type: alg, Encoded: value, Raw: bvalue, WantParts: wantParts} if !c.Valid() { return nil } @@ -301,7 +453,7 @@ func NewChecksumWithType(alg ChecksumType, value string) *Checksum { // NewChecksumString returns a new checksum from specified algorithm and base64 encoded value. func NewChecksumString(alg, value string) *Checksum { - return NewChecksumWithType(NewChecksumType(alg), value) + return NewChecksumWithType(NewChecksumType(alg, ""), value) } // AppendTo will append the checksum to b. 
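NewChecksumWithType above now also accepts the `<base64>-<parts>` form used for multipart uploads: the suffix is stripped, the multipart flag is set and the part count is preserved. A short sketch (the encoded value is an arbitrary 4-byte CRC chosen for illustration):

package main

import (
	"fmt"

	"github.com/minio/minio/internal/hash"
)

func main() {
	// "sOO8/Q==" is simply valid base64 for 4 bytes, i.e. one CRC32C sum.
	c := hash.NewChecksumWithType(hash.ChecksumCRC32C, "sOO8/Q==-3")
	if c == nil {
		fmt.Println("invalid checksum value")
		return
	}
	fmt.Println(c.Type.Is(hash.ChecksumMultipart)) // true
	fmt.Println(c.WantParts)                       // 3
	fmt.Println(c.Encoded)                         // sOO8/Q==
}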
@@ -314,6 +466,10 @@ func (c *Checksum) AppendTo(b []byte, parts []byte) []byte { var tmp [binary.MaxVarintLen32]byte n := binary.PutUvarint(tmp[:], uint64(c.Type)) crc := c.Raw + if c.Type.Trailing() { + // When we serialize we don't care if it was trailing. + c.Type ^= ChecksumTrailing + } if len(crc) != c.Type.RawByteLen() { return b } @@ -321,12 +477,15 @@ func (c *Checksum) AppendTo(b []byte, parts []byte) []byte { b = append(b, crc...) if c.Type.Is(ChecksumMultipart) { var checksums int + if c.WantParts > 0 && !c.Type.Is(ChecksumIncludesMultipart) { + checksums = c.WantParts + } // Ensure we don't divide by 0: if c.Type.RawByteLen() == 0 || len(parts)%c.Type.RawByteLen() != 0 { - logger.LogIf(context.Background(), fmt.Errorf("internal error: Unexpected checksum length: %d, each checksum %d", len(parts), c.Type.RawByteLen())) + hashLogIf(context.Background(), fmt.Errorf("internal error: Unexpected checksum length: %d, each checksum %d", len(parts), c.Type.RawByteLen())) checksums = 0 parts = nil - } else { + } else if len(parts) > 0 { checksums = len(parts) / c.Type.RawByteLen() } if !c.Type.Is(ChecksumIncludesMultipart) { @@ -341,6 +500,65 @@ func (c *Checksum) AppendTo(b []byte, parts []byte) []byte { return b } +// ChecksumFromBytes reconstructs a Checksum struct from the serialized bytes created in AppendTo() +// Returns nil if the bytes are invalid or empty. +// AppendTo() can append a serialized Checksum to another already-serialized Checksum, +// however, in practice, we only use one at a time. +// ChecksumFromBytes only returns the first one and no part checksums. +func ChecksumFromBytes(b []byte) *Checksum { + if len(b) == 0 { + return nil + } + + // Read checksum type + t, n := binary.Uvarint(b) + if n <= 0 { + return nil + } + b = b[n:] + + typ := ChecksumType(t) + length := typ.RawByteLen() + if length == 0 || len(b) < length { + return nil + } + + // Read raw checksum bytes + raw := make([]byte, length) + copy(raw, b[:length]) + b = b[length:] + + c := &Checksum{ + Type: typ, + Raw: raw, + Encoded: base64.StdEncoding.EncodeToString(raw), + } + + // Handle multipart checksums + if typ.Is(ChecksumMultipart) { + parts, n := binary.Uvarint(b) + if n <= 0 { + return nil + } + b = b[n:] + + c.WantParts = int(parts) + + if typ.Is(ChecksumIncludesMultipart) { + wantLen := int(parts) * length + if len(b) < wantLen { + return nil + } + } + } + + if !c.Valid() { + return nil + } + + return c +} + // Valid returns whether checksum is valid. func (c Checksum) Valid() bool { if c.Type == ChecksumInvalid { @@ -349,12 +567,11 @@ func (c Checksum) Valid() bool { if len(c.Encoded) == 0 || c.Type.Trailing() { return c.Type.Is(ChecksumNone) || c.Type.Trailing() } - raw := c.Raw - return c.Type.RawByteLen() == len(raw) + return c.Type.RawByteLen() == len(c.Raw) } // Matches returns whether given content matches c. 
-func (c Checksum) Matches(content []byte) error { +func (c Checksum) Matches(content []byte, parts int) error { if len(c.Encoded) == 0 { return nil } @@ -364,6 +581,13 @@ func (c Checksum) Matches(content []byte) error { return err } sum := hasher.Sum(nil) + if c.WantParts > 0 && c.WantParts != parts { + return ChecksumMismatch{ + Want: fmt.Sprintf("%s-%d", c.Encoded, c.WantParts), + Got: fmt.Sprintf("%s-%d", base64.StdEncoding.EncodeToString(sum), parts), + } + } + if !bytes.Equal(sum, c.Raw) { return ChecksumMismatch{ Want: c.Encoded, @@ -373,12 +597,26 @@ func (c Checksum) Matches(content []byte) error { return nil } -// AsMap returns the +// AsMap returns the checksum as a map[string]string. func (c *Checksum) AsMap() map[string]string { if c == nil || !c.Valid() { return nil } - return map[string]string{c.Type.String(): c.Encoded} + return map[string]string{ + c.Type.String(): c.Encoded, + xhttp.AmzChecksumType: c.Type.ObjType(), + } +} + +// Equal returns whether two checksum structs are equal in all their fields. +func (c *Checksum) Equal(s *Checksum) bool { + if c == nil || s == nil { + return c == s + } + return c.Type == s.Type && + c.Encoded == s.Encoded && + bytes.Equal(c.Raw, s.Raw) && + c.WantParts == s.WantParts } // TransferChecksumHeader will transfer any checksum value that has been checked. @@ -405,6 +643,10 @@ func TransferChecksumHeader(w http.ResponseWriter, r *http.Request) { // AddChecksumHeader will transfer any checksum value that has been checked. func AddChecksumHeader(w http.ResponseWriter, c map[string]string) { for k, v := range c { + if k == xhttp.AmzChecksumType { + w.Header().Set(xhttp.AmzChecksumType, v) + continue + } cksum := NewChecksumString(k, v) if cksum == nil { continue @@ -423,25 +665,27 @@ func GetContentChecksum(h http.Header) (*Checksum, error) { var res *Checksum for _, header := range trailing { var duplicates bool - switch { - case strings.EqualFold(header, ChecksumCRC32C.Key()): - duplicates = res != nil - res = NewChecksumWithType(ChecksumCRC32C|ChecksumTrailing, "") - case strings.EqualFold(header, ChecksumCRC32.Key()): - duplicates = res != nil - res = NewChecksumWithType(ChecksumCRC32|ChecksumTrailing, "") - case strings.EqualFold(header, ChecksumSHA256.Key()): - duplicates = res != nil - res = NewChecksumWithType(ChecksumSHA256|ChecksumTrailing, "") - case strings.EqualFold(header, ChecksumSHA1.Key()): - duplicates = res != nil - res = NewChecksumWithType(ChecksumSHA1|ChecksumTrailing, "") + for _, t := range BaseChecksumTypes { + if strings.EqualFold(t.Key(), header) { + duplicates = res != nil + res = NewChecksumWithType(t|ChecksumTrailing, "") + } } if duplicates { return nil, ErrInvalidChecksum } } if res != nil { + switch h.Get(xhttp.AmzChecksumType) { + case xhttp.AmzChecksumTypeFullObject: + if !res.Type.CanMerge() { + return nil, ErrInvalidChecksum + } + res.Type |= ChecksumFullObject + case xhttp.AmzChecksumTypeComposite, "": + default: + return nil, ErrInvalidChecksum + } return res, nil } } @@ -465,7 +709,13 @@ func getContentChecksum(h http.Header) (t ChecksumType, s string) { t = ChecksumNone alg := h.Get(xhttp.AmzChecksumAlgo) if alg != "" { - t |= NewChecksumType(alg) + t |= NewChecksumHeader(h) + if h.Get(xhttp.AmzChecksumType) == xhttp.AmzChecksumTypeFullObject { + if !t.CanMerge() { + return ChecksumInvalid, "" + } + t |= ChecksumFullObject + } if t.IsSet() { hdr := t.Key() if s = h.Get(hdr); s == "" { @@ -484,12 +734,19 @@ func getContentChecksum(h http.Header) (t ChecksumType, s string) { t = c s = got } + if 
h.Get(xhttp.AmzChecksumType) == xhttp.AmzChecksumTypeFullObject { + if !t.CanMerge() { + t = ChecksumInvalid + s = "" + return + } + t |= ChecksumFullObject + } return } } - checkType(ChecksumCRC32) - checkType(ChecksumCRC32C) - checkType(ChecksumSHA1) - checkType(ChecksumSHA256) + for _, t := range BaseChecksumTypes { + checkType(t) + } return t, s } diff --git a/internal/hash/checksum_test.go b/internal/hash/checksum_test.go new file mode 100644 index 0000000000000..50480379580fd --- /dev/null +++ b/internal/hash/checksum_test.go @@ -0,0 +1,205 @@ +// Copyright (c) 2015-2025 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package hash + +import ( + "net/http/httptest" + "testing" + + xhttp "github.com/minio/minio/internal/http" +) + +// TestChecksumAddToHeader tests that adding and retrieving a checksum on a header works +func TestChecksumAddToHeader(t *testing.T) { + tests := []struct { + name string + checksum ChecksumType + fullobj bool + wantErr bool + }{ + {"CRC32-composite", ChecksumCRC32, false, false}, + {"CRC32-full-object", ChecksumCRC32, true, false}, + {"CRC32C-composite", ChecksumCRC32C, false, false}, + {"CRC32C-full-object", ChecksumCRC32C, true, false}, + {"CRC64NVME-full-object", ChecksumCRC64NVME, false, false}, // CRC64NVME is always full object + {"ChecksumSHA1-composite", ChecksumSHA1, false, false}, + {"ChecksumSHA256-composite", ChecksumSHA256, false, false}, + {"ChecksumSHA1-full-object", ChecksumSHA1, true, true}, // SHA1 does not support full object + {"ChecksumSHA256-full-object", ChecksumSHA256, true, true}, // SHA256 does not support full object + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Skip invalid cases where SHA1 or SHA256 is used with full object + if (tt.checksum.Is(ChecksumSHA1) || tt.checksum.Is(ChecksumSHA256)) && tt.fullobj { + // Validate that NewChecksumType correctly marks these as invalid + alg := tt.checksum.String() + typ := NewChecksumType(alg, xhttp.AmzChecksumTypeFullObject) + if !typ.Is(ChecksumInvalid) { + t.Fatalf("Expected ChecksumInvalid for %s with full object, got %s", tt.name, typ.StringFull()) + } + return + } + myData := []byte("this-is-a-checksum-data-test") + chksm := NewChecksumFromData(tt.checksum, myData) + if chksm == nil { + t.Fatalf("NewChecksumFromData failed for %s", tt.name) + } + if tt.fullobj { + chksm.Type |= ChecksumFullObject + } + + // CRC64NVME is always full object + if chksm.Type.Base().Is(ChecksumCRC64NVME) { + chksm.Type |= ChecksumFullObject + } + + // Prepare the checksum map with appropriate headers + m := chksm.AsMap() + m[xhttp.AmzChecksumAlgo] = chksm.Type.String() // Set the algorithm explicitly + if chksm.Type.FullObjectRequested() { + m[xhttp.AmzChecksumType] = xhttp.AmzChecksumTypeFullObject + } else { + m[xhttp.AmzChecksumType] = xhttp.AmzChecksumTypeComposite + } + + w := httptest.NewRecorder() + 
AddChecksumHeader(w, m) + gotChksm, err := GetContentChecksum(w.Result().Header) + if tt.wantErr { + if err == nil { + t.Fatalf("Expected error for %s, got none", tt.name) + } + return + } + if err != nil { + t.Fatalf("GetContentChecksum failed for %s: %v", tt.name, err) + } + + if gotChksm == nil { + t.Fatalf("Got nil checksum for %s", tt.name) + } + // Compare the full checksum structs + if !chksm.Equal(gotChksm) { + t.Errorf("Checksum mismatch for %s: expected %+v, got %+v", tt.name, chksm, gotChksm) + } + // Verify the checksum type + expectedType := chksm.Type + if gotChksm.Type != expectedType { + t.Errorf("Type mismatch for %s: expected %s, got %s", tt.name, expectedType.StringFull(), gotChksm.Type.StringFull()) + } + }) + } +} + +// TestChecksumSerializeDeserialize checks AppendTo can be reversed by ChecksumFromBytes +func TestChecksumSerializeDeserialize(t *testing.T) { + myData := []byte("this-is-a-checksum-data-test") + chksm := NewChecksumFromData(ChecksumCRC32, myData) + if chksm == nil { + t.Fatal("NewChecksumFromData returned nil") + } + // Serialize the checksum to bytes + b := chksm.AppendTo(nil, nil) + if b == nil { + t.Fatal("AppendTo returned nil") + } + + // Deserialize the checksum from bytes + chksmOut := ChecksumFromBytes(b) + if chksmOut == nil { + t.Fatal("ChecksumFromBytes returned nil") + } + + // Assert new checksum matches the content + matchError := chksmOut.Matches(myData, 0) + if matchError != nil { + t.Fatalf("Checksum mismatch on chksmOut: %v", matchError) + } + + // Assert they are exactly equal + if !chksmOut.Equal(chksm) { + t.Fatalf("Checksum mismatch: expected %+v, got %+v", chksm, chksmOut) + } +} + +// TestChecksumSerializeDeserializeMultiPart checks AppendTo can be reversed by ChecksumFromBytes +// for multipart checksum +func TestChecksumSerializeDeserializeMultiPart(t *testing.T) { + // Create dummy data that we'll split into 3 parts + dummyData := []byte("The quick brown fox jumps over the lazy dog. " + + "Pack my box with five dozen brown eggs. " + + "Have another go it will all make sense in the end!") + + // Split data into 3 parts + partSize := len(dummyData) / 3 + part1Data := dummyData[0:partSize] + part2Data := dummyData[partSize : 2*partSize] + part3Data := dummyData[2*partSize:] + + // Calculate CRC32C checksum for each part using NewChecksumFromData + checksumType := ChecksumCRC32C + + part1Checksum := NewChecksumFromData(checksumType, part1Data) + part2Checksum := NewChecksumFromData(checksumType, part2Data) + part3Checksum := NewChecksumFromData(checksumType, part3Data) + + // Combine the raw checksums (this is what happens in CompleteMultipartUpload) + var checksumCombined []byte + checksumCombined = append(checksumCombined, part1Checksum.Raw...) + checksumCombined = append(checksumCombined, part2Checksum.Raw...) + checksumCombined = append(checksumCombined, part3Checksum.Raw...) 
+ + // Create the final checksum (checksum of the combined checksums) + // Add BOTH the multipart flag AND the includes-multipart flag + finalChecksumType := checksumType | ChecksumMultipart | ChecksumIncludesMultipart + finalChecksum := NewChecksumFromData(finalChecksumType, checksumCombined) + + // Set WantParts to indicate 3 parts + finalChecksum.WantParts = 3 + + // Test AppendTo serialization + var serialized []byte + serialized = finalChecksum.AppendTo(serialized, checksumCombined) + + // Use ChecksumFromBytes to deserialize the final checksum + chksmOut := ChecksumFromBytes(serialized) + if chksmOut == nil { + t.Fatal("ChecksumFromBytes returned nil") + } + + // Assert they are exactly equal + if !chksmOut.Equal(finalChecksum) { + t.Fatalf("Checksum mismatch: expected %+v, got %+v", finalChecksum, chksmOut) + } + + // Serialize what we got from ChecksumFromBytes + serializedOut := chksmOut.AppendTo(nil, checksumCombined) + + // Read part checksums from serializedOut + readParts := ReadPartCheckSums(serializedOut) + expectedChecksums := []string{ + part1Checksum.Encoded, + part2Checksum.Encoded, + part3Checksum.Encoded, + } + for i, expected := range expectedChecksums { + if got := readParts[i][ChecksumCRC32C.String()]; got != expected { + t.Fatalf("want part%dChecksum.Encoded %s, got %s", i+1, expected, got) + } + } +} diff --git a/internal/hash/crc.go b/internal/hash/crc.go new file mode 100644 index 0000000000000..6fc1d3ba81b70 --- /dev/null +++ b/internal/hash/crc.go @@ -0,0 +1,219 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package hash + +import ( + "encoding/base64" + "encoding/binary" + "fmt" + "hash/crc32" + "hash/crc64" + "math/bits" +) + +// AddPart will merge a part checksum into the current, +// as if the content of each was appended. +// The size of the content that produced the second checksum must be provided. +// Not all checksum types can be merged, use the CanMerge method to check. +// Checksum types must match. +func (c *Checksum) AddPart(other Checksum, size int64) error { + if !other.Type.CanMerge() { + return fmt.Errorf("checksum type cannot be merged") + } + if size == 0 { + return nil + } + if !c.Type.Is(other.Type.Base()) { + return fmt.Errorf("checksum type does not match got %s and %s", c.Type.String(), other.Type.String()) + } + // If never set, just add first checksum. 
+ if len(c.Raw) == 0 { + c.Raw = other.Raw + c.Encoded = other.Encoded + return nil + } + if !c.Valid() { + return fmt.Errorf("invalid base checksum") + } + if !other.Valid() { + return fmt.Errorf("invalid part checksum") + } + + switch c.Type.Base() { + case ChecksumCRC32: + v := crc32Combine(crc32.IEEE, binary.BigEndian.Uint32(c.Raw), binary.BigEndian.Uint32(other.Raw), size) + binary.BigEndian.PutUint32(c.Raw, v) + case ChecksumCRC32C: + v := crc32Combine(crc32.Castagnoli, binary.BigEndian.Uint32(c.Raw), binary.BigEndian.Uint32(other.Raw), size) + binary.BigEndian.PutUint32(c.Raw, v) + case ChecksumCRC64NVME: + v := crc64Combine(bits.Reverse64(crc64NVMEPolynomial), binary.BigEndian.Uint64(c.Raw), binary.BigEndian.Uint64(other.Raw), size) + binary.BigEndian.PutUint64(c.Raw, v) + default: + return fmt.Errorf("unknown checksum type: %s", c.Type.String()) + } + c.Encoded = base64.StdEncoding.EncodeToString(c.Raw) + return nil +} + +const crc64NVMEPolynomial = 0xad93d23594c93659 + +var crc64Table = crc64.MakeTable(bits.Reverse64(crc64NVMEPolynomial)) + +// Following is ported from C to Go in 2016 by Justin Ruggles, with minimal alteration. +// Used uint for unsigned long. Used uint32 for input arguments in order to match +// the Go hash/crc32 package. zlib CRC32 combine (https://github.com/madler/zlib) +// Modified for hash/crc64 by Klaus Post, 2024. +func gf2MatrixTimes(mat []uint64, vec uint64) uint64 { + var sum uint64 + + for vec != 0 { + if vec&1 != 0 { + sum ^= mat[0] + } + vec >>= 1 + mat = mat[1:] + } + return sum +} + +func gf2MatrixSquare(square, mat []uint64) { + if len(square) != len(mat) { + panic("square matrix size mismatch") + } + for n := range mat { + square[n] = gf2MatrixTimes(mat, mat[n]) + } +} + +// crc32Combine returns the combined CRC-32 hash value of the two passed CRC-32 +// hash values crc1 and crc2. poly represents the generator polynomial +// and len2 specifies the byte length that the crc2 hash covers. 
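crc32Combine and crc64Combine implement the zlib-style combination used by AddPart above: the CRC of concatenated content can be derived from the per-part CRCs and the length of the appended part, without rehashing the data. A small sketch of the exported AddPart path (part contents are illustrative):

package main

import (
	"fmt"

	"github.com/minio/minio/internal/hash"
)

func main() {
	part1 := []byte("hello ")
	part2 := []byte("world")

	// Checksum of the first part, then merge in the second part's checksum.
	merged := hash.NewChecksumFromData(hash.ChecksumCRC32C, part1)
	other := hash.NewChecksumFromData(hash.ChecksumCRC32C, part2)
	if err := merged.AddPart(*other, int64(len(part2))); err != nil {
		fmt.Println("merge failed:", err)
		return
	}

	// The merged value equals the CRC32C of the concatenated content.
	whole := hash.NewChecksumFromData(hash.ChecksumCRC32C, []byte("hello world"))
	fmt.Println(merged.Encoded == whole.Encoded) // true
}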
+func crc32Combine(poly uint32, crc1, crc2 uint32, len2 int64) uint32 { + // degenerate case (also disallow negative lengths) + if len2 <= 0 { + return crc1 + } + + even := make([]uint64, 32) // even-power-of-two zeros operator + odd := make([]uint64, 32) // odd-power-of-two zeros operator + + // put operator for one zero bit in odd + odd[0] = uint64(poly) // CRC-32 polynomial + row := uint64(1) + for n := 1; n < 32; n++ { + odd[n] = row + row <<= 1 + } + + // put operator for two zero bits in even + gf2MatrixSquare(even, odd) + + // put operator for four zero bits in odd + gf2MatrixSquare(odd, even) + + // apply len2 zeros to crc1 (first square will put the operator for one + // zero byte, eight zero bits, in even) + crc1n := uint64(crc1) + for { + // apply zeros operator for this bit of len2 + gf2MatrixSquare(even, odd) + if len2&1 != 0 { + crc1n = gf2MatrixTimes(even, crc1n) + } + len2 >>= 1 + + // if no more bits set, then done + if len2 == 0 { + break + } + + // another iteration of the loop with odd and even swapped + gf2MatrixSquare(odd, even) + if len2&1 != 0 { + crc1n = gf2MatrixTimes(odd, crc1n) + } + len2 >>= 1 + + // if no more bits set, then done + if len2 == 0 { + break + } + } + + // return combined crc + crc1n ^= uint64(crc2) + return uint32(crc1n) +} + +func crc64Combine(poly uint64, crc1, crc2 uint64, len2 int64) uint64 { + // degenerate case (also disallow negative lengths) + if len2 <= 0 { + return crc1 + } + + even := make([]uint64, 64) // even-power-of-two zeros operator + odd := make([]uint64, 64) // odd-power-of-two zeros operator + + // put operator for one zero bit in odd + odd[0] = poly // CRC-64 polynomial + row := uint64(1) + for n := 1; n < 64; n++ { + odd[n] = row + row <<= 1 + } + + // put operator for two zero bits in even + gf2MatrixSquare(even, odd) + + // put operator for four zero bits in odd + gf2MatrixSquare(odd, even) + + // apply len2 zeros to crc1 (first square will put the operator for one + // zero byte, eight zero bits, in even) + crc1n := crc1 + for { + // apply zeros operator for this bit of len2 + gf2MatrixSquare(even, odd) + if len2&1 != 0 { + crc1n = gf2MatrixTimes(even, crc1n) + } + len2 >>= 1 + + // if no more bits set, then done + if len2 == 0 { + break + } + + // another iteration of the loop with odd and even swapped + gf2MatrixSquare(odd, even) + if len2&1 != 0 { + crc1n = gf2MatrixTimes(odd, crc1n) + } + len2 >>= 1 + + // if no more bits set, then done + if len2 == 0 { + break + } + } + + // return combined crc + crc1n ^= crc2 + return crc1n +} diff --git a/internal/hash/reader.go b/internal/hash/reader.go index f849a69f0fa53..ddae850a07c40 100644 --- a/internal/hash/reader.go +++ b/internal/hash/reader.go @@ -51,11 +51,18 @@ type Reader struct { checksum etag.ETag contentSHA256 []byte - // Content checksum + // Client-provided content checksum contentHash Checksum contentHasher hash.Hash disableMD5 bool + // Server side computed checksum. In some cases, like CopyObject, a new checksum + // needs to be computed and saved on the destination object, but the client + // does not provide it. Not calculated if client-side contentHash is set. + ServerSideChecksumType ChecksumType + ServerSideHasher hash.Hash + ServerSideChecksumResult *Checksum + trailer http.Header sha256 hash.Hash @@ -247,6 +254,16 @@ func (r *Reader) AddNonTrailingChecksum(cs *Checksum, ignoreValue bool) error { return nil } +// AddServerSideChecksumHasher adds a new hasher for computing the server-side checksum. 
+func (r *Reader) AddServerSideChecksumHasher(t ChecksumType) { + h := t.Hasher() + if h == nil { + return + } + r.ServerSideHasher = h + r.ServerSideChecksumType = t +} + func (r *Reader) Read(p []byte) (int, error) { n, err := r.src.Read(p) r.bytesRead += int64(n) @@ -255,6 +272,8 @@ func (r *Reader) Read(p []byte) (int, error) { } if r.contentHasher != nil { r.contentHasher.Write(p[:n]) + } else if r.ServerSideHasher != nil { + r.ServerSideHasher.Write(p[:n]) } if err == io.EOF { // Verify content SHA256, if set. @@ -293,6 +312,9 @@ func (r *Reader) Read(p []byte) (int, error) { } return n, err } + } else if r.ServerSideHasher != nil { + sum := r.ServerSideHasher.Sum(nil) + r.ServerSideChecksumResult = NewChecksumWithType(r.ServerSideChecksumType, base64.StdEncoding.EncodeToString(sum)) } } if err != nil && err != io.EOF { @@ -366,6 +388,14 @@ func (r *Reader) ContentCRC() map[string]string { return map[string]string{r.contentHash.Type.String(): r.contentHash.Encoded} } +// Checksum returns the content checksum if set. +func (r *Reader) Checksum() *Checksum { + if !r.contentHash.Type.IsSet() || !r.contentHash.Valid() { + return nil + } + return &r.contentHash +} + var _ io.Closer = (*Reader)(nil) // compiler check // Close and release resources. diff --git a/internal/hash/reader_test.go b/internal/hash/reader_test.go index 314efc57e24d7..a28ade5c30c60 100644 --- a/internal/hash/reader_test.go +++ b/internal/hash/reader_test.go @@ -19,7 +19,6 @@ package hash import ( "bytes" - "context" "encoding/base64" "encoding/hex" "fmt" @@ -31,7 +30,7 @@ import ( // Tests functions like Size(), MD5*(), SHA256*() func TestHashReaderHelperMethods(t *testing.T) { - r, err := NewReader(context.Background(), bytes.NewReader([]byte("abcd")), 4, "e2fc714c4727ee9395f324cd2e7f331f", "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", 4) + r, err := NewReader(t.Context(), bytes.NewReader([]byte("abcd")), 4, "e2fc714c4727ee9395f324cd2e7f331f", "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", 4) if err != nil { t.Fatal(err) } @@ -195,7 +194,7 @@ func TestHashReaderVerification(t *testing.T) { } for i, testCase := range testCases { t.Run(fmt.Sprintf("case-%d", i+1), func(t *testing.T) { - r, err := NewReader(context.Background(), testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize) + r, err := NewReader(t.Context(), testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize) if err != nil { t.Fatalf("Test %q: Initializing reader failed %s", testCase.desc, err) } @@ -214,7 +213,7 @@ func TestHashReaderVerification(t *testing.T) { } func mustReader(t *testing.T, src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64) *Reader { - r, err := NewReader(context.Background(), src, size, md5Hex, sha256Hex, actualSize) + r, err := NewReader(t.Context(), src, size, md5Hex, sha256Hex, actualSize) if err != nil { t.Fatal(err) } @@ -304,7 +303,7 @@ func TestHashReaderInvalidArguments(t *testing.T) { for i, testCase := range testCases { t.Run(fmt.Sprintf("case-%d", i+1), func(t *testing.T) { - _, err := NewReader(context.Background(), testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize) + _, err := NewReader(t.Context(), testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize) if err != nil && testCase.success { t.Errorf("Test %q: Expected success, but got error %s instead", testCase.desc, err) } diff --git a/internal/hash/sha256/sh256_nofips.go 
b/internal/hash/sha256/sh256.go similarity index 67% rename from internal/hash/sha256/sh256_nofips.go rename to internal/hash/sha256/sh256.go index 7068fa8d7849c..b25b1584b49fa 100644 --- a/internal/hash/sha256/sh256_nofips.go +++ b/internal/hash/sha256/sh256.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2022 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. // // This file is part of MinIO Object Storage stack // @@ -15,23 +15,18 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -//go:build !fips -// +build !fips - package sha256 import ( + "crypto/sha256" "hash" - - nofipssha256 "github.com/minio/sha256-simd" ) -// New returns a new hash.Hash computing the SHA256 checksum. -// The SHA256 implementation is not FIPS 140-2 compliant. -func New() hash.Hash { return nofipssha256.New() } +// New initializes a new sha256.New() +func New() hash.Hash { return sha256.New() } // Sum256 returns the SHA256 checksum of the data. -func Sum256(data []byte) [nofipssha256.Size]byte { return nofipssha256.Sum256(data) } +func Sum256(data []byte) [sha256.Size]byte { return sha256.Sum256(data) } // Size is the size of a SHA256 checksum in bytes. -const Size = nofipssha256.Size +const Size = sha256.Size diff --git a/internal/hash/sha256/sh256_fips.go b/internal/hash/sha256/sh256_fips.go deleted file mode 100644 index db84606e9e02e..0000000000000 --- a/internal/hash/sha256/sh256_fips.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2015-2022 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -//go:build fips -// +build fips - -package sha256 - -import ( - fipssha256 "crypto/sha256" - "hash" -) - -// New returns a new hash.Hash computing the SHA256 checksum. -// The SHA256 implementation is FIPS 140-2 compliant when the -// boringcrypto branch of Go is used. -// Ref: https://github.com/golang/go/tree/dev.boringcrypto -func New() hash.Hash { return fipssha256.New() } - -// Sum256 returns the SHA256 checksum of the data. -func Sum256(data []byte) [fipssha256.Size]byte { return fipssha256.Sum256(data) } - -// Size is the size of a SHA256 checksum in bytes. 
-const Size = fipssha256.Size diff --git a/internal/http/dial_dnscache.go b/internal/http/dial_dnscache.go index 3acf4c149f80b..c4851202225b0 100644 --- a/internal/http/dial_dnscache.go +++ b/internal/http/dial_dnscache.go @@ -75,6 +75,6 @@ func DialContextWithLookupHost(lookupHost LookupHost, baseDialCtx DialContext) D } } - return + return conn, err } } diff --git a/internal/http/dial_linux.go b/internal/http/dial_linux.go index fd2fb61c1cebe..fd279aab515c6 100644 --- a/internal/http/dial_linux.go +++ b/internal/http/dial_linux.go @@ -26,6 +26,7 @@ import ( "syscall" "time" + "github.com/minio/minio/internal/deadlineconn" "golang.org/x/sys/unix" ) @@ -39,9 +40,23 @@ func setTCPParametersFn(opts TCPOptions) func(network, address string, c syscall _ = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_REUSEPORT, 1) + // Enable custom socket send/recv buffers. + if opts.SendBufSize > 0 { + _ = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_SNDBUF, opts.SendBufSize) + } + + if opts.RecvBufSize > 0 { + _ = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF, opts.RecvBufSize) + } + + if opts.NoDelay { + _ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, unix.TCP_NODELAY, 1) + _ = syscall.SetsockoptInt(fd, syscall.SOL_TCP, unix.TCP_CORK, 0) + } + // Enable TCP open - // https://lwn.net/Articles/508865/ - 16k queue size. - _ = syscall.SetsockoptInt(fd, syscall.SOL_TCP, unix.TCP_FASTOPEN, 16*1024) + // https://lwn.net/Articles/508865/ - 32k queue size. + _ = syscall.SetsockoptInt(fd, syscall.SOL_TCP, unix.TCP_FASTOPEN, 32*1024) // Enable TCP fast connect // TCPFastOpenConnect sets the underlying socket to use @@ -53,24 +68,31 @@ func setTCPParametersFn(opts TCPOptions) func(network, address string, c syscall // "Set TCP_QUICKACK. If you find a case where that makes things worse, let me know." _ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, unix.TCP_QUICKACK, 1) - // The time (in seconds) the connection needs to remain idle before - // TCP starts sending keepalive probes - _ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, 15) + /// Enable keep-alive + { + _ = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) + + // The time (in seconds) the connection needs to remain idle before + // TCP starts sending keepalive probes + _ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, 15) - // Number of probes. - // ~ cat /proc/sys/net/ipv4/tcp_keepalive_probes (defaults to 9, we reduce it to 5) - _ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPCNT, 5) + // Number of probes. + // ~ cat /proc/sys/net/ipv4/tcp_keepalive_probes (defaults to 9, we reduce it to 5) + _ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPCNT, 5) - // Wait time after successful probe in seconds. - // ~ cat /proc/sys/net/ipv4/tcp_keepalive_intvl (defaults to 75 secs, we reduce it to 15 secs) - _ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, 15) + // Wait time after successful probe in seconds. + // ~ cat /proc/sys/net/ipv4/tcp_keepalive_intvl (defaults to 75 secs, we reduce it to 15 secs) + _ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, 15) + } // Set tcp user timeout in addition to the keep-alive - tcp-keepalive is not enough to close a socket // with dead end because tcp-keepalive is not fired when there is data in the socket buffer. 
// https://blog.cloudflare.com/when-tcp-sockets-refuse-to-die/ // This is a sensitive configuration, it is better to set it to high values, > 60 secs since it can // affect clients reading data with a very slow pace (disappropriate with socket buffer sizes) - _ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, opts.UserTimeout) + if opts.UserTimeout > 0 { + _ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, opts.UserTimeout) + } if opts.Interface != "" { if h, _, err := net.SplitHostPort(address); err == nil { @@ -97,17 +119,16 @@ func NewInternodeDialContext(dialTimeout time.Duration, opts TCPOptions) DialCon Timeout: dialTimeout, Control: setTCPParametersFn(opts), } - return dialer.DialContext(ctx, network, addr) - } -} - -// NewCustomDialContext setups a custom dialer for any external communication and proxies. -func NewCustomDialContext(dialTimeout time.Duration, opts TCPOptions) DialContext { - return func(ctx context.Context, network, addr string) (net.Conn, error) { - dialer := &net.Dialer{ - Timeout: dialTimeout, - Control: setTCPParametersFn(opts), + conn, err := dialer.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + if opts.DriveOPTimeout != nil { + // Read deadlines are sufficient for now as per various + // scenarios of hung node detection, we may add Write deadlines + // if needed later on. + return deadlineconn.New(conn).WithReadDeadline(opts.DriveOPTimeout()), nil } - return dialer.DialContext(ctx, network, addr) + return conn, nil } } diff --git a/internal/http/dial_others.go b/internal/http/dial_others.go index ccbcd24c7ea0b..fe548867f1c33 100644 --- a/internal/http/dial_others.go +++ b/internal/http/dial_others.go @@ -39,11 +39,8 @@ func setTCPParametersFn(opts TCPOptions) func(network, address string, c syscall // DialContext is a function to make custom Dial for internode communications type DialContext func(ctx context.Context, network, address string) (net.Conn, error) -// NewInternodeDialContext setups a custom dialer for internode communication -var NewInternodeDialContext = NewCustomDialContext - -// NewCustomDialContext configures a custom dialer for internode communications -func NewCustomDialContext(dialTimeout time.Duration, _ TCPOptions) DialContext { +// NewInternodeDialContext configures a custom dialer for internode communications +func NewInternodeDialContext(dialTimeout time.Duration, _ TCPOptions) DialContext { return func(ctx context.Context, network, addr string) (net.Conn, error) { dialer := &net.Dialer{ Timeout: dialTimeout, diff --git a/internal/grid/stats.go b/internal/http/flush.go similarity index 79% rename from internal/grid/stats.go rename to internal/http/flush.go index 2d21d4bdc9bc1..e17cf7d560599 100644 --- a/internal/grid/stats.go +++ b/internal/http/flush.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2023 MinIO, Inc. +// Copyright (c) 2015-2025 MinIO, Inc. // // This file is part of MinIO Object Storage stack // @@ -15,10 +15,13 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -package grid +package http -// ConnectionStats contains connection statistics. -type ConnectionStats struct { - OutgoingStreams int - IncomingStreams int +import "net/http" + +// Flush the ResponseWriter. 
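The Linux dialer above now honors per-connection send/receive buffer sizes, TCP_NODELAY and an optional per-drive read deadline applied through deadlineconn. A sketch of how these options might be wired up (the concrete values are illustrative; in the server they come from configuration):

package main

import (
	"context"
	"fmt"
	"time"

	xhttp "github.com/minio/minio/internal/http"
)

func main() {
	opts := xhttp.TCPOptions{
		UserTimeout: 10 * 60 * 1000, // TCP_USER_TIMEOUT, in milliseconds
		SendBufSize: 4 << 20,        // SO_SNDBUF
		RecvBufSize: 4 << 20,        // SO_RCVBUF
		NoDelay:     true,           // TCP_NODELAY
		// Read deadline applied to remote-drive connections.
		DriveOPTimeout: func() time.Duration { return 30 * time.Second },
	}

	dial := xhttp.NewInternodeDialContext(5*time.Second, opts)
	conn, err := dial(context.Background(), "tcp", "node2:9000")
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}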
+func Flush(w http.ResponseWriter) { + if f, ok := w.(http.Flusher); ok { + f.Flush() + } } diff --git a/internal/http/headers.go b/internal/http/headers.go index 2c0fa1efc757d..9195e122e0d6e 100644 --- a/internal/http/headers.go +++ b/internal/http/headers.go @@ -170,12 +170,23 @@ const ( MinIOServerStatus = "x-minio-server-status" // Content Checksums - AmzChecksumAlgo = "x-amz-checksum-algorithm" - AmzChecksumCRC32 = "x-amz-checksum-crc32" - AmzChecksumCRC32C = "x-amz-checksum-crc32c" - AmzChecksumSHA1 = "x-amz-checksum-sha1" - AmzChecksumSHA256 = "x-amz-checksum-sha256" - AmzChecksumMode = "x-amz-checksum-mode" + AmzChecksumAlgo = "x-amz-checksum-algorithm" + AmzChecksumCRC32 = "x-amz-checksum-crc32" + AmzChecksumCRC32C = "x-amz-checksum-crc32c" + AmzChecksumSHA1 = "x-amz-checksum-sha1" + AmzChecksumSHA256 = "x-amz-checksum-sha256" + AmzChecksumCRC64NVME = "x-amz-checksum-crc64nvme" + AmzChecksumMode = "x-amz-checksum-mode" + AmzChecksumType = "x-amz-checksum-type" + AmzChecksumTypeFullObject = "FULL_OBJECT" + AmzChecksumTypeComposite = "COMPOSITE" + + // S3 Express API related constant reject it. + AmzWriteOffsetBytes = "x-amz-write-offset-bytes" + + // Post Policy related + AmzMetaUUID = "X-Amz-Meta-Uuid" + AmzMetaName = "X-Amz-Meta-Name" // Delete special flag to force delete a bucket or a prefix MinIOForceDelete = "x-minio-force-delete" @@ -233,6 +244,9 @@ const ( // Header indicates a Tag operation was performed on one/more peers successfully, though the // current cluster does not have the object yet. This is in a site/bucket replication scenario. MinIOTaggingProxied = "X-Minio-Tagging-Proxied" + // Header indicates the actual replicated object size + // In case of SSEC objects getting replicated (multipart) actual size would be needed at target + MinIOReplicationActualObjectSize = "X-Minio-Replication-Actual-Object-Size" // predicted date/time of transition MinIOTransition = "X-Minio-Transition" @@ -256,5 +270,6 @@ const ( // http headers sent to webhook targets const ( // Reports the version of MinIO server - MinIOVersion = "x-minio-version" + MinIOVersion = "x-minio-version" + WebhookEventPayloadCount = "x-minio-webhook-payload-count" ) diff --git a/internal/http/listener.go b/internal/http/listener.go index 7e818ad3efac2..bc6de3af98c94 100644 --- a/internal/http/listener.go +++ b/internal/http/listener.go @@ -21,6 +21,7 @@ import ( "context" "fmt" "net" + "slices" "syscall" "time" @@ -35,54 +36,42 @@ type acceptResult struct { // httpListener - HTTP listener capable of handling multiple server addresses. type httpListener struct { - opts TCPOptions - tcpListeners []*net.TCPListener // underlying TCP listeners. - acceptCh chan acceptResult // channel where all TCP listeners write accepted connection. - ctx context.Context - ctxCanceler context.CancelFunc + opts TCPOptions + listeners []net.Listener // underlying TCP listeners. + acceptCh chan acceptResult // channel where all TCP listeners write accepted connection. + ctxDoneCh <-chan struct{} + ctxCanceler context.CancelFunc } // start - starts separate goroutine for each TCP listener. A valid new connection is passed to httpListener.acceptCh. func (listener *httpListener) start() { - // Closure to send acceptResult to acceptCh. - // It returns true if the result is sent else false if returns when doneCh is closed. 
- send := func(result acceptResult) bool { - select { - case listener.acceptCh <- result: - // Successfully written to acceptCh - return true - case <-listener.ctx.Done(): - return false - } - } - - // Closure to handle TCPListener until done channel is closed. - handleListener := func(idx int, tcpListener *net.TCPListener) { + // Closure to handle listener until httpListener.ctxDoneCh channel is closed. + handleListener := func(idx int, ln net.Listener) { for { - tcpConn, err := tcpListener.AcceptTCP() - if tcpConn != nil { - tcpConn.SetKeepAlive(true) + conn, err := ln.Accept() + select { + case listener.acceptCh <- acceptResult{conn, err, idx}: + case <-listener.ctxDoneCh: + return } - send(acceptResult{tcpConn, err, idx}) } } - // Start separate goroutine for each TCP listener to handle connection. - for idx, tcpListener := range listener.tcpListeners { - go handleListener(idx, tcpListener) + // Start separate goroutine for each listener to handle connection. + for idx, ln := range listener.listeners { + go handleListener(idx, ln) } } // Accept - reads from httpListener.acceptCh for one of previously accepted TCP connection and returns the same. func (listener *httpListener) Accept() (conn net.Conn, err error) { select { - case result, ok := <-listener.acceptCh: - if ok { - return deadlineconn.New(result.conn). - WithReadDeadline(listener.opts.ClientReadTimeout). - WithWriteDeadline(listener.opts.ClientWriteTimeout), result.err + case result := <-listener.acceptCh: + if result.err != nil { + return nil, result.err } - case <-listener.ctx.Done(): + return deadlineconn.New(result.conn).WithReadDeadline(listener.opts.IdleTimeout).WithWriteDeadline(listener.opts.IdleTimeout), result.err + case <-listener.ctxDoneCh: } return nil, syscall.EINVAL } @@ -91,8 +80,8 @@ func (listener *httpListener) Accept() (conn net.Conn, err error) { func (listener *httpListener) Close() (err error) { listener.ctxCanceler() - for i := range listener.tcpListeners { - listener.tcpListeners[i].Close() + for i := range listener.listeners { + listener.listeners[i].Close() } return nil @@ -100,24 +89,26 @@ func (listener *httpListener) Close() (err error) { // Addr - net.Listener interface compatible method returns net.Addr. In case of multiple TCP listeners, it returns '0.0.0.0' as IP address. func (listener *httpListener) Addr() (addr net.Addr) { - addr = listener.tcpListeners[0].Addr() - if len(listener.tcpListeners) == 1 { + addr = listener.listeners[0].Addr() + if len(listener.listeners) == 1 { return addr } - tcpAddr := addr.(*net.TCPAddr) - if ip := net.ParseIP("0.0.0.0"); ip != nil { - tcpAddr.IP = ip + if tcpAddr, ok := addr.(*net.TCPAddr); ok { + return &net.TCPAddr{ + IP: net.IPv4zero, + Port: tcpAddr.Port, + Zone: tcpAddr.Zone, + } } - - addr = tcpAddr - return addr + panic("unknown address type on listener") } // Addrs - returns all address information of TCP listeners. 
func (listener *httpListener) Addrs() (addrs []net.Addr) { - for i := range listener.tcpListeners { - addrs = append(addrs, listener.tcpListeners[i].Addr()) + addrs = make([]net.Addr, 0, len(listener.listeners)) + for i := range listener.listeners { + addrs = append(addrs, listener.listeners[i].Addr()) } return addrs @@ -125,11 +116,28 @@ func (listener *httpListener) Addrs() (addrs []net.Addr) { // TCPOptions specify customizable TCP optimizations on raw socket type TCPOptions struct { - UserTimeout int // this value is expected to be in milliseconds - ClientReadTimeout time.Duration // When the net.Conn is idle for more than ReadTimeout duration, we close the connection on the client proactively. - ClientWriteTimeout time.Duration // When the net.Conn is idle for more than WriteTimeout duration, we close the connection on the client proactively. - Interface string // this is a VRF device passed via `--interface` flag - Trace func(msg string) // Trace when starting. + UserTimeout int // this value is expected to be in milliseconds + + // When the net.Conn is a remote drive this value is honored, we close the connection to remote peer proactively. + DriveOPTimeout func() time.Duration + + SendBufSize int // SO_SNDBUF size for the socket connection, NOTE: this sets server and client connection + RecvBufSize int // SO_RECVBUF size for the socket connection, NOTE: this sets server and client connection + NoDelay bool // Indicates callers to enable TCP_NODELAY on the net.Conn + Interface string // This is a VRF device passed via `--interface` flag + Trace func(msg string) // Trace when starting. + IdleTimeout time.Duration // Incoming TCP read/write timeout +} + +// ForWebsocket returns TCPOptions valid for websocket net.Conn +func (t TCPOptions) ForWebsocket() TCPOptions { + return TCPOptions{ + UserTimeout: t.UserTimeout, + Interface: t.Interface, + SendBufSize: t.SendBufSize, + RecvBufSize: t.RecvBufSize, + NoDelay: true, + } } // newHTTPListener - creates new httpListener object which is interface compatible to net.Listener. @@ -137,57 +145,47 @@ type TCPOptions struct { // * listen to multiple addresses // * controls incoming connections only doing HTTP protocol func newHTTPListener(ctx context.Context, serverAddrs []string, opts TCPOptions) (listener *httpListener, listenErrs []error) { - tcpListeners := make([]*net.TCPListener, 0, len(serverAddrs)) + listeners := make([]net.Listener, 0, len(serverAddrs)) listenErrs = make([]error, len(serverAddrs)) + if opts.Trace == nil { + opts.Trace = func(msg string) {} // Noop if not defined. + } + // Unix listener with special TCP options. 
listenCfg := net.ListenConfig{ Control: setTCPParametersFn(opts), } for i, serverAddr := range serverAddrs { - var ( - l net.Listener - e error - ) - if l, e = listenCfg.Listen(ctx, "tcp", serverAddr); e != nil { - if opts.Trace != nil { - opts.Trace(fmt.Sprint("listenCfg.Listen: ", e.Error())) - } + l, e := listenCfg.Listen(ctx, "tcp", serverAddr) + if e != nil { + opts.Trace("listenCfg.Listen: " + e.Error()) listenErrs[i] = e continue } + opts.Trace("adding listener to " + l.Addr().String()) - tcpListener, ok := l.(*net.TCPListener) - if !ok { - listenErrs[i] = fmt.Errorf("unexpected listener type found %v, expected net.TCPListener", l) - if opts.Trace != nil { - opts.Trace(fmt.Sprint("net.TCPListener: ", listenErrs[i].Error())) - } - continue - } - if opts.Trace != nil { - opts.Trace(fmt.Sprint("adding listener to ", tcpListener.Addr())) - } - tcpListeners = append(tcpListeners, tcpListener) + listeners = append(listeners, l) } - if len(tcpListeners) == 0 { + if len(listeners) == 0 { // No listeners initialized, no need to continue - return + return listener, listenErrs } + listeners = slices.Clip(listeners) + ctx, cancel := context.WithCancel(ctx) listener = &httpListener{ - tcpListeners: tcpListeners, - acceptCh: make(chan acceptResult, len(tcpListeners)), - opts: opts, - } - listener.ctx, listener.ctxCanceler = context.WithCancel(ctx) - if opts.Trace != nil { - opts.Trace(fmt.Sprint("opening ", len(listener.tcpListeners), " listeners")) + listeners: listeners, + acceptCh: make(chan acceptResult, len(listeners)), + opts: opts, + ctxDoneCh: ctx.Done(), + ctxCanceler: cancel, } + opts.Trace(fmt.Sprintf("opening %d listeners", len(listener.listeners))) listener.start() - return + return listener, listenErrs } diff --git a/internal/http/listener_test.go b/internal/http/listener_test.go index 1c0f55a58245e..b1ffdb7a0cdb7 100644 --- a/internal/http/listener_test.go +++ b/internal/http/listener_test.go @@ -18,7 +18,6 @@ package http import ( - "context" "crypto/tls" "net" "runtime" @@ -148,12 +147,12 @@ func TestNewHTTPListener(t *testing.T) { {[]string{"[::1:65432", "unknown-host:-1"}, time.Duration(0), time.Duration(0), time.Duration(0), []bool{true, true}}, // 7 {[]string{"localhost:0"}, time.Duration(0), time.Duration(0), time.Duration(0), []bool{false}}, // 8 {[]string{"localhost:0"}, time.Duration(0), time.Duration(0), time.Duration(0), []bool{false}}, // 9 - {[]string{"[::1]:9090", "127.0.0.1:90900"}, time.Duration(0), time.Duration(0), time.Duration(0), []bool{false}}, // 10 - {[]string{"[::1]:9090", "localhost:0"}, time.Duration(0), time.Duration(0), time.Duration(0), []bool{false}}, // 10 + {[]string{"[::1]:3737", "127.0.0.1:90900"}, time.Duration(0), time.Duration(0), time.Duration(0), []bool{false, true}}, // 10 + {[]string{"[::1]:3737", "localhost:0"}, time.Duration(0), time.Duration(0), time.Duration(0), []bool{false, false}}, // 10 } for testIdx, testCase := range testCases { - listener, listenErrs := newHTTPListener(context.Background(), + listener, listenErrs := newHTTPListener(t.Context(), testCase.serverAddrs, TCPOptions{}, ) @@ -192,7 +191,7 @@ func TestHTTPListenerStartClose(t *testing.T) { nextTest: for i, testCase := range testCases { - listener, errs := newHTTPListener(context.Background(), + listener, errs := newHTTPListener(t.Context(), testCase.serverAddrs, TCPOptions{}, ) @@ -228,7 +227,7 @@ func TestHTTPListenerAddr(t *testing.T) { nonLoopBackIP := getNonLoopBackIP(t) var casePorts []string - for i := 0; i < 6; i++ { + for range 6 { casePorts = append(casePorts, 
getNextPort()) } @@ -246,7 +245,7 @@ func TestHTTPListenerAddr(t *testing.T) { nextTest: for i, testCase := range testCases { - listener, errs := newHTTPListener(context.Background(), + listener, errs := newHTTPListener(t.Context(), testCase.serverAddrs, TCPOptions{}, ) @@ -279,7 +278,7 @@ func TestHTTPListenerAddrs(t *testing.T) { nonLoopBackIP := getNonLoopBackIP(t) var casePorts []string - for i := 0; i < 6; i++ { + for range 6 { casePorts = append(casePorts, getNextPort()) } @@ -297,7 +296,7 @@ func TestHTTPListenerAddrs(t *testing.T) { nextTest: for i, testCase := range testCases { - listener, errs := newHTTPListener(context.Background(), + listener, errs := newHTTPListener(t.Context(), testCase.serverAddrs, TCPOptions{}, ) diff --git a/internal/http/response-recorder.go b/internal/http/response-recorder.go index cb630a4832da4..d6a397dc7bea8 100644 --- a/internal/http/response-recorder.go +++ b/internal/http/response-recorder.go @@ -26,6 +26,8 @@ import ( "net" "net/http" "time" + + "github.com/klauspost/compress/gzip" ) // ResponseRecorder - is a wrapper to trap the http response @@ -39,8 +41,10 @@ type ResponseRecorder struct { // Log body of all responses LogAllBody bool - TimeToFirstByte time.Duration - StartTime time.Time + ttfbHeader time.Duration + ttfbBody time.Duration + + StartTime time.Time // number of bytes written bytesWritten int // number of bytes of response headers written @@ -61,6 +65,15 @@ func (lrw *ResponseRecorder) Hijack() (net.Conn, *bufio.ReadWriter, error) { return hj.Hijack() } +// TTFB of the request - this function needs to be called +// when the request is finished to provide accurate data +func (lrw *ResponseRecorder) TTFB() time.Duration { + if lrw.ttfbBody != 0 { + return lrw.ttfbBody + } + return lrw.ttfbHeader +} + // NewResponseRecorder - returns a wrapped response writer to trap // http status codes for auditing purposes. func NewResponseRecorder(w http.ResponseWriter) *ResponseRecorder { @@ -95,13 +108,19 @@ func (lrw *ResponseRecorder) Write(p []byte) (int, error) { } n, err := lrw.ResponseWriter.Write(p) lrw.bytesWritten += n - if lrw.TimeToFirstByte == 0 { - lrw.TimeToFirstByte = time.Now().UTC().Sub(lrw.StartTime) + if lrw.ttfbBody == 0 { + lrw.ttfbBody = time.Now().UTC().Sub(lrw.StartTime) } - gzipped := lrw.Header().Get("Content-Encoding") == "gzip" - if !gzipped && ((lrw.LogErrBody && lrw.StatusCode >= http.StatusBadRequest) || lrw.LogAllBody) { - // Always logging error responses. - lrw.body.Write(p) + + if (lrw.LogErrBody && lrw.StatusCode >= http.StatusBadRequest) || lrw.LogAllBody { + // If body is > 10MB, drop it. + if lrw.bytesWritten+len(p) > 10<<20 { + lrw.LogAllBody = false + lrw.body = bytes.Buffer{} + } else { + // Always logging error responses. + lrw.body.Write(p) + } } if err != nil { return n, err @@ -128,8 +147,16 @@ var gzippedBody = []byte("") // Body - Return response body. func (lrw *ResponseRecorder) Body() []byte { if lrw.Header().Get("Content-Encoding") == "gzip" { - // ... 
otherwise we return the place holder - return gzippedBody + if lrw.body.Len() > 1<<20 { + return gzippedBody + } + r, err := gzip.NewReader(&lrw.body) + if err != nil { + return gzippedBody + } + defer r.Close() + b, _ := io.ReadAll(io.LimitReader(r, 10<<20)) + return b } // If there was an error response or body logging is enabled // then we return the body contents @@ -143,8 +170,9 @@ func (lrw *ResponseRecorder) Body() []byte { // WriteHeader - writes http status code func (lrw *ResponseRecorder) WriteHeader(code int) { if !lrw.headersLogged { + lrw.ttfbHeader = time.Now().UTC().Sub(lrw.StartTime) lrw.StatusCode = code - lrw.writeHeaders(&lrw.headers, code, lrw.ResponseWriter.Header()) + lrw.writeHeaders(&lrw.headers, code, lrw.Header()) lrw.headersLogged = true lrw.ResponseWriter.WriteHeader(code) } @@ -152,7 +180,9 @@ func (lrw *ResponseRecorder) WriteHeader(code int) { // Flush - Calls the underlying Flush. func (lrw *ResponseRecorder) Flush() { - lrw.ResponseWriter.(http.Flusher).Flush() + if flusher, ok := lrw.ResponseWriter.(http.Flusher); ok { + flusher.Flush() + } } // Size - returns the number of bytes written diff --git a/internal/http/server.go b/internal/http/server.go index 9839f72b05431..2934fda6c04d4 100644 --- a/internal/http/server.go +++ b/internal/http/server.go @@ -22,11 +22,8 @@ import ( "crypto/tls" "errors" "log" - "math/rand" "net" "net/http" - "os" - "runtime/pprof" "sync" "sync/atomic" "time" @@ -43,11 +40,6 @@ var ( ) const ( - shutdownPollIntervalMax = 500 * time.Millisecond - - // DefaultShutdownTimeout - default shutdown timeout to gracefully shutdown server. - DefaultShutdownTimeout = 5 * time.Second - // DefaultIdleTimeout for idle inactive connections DefaultIdleTimeout = 30 * time.Second @@ -61,13 +53,12 @@ const ( // Server - extended http.Server supports multiple addresses to serve and enhanced connection handling. type Server struct { http.Server - Addrs []string // addresses on which the server listens for new connection. - TCPOptions TCPOptions // all the configurable TCP conn specific configurable options. - ShutdownTimeout time.Duration // timeout used for graceful server shutdown. - listenerMutex sync.Mutex // to guard 'listener' field. - listener *httpListener // HTTP listener for all 'Addrs' field. - inShutdown uint32 // indicates whether the server is in shutdown or not - requestCount int32 // counter holds no. of request in progress. + Addrs []string // addresses on which the server listens for new connection. + TCPOptions TCPOptions // all the configurable TCP conn specific configurable options. + listenerMutex sync.Mutex // to guard 'listener' field. + listener *httpListener // HTTP listener for all 'Addrs' field. + inShutdown uint32 // indicates whether the server is in shutdown or not + requestCount int32 // counter holds no. of request in progress. } // GetRequestCount - returns number of request in progress. @@ -138,10 +129,10 @@ func (srv *Server) Init(listenCtx context.Context, listenErrCallback func(listen } serve = func() error { - return srv.Server.Serve(l) + return srv.Serve(l) } - return + return serve, err } // Shutdown - shuts down HTTP server. @@ -166,53 +157,8 @@ func (srv *Server) Shutdown() error { return err } - pollIntervalBase := time.Millisecond - nextPollInterval := func() time.Duration { - // Add 10% jitter. - interval := pollIntervalBase + time.Duration(rand.Intn(int(pollIntervalBase/10))) - // Double and clamp for next time. 
- pollIntervalBase *= 2 - if pollIntervalBase > shutdownPollIntervalMax { - pollIntervalBase = shutdownPollIntervalMax - } - return interval - } - // Wait for opened connection to be closed up to Shutdown timeout. - shutdownTimeout := srv.ShutdownTimeout - shutdownTimer := time.NewTimer(shutdownTimeout) - defer shutdownTimer.Stop() - - timer := time.NewTimer(nextPollInterval()) - defer timer.Stop() - for { - select { - case <-shutdownTimer.C: - if atomic.LoadInt32(&srv.requestCount) <= 0 { - return nil - } - - // Write all running goroutines. - tmp, err := os.CreateTemp("", "minio-goroutines-*.txt") - if err == nil { - _ = pprof.Lookup("goroutine").WriteTo(tmp, 1) - tmp.Close() - return errors.New("timed out. some connections are still active. goroutines written to " + tmp.Name()) - } - return errors.New("timed out. some connections are still active") - case <-timer.C: - if atomic.LoadInt32(&srv.requestCount) <= 0 { - return nil - } - timer.Reset(nextPollInterval()) - } - } -} - -// UseShutdownTimeout configure server shutdown timeout -func (srv *Server) UseShutdownTimeout(d time.Duration) *Server { - srv.ShutdownTimeout = d - return srv + return nil } // UseIdleTimeout configure idle connection timeout @@ -221,12 +167,24 @@ func (srv *Server) UseIdleTimeout(d time.Duration) *Server { return srv } +// UseReadTimeout configure connection request read timeout. +func (srv *Server) UseReadTimeout(d time.Duration) *Server { + srv.ReadTimeout = d + return srv +} + // UseReadHeaderTimeout configure read header timeout func (srv *Server) UseReadHeaderTimeout(d time.Duration) *Server { srv.ReadHeaderTimeout = d return srv } +// UseWriteTimeout configure connection response write timeout. +func (srv *Server) UseWriteTimeout(d time.Duration) *Server { + srv.WriteTimeout = d + return srv +} + // UseHandler configure final handler for this HTTP *Server func (srv *Server) UseHandler(h http.Handler) *Server { srv.Handler = h diff --git a/internal/http/server_test.go b/internal/http/server_test.go index c9cc7012ab5d7..27f260aeda071 100644 --- a/internal/http/server_test.go +++ b/internal/http/server_test.go @@ -24,7 +24,7 @@ import ( "reflect" "testing" - "github.com/minio/pkg/v2/certs" + "github.com/minio/pkg/v3/certs" ) func TestNewServer(t *testing.T) { @@ -48,8 +48,7 @@ func TestNewServer(t *testing.T) { for i, testCase := range testCases { server := NewServer(testCase.addrs). - UseHandler(testCase.handler). - UseShutdownTimeout(DefaultShutdownTimeout) + UseHandler(testCase.handler) if testCase.certFn != nil { server = server.UseTLSConfig(&tls.Config{ PreferServerCipherSuites: true, @@ -58,17 +57,10 @@ func TestNewServer(t *testing.T) { } if server == nil { t.Fatalf("Case %v: server: expected: , got: ", (i + 1)) - } - - if !reflect.DeepEqual(server.Addrs, testCase.addrs) { + } else if !reflect.DeepEqual(server.Addrs, testCase.addrs) { t.Fatalf("Case %v: server.Addrs: expected: %v, got: %v", (i + 1), testCase.addrs, server.Addrs) } - // Interfaces are not comparable even with reflection. 
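With the shutdown-polling machinery removed and read/write timeouts now configurable, server setup becomes a plain chain of Use* setters. The sketch below shows that wiring; the address, handler and durations are placeholders, and Init/Serve are left out since only the builder API is being illustrated.

```go
package main

import (
	"log"
	"net/http"
	"time"

	xhttp "github.com/minio/minio/internal/http"
)

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// Address and durations are placeholders.
	srv := xhttp.NewServer([]string{":9000"}).
		UseHandler(handler).
		UseIdleTimeout(xhttp.DefaultIdleTimeout).
		UseReadHeaderTimeout(5 * time.Second).
		UseReadTimeout(15 * time.Minute). // new: bounds request body reads
		UseWriteTimeout(15 * time.Minute) // new: bounds response writes

	log.Println("configured listeners:", srv.Addrs)
	// srv.Init(...) starts the listeners; Shutdown() now closes them and
	// returns instead of polling in-flight requests against a timeout.
}
```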
- // if !reflect.DeepEqual(server.Handler, testCase.handler) { - // t.Fatalf("Case %v: server.Handler: expected: %v, got: %v", (i + 1), testCase.handler, server.Handler) - // } - if testCase.certFn == nil { if server.TLSConfig != nil { t.Fatalf("Case %v: server.TLSConfig: expected: , got: %v", (i + 1), server.TLSConfig) @@ -79,10 +71,6 @@ func TestNewServer(t *testing.T) { } } - if server.ShutdownTimeout != DefaultShutdownTimeout { - t.Fatalf("Case %v: server.ShutdownTimeout: expected: %v, got: %v", (i + 1), DefaultShutdownTimeout, server.ShutdownTimeout) - } - if server.MaxHeaderBytes != DefaultMaxHeaderBytes { t.Fatalf("Case %v: server.MaxHeaderBytes: expected: %v, got: %v", (i + 1), DefaultMaxHeaderBytes, server.MaxHeaderBytes) } diff --git a/internal/http/transports.go b/internal/http/transports.go index a2da2dbbb6a16..fba86bd3281fc 100644 --- a/internal/http/transports.go +++ b/internal/http/transports.go @@ -25,12 +25,20 @@ import ( "syscall" "time" - "github.com/minio/pkg/v2/certs" + "github.com/minio/pkg/v3/certs" ) // tlsClientSessionCacheSize is the cache size for client sessions. var tlsClientSessionCacheSize = 100 +const ( + // WriteBufferSize 64KiB moving up from 4KiB default + WriteBufferSize = 64 << 10 + + // ReadBufferSize 64KiB moving up from 4KiB default + ReadBufferSize = 64 << 10 +) + // ConnSettings - contains connection settings. type ConnSettings struct { DialContext DialContext // Custom dialContext, DialTimeout is ignored if this is already setup. @@ -72,8 +80,8 @@ func (s ConnSettings) getDefaultTransport(maxIdleConnsPerHost int) *http.Transpo Proxy: http.ProxyFromEnvironment, DialContext: dialContext, MaxIdleConnsPerHost: maxIdleConnsPerHost, - WriteBufferSize: 32 << 10, // 32KiB moving up from 4KiB default - ReadBufferSize: 32 << 10, // 32KiB moving up from 4KiB default + WriteBufferSize: WriteBufferSize, + ReadBufferSize: ReadBufferSize, IdleConnTimeout: 15 * time.Second, ResponseHeaderTimeout: 15 * time.Minute, // Conservative timeout is the default (for MinIO internode) TLSHandshakeTimeout: 10 * time.Second, @@ -175,3 +183,23 @@ func (s ConnSettings) NewRemoteTargetHTTPTransport(insecure bool) func() *http.T return tr } } + +// uaTransport - User-Agent transport +type uaTransport struct { + ua string + rt http.RoundTripper +} + +func (u *uaTransport) RoundTrip(req *http.Request) (*http.Response, error) { + req2 := req.Clone(req.Context()) + req2.Header.Set("User-Agent", u.ua) + return u.rt.RoundTrip(req2) +} + +// WithUserAgent wraps an existing transport with custom User-Agent +func WithUserAgent(rt http.RoundTripper, getUA func() string) http.RoundTripper { + return &uaTransport{ + ua: getUA(), + rt: rt, + } +} diff --git a/internal/ioutil/hardlimitreader.go b/internal/ioutil/hardlimitreader.go index 7415be6b7031e..3f37a294a8a90 100644 --- a/internal/ioutil/hardlimitreader.go +++ b/internal/ioutil/hardlimitreader.go @@ -52,5 +52,5 @@ func (l *HardLimitedReader) Read(p []byte) (n int, err error) { if l.N < 0 { return 0, ErrOverread } - return + return n, err } diff --git a/internal/ioutil/ioutil.go b/internal/ioutil/ioutil.go index 09712f4b14c4b..2f214b778ded9 100644 --- a/internal/ioutil/ioutil.go +++ b/internal/ioutil/ioutil.go @@ -20,48 +20,58 @@ package ioutil import ( - "bytes" "context" "errors" "io" "os" "runtime/debug" - "sync" "time" "github.com/dustin/go-humanize" + "github.com/minio/minio/internal/bpool" "github.com/minio/minio/internal/disk" ) // Block sizes constant. 
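The new WithUserAgent helper wraps any RoundTripper and stamps a fixed User-Agent onto a clone of every outgoing request. A small usage sketch follows; the UA string and client setup are illustrative. Note that getUA is evaluated once when the wrapper is built, so the header value stays fixed for the life of the transport.

```go
package main

import (
	"fmt"
	"net/http"

	xhttp "github.com/minio/minio/internal/http"
)

func main() {
	// getUA is called once at wrap time; the resulting UA is reused for
	// every request sent through this transport.
	rt := xhttp.WithUserAgent(http.DefaultTransport, func() string {
		return "MinIO (linux; amd64) example/0.0.1" // illustrative UA string
	})

	client := &http.Client{Transport: rt}
	resp, err := client.Get("https://example.com")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```

Cloning the request before setting the header keeps the wrapper safe for callers that reuse the original *http.Request.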
const ( - BlockSizeSmall = 32 * humanize.KiByte // Default r/w block size for smaller objects. - BlockSizeLarge = 2 * humanize.MiByte // Default r/w block size for larger objects. - BlockSizeReallyLarge = 4 * humanize.MiByte // Default write block size for objects per shard >= 64MiB + SmallBlock = 32 * humanize.KiByte // Default r/w block size for smaller objects. + MediumBlock = 128 * humanize.KiByte // Default r/w block size for medium sized objects. + LargeBlock = 1 * humanize.MiByte // Default r/w block size for normal objects. ) +// AlignedBytePool is a pool of fixed size aligned blocks +type AlignedBytePool struct { + size int + p bpool.Pool[*[]byte] +} + +// NewAlignedBytePool creates a new pool with the specified size. +func NewAlignedBytePool(sz int) *AlignedBytePool { + return &AlignedBytePool{size: sz, p: bpool.Pool[*[]byte]{New: func() *[]byte { + b := disk.AlignedBlock(sz) + return &b + }}} +} + // aligned sync.Pool's var ( - ODirectPoolXLarge = sync.Pool{ - New: func() interface{} { - b := disk.AlignedBlock(BlockSizeReallyLarge) - return &b - }, - } - ODirectPoolLarge = sync.Pool{ - New: func() interface{} { - b := disk.AlignedBlock(BlockSizeLarge) - return &b - }, - } - ODirectPoolSmall = sync.Pool{ - New: func() interface{} { - b := disk.AlignedBlock(BlockSizeSmall) - return &b - }, - } + ODirectPoolLarge = NewAlignedBytePool(LargeBlock) + ODirectPoolMedium = NewAlignedBytePool(MediumBlock) + ODirectPoolSmall = NewAlignedBytePool(SmallBlock) ) +// Get a block. +func (p *AlignedBytePool) Get() *[]byte { + return p.p.Get() +} + +// Put a block. +func (p *AlignedBytePool) Put(pb *[]byte) { + if pb != nil && len(*pb) == p.size { + p.p.Put(pb) + } +} + // WriteOnCloser implements io.WriteCloser and always // executes at least one write operation if it is closed. // @@ -105,13 +115,6 @@ type ioret[V any] struct { err error } -// DeadlineWriter deadline writer with timeout -type DeadlineWriter struct { - io.WriteCloser - timeout time.Duration - err error -} - // WithDeadline will execute a function with a deadline and return a value of a given type. // If the deadline/context passes before the function finishes executing, // the zero value and the context error is returned. @@ -153,21 +156,17 @@ func NewDeadlineWorker(timeout time.Duration) *DeadlineWorker { // channel so that the work function can attempt to exit gracefully. // Multiple calls to Run will run independently of each other. func (d *DeadlineWorker) Run(work func() error) error { - c := make(chan ioret[struct{}], 1) - t := time.NewTimer(d.timeout) - go func() { - c <- ioret[struct{}]{val: struct{}{}, err: work()} - }() + _, err := WithDeadline[struct{}](context.Background(), d.timeout, func(ctx context.Context) (struct{}, error) { + return struct{}{}, work() + }) + return err +} - select { - case r := <-c: - if !t.Stop() { - <-t.C - } - return r.err - case <-t.C: - return context.DeadlineExceeded - } +// DeadlineWriter deadline writer with timeout +type DeadlineWriter struct { + io.WriteCloser + timeout time.Duration + err error } // NewDeadlineWriter wraps a writer to make it respect given deadline @@ -275,15 +274,25 @@ func (s *SkipReader) Read(p []byte) (int, error) { if l == 0 { return 0, nil } - for s.skipCount > 0 { - if l > s.skipCount { - l = s.skipCount + if s.skipCount > 0 { + tmp := p + if s.skipCount > l && l < SmallBlock { + // We may get a very small buffer, so we grab a temporary buffer. 
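The O_DIRECT pools are now typed AlignedBytePool instances, and Put silently drops any buffer whose length no longer matches the pool's block size. A short sketch of the borrow/return pattern (the reader is only there to give the buffer something to hold):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/minio/minio/internal/ioutil"
)

func main() {
	// Borrow an aligned block and return it to the same pool when done.
	bufp := ioutil.ODirectPoolLarge.Get()
	defer ioutil.ODirectPoolLarge.Put(bufp) // dropped if the slice was resized

	buf := *bufp
	n, _ := strings.NewReader("some payload").Read(buf)
	fmt.Printf("read %d bytes into a %d-byte aligned buffer\n", n, len(buf))
}
```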
+ bufp := ODirectPoolSmall.Get() + tmp = *bufp + defer ODirectPoolSmall.Put(bufp) + l = int64(len(tmp)) } - n, err := s.Reader.Read(p[:l]) - if err != nil { - return 0, err + for s.skipCount > 0 { + if l > s.skipCount { + l = s.skipCount + } + n, err := s.Reader.Read(tmp[:l]) + if err != nil { + return 0, err + } + s.skipCount -= int64(n) } - s.skipCount -= int64(n) } return s.Reader.Read(p) } @@ -293,20 +302,19 @@ func NewSkipReader(r io.Reader, n int64) io.Reader { return &SkipReader{r, n} } -var copyBufPool = sync.Pool{ - New: func() interface{} { - b := make([]byte, 32*1024) - return &b - }, +// writerOnly hides an io.Writer value's optional ReadFrom method +// from io.Copy. +type writerOnly struct { + io.Writer } // Copy is exactly like io.Copy but with reusable buffers. func Copy(dst io.Writer, src io.Reader) (written int64, err error) { - bufp := copyBufPool.Get().(*[]byte) + bufp := ODirectPoolMedium.Get() + defer ODirectPoolMedium.Put(bufp) buf := *bufp - defer copyBufPool.Put(bufp) - return io.CopyBuffer(dst, src, buf) + return io.CopyBuffer(writerOnly{dst}, src, buf) } // SameFile returns if the files are same. @@ -341,19 +349,6 @@ func CopyAligned(w io.Writer, r io.Reader, alignedBuf []byte, totalSize int64, f return 0, nil } - // Writes remaining bytes in the buffer. - writeUnaligned := func(w io.Writer, buf []byte) (remainingWritten int64, err error) { - // Disable O_DIRECT on fd's on unaligned buffer - // perform an amortized Fdatasync(fd) on the fd at - // the end, this is performed by the caller before - // closing 'w'. - if err = disk.DisableDirectIO(file); err != nil { - return remainingWritten, err - } - // Since w is *os.File io.Copy shall use ReadFrom() call. - return io.Copy(w, bytes.NewReader(buf)) - } - var written int64 for { buf := alignedBuf @@ -371,15 +366,38 @@ func CopyAligned(w io.Writer, r io.Reader, alignedBuf []byte, totalSize int64, f } buf = buf[:nr] - var nw int64 - if len(buf)%DirectioAlignSize == 0 { - var n int + var ( + n int + un int + nw int64 + ) + + remain := len(buf) % DirectioAlignSize + if remain == 0 { // buf is aligned for directio write() n, err = w.Write(buf) nw = int64(n) } else { + if remain < len(buf) { + n, err = w.Write(buf[:len(buf)-remain]) + if err != nil { + return written, err + } + nw = int64(n) + } + + // Disable O_DIRECT on fd's on unaligned buffer + // perform an amortized Fdatasync(fd) on the fd at + // the end, this is performed by the caller before + // closing 'w'. 
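Copy now routes the destination through writerOnly so that io.CopyBuffer cannot shortcut to the destination's ReadFrom and bypass the pooled buffer. The standalone illustration below reproduces that effect with stdlib types only; it mirrors the wrapper above but is not the package code itself.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// writerOnly hides an io.Writer's optional ReadFrom method from io.CopyBuffer.
type writerOnly struct{ io.Writer }

func main() {
	// io.LimitReader has no WriteTo and bytes.Buffer has ReadFrom, so without
	// the wrapper io.CopyBuffer would call dst.ReadFrom and ignore buf.
	src := io.LimitReader(strings.NewReader("hello world"), 11)
	var dst bytes.Buffer
	buf := make([]byte, 4) // deliberately tiny to show the buffer is used

	if _, err := io.CopyBuffer(writerOnly{&dst}, src, buf); err != nil {
		fmt.Println("copy failed:", err)
		return
	}
	fmt.Println(dst.String()) // hello world
}
```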
+ if err = disk.DisableDirectIO(file); err != nil { + return written, err + } + // buf is not aligned, hence use writeUnaligned() - nw, err = writeUnaligned(w, buf) + // for the remainder + un, err = w.Write(buf[len(buf)-remain:]) + nw += int64(un) } if nw > 0 { diff --git a/internal/ioutil/ioutil_test.go b/internal/ioutil/ioutil_test.go index 6e332b3f59eb8..b71c2f2e0f24e 100644 --- a/internal/ioutil/ioutil_test.go +++ b/internal/ioutil/ioutil_test.go @@ -41,6 +41,26 @@ func (w *sleepWriter) Close() error { return nil } +func TestDeadlineWorker(t *testing.T) { + work := NewDeadlineWorker(500 * time.Millisecond) + + err := work.Run(func() error { + time.Sleep(600 * time.Millisecond) + return nil + }) + if err != context.DeadlineExceeded { + t.Error("DeadlineWorker shouldn't be successful - should return context.DeadlineExceeded") + } + + err = work.Run(func() error { + time.Sleep(450 * time.Millisecond) + return nil + }) + if err != nil { + t.Error("DeadlineWorker should succeed") + } +} + func TestDeadlineWriter(t *testing.T) { w := NewDeadlineWriter(&sleepWriter{timeout: 500 * time.Millisecond}, 450*time.Millisecond) _, err := w.Write([]byte("1")) @@ -82,7 +102,7 @@ func TestCloseOnWriter(t *testing.T) { // Test for AppendFile. func TestAppendFile(t *testing.T) { - f, err := os.CreateTemp("", "") + f, err := os.CreateTemp(t.TempDir(), "") if err != nil { t.Fatal(err) } @@ -91,7 +111,7 @@ func TestAppendFile(t *testing.T) { f.WriteString("aaaaaaaaaa") f.Close() - f, err = os.CreateTemp("", "") + f, err = os.CreateTemp(t.TempDir(), "") if err != nil { t.Fatal(err) } @@ -142,7 +162,7 @@ func TestSkipReader(t *testing.T) { } func TestSameFile(t *testing.T) { - f, err := os.CreateTemp("", "") + f, err := os.CreateTemp(t.TempDir(), "") if err != nil { t.Errorf("Error creating tmp file: %v", err) } @@ -173,7 +193,7 @@ func TestSameFile(t *testing.T) { } func TestCopyAligned(t *testing.T) { - f, err := os.CreateTemp("", "") + f, err := os.CreateTemp(t.TempDir(), "") if err != nil { t.Errorf("Error creating tmp file: %v", err) } @@ -182,7 +202,7 @@ func TestCopyAligned(t *testing.T) { r := strings.NewReader("hello world") - bufp := ODirectPoolSmall.Get().(*[]byte) + bufp := ODirectPoolSmall.Get() defer ODirectPoolSmall.Put(bufp) written, err := CopyAligned(f, io.LimitReader(r, 5), *bufp, r.Size(), f) diff --git a/internal/jwt/parser.go b/internal/jwt/parser.go index 44c29c284597a..1a4ddb44a22b4 100644 --- a/internal/jwt/parser.go +++ b/internal/jwt/parser.go @@ -30,13 +30,13 @@ import ( "errors" "fmt" "hash" - "sync" "time" "github.com/buger/jsonparser" "github.com/dustin/go-humanize" jwtgo "github.com/golang-jwt/jwt/v4" jsoniter "github.com/json-iterator/go" + "github.com/minio/minio/internal/bpool" ) // SigningMethodHMAC - Implements the HMAC-SHA family of signing methods signing methods @@ -44,7 +44,7 @@ import ( type SigningMethodHMAC struct { Name string Hash crypto.Hash - HasherPool sync.Pool + HasherPool bpool.Pool[hash.Hash] } // Specific instances for HS256, HS384, HS512 @@ -57,13 +57,13 @@ var ( const base64BufferSize = 64 * humanize.KiByte var ( - base64BufPool sync.Pool + base64BufPool bpool.Pool[*[]byte] hmacSigners []*SigningMethodHMAC ) func init() { - base64BufPool = sync.Pool{ - New: func() interface{} { + base64BufPool = bpool.Pool[*[]byte]{ + New: func() *[]byte { buf := make([]byte, base64BufferSize) return &buf }, @@ -76,7 +76,7 @@ func init() { } for i := range hmacSigners { h := hmacSigners[i].Hash - hmacSigners[i].HasherPool.New = func() interface{} { + 
hmacSigners[i].HasherPool.New = func() hash.Hash { return h.New() } } @@ -89,13 +89,13 @@ func (s *SigningMethodHMAC) HashBorrower() HashBorrower { // HashBorrower keeps track of borrowed hashers and allows to return them all. type HashBorrower struct { - pool *sync.Pool + pool *bpool.Pool[hash.Hash] borrowed []hash.Hash } // Borrow a single hasher. func (h *HashBorrower) Borrow() hash.Hash { - hasher := h.pool.Get().(hash.Hash) + hasher := h.pool.Get() h.borrowed = append(h.borrowed, hasher) hasher.Reset() return hasher @@ -245,17 +245,33 @@ func NewMapClaims() *MapClaims { return &MapClaims{MapClaims: jwtgo.MapClaims{}} } +// Set Adds new arbitrary claim keys and values. +func (c *MapClaims) Set(key string, val any) { + if c == nil { + return + } + c.MapClaims[key] = val +} + +// Delete deletes a key named key. +func (c *MapClaims) Delete(key string) { + if c == nil { + return + } + delete(c.MapClaims, key) +} + // Lookup returns the value and if the key is found. func (c *MapClaims) Lookup(key string) (value string, ok bool) { if c == nil { return "", false } - var vinterface interface{} + var vinterface any vinterface, ok = c.MapClaims[key] if ok { value, ok = vinterface.(string) } - return + return value, ok } // SetExpiry sets expiry in unix epoch secs @@ -286,7 +302,7 @@ func (c *MapClaims) Valid() error { } // Map returns underlying low-level map claims. -func (c *MapClaims) Map() map[string]interface{} { +func (c *MapClaims) Map() map[string]any { if c == nil { return nil } @@ -307,10 +323,10 @@ func ParseWithStandardClaims(tokenStr string, claims *StandardClaims, key []byte return jwtgo.NewValidationError("no key was provided.", jwtgo.ValidationErrorUnverifiable) } - bufp := base64BufPool.Get().(*[]byte) + bufp := base64BufPool.Get() defer base64BufPool.Put(bufp) - tokenBuf := base64BufPool.Get().(*[]byte) + tokenBuf := base64BufPool.Get() defer base64BufPool.Put(tokenBuf) token := *tokenBuf @@ -403,10 +419,10 @@ func ParseWithClaims(tokenStr string, claims *MapClaims, fn func(*MapClaims) ([] return jwtgo.NewValidationError("no Keyfunc was provided.", jwtgo.ValidationErrorUnverifiable) } - bufp := base64BufPool.Get().(*[]byte) + bufp := base64BufPool.Get() defer base64BufPool.Put(bufp) - tokenBuf := base64BufPool.Get().(*[]byte) + tokenBuf := base64BufPool.Get() defer base64BufPool.Put(tokenBuf) token := *tokenBuf diff --git a/internal/jwt/parser_test.go b/internal/jwt/parser_test.go index 9fc6889e96b6a..a521bfcfcad51 100644 --- a/internal/jwt/parser_test.go +++ b/internal/jwt/parser_test.go @@ -176,7 +176,6 @@ func standardClaimsToken(claims *StandardClaims) string { func TestParserParse(t *testing.T) { // Iterate over test data set and run tests for _, data := range jwtTestData { - data := data t.Run(data.name, func(t *testing.T) { // Parse the token var err error diff --git a/internal/kms/config.go b/internal/kms/config.go index f19352ac3c6ce..158bd0d60336b 100644 --- a/internal/kms/config.go +++ b/internal/kms/config.go @@ -17,15 +17,425 @@ package kms -// Top level config constants for KMS +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + "sync/atomic" + "syscall" + "time" + + "aead.dev/mtls" + "github.com/minio/kms-go/kes" + "github.com/minio/kms-go/kms" + "github.com/minio/pkg/v3/certs" + "github.com/minio/pkg/v3/ellipses" + "github.com/minio/pkg/v3/env" +) + +// Environment variables for MinIO KMS. 
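Both ioutil and the JWT parser now lean on the typed internal/bpool.Pool[T] instead of sync.Pool, which removes the interface{} assertions on every Get. That package is not part of this diff; the following is only a minimal sketch of what such a generic wrapper can look like, written to illustrate the idea (the real internal/bpool may differ).

```go
package bpool

import "sync"

// Pool is a typed convenience wrapper around sync.Pool: Get returns T
// directly instead of an interface{} that every caller must assert.
type Pool[T any] struct {
	New  func() T
	pool sync.Pool
}

// Get returns a pooled value if one is available, otherwise a freshly built one.
func (p *Pool[T]) Get() T {
	if v, ok := p.pool.Get().(T); ok {
		return v
	}
	return p.New()
}

// Put returns a value to the pool for later reuse.
func (p *Pool[T]) Put(v T) {
	p.pool.Put(v)
}
```

With this shape, the declarations above read naturally, e.g. bpool.Pool[*[]byte]{New: func() *[]byte { b := make([]byte, base64BufferSize); return &b }}.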
+const ( + EnvKMSEndpoint = "MINIO_KMS_SERVER" // List of MinIO KMS endpoints, separated by ',' + EnvKMSEnclave = "MINIO_KMS_ENCLAVE" // MinIO KMS enclave in which the key and identity exists + EnvKMSDefaultKey = "MINIO_KMS_SSE_KEY" // Default key used for SSE-S3 or when no SSE-KMS key ID is specified + EnvKMSAPIKey = "MINIO_KMS_API_KEY" // Credential to access the MinIO KMS. +) + +// Environment variables for MinIO KES. const ( - EnvKMSSecretKey = "MINIO_KMS_SECRET_KEY" - EnvKMSSecretKeyFile = "MINIO_KMS_SECRET_KEY_FILE" EnvKESEndpoint = "MINIO_KMS_KES_ENDPOINT" // One or multiple KES endpoints, separated by ',' - EnvKESKeyName = "MINIO_KMS_KES_KEY_NAME" // The default key name used for IAM data and when no key ID is specified on a bucket + EnvKESDefaultKey = "MINIO_KMS_KES_KEY_NAME" // The default key name used for IAM data and when no key ID is specified on a bucket EnvKESAPIKey = "MINIO_KMS_KES_API_KEY" // Access credential for KES - API keys and private key / certificate are mutually exclusive EnvKESClientKey = "MINIO_KMS_KES_KEY_FILE" // Path to TLS private key for authenticating to KES with mTLS - usually prefer API keys - EnvKESClientPassword = "MINIO_KMS_KES_KEY_PASSWORD" // Optional password to decrypt an encrypt TLS private key EnvKESClientCert = "MINIO_KMS_KES_CERT_FILE" // Path to TLS certificate for authenticating to KES with mTLS - usually prefer API keys EnvKESServerCA = "MINIO_KMS_KES_CAPATH" // Path to file/directory containing CA certificates to verify the KES server certificate + EnvKESClientPassword = "MINIO_KMS_KES_KEY_PASSWORD" // Optional password to decrypt an encrypt TLS private key +) + +// Environment variables for static KMS key. +const ( + EnvKMSSecretKey = "MINIO_KMS_SECRET_KEY" // Static KMS key in the form ":". Implements a subset of KMS/KES APIs + EnvKMSSecretKeyFile = "MINIO_KMS_SECRET_KEY_FILE" // Path to a file to read the static KMS key from ) + +// EnvKMSReplicateKeyID is an env. variable that controls whether MinIO +// replicates the KMS key ID. By default, KMS key ID replication is enabled +// but can be turned off. +const EnvKMSReplicateKeyID = "MINIO_KMS_REPLICATE_KEYID" + +const ( + tlsClientSessionCacheSize = 100 +) + +var replicateKeyID = sync.OnceValue(func() bool { + if v, ok := os.LookupEnv(EnvKMSReplicateKeyID); ok && strings.ToLower(v) == "off" { + return false + } + return true // by default, replicating KMS key IDs is enabled +}) + +// ReplicateKeyID reports whether KMS key IDs should be included when +// replicating objects. It's enabled by default. To disable it, set: +// +// MINIO_KMS_REPLICATE_KEYID=off +// +// Some deployments use different KMS clusters with destinct keys on +// each site. Trying to replicate the KMS key ID can cause requests +// to fail in such setups. +func ReplicateKeyID() bool { return replicateKeyID() } + +// ConnectionOptions is a structure containing options for connecting +// to a KMS. +type ConnectionOptions struct { + CADir string // Path to directory (or file) containing CA certificates +} + +// Connect returns a new Conn to a KMS. It uses configuration from the +// environment and returns a: +// +// - connection to MinIO KMS if the "MINIO_KMS_SERVER" variable is present. +// - connection to MinIO KES if the "MINIO_KMS_KES_ENDPOINT" is present. +// - connection to a "local" KMS implementation using a static key if the +// "MINIO_KMS_SECRET_KEY" or "MINIO_KMS_SECRET_KEY_FILE" is present. +// +// It returns an error if connecting to the KMS implementation fails, +// e.g. 
due to incomplete config, or when configurations for multiple +// KMS implementations are present. +func Connect(ctx context.Context, opts *ConnectionOptions) (*KMS, error) { + if present, err := IsPresent(); !present || err != nil { + if err != nil { + return nil, err + } + return nil, errors.New("kms: no KMS configuration specified") + } + + lookup := func(key string) bool { + _, ok := os.LookupEnv(key) + return ok + } + switch { + case lookup(EnvKMSEndpoint): + rawEndpoint := env.Get(EnvKMSEndpoint, "") + if rawEndpoint == "" { + return nil, errors.New("kms: no KMS server endpoint provided") + } + endpoints, err := expandEndpoints(rawEndpoint) + if err != nil { + return nil, err + } + + key, err := mtls.ParsePrivateKey(env.Get(EnvKMSAPIKey, "")) + if err != nil { + return nil, err + } + + var rootCAs *x509.CertPool + if opts != nil && opts.CADir != "" { + rootCAs, err = certs.GetRootCAs(opts.CADir) + if err != nil { + return nil, err + } + } + + client, err := kms.NewClient(&kms.Config{ + Endpoints: endpoints, + APIKey: key, + TLS: &tls.Config{ + MinVersion: tls.VersionTLS12, + ClientSessionCache: tls.NewLRUClientSessionCache(tlsClientSessionCacheSize), + RootCAs: rootCAs, + }, + }) + if err != nil { + return nil, err + } + + return &KMS{ + Type: MinKMS, + DefaultKey: env.Get(EnvKMSDefaultKey, ""), + conn: &kmsConn{ + enclave: env.Get(EnvKMSEnclave, ""), + defaultKey: env.Get(EnvKMSDefaultKey, ""), + client: client, + }, + latencyBuckets: defaultLatencyBuckets, + latency: make([]atomic.Uint64, len(defaultLatencyBuckets)), + }, nil + case lookup(EnvKESEndpoint): + rawEndpoint := env.Get(EnvKESEndpoint, "") + if rawEndpoint == "" { + return nil, errors.New("kms: no KES server endpoint provided") + } + endpoints, err := expandEndpoints(rawEndpoint) + if err != nil { + return nil, err + } + + conf := &tls.Config{ + MinVersion: tls.VersionTLS12, + ClientSessionCache: tls.NewLRUClientSessionCache(tlsClientSessionCacheSize), + } + if s := env.Get(EnvKESAPIKey, ""); s != "" { + key, err := kes.ParseAPIKey(s) + if err != nil { + return nil, err + } + + cert, err := kes.GenerateCertificate(key) + if err != nil { + return nil, err + } + conf.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { return &cert, nil } + } else { + loadX509KeyPair := func(certFile, keyFile string) (tls.Certificate, error) { + // Manually load the certificate and private key into memory. + // We need to check whether the private key is encrypted, and + // if so, decrypt it using the user-provided password. 
+ certBytes, err := os.ReadFile(certFile) + if err != nil { + return tls.Certificate{}, fmt.Errorf("Unable to load KES client certificate as specified by the shell environment: %v", err) + } + keyBytes, err := os.ReadFile(keyFile) + if err != nil { + return tls.Certificate{}, fmt.Errorf("Unable to load KES client private key as specified by the shell environment: %v", err) + } + privateKeyPEM, rest := pem.Decode(bytes.TrimSpace(keyBytes)) + if len(rest) != 0 { + return tls.Certificate{}, errors.New("Unable to load KES client private key as specified by the shell environment: private key contains additional data") + } + if x509.IsEncryptedPEMBlock(privateKeyPEM) { + keyBytes, err = x509.DecryptPEMBlock(privateKeyPEM, []byte(env.Get(EnvKESClientPassword, ""))) + if err != nil { + return tls.Certificate{}, fmt.Errorf("Unable to decrypt KES client private key as specified by the shell environment: %v", err) + } + keyBytes = pem.EncodeToMemory(&pem.Block{Type: privateKeyPEM.Type, Bytes: keyBytes}) + } + certificate, err := tls.X509KeyPair(certBytes, keyBytes) + if err != nil { + return tls.Certificate{}, fmt.Errorf("Unable to load KES client certificate as specified by the shell environment: %v", err) + } + return certificate, nil + } + + certificate, err := certs.NewCertificate(env.Get(EnvKESClientCert, ""), env.Get(EnvKESClientKey, ""), loadX509KeyPair) + if err != nil { + return nil, err + } + certificate.Watch(ctx, 15*time.Minute, syscall.SIGHUP) + + conf.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { + cert := certificate.Get() + return &cert, nil + } + } + + var caDir string + if opts != nil { + caDir = opts.CADir + } + conf.RootCAs, err = certs.GetRootCAs(env.Get(EnvKESServerCA, caDir)) + if err != nil { + return nil, err + } + + client := kes.NewClientWithConfig("", conf) + client.Endpoints = endpoints + + // Keep the default key in the KES cache to prevent availability issues + // when MinIO restarts + go func() { + timer := time.NewTicker(10 * time.Second) + defer timer.Stop() + defaultKey := env.Get(EnvKESDefaultKey, "") + for { + select { + case <-ctx.Done(): + return + case <-timer.C: + client.DescribeKey(ctx, defaultKey) + } + } + }() + + return &KMS{ + Type: MinKES, + DefaultKey: env.Get(EnvKESDefaultKey, ""), + conn: &kesConn{ + defaultKeyID: env.Get(EnvKESDefaultKey, ""), + client: client, + }, + latencyBuckets: defaultLatencyBuckets, + latency: make([]atomic.Uint64, len(defaultLatencyBuckets)), + }, nil + default: + var s string + if lookup(EnvKMSSecretKeyFile) { + b, err := os.ReadFile(env.Get(EnvKMSSecretKeyFile, "")) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if os.IsNotExist(err) { + // Relative path where "/run/secrets" is the default docker path for secrets + b, err = os.ReadFile(filepath.Join("/run/secrets", env.Get(EnvKMSSecretKeyFile, ""))) + } + if err != nil { + return nil, err + } + s = string(b) + } else { + s = env.Get(EnvKMSSecretKey, "") + } + return ParseSecretKey(s) + } +} + +// IsPresent reports whether a KMS configuration is present. +// It returns an error if multiple KMS configurations are +// present or if one configuration is incomplete. +func IsPresent() (bool, error) { + // isPresent reports whether at least one of the + // given env. variables is present. + isPresent := func(vars ...string) bool { + for _, v := range vars { + if _, ok := os.LookupEnv(v); ok { + return ok + } + } + return false + } + + // First, check which KMS/KES env. variables are present. 
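Connect selects the backend purely from the environment. Below is a hedged sketch of bootstrapping against a MinIO KMS endpoint; the endpoint, enclave, key name, API key and CA path are placeholders, and in the server these variables are normally set in the process environment rather than from code.

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/minio/minio/internal/kms"
)

func main() {
	// Placeholder values - real deployments export these before starting MinIO.
	os.Setenv("MINIO_KMS_SERVER", "https://kms-1.example.internal:7373")
	os.Setenv("MINIO_KMS_ENCLAVE", "minio")
	os.Setenv("MINIO_KMS_SSE_KEY", "minio-default-key")
	os.Setenv("MINIO_KMS_API_KEY", "k1:placeholder-api-key")

	k, err := kms.Connect(context.Background(), &kms.ConnectionOptions{
		CADir: "/etc/minio/certs/CAs", // optional CA bundle for the KMS TLS cert
	})
	if err != nil {
		log.Fatalln("KMS bootstrap failed:", err)
	}
	log.Println("connected to", k.Type, "default key:", k.DefaultKey)
}
```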
+ // Only one set, either KMS, KES or static key must be + // present. + kmsPresent := isPresent( + EnvKMSEndpoint, + EnvKMSEnclave, + EnvKMSAPIKey, + EnvKMSDefaultKey, + ) + kesPresent := isPresent( + EnvKESEndpoint, + EnvKESDefaultKey, + EnvKESAPIKey, + EnvKESClientKey, + EnvKESClientCert, + EnvKESClientPassword, + EnvKESServerCA, + ) + // We have to handle a special case for MINIO_KMS_SECRET_KEY and + // MINIO_KMS_SECRET_KEY_FILE. The docker image always sets the + // MINIO_KMS_SECRET_KEY_FILE - either to the argument passed to + // the container or to a default string (e.g. "minio_master_key"). + // + // We have to distinguish a explicit config from an implicit. Hence, + // we unset the env. vars if they are set but empty or contain a path + // which does not exist. The downside of this check is that if + // MINIO_KMS_SECRET_KEY_FILE is set to a path that does not exist, + // the server does not complain and start without a KMS config. + // + // Until the container image changes, this behavior has to be preserved. + if isPresent(EnvKMSSecretKey) && os.Getenv(EnvKMSSecretKey) == "" { + os.Unsetenv(EnvKMSSecretKey) + } + if isPresent(EnvKMSSecretKeyFile) { + if filename := os.Getenv(EnvKMSSecretKeyFile); filename == "" { + os.Unsetenv(EnvKMSSecretKeyFile) + } else if _, err := os.Stat(filename); errors.Is(err, os.ErrNotExist) { + os.Unsetenv(EnvKMSSecretKeyFile) + } + } + // Now, the static key env. vars are only present if they contain explicit + // values. + staticKeyPresent := isPresent(EnvKMSSecretKey, EnvKMSSecretKeyFile) + + switch { + case kmsPresent && kesPresent: + return false, errors.New("kms: configuration for MinIO KMS and MinIO KES is present") + case kmsPresent && staticKeyPresent: + return false, errors.New("kms: configuration for MinIO KMS and static KMS key is present") + case kesPresent && staticKeyPresent: + return false, errors.New("kms: configuration for MinIO KES and static KMS key is present") + } + + // Next, we check that all required configuration for the concrete + // KMS is present. + // For example, the MinIO KMS requires an endpoint or a list of + // endpoints and authentication credentials. However, a path to + // CA certificates is optional. 
+ switch { + default: + return false, nil // No KMS config present + case kmsPresent: + if !isPresent(EnvKMSEndpoint) { + return false, fmt.Errorf("kms: incomplete configuration for MinIO KMS: missing '%s'", EnvKMSEndpoint) + } + if !isPresent(EnvKMSEnclave) { + return false, fmt.Errorf("kms: incomplete configuration for MinIO KMS: missing '%s'", EnvKMSEnclave) + } + if !isPresent(EnvKMSDefaultKey) { + return false, fmt.Errorf("kms: incomplete configuration for MinIO KMS: missing '%s'", EnvKMSDefaultKey) + } + if !isPresent(EnvKMSAPIKey) { + return false, fmt.Errorf("kms: incomplete configuration for MinIO KMS: missing '%s'", EnvKMSAPIKey) + } + return true, nil + case staticKeyPresent: + if isPresent(EnvKMSSecretKey) && isPresent(EnvKMSSecretKeyFile) { + return false, fmt.Errorf("kms: invalid configuration for static KMS key: '%s' and '%s' are present", EnvKMSSecretKey, EnvKMSSecretKeyFile) + } + return true, nil + case kesPresent: + if !isPresent(EnvKESEndpoint) { + return false, fmt.Errorf("kms: incomplete configuration for MinIO KES: missing '%s'", EnvKESEndpoint) + } + if !isPresent(EnvKESDefaultKey) { + return false, fmt.Errorf("kms: incomplete configuration for MinIO KES: missing '%s'", EnvKESDefaultKey) + } + + if isPresent(EnvKESClientKey, EnvKESClientCert, EnvKESClientPassword) { + if isPresent(EnvKESAPIKey) { + return false, fmt.Errorf("kms: invalid configuration for MinIO KES: '%s' and client certificate is present", EnvKESAPIKey) + } + if !isPresent(EnvKESClientCert) { + return false, fmt.Errorf("kms: incomplete configuration for MinIO KES: missing '%s'", EnvKESClientCert) + } + if !isPresent(EnvKESClientKey) { + return false, fmt.Errorf("kms: incomplete configuration for MinIO KES: missing '%s'", EnvKESClientKey) + } + } else if !isPresent(EnvKESAPIKey) { + return false, errors.New("kms: incomplete configuration for MinIO KES: missing authentication method") + } + return true, nil + } +} + +func expandEndpoints(s string) ([]string, error) { + var endpoints []string + for endpoint := range strings.SplitSeq(s, ",") { + endpoint = strings.TrimSpace(endpoint) + if endpoint == "" { + continue + } + if !ellipses.HasEllipses(endpoint) { + endpoints = append(endpoints, endpoint) + continue + } + + pattern, err := ellipses.FindEllipsesPatterns(endpoint) + if err != nil { + return nil, fmt.Errorf("kms: invalid endpoint '%s': %v", endpoint, err) + } + for _, p := range pattern.Expand() { + endpoints = append(endpoints, strings.Join(p, "")) + } + } + return endpoints, nil +} diff --git a/internal/kms/config_test.go b/internal/kms/config_test.go new file mode 100644 index 0000000000000..d63d06525585a --- /dev/null +++ b/internal/kms/config_test.go @@ -0,0 +1,105 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// # This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
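Since expandEndpoints accepts the same ellipses patterns used for drive and host expansion elsewhere in MinIO, a brace range can stand in for a list of KMS servers. The sketch below is a package-internal test that could sit alongside config_test.go (reflect and testing imports assumed); it relies on the usual ascending {1...n} semantics of github.com/minio/pkg/v3/ellipses, so treat the expected slice as an assumption rather than a recorded result.

```go
func TestExpandEndpointsEllipses(t *testing.T) {
	endpoints, err := expandEndpoints("https://kms-{1...3}.example.internal:7373")
	if err != nil {
		t.Fatal(err)
	}
	want := []string{
		"https://kms-1.example.internal:7373",
		"https://kms-2.example.internal:7373",
		"https://kms-3.example.internal:7373",
	}
	if !reflect.DeepEqual(endpoints, want) {
		t.Fatalf("expandEndpoints() = %v, want %v", endpoints, want)
	}
}
```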
+ +package kms + +import ( + "os" + "testing" +) + +func TestIsPresent(t *testing.T) { + for i, test := range isPresentTests { + os.Clearenv() + for k, v := range test.Env { + t.Setenv(k, v) + } + + ok, err := IsPresent() + if err != nil && !test.ShouldFail { + t.Fatalf("Test %d: %v", i, err) + } + if err == nil && test.ShouldFail { + t.Fatalf("Test %d: should have failed but succeeded", i) + } + + if !test.ShouldFail && ok != test.IsPresent { + t.Fatalf("Test %d: reported that KMS present=%v - want present=%v", i, ok, test.IsPresent) + } + } +} + +var isPresentTests = []struct { + Env map[string]string + IsPresent bool + ShouldFail bool +}{ + {Env: map[string]string{}}, // 0 + { // 1 + Env: map[string]string{ + EnvKMSSecretKey: "minioy-default-key:6jEQjjMh8iPq8/gqgb4eMDIZFOtPACIsr9kO+vx8JFs=", + }, + IsPresent: true, + }, + { // 2 + Env: map[string]string{ + EnvKMSEndpoint: "https://127.0.0.1:7373", + EnvKMSDefaultKey: "minio-key", + EnvKMSEnclave: "demo", + EnvKMSAPIKey: "k1:MBDtmC9ZAf3Wi4-oGglgKx_6T1jwJfct1IC15HOxetg", + }, + IsPresent: true, + }, + { // 3 + Env: map[string]string{ + EnvKESEndpoint: "https://127.0.0.1:7373", + EnvKESDefaultKey: "minio-key", + EnvKESAPIKey: "kes:v1:AGtR4PvKXNjz+/MlBX2Djg0qxwS3C4OjoDzsuFSQr82e", + }, + IsPresent: true, + }, + { // 4 + Env: map[string]string{ + EnvKESEndpoint: "https://127.0.0.1:7373", + EnvKESDefaultKey: "minio-key", + EnvKESClientKey: "/tmp/client.key", + EnvKESClientCert: "/tmp/client.crt", + }, + IsPresent: true, + }, + { // 5 + Env: map[string]string{ + EnvKMSEndpoint: "https://127.0.0.1:7373", + EnvKESEndpoint: "https://127.0.0.1:7373", + }, + ShouldFail: true, + }, + { // 6 + Env: map[string]string{ + EnvKMSEndpoint: "https://127.0.0.1:7373", + EnvKMSSecretKey: "minioy-default-key:6jEQjjMh8iPq8/gqgb4eMDIZFOtPACIsr9kO+vx8JFs=", + }, + ShouldFail: true, + }, + { // 7 + Env: map[string]string{ + EnvKMSEnclave: "foo", + EnvKESServerCA: "/etc/minio/certs", + }, + ShouldFail: true, + }, +} diff --git a/internal/kms/conn.go b/internal/kms/conn.go new file mode 100644 index 0000000000000..c63fcb6cb8f81 --- /dev/null +++ b/internal/kms/conn.go @@ -0,0 +1,167 @@ +// Copyright (c) 2015-2021 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package kms + +import ( + "context" + "encoding" + "encoding/json" + "strconv" + + jsoniter "github.com/json-iterator/go" + "github.com/minio/madmin-go/v3" +) + +// conn represents a connection to a KMS implementation. +// It's implemented by the MinKMS and KES client wrappers +// and the static / single key KMS. +type conn interface { + // Version returns version information about the KMS. + // + // TODO(aead): refactor this API call. It does not account + // for multiple endpoints. + Version(context.Context) (string, error) + + // APIs returns a list of APIs supported by the KMS server. + // + // TODO(aead): remove this API call. 
It's hardly useful. + APIs(context.Context) ([]madmin.KMSAPI, error) + + // Stat returns the current KMS status. + Status(context.Context) (map[string]madmin.ItemState, error) + + // CreateKey creates a new key at the KMS with the given key ID. + CreateKey(context.Context, *CreateKeyRequest) error + + ListKeys(context.Context, *ListRequest) ([]madmin.KMSKeyInfo, string, error) + + // GenerateKey generates a new data encryption key using the + // key referenced by the key ID. + // + // The KMS may use a default key if the key ID is empty. + // GenerateKey returns an error if the referenced key does + // not exist. + // + // The context is associated and tied to the generated DEK. + // The same context must be provided when the generated key + // should be decrypted. Therefore, it is the callers + // responsibility to remember the corresponding context for + // a particular DEK. The context may be nil. + GenerateKey(context.Context, *GenerateKeyRequest) (DEK, error) + + // DecryptKey decrypts the ciphertext with the key referenced + // by the key ID. The context must match the context value + // used to generate the ciphertext. + Decrypt(context.Context, *DecryptRequest) ([]byte, error) + + // MAC generates the checksum of the given req.Message using the key + // with the req.Name at the KMS. + MAC(context.Context, *MACRequest) ([]byte, error) +} + +var ( // compiler checks + _ conn = (*kmsConn)(nil) + _ conn = (*kesConn)(nil) + _ conn = secretKey{} +) + +// Supported KMS types +const ( + MinKMS Type = iota + 1 // MinIO KMS + MinKES // MinIO MinKES + Builtin // Builtin single key KMS implementation +) + +// Type identifies the KMS type. +type Type uint + +// String returns the Type's string representation +func (t Type) String() string { + switch t { + case MinKMS: + return "MinIO KMS" + case MinKES: + return "MinIO KES" + case Builtin: + return "MinIO builtin" + default: + return "!INVALID:" + strconv.Itoa(int(t)) + } +} + +// Status describes the current state of a KMS. +type Status struct { + Online map[string]struct{} + Offline map[string]Error +} + +// DEK is a data encryption key. It consists of a +// plaintext-ciphertext pair and the ID of the key +// used to generate the ciphertext. +// +// The plaintext can be used for cryptographic +// operations - like encrypting some data. The +// ciphertext is the encrypted version of the +// plaintext data and can be stored on untrusted +// storage. +type DEK struct { + KeyID string // Name of the master key + Version int // Version of the master key (MinKMS only) + Plaintext []byte // Paintext of the data encryption key + Ciphertext []byte // Ciphertext of the data encryption key +} + +var ( + _ encoding.TextMarshaler = (*DEK)(nil) + _ encoding.TextUnmarshaler = (*DEK)(nil) +) + +// MarshalText encodes the DEK's key ID and ciphertext +// as JSON. +func (d DEK) MarshalText() ([]byte, error) { + type JSON struct { + KeyID string `json:"keyid"` + Version uint32 `json:"version,omitempty"` + Ciphertext []byte `json:"ciphertext"` + } + return json.Marshal(JSON{ + KeyID: d.KeyID, + Version: uint32(d.Version), + Ciphertext: d.Ciphertext, + }) +} + +// UnmarshalText tries to decode text as JSON representation +// of a DEK and sets DEK's key ID and ciphertext to the +// decoded values. +// +// It sets DEK's plaintext to nil. 
+func (d *DEK) UnmarshalText(text []byte) error { + type JSON struct { + KeyID string `json:"keyid"` + Version uint32 `json:"version"` + Ciphertext []byte `json:"ciphertext"` + } + var v JSON + json := jsoniter.ConfigCompatibleWithStandardLibrary + if err := json.Unmarshal(text, &v); err != nil { + return err + } + d.KeyID, d.Version, d.Plaintext, d.Ciphertext = v.KeyID, int(v.Version), nil, v.Ciphertext + return nil +} diff --git a/internal/kms/dek_test.go b/internal/kms/dek_test.go index dd83eca6e876d..12ab164d5087c 100644 --- a/internal/kms/dek_test.go +++ b/internal/kms/dek_test.go @@ -41,6 +41,13 @@ var dekEncodeDecodeTests = []struct { Ciphertext: mustDecodeB64("eyJhZWFkIjoiQUVTLTI1Ni1HQ00tSE1BQy1TSEEtMjU2IiwiaXYiOiJ3NmhLUFVNZXVtejZ5UlVZL29pTFVBPT0iLCJub25jZSI6IktMSEU3UE1jRGo2N2UweHkiLCJieXRlcyI6Ik1wUkhjQWJaTzZ1Sm5lUGJGcnpKTkxZOG9pdkxwTmlUcTNLZ0hWdWNGYkR2Y0RlbEh1c1lYT29zblJWVTZoSXIifQ=="), }, }, + { + Key: DEK{ + Version: 3, + Plaintext: mustDecodeB64("GM2UvLXp/X8lzqq0mibFC0LayDCGlmTHQhYLj7qAy7Q="), + Ciphertext: mustDecodeB64("eyJhZWFkIjoiQUVTLTI1Ni1HQ00tSE1BQy1TSEEtMjU2IiwiaXYiOiJ3NmhLUFVNZXVtejZ5UlVZL29pTFVBPT0iLCJub25jZSI6IktMSEU3UE1jRGo2N2UweHkiLCJieXRlcyI6Ik1wUkhjQWJaTzZ1Sm5lUGJGcnpKTkxZOG9pdkxwTmlUcTNLZ0hWdWNGYkR2Y0RlbEh1c1lYT29zblJWVTZoSXIifQ=="), + }, + }, } func TestEncodeDecodeDEK(t *testing.T) { diff --git a/internal/kms/errors.go b/internal/kms/errors.go index 7900c0a2397e8..9583651ed278d 100644 --- a/internal/kms/errors.go +++ b/internal/kms/errors.go @@ -17,13 +17,112 @@ package kms -// Error encapsulates S3 API error response fields. +import ( + "fmt" + "net/http" +) + +var ( + // ErrPermission is an error returned by the KMS when it has not + // enough permissions to perform the operation. + ErrPermission = Error{ + Code: http.StatusForbidden, + APICode: "kms:NotAuthorized", + Err: "insufficient permissions to perform KMS operation", + } + + // ErrKeyExists is an error returned by the KMS when trying to + // create a key that already exists. + ErrKeyExists = Error{ + Code: http.StatusConflict, + APICode: "kms:KeyAlreadyExists", + Err: "key with given key ID already exits", + } + + // ErrKeyNotFound is an error returned by the KMS when trying to + // use a key that does not exist. + ErrKeyNotFound = Error{ + Code: http.StatusNotFound, + APICode: "kms:KeyNotFound", + Err: "key with given key ID does not exist", + } + + // ErrDecrypt is an error returned by the KMS when the decryption + // of a ciphertext failed. + ErrDecrypt = Error{ + Code: http.StatusBadRequest, + APICode: "kms:InvalidCiphertextException", + Err: "failed to decrypt ciphertext", + } + + // ErrNotSupported is an error returned by the KMS when the requested + // functionality is not supported by the KMS service. + ErrNotSupported = Error{ + Code: http.StatusNotImplemented, + APICode: "kms:NotSupported", + Err: "requested functionality is not supported", + } +) + +// Error is a KMS error that can be translated into an S3 API error. +// +// It does not implement the standard error Unwrap interface for +// better error log messages. type Error struct { - Err error - APICode string - HTTPStatusCode int + Code int // The HTTP status code returned to the client + APICode string // The API error code identifying the error + Err string // The error message returned to the client + Cause error // Optional, lower level error cause. 
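The DEK text encoding above deliberately drops the plaintext, so only the key ID, version and ciphertext survive a round trip. A short sketch with dummy values:

```go
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio/internal/kms"
)

func main() {
	dek := kms.DEK{
		KeyID:      "my-minio-key", // dummy values throughout
		Version:    2,
		Plaintext:  []byte("secret data key"),
		Ciphertext: []byte("sealed by the KMS"),
	}

	text, err := dek.MarshalText() // JSON with keyid, version and base64 ciphertext
	if err != nil {
		log.Fatalln(err)
	}

	var stored kms.DEK
	if err := stored.UnmarshalText(text); err != nil {
		log.Fatalln(err)
	}
	// stored.Plaintext is nil by design: it has to be recovered later via
	// Decrypt, using the same associated data the DEK was generated with.
	fmt.Printf("%s v%d plaintext-dropped=%v\n", stored.KeyID, stored.Version, stored.Plaintext == nil)
}
```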
} func (e Error) Error() string { - return e.Err.Error() + if e.Cause == nil { + return e.Err + } + return fmt.Sprintf("%s: %v", e.Err, e.Cause) +} + +func errKeyCreationFailed(err error) Error { + return Error{ + Code: http.StatusInternalServerError, + APICode: "kms:KeyCreationFailed", + Err: "failed to create KMS key", + Cause: err, + } +} + +func errKeyDeletionFailed(err error) Error { + return Error{ + Code: http.StatusInternalServerError, + APICode: "kms:KeyDeletionFailed", + Err: "failed to delete KMS key", + Cause: err, + } +} + +func errListingKeysFailed(err error) Error { + return Error{ + Code: http.StatusInternalServerError, + APICode: "kms:KeyListingFailed", + Err: "failed to list keys at the KMS", + Cause: err, + } +} + +func errKeyGenerationFailed(err error) Error { + return Error{ + Code: http.StatusInternalServerError, + APICode: "kms:KeyGenerationFailed", + Err: "failed to generate data key with KMS key", + Cause: err, + } +} + +func errDecryptionFailed(err error) Error { + return Error{ + Code: http.StatusInternalServerError, + APICode: "kms:DecryptionFailed", + Err: "failed to decrypt ciphertext with KMS key", + Cause: err, + } } diff --git a/internal/kms/identity-manager.go b/internal/kms/identity-manager.go deleted file mode 100644 index ad3a1e8aefa21..0000000000000 --- a/internal/kms/identity-manager.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright (c) 2015-2022 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package kms - -import ( - "context" - - "github.com/minio/kms-go/kes" -) - -// IdentityManager is the generic interface that handles KMS identity operations -type IdentityManager interface { - // DescribeIdentity describes an identity by returning its metadata. - // e.g. which policy is currently assigned and whether its an admin identity. - DescribeIdentity(ctx context.Context, identity string) (*kes.IdentityInfo, error) - - // DescribeSelfIdentity describes the identity issuing the request. - // It infers the identity from the TLS client certificate used to authenticate. - // It returns the identity and policy information for the client identity. - DescribeSelfIdentity(ctx context.Context) (*kes.IdentityInfo, *kes.Policy, error) - - // ListIdentities lists all identities. 
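Because each Error now carries the HTTP status, API error code and an optional cause, callers can translate KMS failures into S3-style responses without string matching. The handler and JSON shape below are illustrative only; MinIO's real S3 error encoder lives elsewhere.

```go
package main

import (
	"encoding/json"
	"errors"
	"net/http"

	"github.com/minio/minio/internal/kms"
)

// writeKMSError maps a kms.Error onto an HTTP response (illustrative shape).
func writeKMSError(w http.ResponseWriter, err error) {
	var kerr kms.Error
	if !errors.As(err, &kerr) {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(kerr.Code)
	_ = json.NewEncoder(w).Encode(map[string]string{
		"Code":    kerr.APICode, // e.g. "kms:KeyNotFound"
		"Message": kerr.Err,     // kerr.Cause is kept for server-side logs only
	})
}

func main() {
	http.HandleFunc("/demo", func(w http.ResponseWriter, r *http.Request) {
		writeKMSError(w, kms.ErrKeyNotFound)
	})
	_ = http.ListenAndServe(":8080", nil)
}
```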
- ListIdentities(ctx context.Context) (*kes.ListIter[kes.Identity], error) -} diff --git a/internal/kms/kes.go b/internal/kms/kes.go index 8a9d943008123..8bd387c2730ac 100644 --- a/internal/kms/kes.go +++ b/internal/kms/kes.go @@ -18,204 +18,124 @@ package kms import ( - "bytes" "context" - "crypto/subtle" - "crypto/tls" - "crypto/x509" "errors" - "fmt" - "strings" + "net/http" "sync" + "time" "github.com/minio/kms-go/kes" - "github.com/minio/pkg/v2/certs" - "github.com/minio/pkg/v2/env" + "github.com/minio/madmin-go/v3" ) -const ( - tlsClientSessionCacheSize = 100 -) - -// Config contains various KMS-related configuration -// parameters - like KMS endpoints or authentication -// credentials. -type Config struct { - // Endpoints contains a list of KMS server - // HTTP endpoints. - Endpoints []string - - // DefaultKeyID is the key ID used when - // no explicit key ID is specified for - // a cryptographic operation. - DefaultKeyID string - - // APIKey is an credential provided by env. var. - // to authenticate to a KES server. Either an - // API key or a client certificate must be specified. - APIKey kes.APIKey - - // Certificate is the client TLS certificate - // to authenticate to KMS via mTLS. - Certificate *certs.Certificate - - // ReloadCertEvents is an event channel that receives - // the reloaded client certificate. - ReloadCertEvents <-chan tls.Certificate +type kesConn struct { + defaultKeyID string + client *kes.Client +} - // RootCAs is a set of root CA certificates - // to verify the KMS server TLS certificate. - RootCAs *x509.CertPool +func (c *kesConn) Version(ctx context.Context) (string, error) { + return c.client.Version(ctx) } -// NewWithConfig returns a new KMS using the given -// configuration. -func NewWithConfig(config Config) (KMS, error) { - if len(config.Endpoints) == 0 { - return nil, errors.New("kms: no server endpoints") +func (c *kesConn) APIs(ctx context.Context) ([]madmin.KMSAPI, error) { + APIs, err := c.client.APIs(ctx) + if err != nil { + if errors.Is(err, kes.ErrNotAllowed) { + return nil, ErrPermission + } + return nil, Error{ + Code: http.StatusInternalServerError, + APICode: "kms:InternalError", + Err: "failed to list KMS APIs", + Cause: err, + } } - endpoints := make([]string, len(config.Endpoints)) // Copy => avoid being affect by any changes to the original slice - copy(endpoints, config.Endpoints) - var client *kes.Client - if config.APIKey != nil { - cert, err := kes.GenerateCertificate(config.APIKey) - if err != nil { - return nil, err - } - client = kes.NewClientWithConfig("", &tls.Config{ - MinVersion: tls.VersionTLS12, - Certificates: []tls.Certificate{cert}, - RootCAs: config.RootCAs, - ClientSessionCache: tls.NewLRUClientSessionCache(tlsClientSessionCacheSize), - }) - } else { - client = kes.NewClientWithConfig("", &tls.Config{ - MinVersion: tls.VersionTLS12, - Certificates: []tls.Certificate{config.Certificate.Get()}, - RootCAs: config.RootCAs, - ClientSessionCache: tls.NewLRUClientSessionCache(tlsClientSessionCacheSize), + list := make([]madmin.KMSAPI, 0, len(APIs)) + for _, api := range APIs { + list = append(list, madmin.KMSAPI{ + Method: api.Method, + Path: api.Path, + MaxBody: api.MaxBody, + Timeout: int64(api.Timeout.Truncate(time.Second).Seconds()), }) } - client.Endpoints = endpoints + return list, nil +} - c := &kesClient{ - client: client, - defaultKeyID: config.DefaultKeyID, - } - go func() { - if config.Certificate == nil || config.ReloadCertEvents == nil { - return - } - var prevCertificate tls.Certificate - for { - certificate, 
ok := <-config.ReloadCertEvents - if !ok { - return +// Stat returns the current KES status containing a +// list of KES endpoints and the default key ID. +func (c *kesConn) Status(ctx context.Context) (map[string]madmin.ItemState, error) { + if len(c.client.Endpoints) == 1 { + if _, err := c.client.Status(ctx); err != nil { + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return nil, err } - sameCert := len(certificate.Certificate) == len(prevCertificate.Certificate) - for i, b := range certificate.Certificate { - if !sameCert { - break - } - sameCert = sameCert && bytes.Equal(b, prevCertificate.Certificate[i]) + if errors.Is(err, kes.ErrNotAllowed) { + return nil, ErrPermission } - // Do not reload if its the same cert as before. - if !sameCert { - client := kes.NewClientWithConfig("", &tls.Config{ - MinVersion: tls.VersionTLS12, - Certificates: []tls.Certificate{certificate}, - RootCAs: config.RootCAs, - ClientSessionCache: tls.NewLRUClientSessionCache(tlsClientSessionCacheSize), - }) - client.Endpoints = endpoints - - c.lock.Lock() - c.client = client - c.lock.Unlock() - prevCertificate = certificate - } + return map[string]madmin.ItemState{ + c.client.Endpoints[0]: madmin.ItemOffline, + }, nil } - }() - return c, nil -} + return map[string]madmin.ItemState{ + c.client.Endpoints[0]: madmin.ItemOnline, + }, nil + } -type kesClient struct { - lock sync.RWMutex - defaultKeyID string - client *kes.Client -} + type Result struct { + Endpoint string + ItemState madmin.ItemState + } -var ( // compiler checks - _ KMS = (*kesClient)(nil) - _ KeyManager = (*kesClient)(nil) - _ IdentityManager = (*kesClient)(nil) - _ PolicyManager = (*kesClient)(nil) -) + var wg sync.WaitGroup + results := make([]Result, len(c.client.Endpoints)) + for i := range c.client.Endpoints { + wg.Add(1) + go func(i int) { + defer wg.Done() -// Stat returns the current KES status containing a -// list of KES endpoints and the default key ID. -func (c *kesClient) Stat(ctx context.Context) (Status, error) { - c.lock.RLock() - defer c.lock.RUnlock() + client := kes.Client{ + Endpoints: []string{c.client.Endpoints[i]}, + HTTPClient: c.client.HTTPClient, + } - st, err := c.client.Status(ctx) - if err != nil { - return Status{}, err + var item madmin.ItemState + if _, err := client.Status(ctx); err == nil { + item = madmin.ItemOnline + } else { + item = madmin.ItemOffline + } + results[i] = Result{ + Endpoint: c.client.Endpoints[i], + ItemState: item, + } + }(i) } - endpoints := make([]string, len(c.client.Endpoints)) - copy(endpoints, c.client.Endpoints) - return Status{ - Name: "KES", - Endpoints: endpoints, - DefaultKey: c.defaultKeyID, - Details: st, - }, nil -} + wg.Wait() -// IsLocal returns true if the KMS is a local implementation -func (c *kesClient) IsLocal() bool { - return env.IsSet(EnvKMSSecretKey) -} - -// List returns an array of local KMS Names -func (c *kesClient) List() []kes.KeyInfo { - var kmsSecret []kes.KeyInfo - envKMSSecretKey := env.Get(EnvKMSSecretKey, "") - values := strings.SplitN(envKMSSecretKey, ":", 2) - if len(values) == 2 { - kmsSecret = []kes.KeyInfo{ - { - Name: values[0], - }, + status := make(map[string]madmin.ItemState, len(results)) + for _, r := range results { + if r.ItemState == madmin.ItemOnline { + status[r.Endpoint] = madmin.ItemOnline + } else { + status[r.Endpoint] = madmin.ItemOffline } } - return kmsSecret + return status, nil } -// Metrics retrieves server metrics in the Prometheus exposition format. 
-func (c *kesClient) Metrics(ctx context.Context) (kes.Metric, error) { - c.lock.RLock() - defer c.lock.RUnlock() - - return c.client.Metrics(ctx) -} - -// Version retrieves version information -func (c *kesClient) Version(ctx context.Context) (string, error) { - c.lock.RLock() - defer c.lock.RUnlock() - - return c.client.Version(ctx) -} - -// APIs retrieves a list of supported API endpoints -func (c *kesClient) APIs(ctx context.Context) ([]kes.API, error) { - c.lock.RLock() - defer c.lock.RUnlock() - - return c.client.APIs(ctx) +func (c *kesConn) ListKeys(ctx context.Context, req *ListRequest) ([]madmin.KMSKeyInfo, string, error) { + names, continueAt, err := c.client.ListKeys(ctx, req.Prefix, req.Limit) + if err != nil { + return nil, "", err + } + keyInfos := make([]madmin.KMSKeyInfo, len(names)) + for i := range names { + keyInfos[i].Name = names[i] + } + return keyInfos, continueAt, nil } // CreateKey tries to create a new key at the KMS with the @@ -223,32 +143,34 @@ func (c *kesClient) APIs(ctx context.Context) ([]kes.API, error) { // // If the a key with the same keyID already exists then // CreateKey returns kes.ErrKeyExists. -func (c *kesClient) CreateKey(ctx context.Context, keyID string) error { - c.lock.RLock() - defer c.lock.RUnlock() - - return c.client.CreateKey(ctx, keyID) +func (c *kesConn) CreateKey(ctx context.Context, req *CreateKeyRequest) error { + if err := c.client.CreateKey(ctx, req.Name); err != nil { + if errors.Is(err, kes.ErrKeyExists) { + return ErrKeyExists + } + if errors.Is(err, kes.ErrNotAllowed) { + return ErrPermission + } + return errKeyCreationFailed(err) + } + return nil } // DeleteKey deletes a key at the KMS with the given key ID. // Please note that is a dangerous operation. // Once a key has been deleted all data that has been encrypted with it cannot be decrypted // anymore, and therefore, is lost. -func (c *kesClient) DeleteKey(ctx context.Context, keyID string) error { - c.lock.RLock() - defer c.lock.RUnlock() - - return c.client.DeleteKey(ctx, keyID) -} - -// ListKeys returns an iterator over all key names. -func (c *kesClient) ListKeys(ctx context.Context) (*kes.ListIter[string], error) { - c.lock.RLock() - defer c.lock.RUnlock() - - return &kes.ListIter[string]{ - NextFunc: c.client.ListKeys, - }, nil +func (c *kesConn) DeleteKey(ctx context.Context, req *DeleteKeyRequest) error { + if err := c.client.DeleteKey(ctx, req.Name); err != nil { + if errors.Is(err, kes.ErrKeyNotFound) { + return ErrKeyNotFound + } + if errors.Is(err, kes.ErrNotAllowed) { + return ErrPermission + } + return errKeyDeletionFailed(err) + } + return nil } // GenerateKey generates a new data encryption key using @@ -259,34 +181,36 @@ func (c *kesClient) ListKeys(ctx context.Context) (*kes.ListIter[string], error) // The context is associated and tied to the generated DEK. // The same context must be provided when the generated // key should be decrypted. 
-func (c *kesClient) GenerateKey(ctx context.Context, keyID string, cryptoCtx Context) (DEK, error) { - c.lock.RLock() - defer c.lock.RUnlock() - - if keyID == "" { - keyID = c.defaultKeyID - } - ctxBytes, err := cryptoCtx.MarshalText() +func (c *kesConn) GenerateKey(ctx context.Context, req *GenerateKeyRequest) (DEK, error) { + aad, err := req.AssociatedData.MarshalText() if err != nil { return DEK{}, err } - dek, err := c.client.GenerateKey(ctx, keyID, ctxBytes) + name := req.Name + if name == "" { + name = c.defaultKeyID + } + + dek, err := c.client.GenerateKey(ctx, name, aad) if err != nil { - return DEK{}, err + if errors.Is(err, kes.ErrKeyNotFound) { + return DEK{}, ErrKeyNotFound + } + if errors.Is(err, kes.ErrNotAllowed) { + return DEK{}, ErrPermission + } + return DEK{}, errKeyGenerationFailed(err) } return DEK{ - KeyID: keyID, + KeyID: name, Plaintext: dek.Plaintext, Ciphertext: dek.Ciphertext, }, nil } // ImportKey imports a cryptographic key into the KMS. -func (c *kesClient) ImportKey(ctx context.Context, keyID string, bytes []byte) error { - c.lock.RLock() - defer c.lock.RUnlock() - +func (c *kesConn) ImportKey(ctx context.Context, keyID string, bytes []byte) error { return c.client.ImportKey(ctx, keyID, &kes.ImportKeyRequest{ Key: bytes, }) @@ -294,10 +218,7 @@ func (c *kesClient) ImportKey(ctx context.Context, keyID string, bytes []byte) e // EncryptKey Encrypts and authenticates a (small) plaintext with the cryptographic key // The plaintext must not exceed 1 MB -func (c *kesClient) EncryptKey(keyID string, plaintext []byte, ctx Context) ([]byte, error) { - c.lock.RLock() - defer c.lock.RUnlock() - +func (c *kesConn) EncryptKey(keyID string, plaintext []byte, ctx Context) ([]byte, error) { ctxBytes, err := ctx.MarshalText() if err != nil { return nil, err @@ -308,145 +229,42 @@ func (c *kesClient) EncryptKey(keyID string, plaintext []byte, ctx Context) ([]b // DecryptKey decrypts the ciphertext with the key at the KES // server referenced by the key ID. The context must match the // context value used to generate the ciphertext. -func (c *kesClient) DecryptKey(keyID string, ciphertext []byte, ctx Context) ([]byte, error) { - c.lock.RLock() - defer c.lock.RUnlock() - - ctxBytes, err := ctx.MarshalText() +func (c *kesConn) Decrypt(ctx context.Context, req *DecryptRequest) ([]byte, error) { + aad, err := req.AssociatedData.MarshalText() if err != nil { return nil, err } - return c.client.Decrypt(context.Background(), keyID, ciphertext, ctxBytes) -} -func (c *kesClient) DecryptAll(ctx context.Context, keyID string, ciphertexts [][]byte, contexts []Context) ([][]byte, error) { - c.lock.RLock() - defer c.lock.RUnlock() - - plaintexts := make([][]byte, 0, len(ciphertexts)) - for i := range ciphertexts { - ctxBytes, err := contexts[i].MarshalText() - if err != nil { - return nil, err + plaintext, err := c.client.Decrypt(context.Background(), req.Name, req.Ciphertext, aad) + if err != nil { + if errors.Is(err, kes.ErrKeyNotFound) { + return nil, ErrKeyNotFound + } + if errors.Is(err, kes.ErrDecrypt) { + return nil, ErrDecrypt } - plaintext, err := c.client.Decrypt(ctx, keyID, ciphertexts[i], ctxBytes) - if err != nil { - return nil, err + if errors.Is(err, kes.ErrNotAllowed) { + return nil, ErrPermission } - plaintexts = append(plaintexts, plaintext) + return nil, errDecryptionFailed(err) } - return plaintexts, nil -} - -// HMAC generates the HMAC checksum of the given msg using the key -// with the given keyID at the KMS. 
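Note on the GenerateKey/Decrypt pair above: the Context passed as AssociatedData is marshaled and cryptographically bound to the DEK, so the exact same context must be supplied again on Decrypt. A hedged sketch of that roundtrip is shown below; it uses the builtin single-key backend added later in this diff (secret-key.go), assumes code living inside the MinIO module (internal packages are not importable from outside), and reuses the test key from secret-key_test.go. The bucket/object values are examples only.

package main

import (
	"bytes"
	"context"
	"fmt"
	"log"

	"github.com/minio/minio/internal/kms"
)

func main() {
	// Builtin single-key backend; the key value is the one used by the package's own tests.
	k, err := kms.ParseSecretKey("my-key:eEm+JI9/q4JhH8QwKvf3LKo4DEBl6QbfvAl1CAbMIv8=")
	if err != nil {
		log.Fatal(err)
	}

	// Example associated data; the exact same map must be supplied on Decrypt.
	aad := kms.Context{"bucket": "photos", "object": "2024/cat.png"}

	dek, err := k.GenerateKey(context.Background(), &kms.GenerateKeyRequest{
		Name:           "my-key",
		AssociatedData: aad,
	})
	if err != nil {
		log.Fatal(err)
	}

	plaintext, err := k.Decrypt(context.Background(), &kms.DecryptRequest{
		Name:           dek.KeyID,
		Ciphertext:     dek.Ciphertext,
		AssociatedData: aad,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("roundtrip ok:", bytes.Equal(plaintext, dek.Plaintext))
}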
-func (c *kesClient) HMAC(ctx context.Context, keyID string, msg []byte) ([]byte, error) { - c.lock.RLock() - defer c.lock.RUnlock() - - return c.client.HMAC(context.Background(), keyID, msg) + return plaintext, nil } -// DescribePolicy describes a policy by returning its metadata. -// e.g. who created the policy at which point in time. -func (c *kesClient) DescribePolicy(ctx context.Context, policy string) (*kes.PolicyInfo, error) { - c.lock.RLock() - defer c.lock.RUnlock() - - return c.client.DescribePolicy(ctx, policy) -} - -// ListPolicies returns an iterator over all policy names. -func (c *kesClient) ListPolicies(ctx context.Context) (*kes.ListIter[string], error) { - c.lock.RLock() - defer c.lock.RUnlock() - - return &kes.ListIter[string]{ - NextFunc: c.client.ListPolicies, - }, nil -} - -// GetPolicy gets a policy from KMS. -func (c *kesClient) GetPolicy(ctx context.Context, policy string) (*kes.Policy, error) { - c.lock.RLock() - defer c.lock.RUnlock() - - return c.client.GetPolicy(ctx, policy) -} - -// DescribeIdentity describes an identity by returning its metadata. -// e.g. which policy is currently assigned and whether its an admin identity. -func (c *kesClient) DescribeIdentity(ctx context.Context, identity string) (*kes.IdentityInfo, error) { - c.lock.RLock() - defer c.lock.RUnlock() - - return c.client.DescribeIdentity(ctx, kes.Identity(identity)) -} - -// DescribeSelfIdentity describes the identity issuing the request. -// It infers the identity from the TLS client certificate used to authenticate. -// It returns the identity and policy information for the client identity. -func (c *kesClient) DescribeSelfIdentity(ctx context.Context) (*kes.IdentityInfo, *kes.Policy, error) { - c.lock.RLock() - defer c.lock.RUnlock() - - return c.client.DescribeSelf(ctx) -} - -// ListPolicies returns an iterator over all identities. -func (c *kesClient) ListIdentities(ctx context.Context) (*kes.ListIter[kes.Identity], error) { - c.lock.RLock() - defer c.lock.RUnlock() - - return &kes.ListIter[kes.Identity]{ - NextFunc: c.client.ListIdentities, - }, nil -} - -// Verify verifies all KMS endpoints and returns details -func (c *kesClient) Verify(ctx context.Context) []VerifyResult { - c.lock.RLock() - defer c.lock.RUnlock() - - results := []VerifyResult{} - kmsContext := Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation - for _, endpoint := range c.client.Endpoints { - client := kes.Client{ - Endpoints: []string{endpoint}, - HTTPClient: c.client.HTTPClient, - } - - // 1. Get stats for the KES instance - state, err := client.Status(ctx) - if err != nil { - results = append(results, VerifyResult{Status: "offline", Endpoint: endpoint}) - continue - } - - // 2. Generate a new key using the KMS. - kmsCtx, err := kmsContext.MarshalText() - if err != nil { - results = append(results, VerifyResult{Status: "offline", Endpoint: endpoint}) - continue +// MAC generates the checksum of the given req.Message using the key +// with the req.Name at the KMS. 
+func (c *kesConn) MAC(ctx context.Context, req *MACRequest) ([]byte, error) { + mac, err := c.client.HMAC(context.Background(), req.Name, req.Message) + if err != nil { + if errors.Is(err, kes.ErrKeyNotFound) { + return nil, ErrKeyNotFound } - result := VerifyResult{Status: "online", Endpoint: endpoint, Version: state.Version} - key, err := client.GenerateKey(ctx, env.Get(EnvKESKeyName, ""), kmsCtx) - if err != nil { - result.Encrypt = fmt.Sprintf("Encryption failed: %v", err) - } else { - result.Encrypt = "success" + if errors.Is(err, kes.ErrNotAllowed) { + return nil, ErrPermission } - // 3. Verify that we can indeed decrypt the (encrypted) key - decryptedKey, err := client.Decrypt(ctx, env.Get(EnvKESKeyName, ""), key.Ciphertext, kmsCtx) - switch { - case err != nil: - result.Decrypt = fmt.Sprintf("Decryption failed: %v", err) - case subtle.ConstantTimeCompare(key.Plaintext, decryptedKey) != 1: - result.Decrypt = "Decryption failed: decrypted key does not match generated key" - default: - result.Decrypt = "success" + if kErr, ok := err.(kes.Error); ok && kErr.Status() == http.StatusNotImplemented { + return nil, ErrNotSupported } - results = append(results, result) } - return results + return mac, nil } diff --git a/internal/kms/key-manager.go b/internal/kms/key-manager.go deleted file mode 100644 index 414272d5065ab..0000000000000 --- a/internal/kms/key-manager.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (c) 2015-2022 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package kms - -import ( - "context" - - "github.com/minio/kms-go/kes" -) - -// KeyManager is the generic interface that handles KMS key operations -type KeyManager interface { - // CreateKey creates a new key at the KMS with the given key ID. - CreateKey(ctx context.Context, keyID string) error - - // DeleteKey deletes a key at the KMS with the given key ID. - // Please note that is a dangerous operation. - // Once a key has been deleted all data that has been encrypted with it cannot be decrypted - // anymore, and therefore, is lost. - DeleteKey(ctx context.Context, keyID string) error - - // ListKeys lists all key names. - ListKeys(ctx context.Context) (*kes.ListIter[string], error) - - // ImportKey imports a cryptographic key into the KMS. - ImportKey(ctx context.Context, keyID string, bytes []byte) error - - // EncryptKey Encrypts and authenticates a (small) plaintext with the cryptographic key - // The plaintext must not exceed 1 MB - EncryptKey(keyID string, plaintext []byte, context Context) ([]byte, error) - - // HMAC computes the HMAC of the given msg and key with the given - // key ID. 
- HMAC(ctx context.Context, keyID string, msg []byte) ([]byte, error) -} diff --git a/internal/kms/kms.go b/internal/kms/kms.go index 04bfee7ba5bc0..414d3795f50cb 100644 --- a/internal/kms/kms.go +++ b/internal/kms/kms.go @@ -19,132 +19,402 @@ package kms import ( "context" - "encoding" - "encoding/json" + "errors" + "net/http" + "slices" + "sync/atomic" + "time" - jsoniter "github.com/json-iterator/go" - "github.com/minio/kms-go/kes" + "github.com/minio/kms-go/kms" + "github.com/minio/madmin-go/v3" ) -// KMS is the generic interface that abstracts over -// different KMS implementations. -type KMS interface { - // Stat returns the current KMS status. - Stat(cxt context.Context) (Status, error) +// ListRequest is a structure containing fields +// and options for listing keys. +type ListRequest struct { + // Prefix is an optional prefix for filtering names. + // A list operation only returns elements that match + // this prefix. + // An empty prefix matches any value. + Prefix string - // IsLocal returns true if the KMS is a local implementation - IsLocal() bool + // ContinueAt is the name of the element from where + // a listing should continue. It allows paginated + // listings. + ContinueAt string - // List returns an array of local KMS Names - List() []kes.KeyInfo + // Limit limits the number of elements returned by + // a single list operation. If <= 0, a reasonable + // limit is selected automatically. + Limit int +} + +// CreateKeyRequest is a structure containing fields +// and options for creating keys. +type CreateKeyRequest struct { + // Name is the name of the key that gets created. + Name string +} - // Metrics returns a KMS metric snapshot. - Metrics(ctx context.Context) (kes.Metric, error) +// DeleteKeyRequest is a structure containing fields +// and options for deleting keys. +type DeleteKeyRequest struct { + // Name is the name of the key that gets deleted. + Name string +} - // CreateKey creates a new key at the KMS with the given key ID. - CreateKey(ctx context.Context, keyID string) error +// GenerateKeyRequest is a structure containing fields +// and options for generating data keys. +type GenerateKeyRequest struct { + // Name is the name of the master key used to generate + // the data key. + Name string - // GenerateKey generates a new data encryption key using the - // key referenced by the key ID. - // - // The KMS may use a default key if the key ID is empty. - // GenerateKey returns an error if the referenced key does - // not exist. + // AssociatedData is optional data that is cryptographically + // associated with the generated data key. The same data + // must be provided when decrypting an encrypted data key. // - // The context is associated and tied to the generated DEK. - // The same context must be provided when the generated key - // should be decrypted. Therefore, it is the callers - // responsibility to remember the corresponding context for - // a particular DEK. The context may be nil. - GenerateKey(ctx context.Context, keyID string, context Context) (DEK, error) - - // DecryptKey decrypts the ciphertext with the key referenced - // by the key ID. The context must match the context value - // used to generate the ciphertext. - DecryptKey(keyID string, ciphertext []byte, context Context) ([]byte, error) - - // DecryptAll decrypts all ciphertexts with the key referenced - // by the key ID. The contexts must match the context value - // used to generate the ciphertexts. 
- DecryptAll(ctx context.Context, keyID string, ciphertext [][]byte, context []Context) ([][]byte, error) - - // Verify verifies all KMS endpoints and returns the details - Verify(cxt context.Context) []VerifyResult -} - -// VerifyResult describes the verification result details a KMS endpoint -type VerifyResult struct { - Endpoint string - Decrypt string - Encrypt string - Version string - Status string -} - -// Status describes the current state of a KMS. -type Status struct { - Name string // The name of the KMS - Endpoints []string // A set of the KMS endpoints - - // DefaultKey is the key used when no explicit key ID - // is specified. It is empty if the KMS does not support - // a default key. + // Typically, associated data is some metadata about the + // data key. For example, the name of the object for which + // the data key is used. + AssociatedData Context +} + +// DecryptRequest is a structure containing fields +// and options for decrypting data. +type DecryptRequest struct { + // Name is the name of the master key used decrypt + // the ciphertext. + Name string + + // Version is the version of the master used for + // decryption. If empty, the latest key version + // is used. + Version int + + // Ciphertext is the encrypted data that gets + // decrypted. + Ciphertext []byte + + // AssociatedData is the crypto. associated data. + // It must match the data used during encryption + // or data key generation. + AssociatedData Context +} + +// MACRequest is a structure containing fields +// and options for generating message authentication +// codes (MAC). +type MACRequest struct { + // Name is the name of the master key used decrypt + // the ciphertext. + Name string + + Version int + + Message []byte +} + +// Metrics is a structure containing KMS metrics. +type Metrics struct { + ReqOK uint64 `json:"kms_req_success"` // Number of requests that succeeded + ReqErr uint64 `json:"kms_req_error"` // Number of requests that failed with a defined error + ReqFail uint64 `json:"kms_req_failure"` // Number of requests that failed with an undefined error + Latency map[time.Duration]uint64 `json:"kms_resp_time"` // Latency histogram of all requests +} + +var defaultLatencyBuckets = []time.Duration{ + 10 * time.Millisecond, + 50 * time.Millisecond, + 100 * time.Millisecond, + 250 * time.Millisecond, + 500 * time.Millisecond, + 1000 * time.Millisecond, // 1s + 1500 * time.Millisecond, + 3000 * time.Millisecond, + 5000 * time.Millisecond, + 10000 * time.Millisecond, // 10s +} + +// KMS is a connection to a key management system. +// It implements various cryptographic operations, +// like data key generation and decryption. +type KMS struct { + // Type identifies the KMS implementation. Either, + // MinKMS, MinKES or Builtin. + Type Type + + // The default key, used for generating new data keys + // if no explicit GenerateKeyRequest.Name is provided. DefaultKey string - // Details provides more details about the KMS endpoint status. - // including uptime, version and available CPUs. - // Could be more in future. - Details kes.State + conn conn // Connection to the KMS + + // Metrics + reqOK, reqErr, reqFail atomic.Uint64 + latencyBuckets []time.Duration // expected to be sorted + latency []atomic.Uint64 } -// DEK is a data encryption key. It consists of a -// plaintext-ciphertext pair and the ID of the key -// used to generate the ciphertext. +// Version returns version information about the KMS. // -// The plaintext can be used for cryptographic -// operations - like encrypting some data. 
The -// ciphertext is the encrypted version of the -// plaintext data and can be stored on untrusted -// storage. -type DEK struct { - KeyID string - Plaintext []byte - Ciphertext []byte +// TODO(aead): refactor this API call since it does not account +// for multiple KMS/KES servers. +func (k *KMS) Version(ctx context.Context) (string, error) { + return k.conn.Version(ctx) } -var ( - _ encoding.TextMarshaler = (*DEK)(nil) - _ encoding.TextUnmarshaler = (*DEK)(nil) -) +// APIs returns a list of KMS server APIs. +// +// TODO(aead): remove this API since it's hardly useful. +func (k *KMS) APIs(ctx context.Context) ([]madmin.KMSAPI, error) { + return k.conn.APIs(ctx) +} -// MarshalText encodes the DEK's key ID and ciphertext -// as JSON. -func (d DEK) MarshalText() ([]byte, error) { - type JSON struct { - KeyID string `json:"keyid"` - Ciphertext []byte `json:"ciphertext"` +// Metrics returns a current snapshot of the KMS metrics. +func (k *KMS) Metrics(ctx context.Context) (*Metrics, error) { + latency := make(map[time.Duration]uint64, len(k.latencyBuckets)) + for i, b := range k.latencyBuckets { + latency[b] = k.latency[i].Load() } - return json.Marshal(JSON{ - KeyID: d.KeyID, - Ciphertext: d.Ciphertext, - }) + + return &Metrics{ + ReqOK: k.reqOK.Load(), + ReqErr: k.reqErr.Load(), + ReqFail: k.reqFail.Load(), + Latency: latency, + }, nil } -// UnmarshalText tries to decode text as JSON representation -// of a DEK and sets DEK's key ID and ciphertext to the -// decoded values. +// Status returns status information about the KMS. // -// It sets DEK's plaintext to nil. -func (d *DEK) UnmarshalText(text []byte) error { - type JSON struct { - KeyID string `json:"keyid"` - Ciphertext []byte `json:"ciphertext"` - } - var v JSON - json := jsoniter.ConfigCompatibleWithStandardLibrary - if err := json.Unmarshal(text, &v); err != nil { - return err - } - d.KeyID, d.Plaintext, d.Ciphertext = v.KeyID, nil, v.Ciphertext +// TODO(aead): refactor this API call since it does not account +// for multiple KMS/KES servers. +func (k *KMS) Status(ctx context.Context) (*madmin.KMSStatus, error) { + endpoints, err := k.conn.Status(ctx) + if err != nil { + return nil, err + } + + return &madmin.KMSStatus{ + Name: k.Type.String(), + DefaultKeyID: k.DefaultKey, + Endpoints: endpoints, + }, nil +} + +// CreateKey creates the master key req.Name. It returns +// ErrKeyExists if the key already exists. +func (k *KMS) CreateKey(ctx context.Context, req *CreateKeyRequest) error { + start := time.Now() + err := k.conn.CreateKey(ctx, req) + k.updateMetrics(err, time.Since(start)) + + return err +} + +// ListKeys returns a list of keys with metadata and a potential +// next name from where to continue a subsequent listing. +func (k *KMS) ListKeys(ctx context.Context, req *ListRequest) ([]madmin.KMSKeyInfo, string, error) { + if req.Prefix == "*" { + req.Prefix = "" + } + return k.conn.ListKeys(ctx, req) +} + +// GenerateKey generates a new data key using the master key req.Name. +// It returns ErrKeyNotFound if the key does not exist. If req.Name is +// empty, the KMS default key is used. +func (k *KMS) GenerateKey(ctx context.Context, req *GenerateKeyRequest) (DEK, error) { + if req.Name == "" { + req.Name = k.DefaultKey + } + + start := time.Now() + dek, err := k.conn.GenerateKey(ctx, req) + k.updateMetrics(err, time.Since(start)) + + return dek, err +} + +// Decrypt decrypts a ciphertext using the master key req.Name. +// It returns ErrKeyNotFound if the key does not exist. 
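The ListRequest.ContinueAt field above enables paginated key listings through the new KMS facade. A small sketch of a paging loop follows; it assumes an already-initialized *kms.KMS value and that an empty continuation token marks the end of the listing. The prefix, limit, and package name are arbitrary examples.

package kmsexample

import (
	"context"

	"github.com/minio/madmin-go/v3"
	"github.com/minio/minio/internal/kms"
)

// listAllKeys pages through ListKeys until the continuation token is empty.
// Sketch only; callers may want to cap the number of iterations.
func listAllKeys(ctx context.Context, k *kms.KMS, prefix string) ([]madmin.KMSKeyInfo, error) {
	var (
		all        []madmin.KMSKeyInfo
		continueAt string
	)
	for {
		keys, next, err := k.ListKeys(ctx, &kms.ListRequest{
			Prefix:     prefix,
			ContinueAt: continueAt,
			Limit:      100, // arbitrary page size
		})
		if err != nil {
			return nil, err
		}
		all = append(all, keys...)
		if next == "" {
			return all, nil
		}
		continueAt = next
	}
}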
+func (k *KMS) Decrypt(ctx context.Context, req *DecryptRequest) ([]byte, error) { + start := time.Now() + plaintext, err := k.conn.Decrypt(ctx, req) + k.updateMetrics(err, time.Since(start)) + + return plaintext, err +} + +// MAC generates the checksum of the given req.Message using the key +// with the req.Name at the KMS. +func (k *KMS) MAC(ctx context.Context, req *MACRequest) ([]byte, error) { + if req.Name == "" { + req.Name = k.DefaultKey + } + + start := time.Now() + mac, err := k.conn.MAC(ctx, req) + k.updateMetrics(err, time.Since(start)) + + return mac, err +} + +func (k *KMS) updateMetrics(err error, latency time.Duration) { + // First, update the latency histogram + // Therefore, find the first bucket that holds the counter for + // requests with a latency at least as large as the given request + // latency and update its and all subsequent counters. + bucket := slices.IndexFunc(k.latencyBuckets, func(b time.Duration) bool { return latency < b }) + if bucket < 0 { + bucket = len(k.latencyBuckets) - 1 + } + for i := bucket; i < len(k.latency); i++ { + k.latency[i].Add(1) + } + + // Next, update the request counters + if err == nil { + k.reqOK.Add(1) + return + } + + var s3Err Error + if errors.As(err, &s3Err) && s3Err.Code >= http.StatusInternalServerError { + k.reqFail.Add(1) + } else { + k.reqErr.Add(1) + } +} + +type kmsConn struct { + endpoints []string + enclave string + defaultKey string + client *kms.Client +} + +func (c *kmsConn) Version(ctx context.Context) (string, error) { + resp, err := c.client.Version(ctx, &kms.VersionRequest{}) + if len(resp) == 0 && err != nil { + return "", err + } + return resp[0].Version, nil +} + +func (c *kmsConn) APIs(ctx context.Context) ([]madmin.KMSAPI, error) { + return nil, ErrNotSupported +} + +func (c *kmsConn) Status(ctx context.Context) (map[string]madmin.ItemState, error) { + stat := make(map[string]madmin.ItemState, len(c.endpoints)) + resp, err := c.client.Version(ctx, &kms.VersionRequest{}) + + for _, r := range resp { + stat[r.Host] = madmin.ItemOnline + } + for _, e := range kms.UnwrapHostErrors(err) { + stat[e.Host] = madmin.ItemOffline + } + return stat, nil +} + +func (c *kmsConn) ListKeys(ctx context.Context, req *ListRequest) ([]madmin.KMSKeyInfo, string, error) { + resp, err := c.client.ListKeys(ctx, &kms.ListRequest{ + Enclave: c.enclave, + Prefix: req.Prefix, + ContinueAt: req.ContinueAt, + Limit: req.Limit, + }) + if err != nil { + return nil, "", errListingKeysFailed(err) + } + + keyInfos := make([]madmin.KMSKeyInfo, len(resp.Items)) + for i, v := range resp.Items { + keyInfos[i].Name = v.Name + keyInfos[i].CreatedAt = v.CreatedAt + keyInfos[i].CreatedBy = v.CreatedBy.String() + } + return keyInfos, resp.ContinueAt, nil +} + +func (c *kmsConn) CreateKey(ctx context.Context, req *CreateKeyRequest) error { + if err := c.client.CreateKey(ctx, c.enclave, &kms.CreateKeyRequest{ + Name: req.Name, + }); err != nil { + if errors.Is(err, kms.ErrKeyExists) { + return ErrKeyExists + } + if errors.Is(err, kms.ErrPermission) { + return ErrPermission + } + return errKeyCreationFailed(err) + } return nil } + +func (c *kmsConn) GenerateKey(ctx context.Context, req *GenerateKeyRequest) (DEK, error) { + aad, err := req.AssociatedData.MarshalText() + if err != nil { + return DEK{}, err + } + + name := req.Name + if name == "" { + name = c.defaultKey + } + + resp, err := c.client.GenerateKey(ctx, c.enclave, &kms.GenerateKeyRequest{ + Name: name, + AssociatedData: aad, + Length: 32, + }) + if err != nil { + if errors.Is(err, 
kms.ErrKeyNotFound) { + return DEK{}, ErrKeyNotFound + } + if errors.Is(err, kms.ErrPermission) { + return DEK{}, ErrPermission + } + return DEK{}, errKeyGenerationFailed(err) + } + + return DEK{ + KeyID: name, + Version: resp[0].Version, + Plaintext: resp[0].Plaintext, + Ciphertext: resp[0].Ciphertext, + }, nil +} + +func (c *kmsConn) Decrypt(ctx context.Context, req *DecryptRequest) ([]byte, error) { + aad, err := req.AssociatedData.MarshalText() + if err != nil { + return nil, err + } + + ciphertext, _ := parseCiphertext(req.Ciphertext) + resp, err := c.client.Decrypt(ctx, c.enclave, &kms.DecryptRequest{ + Name: req.Name, + Ciphertext: ciphertext, + AssociatedData: aad, + }) + if err != nil { + if errors.Is(err, kms.ErrKeyNotFound) { + return nil, ErrKeyNotFound + } + if errors.Is(err, kms.ErrPermission) { + return nil, ErrPermission + } + return nil, errDecryptionFailed(err) + } + return resp[0].Plaintext, nil +} + +// MAC generates the checksum of the given req.Message using the key +// with the req.Name at the KMS. +func (*kmsConn) MAC(context.Context, *MACRequest) ([]byte, error) { + return nil, ErrNotSupported +} diff --git a/internal/kms/policy-manager.go b/internal/kms/policy-manager.go deleted file mode 100644 index 0428065d68bd0..0000000000000 --- a/internal/kms/policy-manager.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2015-2022 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package kms - -import ( - "context" - - "github.com/minio/kms-go/kes" -) - -// PolicyManager is the generic interface that handles KMS policy] operations -type PolicyManager interface { - // DescribePolicy describes a policy by returning its metadata. - // e.g. who created the policy at which point in time. - DescribePolicy(ctx context.Context, policy string) (*kes.PolicyInfo, error) - - // GetPolicy gets a policy from KMS. - GetPolicy(ctx context.Context, policy string) (*kes.Policy, error) - - // ListPolicies lists all policies. - ListPolicies(ctx context.Context) (*kes.ListIter[string], error) -} diff --git a/internal/kms/secret-key.go b/internal/kms/secret-key.go new file mode 100644 index 0000000000000..8db53bd552926 --- /dev/null +++ b/internal/kms/secret-key.go @@ -0,0 +1,312 @@ +// Copyright (c) 2015-2021 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. 
+// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package kms + +import ( + "context" + "crypto/aes" + "crypto/cipher" + "crypto/hmac" + "encoding/base64" + "encoding/json" + "errors" + "strconv" + "strings" + "sync/atomic" + + "github.com/secure-io/sio-go/sioutil" + "golang.org/x/crypto/chacha20" + "golang.org/x/crypto/chacha20poly1305" + + "github.com/minio/kms-go/kms" + "github.com/minio/madmin-go/v3" + "github.com/minio/minio/internal/hash/sha256" +) + +// ParseSecretKey parses s as : and returns a +// KMS that uses s as builtin single key as KMS implementation. +func ParseSecretKey(s string) (*KMS, error) { + v := strings.SplitN(s, ":", 2) + if len(v) != 2 { + return nil, errors.New("kms: invalid secret key format") + } + + keyID, b64Key := v[0], v[1] + key, err := base64.StdEncoding.DecodeString(b64Key) + if err != nil { + return nil, err + } + return NewBuiltin(keyID, key) +} + +// NewBuiltin returns a single-key KMS that derives new DEKs from the +// given key. +func NewBuiltin(keyID string, key []byte) (*KMS, error) { + if len(key) != 32 { + return nil, errors.New("kms: invalid key length " + strconv.Itoa(len(key))) + } + return &KMS{ + Type: Builtin, + DefaultKey: keyID, + conn: secretKey{ + keyID: keyID, + key: key, + }, + latencyBuckets: defaultLatencyBuckets, + latency: make([]atomic.Uint64, len(defaultLatencyBuckets)), + }, nil +} + +// secretKey is a KMS implementation that derives new DEKs +// from a single key. +type secretKey struct { + keyID string + key []byte +} + +// Version returns the version of the builtin KMS. +func (secretKey) Version(ctx context.Context) (string, error) { return "v1", nil } + +// APIs returns an error since the builtin KMS does not provide a list of APIs. +func (secretKey) APIs(ctx context.Context) ([]madmin.KMSAPI, error) { + return nil, ErrNotSupported +} + +// Status returns a set of endpoints and their KMS status. Since, the builtin KMS is not +// external it returns "127.0.0.1: online". +func (secretKey) Status(context.Context) (map[string]madmin.ItemState, error) { + return map[string]madmin.ItemState{ + "127.0.0.1": madmin.ItemOnline, + }, nil +} + +// ListKeys returns a list of keys with metadata. The builtin KMS consists of just a single key. +func (s secretKey) ListKeys(ctx context.Context, req *ListRequest) ([]madmin.KMSKeyInfo, string, error) { + if strings.HasPrefix(s.keyID, req.Prefix) && strings.HasPrefix(s.keyID, req.ContinueAt) { + return []madmin.KMSKeyInfo{{Name: s.keyID}}, "", nil + } + return []madmin.KMSKeyInfo{}, "", nil +} + +// CreateKey returns ErrKeyExists unless req.Name is equal to the secretKey name. +// The builtin KMS does not support creating multiple keys. +func (s secretKey) CreateKey(_ context.Context, req *CreateKeyRequest) error { + if req.Name != s.keyID { + return ErrNotSupported + } + return ErrKeyExists +} + +// GenerateKey decrypts req.Ciphertext. The key name req.Name must match the key +// name of the secretKey. +// +// The returned DEK is encrypted using AES-GCM and the ciphertext format is compatible +// with KES and MinKMS. 
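For context, ParseSecretKey above expects a value of the form <key-id>:<base64-encoded 32-byte key>, the same format used for the MINIO_KMS_SECRET_KEY setting. A small helper for producing such a value is sketched below; the key ID "my-minio-key" is just an example.

package main

import (
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

func main() {
	key := make([]byte, 32) // NewBuiltin rejects anything other than 32 bytes
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	// Prints a value such as my-minio-key:...= which ParseSecretKey can consume.
	fmt.Printf("my-minio-key:%s\n", base64.StdEncoding.EncodeToString(key))
}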
+func (s secretKey) GenerateKey(_ context.Context, req *GenerateKeyRequest) (DEK, error) { + if req.Name != s.keyID { + return DEK{}, ErrKeyNotFound + } + associatedData, err := req.AssociatedData.MarshalText() + if err != nil { + return DEK{}, err + } + + const randSize = 28 + random, err := sioutil.Random(randSize) + if err != nil { + return DEK{}, err + } + iv, nonce := random[:16], random[16:] + + prf := hmac.New(sha256.New, s.key) + prf.Write(iv) + key := prf.Sum(make([]byte, 0, prf.Size())) + + block, err := aes.NewCipher(key) + if err != nil { + return DEK{}, err + } + aead, err := cipher.NewGCM(block) + if err != nil { + return DEK{}, err + } + + plaintext, err := sioutil.Random(32) + if err != nil { + return DEK{}, err + } + ciphertext := aead.Seal(nil, nonce, plaintext, associatedData) + ciphertext = append(ciphertext, random...) + return DEK{ + KeyID: req.Name, + Version: 0, + Plaintext: plaintext, + Ciphertext: ciphertext, + }, nil +} + +// Decrypt decrypts req.Ciphertext. The key name req.Name must match the key +// name of the secretKey. +// +// Decrypt supports decryption of binary-encoded ciphertexts, as produced by KES +// and MinKMS, and legacy JSON formatted ciphertexts. +func (s secretKey) Decrypt(_ context.Context, req *DecryptRequest) ([]byte, error) { + if req.Name != s.keyID { + return nil, ErrKeyNotFound + } + + const randSize = 28 + ciphertext, keyType := parseCiphertext(req.Ciphertext) + ciphertext, random := ciphertext[:len(ciphertext)-randSize], ciphertext[len(ciphertext)-randSize:] + iv, nonce := random[:16], random[16:] + + var aead cipher.AEAD + switch keyType { + case kms.AES256: + mac := hmac.New(sha256.New, s.key) + mac.Write(iv) + sealingKey := mac.Sum(nil) + + block, err := aes.NewCipher(sealingKey) + if err != nil { + return nil, err + } + aead, err = cipher.NewGCM(block) + if err != nil { + return nil, err + } + case kms.ChaCha20: + sealingKey, err := chacha20.HChaCha20(s.key, iv) + if err != nil { + return nil, err + } + aead, err = chacha20poly1305.New(sealingKey) + if err != nil { + return nil, err + } + default: + return nil, ErrDecrypt + } + + associatedData, _ := req.AssociatedData.MarshalText() + plaintext, err := aead.Open(nil, nonce, ciphertext, associatedData) + if err != nil { + return nil, ErrDecrypt + } + return plaintext, nil +} + +// MAC generate hmac for the request +func (s secretKey) MAC(_ context.Context, req *MACRequest) ([]byte, error) { + mac := hmac.New(sha256.New, s.key) + mac.Write(req.Message) + return mac.Sum(make([]byte, 0, mac.Size())), nil +} + +// parseCiphertext parses and converts a ciphertext into +// the format expected by a secretKey. +// +// Previous implementations of the secretKey produced a structured +// ciphertext. parseCiphertext converts all previously generated +// formats into the expected format. +func parseCiphertext(b []byte) ([]byte, kms.SecretKeyType) { + if len(b) == 0 { + return b, kms.AES256 + } + + if b[0] == '{' && b[len(b)-1] == '}' { // JSON object + var c ciphertext + if err := c.UnmarshalJSON(b); err != nil { + // It may happen that a random ciphertext starts with '{' and ends with '}'. + // In such a case, parsing will fail but we must not return an error. Instead + // we return the ciphertext as it is. + return b, kms.AES256 + } + + b = b[:0] + b = append(b, c.Bytes...) + b = append(b, c.IV...) + b = append(b, c.Nonce...) 
+ return b, c.Algorithm + } + return b, kms.AES256 +} + +// ciphertext is a structure that contains the encrypted +// bytes and all relevant information to decrypt these +// bytes again with a cryptographic key. +type ciphertext struct { + Algorithm kms.SecretKeyType + ID string + IV []byte + Nonce []byte + Bytes []byte +} + +// UnmarshalJSON parses the given text as JSON-encoded +// ciphertext. +// +// UnmarshalJSON provides backward-compatible unmarsahaling +// of existing ciphertext. In the past, ciphertexts were +// JSON-encoded. Now, ciphertexts are binary-encoded. +// Therefore, there is no MarshalJSON implementation. +func (c *ciphertext) UnmarshalJSON(text []byte) error { + const ( + IVSize = 16 + NonceSize = 12 + + AES256GCM = "AES-256-GCM-HMAC-SHA-256" + CHACHA20POLY1305 = "ChaCha20Poly1305" + ) + + type JSON struct { + Algorithm string `json:"aead"` + ID string `json:"id"` + IV []byte `json:"iv"` + Nonce []byte `json:"nonce"` + Bytes []byte `json:"bytes"` + } + var value JSON + if err := json.Unmarshal(text, &value); err != nil { + return ErrDecrypt + } + + if value.Algorithm != AES256GCM && value.Algorithm != CHACHA20POLY1305 { + return ErrDecrypt + } + if len(value.IV) != IVSize { + return ErrDecrypt + } + if len(value.Nonce) != NonceSize { + return ErrDecrypt + } + + switch value.Algorithm { + case AES256GCM: + c.Algorithm = kms.AES256 + case CHACHA20POLY1305: + c.Algorithm = kms.ChaCha20 + default: + c.Algorithm = 0 + } + c.ID = value.ID + c.IV = value.IV + c.Nonce = value.Nonce + c.Bytes = value.Bytes + return nil +} diff --git a/internal/kms/single-key_test.go b/internal/kms/secret-key_test.go similarity index 67% rename from internal/kms/single-key_test.go rename to internal/kms/secret-key_test.go index 166f56cd88e43..3577a6adf6113 100644 --- a/internal/kms/single-key_test.go +++ b/internal/kms/secret-key_test.go @@ -19,22 +19,24 @@ package kms import ( "bytes" - "context" "encoding/base64" "testing" ) func TestSingleKeyRoundtrip(t *testing.T) { - KMS, err := Parse("my-key:eEm+JI9/q4JhH8QwKvf3LKo4DEBl6QbfvAl1CAbMIv8=") + KMS, err := ParseSecretKey("my-key:eEm+JI9/q4JhH8QwKvf3LKo4DEBl6QbfvAl1CAbMIv8=") if err != nil { t.Fatalf("Failed to initialize KMS: %v", err) } - key, err := KMS.GenerateKey(context.Background(), "my-key", Context{}) + key, err := KMS.GenerateKey(t.Context(), &GenerateKeyRequest{Name: "my-key"}) if err != nil { t.Fatalf("Failed to generate key: %v", err) } - plaintext, err := KMS.DecryptKey(key.KeyID, key.Ciphertext, Context{}) + plaintext, err := KMS.Decrypt(t.Context(), &DecryptRequest{ + Name: key.KeyID, + Ciphertext: key.Ciphertext, + }) if err != nil { t.Fatalf("Failed to decrypt key: %v", err) } @@ -44,7 +46,7 @@ func TestSingleKeyRoundtrip(t *testing.T) { } func TestDecryptKey(t *testing.T) { - KMS, err := Parse("my-key:eEm+JI9/q4JhH8QwKvf3LKo4DEBl6QbfvAl1CAbMIv8=") + KMS, err := ParseSecretKey("my-key:eEm+JI9/q4JhH8QwKvf3LKo4DEBl6QbfvAl1CAbMIv8=") if err != nil { t.Fatalf("Failed to initialize KMS: %v", err) } @@ -54,11 +56,11 @@ func TestDecryptKey(t *testing.T) { if err != nil { t.Fatalf("Test %d: failed to decode plaintext key: %v", i, err) } - ciphertext, err := base64.StdEncoding.DecodeString(test.Ciphertext) - if err != nil { - t.Fatalf("Test %d: failed to decode ciphertext key: %v", i, err) - } - plaintext, err := KMS.DecryptKey(test.KeyID, ciphertext, test.Context) + plaintext, err := KMS.Decrypt(t.Context(), &DecryptRequest{ + Name: test.KeyID, + Ciphertext: []byte(test.Ciphertext), + AssociatedData: test.Context, + }) if err != nil { 
t.Fatalf("Test %d: failed to decrypt key: %v", i, err) } @@ -77,12 +79,12 @@ var decryptKeyTests = []struct { { KeyID: "my-key", Plaintext: "zmS7NrG765UZ0ZN85oPjybelxqVvpz01vxsSpOISy2M=", - Ciphertext: "eyJhZWFkIjoiQ2hhQ2hhMjBQb2x5MTMwNSIsIml2IjoiSmJJK3Z3dll3dzFsQ2I1VnBrQUZ1UT09Iiwibm9uY2UiOiJBUmpJakp4QlNENTQxR3o4IiwiYnl0ZXMiOiJLQ2JFYzJzQTBUTHZBN2FXVFdhMjNBZGNjVmZKTXBPeHdnRzhobSs0UGFOcnhZZnkxeEZXWmcyZ0VlblZyT2d2In0=", + Ciphertext: `{"aead":"ChaCha20Poly1305","iv":"JbI+vwvYww1lCb5VpkAFuQ==","nonce":"ARjIjJxBSD541Gz8","bytes":"KCbEc2sA0TLvA7aWTWa23AdccVfJMpOxwgG8hm+4PaNrxYfy1xFWZg2gEenVrOgv"}`, }, { KeyID: "my-key", Plaintext: "UnPWsZgVI+T4L9WGNzFlP1PsP1Z6hn2Fx8ISeZfDGnA=", - Ciphertext: "eyJhZWFkIjoiQ2hhQ2hhMjBQb2x5MTMwNSIsIml2IjoicjQreWZpVmJWSVlSMFoySTlGcSs2Zz09Iiwibm9uY2UiOiIyWXB3R3dFNTlHY1ZyYUkzIiwiYnl0ZXMiOiJrL3N2TWdsT1U3L0tnd3Y3M2hlRzM4TldXNTc1WExjRnAzU2F4UUhETWpKR1l5UkkzRml5Z3UyT2V1dEdQWE5MIn0=", + Ciphertext: `{"aead":"ChaCha20Poly1305","iv":"r4+yfiVbVIYR0Z2I9Fq+6g==","nonce":"2YpwGwE59GcVraI3","bytes":"k/svMglOU7/Kgwv73heG38NWW575XLcFp3SaxQHDMjJGYyRI3Fiygu2OeutGPXNL"}`, Context: Context{"key": "value"}, }, } diff --git a/internal/kms/single-key.go b/internal/kms/single-key.go deleted file mode 100644 index ab77775fc081e..0000000000000 --- a/internal/kms/single-key.go +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright (c) 2015-2021 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package kms - -import ( - "context" - "crypto/aes" - "crypto/cipher" - "crypto/hmac" - "encoding/base64" - "errors" - "fmt" - "net/http" - "strconv" - "strings" - - jsoniter "github.com/json-iterator/go" - "github.com/secure-io/sio-go/sioutil" - "golang.org/x/crypto/chacha20" - "golang.org/x/crypto/chacha20poly1305" - - "github.com/minio/kms-go/kes" - "github.com/minio/minio/internal/hash/sha256" -) - -// Parse parses s as single-key KMS. The given string -// is expected to have the following format: -// -// : -// -// The returned KMS implementation uses the parsed -// key ID and key to derive new DEKs and decrypt ciphertext. -func Parse(s string) (KMS, error) { - v := strings.SplitN(s, ":", 2) - if len(v) != 2 { - return nil, errors.New("kms: invalid master key format") - } - - keyID, b64Key := v[0], v[1] - key, err := base64.StdEncoding.DecodeString(b64Key) - if err != nil { - return nil, err - } - return New(keyID, key) -} - -// New returns a single-key KMS that derives new DEKs from the -// given key. -func New(keyID string, key []byte) (KMS, error) { - if len(key) != 32 { - return nil, errors.New("kms: invalid key length " + strconv.Itoa(len(key))) - } - return secretKey{ - keyID: keyID, - key: key, - }, nil -} - -// secretKey is a KMS implementation that derives new DEKs -// from a single key. 
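The updated decryptKeyTests above store the legacy JSON ciphertext directly instead of its base64 encoding, since Decrypt (via parseCiphertext) now receives the raw bytes. If it helps to verify the two forms are equivalent, decoding one of the old vectors should reproduce the new literal, as in this small sketch:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// First ciphertext from the previous version of decryptKeyTests.
	old := "eyJhZWFkIjoiQ2hhQ2hhMjBQb2x5MTMwNSIsIml2IjoiSmJJK3Z3dll3dzFsQ2I1VnBrQUZ1UT09Iiwibm9uY2UiOiJBUmpJakp4QlNENTQxR3o4IiwiYnl0ZXMiOiJLQ2JFYzJzQTBUTHZBN2FXVFdhMjNBZGNjVmZKTXBPeHdnRzhobSs0UGFOcnhZZnkxeEZXWmcyZ0VlblZyT2d2In0="
	raw, err := base64.StdEncoding.DecodeString(old)
	if err != nil {
		panic(err)
	}
	// Expected: the JSON object now inlined in decryptKeyTests.
	fmt.Println(string(raw))
}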
-type secretKey struct { - keyID string - key []byte -} - -var _ KMS = secretKey{} // compiler check - -const ( // algorithms used to derive and encrypt DEKs - algorithmAESGCM = "AES-256-GCM-HMAC-SHA-256" - algorithmChaCha20Poly1305 = "ChaCha20Poly1305" -) - -func (kms secretKey) Stat(context.Context) (Status, error) { - return Status{ - Name: "SecretKey", - DefaultKey: kms.keyID, - }, nil -} - -// IsLocal returns true if the KMS is a local implementation -func (kms secretKey) IsLocal() bool { - return true -} - -// List returns an array of local KMS Names -func (kms secretKey) List() []kes.KeyInfo { - kmsSecret := []kes.KeyInfo{ - { - Name: kms.keyID, - }, - } - return kmsSecret -} - -func (secretKey) Metrics(ctx context.Context) (kes.Metric, error) { - return kes.Metric{}, Error{ - HTTPStatusCode: http.StatusNotImplemented, - APICode: "KMS.NotImplemented", - Err: errors.New("metrics are not supported"), - } -} - -func (kms secretKey) CreateKey(_ context.Context, keyID string) error { - if keyID == kms.keyID { - return nil - } - return Error{ - HTTPStatusCode: http.StatusNotImplemented, - APICode: "KMS.NotImplemented", - Err: fmt.Errorf("creating custom key %q is not supported", keyID), - } -} - -func (kms secretKey) GenerateKey(_ context.Context, keyID string, context Context) (DEK, error) { - if keyID == "" { - keyID = kms.keyID - } - if keyID != kms.keyID { - return DEK{}, Error{ - HTTPStatusCode: http.StatusBadRequest, - APICode: "KMS.NotFoundException", - Err: fmt.Errorf("key %q does not exist", keyID), - } - } - iv, err := sioutil.Random(16) - if err != nil { - return DEK{}, err - } - - var algorithm string - if sioutil.NativeAES() { - algorithm = algorithmAESGCM - } else { - algorithm = algorithmChaCha20Poly1305 - } - - var aead cipher.AEAD - switch algorithm { - case algorithmAESGCM: - mac := hmac.New(sha256.New, kms.key) - mac.Write(iv) - sealingKey := mac.Sum(nil) - - var block cipher.Block - block, err = aes.NewCipher(sealingKey) - if err != nil { - return DEK{}, err - } - aead, err = cipher.NewGCM(block) - if err != nil { - return DEK{}, err - } - case algorithmChaCha20Poly1305: - var sealingKey []byte - sealingKey, err = chacha20.HChaCha20(kms.key, iv) - if err != nil { - return DEK{}, err - } - aead, err = chacha20poly1305.New(sealingKey) - if err != nil { - return DEK{}, err - } - default: - return DEK{}, Error{ - HTTPStatusCode: http.StatusBadRequest, - APICode: "KMS.InternalException", - Err: errors.New("invalid algorithm: " + algorithm), - } - } - - nonce, err := sioutil.Random(aead.NonceSize()) - if err != nil { - return DEK{}, err - } - - plaintext, err := sioutil.Random(32) - if err != nil { - return DEK{}, err - } - associatedData, _ := context.MarshalText() - ciphertext := aead.Seal(nil, nonce, plaintext, associatedData) - - json := jsoniter.ConfigCompatibleWithStandardLibrary - ciphertext, err = json.Marshal(encryptedKey{ - Algorithm: algorithm, - IV: iv, - Nonce: nonce, - Bytes: ciphertext, - }) - if err != nil { - return DEK{}, err - } - return DEK{ - KeyID: keyID, - Plaintext: plaintext, - Ciphertext: ciphertext, - }, nil -} - -func (kms secretKey) DecryptKey(keyID string, ciphertext []byte, context Context) ([]byte, error) { - if keyID != kms.keyID { - return nil, Error{ - HTTPStatusCode: http.StatusBadRequest, - APICode: "KMS.NotFoundException", - Err: fmt.Errorf("key %q does not exist", keyID), - } - } - - var encryptedKey encryptedKey - json := jsoniter.ConfigCompatibleWithStandardLibrary - if err := json.Unmarshal(ciphertext, &encryptedKey); err != nil { - 
return nil, Error{ - HTTPStatusCode: http.StatusBadRequest, - APICode: "KMS.InternalException", - Err: err, - } - } - - if n := len(encryptedKey.IV); n != 16 { - return nil, Error{ - HTTPStatusCode: http.StatusBadRequest, - APICode: "KMS.InternalException", - Err: fmt.Errorf("invalid iv size: %d", n), - } - } - - var aead cipher.AEAD - switch encryptedKey.Algorithm { - case algorithmAESGCM: - mac := hmac.New(sha256.New, kms.key) - mac.Write(encryptedKey.IV) - sealingKey := mac.Sum(nil) - - block, err := aes.NewCipher(sealingKey) - if err != nil { - return nil, err - } - aead, err = cipher.NewGCM(block) - if err != nil { - return nil, err - } - case algorithmChaCha20Poly1305: - sealingKey, err := chacha20.HChaCha20(kms.key, encryptedKey.IV) - if err != nil { - return nil, err - } - aead, err = chacha20poly1305.New(sealingKey) - if err != nil { - return nil, err - } - default: - return nil, Error{ - HTTPStatusCode: http.StatusBadRequest, - APICode: "KMS.InternalException", - Err: fmt.Errorf("invalid algorithm: %q", encryptedKey.Algorithm), - } - } - - if n := len(encryptedKey.Nonce); n != aead.NonceSize() { - return nil, Error{ - HTTPStatusCode: http.StatusBadRequest, - APICode: "KMS.InternalException", - Err: fmt.Errorf("invalid nonce size %d", n), - } - } - - associatedData, _ := context.MarshalText() - plaintext, err := aead.Open(nil, encryptedKey.Nonce, encryptedKey.Bytes, associatedData) - if err != nil { - return nil, Error{ - HTTPStatusCode: http.StatusBadRequest, - APICode: "KMS.InternalException", - Err: fmt.Errorf("encrypted key is not authentic"), - } - } - return plaintext, nil -} - -func (kms secretKey) DecryptAll(_ context.Context, keyID string, ciphertexts [][]byte, contexts []Context) ([][]byte, error) { - plaintexts := make([][]byte, 0, len(ciphertexts)) - for i := range ciphertexts { - plaintext, err := kms.DecryptKey(keyID, ciphertexts[i], contexts[i]) - if err != nil { - return nil, err - } - plaintexts = append(plaintexts, plaintext) - } - return plaintexts, nil -} - -// Verify verifies all KMS endpoints and returns details -func (kms secretKey) Verify(cxt context.Context) []VerifyResult { - return []VerifyResult{ - {Endpoint: "self"}, - } -} - -type encryptedKey struct { - Algorithm string `json:"aead"` - IV []byte `json:"iv"` - Nonce []byte `json:"nonce"` - Bytes []byte `json:"bytes"` -} diff --git a/internal/kms/stub.go b/internal/kms/stub.go new file mode 100644 index 0000000000000..2df1e9d8bd191 --- /dev/null +++ b/internal/kms/stub.go @@ -0,0 +1,125 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package kms + +import ( + "context" + "net/http" + "slices" + "sync/atomic" + "time" + + "github.com/minio/madmin-go/v3" + "github.com/minio/pkg/v3/wildcard" +) + +var ( + // StubCreatedAt is a constant timestamp for testing + StubCreatedAt = time.Date(2024, time.January, 1, 15, 0, 0, 0, time.UTC) + // StubCreatedBy is a constant created identity for testing + StubCreatedBy = "MinIO" +) + +// NewStub returns a stub of KMS for testing +func NewStub(defaultKeyName string) *KMS { + return &KMS{ + Type: Builtin, + DefaultKey: defaultKeyName, + latencyBuckets: defaultLatencyBuckets, + latency: make([]atomic.Uint64, len(defaultLatencyBuckets)), + conn: &StubKMS{ + KeyNames: []string{defaultKeyName}, + }, + } +} + +// StubKMS is a KMS implementation for tests +type StubKMS struct { + KeyNames []string +} + +// Version returns the type of the KMS. +func (s StubKMS) Version(ctx context.Context) (string, error) { + return "stub", nil +} + +// APIs returns supported APIs +func (s StubKMS) APIs(ctx context.Context) ([]madmin.KMSAPI, error) { + return []madmin.KMSAPI{ + {Method: http.MethodGet, Path: "stub/path"}, + }, nil +} + +// Status returns a set of endpoints and their KMS status. +func (s StubKMS) Status(context.Context) (map[string]madmin.ItemState, error) { + return map[string]madmin.ItemState{ + "127.0.0.1": madmin.ItemOnline, + }, nil +} + +// ListKeys returns a list of keys with metadata. +func (s StubKMS) ListKeys(ctx context.Context, req *ListRequest) ([]madmin.KMSKeyInfo, string, error) { + matches := []madmin.KMSKeyInfo{} + if req.Prefix == "" { + req.Prefix = "*" + } + for _, keyName := range s.KeyNames { + if wildcard.MatchAsPatternPrefix(req.Prefix, keyName) { + matches = append(matches, madmin.KMSKeyInfo{Name: keyName, CreatedAt: StubCreatedAt, CreatedBy: StubCreatedBy}) + } + } + + return matches, "", nil +} + +// CreateKey creates a new key with the given name. +func (s *StubKMS) CreateKey(_ context.Context, req *CreateKeyRequest) error { + if s.containsKeyName(req.Name) { + return ErrKeyExists + } + s.KeyNames = append(s.KeyNames, req.Name) + return nil +} + +// GenerateKey is a non-functional stub. +func (s StubKMS) GenerateKey(_ context.Context, req *GenerateKeyRequest) (DEK, error) { + if !s.containsKeyName(req.Name) { + return DEK{}, ErrKeyNotFound + } + return DEK{ + KeyID: req.Name, + Version: 0, + Plaintext: []byte("stubplaincharswhichare32bytelong"), + Ciphertext: []byte("stubplaincharswhichare32bytelong"), + }, nil +} + +// Decrypt is a non-functional stub. +func (s StubKMS) Decrypt(_ context.Context, req *DecryptRequest) ([]byte, error) { + return req.Ciphertext, nil +} + +// MAC is a non-functional stub. +func (s StubKMS) MAC(_ context.Context, m *MACRequest) ([]byte, error) { + return m.Message, nil +} + +// containsKeyName returns true if the given key name exists in the stub KMS. +func (s *StubKMS) containsKeyName(keyName string) bool { + return slices.Contains(s.KeyNames, keyName) +} diff --git a/internal/lock/lock_test.go b/internal/lock/lock_test.go index 7f01b86ccf83b..11dad9037cf8b 100644 --- a/internal/lock/lock_test.go +++ b/internal/lock/lock_test.go @@ -25,7 +25,7 @@ import ( // Test lock fails. func TestLockFail(t *testing.T) { - f, err := os.CreateTemp("", "lock") + f, err := os.CreateTemp(t.TempDir(), "lock") if err != nil { t.Fatal(err) } @@ -55,7 +55,7 @@ func TestLockDirFail(t *testing.T) { // Tests rwlock methods. 
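Returning to the new stub introduced above: NewStub wires a fully in-memory conn into the KMS facade, which keeps unit tests free of external KES/KMS dependencies. A hedged sketch of a test built on it follows; TestWithStubKMS is a hypothetical test name and the key name is an example.

package kms

import "testing"

// TestWithStubKMS is an illustrative test exercising the stub backend.
func TestWithStubKMS(t *testing.T) {
	k := NewStub("default-test-key")

	dek, err := k.GenerateKey(t.Context(), &GenerateKeyRequest{Name: "default-test-key"})
	if err != nil {
		t.Fatalf("GenerateKey failed: %v", err)
	}
	// The stub hands back a fixed 32-byte plaintext and echoes ciphertexts on Decrypt.
	if len(dek.Plaintext) != 32 {
		t.Fatalf("unexpected DEK length: %d", len(dek.Plaintext))
	}

	if _, _, err := k.ListKeys(t.Context(), &ListRequest{Prefix: "default-"}); err != nil {
		t.Fatalf("ListKeys failed: %v", err)
	}
}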
func TestRWLockedFile(t *testing.T) { - f, err := os.CreateTemp("", "lock") + f, err := os.CreateTemp(t.TempDir(), "lock") if err != nil { t.Fatal(err) } @@ -118,7 +118,7 @@ func TestRWLockedFile(t *testing.T) { // Tests lock and unlock semantics. func TestLockAndUnlock(t *testing.T) { - f, err := os.CreateTemp("", "lock") + f, err := os.CreateTemp(t.TempDir(), "lock") if err != nil { t.Fatal(err) } diff --git a/internal/lock/lock_windows.go b/internal/lock/lock_windows.go index 57bc2f53337be..6d97d6d007639 100644 --- a/internal/lock/lock_windows.go +++ b/internal/lock/lock_windows.go @@ -257,5 +257,5 @@ func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.O err = syscall.EINVAL } } - return + return err } diff --git a/internal/logger/audit.go b/internal/logger/audit.go index 41464858b87cb..62af94cc56ea7 100644 --- a/internal/logger/audit.go +++ b/internal/logger/audit.go @@ -24,9 +24,9 @@ import ( "strconv" "time" + "github.com/minio/madmin-go/v3/logger/audit" internalAudit "github.com/minio/minio/internal/logger/message/audit" "github.com/minio/minio/internal/mcontext" - "github.com/minio/pkg/v2/logger/message/audit" xhttp "github.com/minio/minio/internal/http" ) @@ -36,7 +36,7 @@ const contextAuditKey = contextKeyType("audit-entry") // SetAuditEntry sets Audit info in the context. func SetAuditEntry(ctx context.Context, audit *audit.Entry) context.Context { if ctx == nil { - LogIf(context.Background(), fmt.Errorf("context is nil")) + LogIf(context.Background(), "audit", fmt.Errorf("context is nil")) return nil } return context.WithValue(ctx, contextAuditKey, audit) @@ -60,7 +60,7 @@ func GetAuditEntry(ctx context.Context) *audit.Entry { } // AuditLog - logs audit logs to all audit targets. -func AuditLog(ctx context.Context, w http.ResponseWriter, r *http.Request, reqClaims map[string]interface{}, filterKeys ...string) { +func AuditLog(ctx context.Context, w http.ResponseWriter, r *http.Request, reqClaims map[string]any, filterKeys ...string) { auditTgts := AuditTargets() if len(auditTgts) == 0 { return @@ -100,7 +100,7 @@ func AuditLog(ctx context.Context, w http.ResponseWriter, r *http.Request, reqCl outputBytes = int64(tc.ResponseRecorder.Size()) headerBytes = int64(tc.ResponseRecorder.HeaderSize()) timeToResponse = time.Now().UTC().Sub(tc.ResponseRecorder.StartTime) - timeToFirstByte = tc.ResponseRecorder.TimeToFirstByte + timeToFirstByte = tc.ResponseRecorder.TTFB() } entry.AccessKey = reqInfo.Cred.AccessKey @@ -124,7 +124,7 @@ func AuditLog(ctx context.Context, w http.ResponseWriter, r *http.Request, reqCl entry.API.TimeToResponse = strconv.FormatInt(timeToResponse.Nanoseconds(), 10) + "ns" entry.API.TimeToResponseInNS = strconv.FormatInt(timeToResponse.Nanoseconds(), 10) // We hold the lock, so we cannot call reqInfo.GetTagsMap(). - tags := make(map[string]interface{}, len(reqInfo.tags)) + tags := make(map[string]any, len(reqInfo.tags)) for _, t := range reqInfo.tags { tags[t.Key] = t.Val } @@ -144,7 +144,7 @@ func AuditLog(ctx context.Context, w http.ResponseWriter, r *http.Request, reqCl // Send audit logs only to http targets. 
for _, t := range auditTgts { if err := t.Send(ctx, entry); err != nil { - LogOnceIf(ctx, fmt.Errorf("Unable to send an audit event to the target `%v`: %v", t, err), "send-audit-event-failure") + LogOnceIf(ctx, "logging", fmt.Errorf("Unable to send audit event(s) to the target `%v`: %v", t, err), "send-audit-event-failure") } } } diff --git a/internal/logger/config.go b/internal/logger/config.go index b2c8f3167b62a..5cc71d0d99025 100644 --- a/internal/logger/config.go +++ b/internal/logger/config.go @@ -21,11 +21,13 @@ import ( "context" "crypto/tls" "errors" + "fmt" "strconv" "strings" + "time" - "github.com/minio/pkg/v2/env" - xnet "github.com/minio/pkg/v2/net" + "github.com/minio/pkg/v3/env" + xnet "github.com/minio/pkg/v3/net" "github.com/minio/minio/internal/config" "github.com/minio/minio/internal/logger/target/http" @@ -39,14 +41,17 @@ type Console struct { // Audit/Logger constants const ( - Endpoint = "endpoint" - AuthToken = "auth_token" - ClientCert = "client_cert" - ClientKey = "client_key" - BatchSize = "batch_size" - QueueSize = "queue_size" - QueueDir = "queue_dir" - Proxy = "proxy" + Endpoint = "endpoint" + AuthToken = "auth_token" + ClientCert = "client_cert" + ClientKey = "client_key" + BatchSize = "batch_size" + QueueSize = "queue_size" + QueueDir = "queue_dir" + MaxRetry = "max_retry" + RetryInterval = "retry_interval" + Proxy = "proxy" + httpTimeout = "http_timeout" KafkaBrokers = "brokers" KafkaTopic = "topic" @@ -63,24 +68,30 @@ const ( KafkaQueueDir = "queue_dir" KafkaQueueSize = "queue_size" - EnvLoggerWebhookEnable = "MINIO_LOGGER_WEBHOOK_ENABLE" - EnvLoggerWebhookEndpoint = "MINIO_LOGGER_WEBHOOK_ENDPOINT" - EnvLoggerWebhookAuthToken = "MINIO_LOGGER_WEBHOOK_AUTH_TOKEN" - EnvLoggerWebhookClientCert = "MINIO_LOGGER_WEBHOOK_CLIENT_CERT" - EnvLoggerWebhookClientKey = "MINIO_LOGGER_WEBHOOK_CLIENT_KEY" - EnvLoggerWebhookProxy = "MINIO_LOGGER_WEBHOOK_PROXY" - EnvLoggerWebhookBatchSize = "MINIO_LOGGER_WEBHOOK_BATCH_SIZE" - EnvLoggerWebhookQueueSize = "MINIO_LOGGER_WEBHOOK_QUEUE_SIZE" - EnvLoggerWebhookQueueDir = "MINIO_LOGGER_WEBHOOK_QUEUE_DIR" - - EnvAuditWebhookEnable = "MINIO_AUDIT_WEBHOOK_ENABLE" - EnvAuditWebhookEndpoint = "MINIO_AUDIT_WEBHOOK_ENDPOINT" - EnvAuditWebhookAuthToken = "MINIO_AUDIT_WEBHOOK_AUTH_TOKEN" - EnvAuditWebhookClientCert = "MINIO_AUDIT_WEBHOOK_CLIENT_CERT" - EnvAuditWebhookClientKey = "MINIO_AUDIT_WEBHOOK_CLIENT_KEY" - EnvAuditWebhookBatchSize = "MINIO_AUDIT_WEBHOOK_BATCH_SIZE" - EnvAuditWebhookQueueSize = "MINIO_AUDIT_WEBHOOK_QUEUE_SIZE" - EnvAuditWebhookQueueDir = "MINIO_AUDIT_WEBHOOK_QUEUE_DIR" + EnvLoggerWebhookEnable = "MINIO_LOGGER_WEBHOOK_ENABLE" + EnvLoggerWebhookEndpoint = "MINIO_LOGGER_WEBHOOK_ENDPOINT" + EnvLoggerWebhookAuthToken = "MINIO_LOGGER_WEBHOOK_AUTH_TOKEN" + EnvLoggerWebhookClientCert = "MINIO_LOGGER_WEBHOOK_CLIENT_CERT" + EnvLoggerWebhookClientKey = "MINIO_LOGGER_WEBHOOK_CLIENT_KEY" + EnvLoggerWebhookProxy = "MINIO_LOGGER_WEBHOOK_PROXY" + EnvLoggerWebhookBatchSize = "MINIO_LOGGER_WEBHOOK_BATCH_SIZE" + EnvLoggerWebhookQueueSize = "MINIO_LOGGER_WEBHOOK_QUEUE_SIZE" + EnvLoggerWebhookQueueDir = "MINIO_LOGGER_WEBHOOK_QUEUE_DIR" + EnvLoggerWebhookMaxRetry = "MINIO_LOGGER_WEBHOOK_MAX_RETRY" + EnvLoggerWebhookRetryInterval = "MINIO_LOGGER_WEBHOOK_RETRY_INTERVAL" + EnvLoggerWebhookHTTPTimeout = "MINIO_LOGGER_WEBHOOK_HTTP_TIMEOUT" + + EnvAuditWebhookEnable = "MINIO_AUDIT_WEBHOOK_ENABLE" + EnvAuditWebhookEndpoint = "MINIO_AUDIT_WEBHOOK_ENDPOINT" + EnvAuditWebhookAuthToken = "MINIO_AUDIT_WEBHOOK_AUTH_TOKEN" + EnvAuditWebhookClientCert = 
"MINIO_AUDIT_WEBHOOK_CLIENT_CERT" + EnvAuditWebhookClientKey = "MINIO_AUDIT_WEBHOOK_CLIENT_KEY" + EnvAuditWebhookBatchSize = "MINIO_AUDIT_WEBHOOK_BATCH_SIZE" + EnvAuditWebhookQueueSize = "MINIO_AUDIT_WEBHOOK_QUEUE_SIZE" + EnvAuditWebhookQueueDir = "MINIO_AUDIT_WEBHOOK_QUEUE_DIR" + EnvAuditWebhookMaxRetry = "MINIO_AUDIT_WEBHOOK_MAX_RETRY" + EnvAuditWebhookRetryInterval = "MINIO_AUDIT_WEBHOOK_RETRY_INTERVAL" + EnvAuditWebhookHTTPTimeout = "MINIO_AUDIT_WEBHOOK_HTTP_TIMEOUT" EnvKafkaEnable = "MINIO_AUDIT_KAFKA_ENABLE" EnvKafkaBrokers = "MINIO_AUDIT_KAFKA_BROKERS" @@ -146,6 +157,18 @@ var ( Key: QueueDir, Value: "", }, + config.KV{ + Key: MaxRetry, + Value: "0", + }, + config.KV{ + Key: RetryInterval, + Value: "3s", + }, + config.KV{ + Key: httpTimeout, + Value: "5s", + }, } DefaultAuditWebhookKVS = config.KVS{ @@ -181,6 +204,18 @@ var ( Key: QueueDir, Value: "", }, + config.KV{ + Key: MaxRetry, + Value: "0", + }, + config.KV{ + Key: RetryInterval, + Value: "3s", + }, + config.KV{ + Key: httpTimeout, + Value: "5s", + }, } DefaultAuditKafkaKVS = config.KVS{ @@ -299,7 +334,7 @@ func lookupLegacyConfigForSubSys(ctx context.Context, subSys string) Config { } url, err := xnet.ParseHTTPURL(endpoint) if err != nil { - LogOnceIf(ctx, err, "logger-webhook-"+endpoint) + LogOnceIf(ctx, "logging", err, "logger-webhook-"+endpoint) continue } cfg.HTTP[target] = http.Config{ @@ -327,7 +362,7 @@ func lookupLegacyConfigForSubSys(ctx context.Context, subSys string) Config { } url, err := xnet.ParseHTTPURL(endpoint) if err != nil { - LogOnceIf(ctx, err, "audit-webhook-"+endpoint) + LogOnceIf(ctx, "logging", err, "audit-webhook-"+endpoint) continue } cfg.AuditWebhook[target] = http.Config{ @@ -335,7 +370,6 @@ func lookupLegacyConfigForSubSys(ctx context.Context, subSys string) Config { Endpoint: url, } } - } return cfg } @@ -355,7 +389,7 @@ func lookupAuditKafkaConfig(scfg config.Config, cfg Config) (Config, error) { if len(kafkaBrokers) == 0 { return cfg, config.Errorf("kafka 'brokers' cannot be empty") } - for _, s := range strings.Split(kafkaBrokers, config.ValueSeparator) { + for s := range strings.SplitSeq(kafkaBrokers, config.ValueSeparator) { var host *xnet.Host host, err = xnet.ParseHost(s) if err != nil { @@ -457,17 +491,46 @@ func lookupLoggerWebhookConfig(scfg config.Config, cfg Config) (Config, error) { if batchSize <= 0 { return cfg, errInvalidBatchSize } + maxRetryCfgVal := getCfgVal(EnvLoggerWebhookMaxRetry, k, kv.Get(MaxRetry)) + maxRetry, err := strconv.Atoi(maxRetryCfgVal) + if err != nil { + return cfg, err + } + if maxRetry < 0 { + return cfg, fmt.Errorf("invalid %s max_retry", maxRetryCfgVal) + } + retryIntervalCfgVal := getCfgVal(EnvLoggerWebhookRetryInterval, k, kv.Get(RetryInterval)) + retryInterval, err := time.ParseDuration(retryIntervalCfgVal) + if err != nil { + return cfg, err + } + if retryInterval > time.Minute { + return cfg, fmt.Errorf("maximum allowed value for retry interval is '1m': %s", retryIntervalCfgVal) + } + + httpTimeoutCfgVal := getCfgVal(EnvLoggerWebhookHTTPTimeout, k, kv.Get(httpTimeout)) + httpTimeout, err := time.ParseDuration(httpTimeoutCfgVal) + if err != nil { + return cfg, err + } + if httpTimeout < time.Second { + return cfg, fmt.Errorf("minimum value allowed for http_timeout is '1s': %s", httpTimeout) + } + cfg.HTTP[k] = http.Config{ - Enabled: true, - Endpoint: url, - AuthToken: getCfgVal(EnvLoggerWebhookAuthToken, k, kv.Get(AuthToken)), - ClientCert: clientCert, - ClientKey: clientKey, - Proxy: getCfgVal(EnvLoggerWebhookProxy, k, kv.Get(Proxy)), - BatchSize: 
batchSize, - QueueSize: queueSize, - QueueDir: getCfgVal(EnvLoggerWebhookQueueDir, k, kv.Get(QueueDir)), - Name: loggerTargetNamePrefix + k, + HTTPTimeout: httpTimeout, + Enabled: true, + Endpoint: url, + AuthToken: getCfgVal(EnvLoggerWebhookAuthToken, k, kv.Get(AuthToken)), + ClientCert: clientCert, + ClientKey: clientKey, + Proxy: getCfgVal(EnvLoggerWebhookProxy, k, kv.Get(Proxy)), + BatchSize: batchSize, + QueueSize: queueSize, + QueueDir: getCfgVal(EnvLoggerWebhookQueueDir, k, kv.Get(QueueDir)), + MaxRetry: maxRetry, + RetryIntvl: retryInterval, + Name: loggerTargetNamePrefix + k, } } return cfg, nil @@ -519,16 +582,46 @@ func lookupAuditWebhookConfig(scfg config.Config, cfg Config) (Config, error) { if batchSize <= 0 { return cfg, errInvalidBatchSize } + maxRetryCfgVal := getCfgVal(EnvAuditWebhookMaxRetry, k, kv.Get(MaxRetry)) + maxRetry, err := strconv.Atoi(maxRetryCfgVal) + if err != nil { + return cfg, err + } + if maxRetry < 0 { + return cfg, fmt.Errorf("invalid %s max_retry", maxRetryCfgVal) + } + + retryIntervalCfgVal := getCfgVal(EnvAuditWebhookRetryInterval, k, kv.Get(RetryInterval)) + retryInterval, err := time.ParseDuration(retryIntervalCfgVal) + if err != nil { + return cfg, err + } + if retryInterval > time.Minute { + return cfg, fmt.Errorf("maximum allowed value for retry interval is '1m': %s", retryIntervalCfgVal) + } + + httpTimeoutCfgVal := getCfgVal(EnvAuditWebhookHTTPTimeout, k, kv.Get(httpTimeout)) + httpTimeout, err := time.ParseDuration(httpTimeoutCfgVal) + if err != nil { + return cfg, err + } + if httpTimeout < time.Second { + return cfg, fmt.Errorf("minimum value allowed for http_timeout is '1s': %s", httpTimeout) + } + cfg.AuditWebhook[k] = http.Config{ - Enabled: true, - Endpoint: url, - AuthToken: getCfgVal(EnvAuditWebhookAuthToken, k, kv.Get(AuthToken)), - ClientCert: clientCert, - ClientKey: clientKey, - BatchSize: batchSize, - QueueSize: queueSize, - QueueDir: getCfgVal(EnvAuditWebhookQueueDir, k, kv.Get(QueueDir)), - Name: auditTargetNamePrefix + k, + HTTPTimeout: httpTimeout, + Enabled: true, + Endpoint: url, + AuthToken: getCfgVal(EnvAuditWebhookAuthToken, k, kv.Get(AuthToken)), + ClientCert: clientCert, + ClientKey: clientKey, + BatchSize: batchSize, + QueueSize: queueSize, + QueueDir: getCfgVal(EnvAuditWebhookQueueDir, k, kv.Get(QueueDir)), + MaxRetry: maxRetry, + RetryIntvl: retryInterval, + Name: auditTargetNamePrefix + k, } } return cfg, nil diff --git a/internal/logger/console.go b/internal/logger/console.go index 4d8f4940ab2b8..d2a0226734273 100644 --- a/internal/logger/console.go +++ b/internal/logger/console.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015-2021 MinIO, Inc. +// Copyright (c) 2015-2024 MinIO, Inc. // // This file is part of MinIO Object Storage stack // @@ -24,9 +24,8 @@ import ( "strings" "time" + "github.com/minio/madmin-go/v3/logger/log" "github.com/minio/minio/internal/color" - c "github.com/minio/pkg/v2/console" - "github.com/minio/pkg/v2/logger/message/log" ) // ConsoleLoggerTgt is a stringified value to represent console logging @@ -37,12 +36,12 @@ var ExitFunc = os.Exit // Logger interface describes the methods that need to be implemented to satisfy the interface requirements. 
type Logger interface { - json(msg string, args ...interface{}) - quiet(msg string, args ...interface{}) - pretty(msg string, args ...interface{}) + json(msg string, args ...any) + quiet(msg string, args ...any) + pretty(msg string, args ...any) } -func consoleLog(console Logger, msg string, args ...interface{}) { +func consoleLog(console Logger, msg string, args ...any) { switch { case jsonFlag: // Strip escape control characters from json message @@ -65,25 +64,28 @@ func consoleLog(console Logger, msg string, args ...interface{}) { // Fatal prints only fatal error message with no stack trace // it will be called for input validation failures -func Fatal(err error, msg string, data ...interface{}) { +func Fatal(err error, msg string, data ...any) { fatal(err, msg, data...) } -func fatal(err error, msg string, data ...interface{}) { - var errMsg string - if msg != "" { - errMsg = errorFmtFunc(fmt.Sprintf(msg, data...), err, jsonFlag) +func fatal(err error, msg string, data ...any) { + if msg == "" { + if len(data) > 0 { + msg = fmt.Sprint(data...) + } else { + msg = "a fatal error" + } } else { - errMsg = err.Error() + msg = fmt.Sprintf(msg, data...) } - consoleLog(fatalMessage, errMsg) + consoleLog(fatalMessage, errorFmtFunc(msg, err, jsonFlag)) } var fatalMessage fatalMsg type fatalMsg struct{} -func (f fatalMsg) json(msg string, args ...interface{}) { +func (f fatalMsg) json(msg string, args ...any) { var message string if msg != "" { message = fmt.Sprintf(msg, args...) @@ -99,23 +101,22 @@ func (f fatalMsg) json(msg string, args ...interface{}) { if err != nil { panic(err) } - fmt.Println(string(logJSON)) - + fmt.Fprintln(Output, string(logJSON)) ExitFunc(1) } -func (f fatalMsg) quiet(msg string, args ...interface{}) { +func (f fatalMsg) quiet(msg string, args ...any) { f.pretty(msg, args...) } var ( - logTag = "ERROR" + logTag = "FATAL" logBanner = color.BgRed(color.FgWhite(color.Bold(logTag))) + " " emptyBanner = color.BgRed(strings.Repeat(" ", len(logTag))) + " " bannerWidth = len(logTag) + 1 ) -func (f fatalMsg) pretty(msg string, args ...interface{}) { +func (f fatalMsg) pretty(msg string, args ...any) { // Build the passed error message errMsg := fmt.Sprintf(msg, args...) @@ -127,30 +128,27 @@ func (f fatalMsg) pretty(msg string, args ...interface{}) { // message itself contains some colored text, we needed // to use some ANSI control escapes to cursor color state // and freely move in the screen. - for _, line := range strings.Split(errMsg, "\n") { + for line := range strings.SplitSeq(errMsg, "\n") { if len(line) == 0 { // No more text to print, just quit. 
break } - for { - // Save the attributes of the current cursor helps - // us save the text color of the passed error message - ansiSaveAttributes() - // Print banner with or without the log tag - if !tagPrinted { - c.Print(logBanner) - tagPrinted = true - } else { - c.Print(emptyBanner) - } - // Restore the text color of the error message - ansiRestoreAttributes() - ansiMoveRight(bannerWidth) - // Continue error message printing - c.Println(line) - break + // Save the attributes of the current cursor helps + // us save the text color of the passed error message + ansiSaveAttributes() + // Print banner with or without the log tag + if !tagPrinted { + fmt.Fprint(Output, logBanner) + tagPrinted = true + } else { + fmt.Fprint(Output, emptyBanner) } + // Restore the text color of the error message + ansiRestoreAttributes() + ansiMoveRight(bannerWidth) + // Continue error message printing + fmt.Fprintln(Output, line) } // Exit because this is a fatal error message @@ -161,7 +159,7 @@ type infoMsg struct{} var info infoMsg -func (i infoMsg) json(msg string, args ...interface{}) { +func (i infoMsg) json(msg string, args ...any) { var message string if msg != "" { message = fmt.Sprintf(msg, args...) @@ -176,24 +174,25 @@ func (i infoMsg) json(msg string, args ...interface{}) { if err != nil { panic(err) } - fmt.Println(string(logJSON)) + fmt.Fprintln(Output, string(logJSON)) } -func (i infoMsg) quiet(msg string, args ...interface{}) { +func (i infoMsg) quiet(msg string, args ...any) { } -func (i infoMsg) pretty(msg string, args ...interface{}) { +func (i infoMsg) pretty(msg string, args ...any) { if msg == "" { - c.Println(args...) + fmt.Fprintln(Output, args...) + } else { + fmt.Fprintf(Output, `INFO: `+msg, args...) } - c.Printf(msg, args...) } type errorMsg struct{} -var errorm errorMsg +var errorMessage errorMsg -func (i errorMsg) json(msg string, args ...interface{}) { +func (i errorMsg) json(msg string, args ...any) { var message string if msg != "" { message = fmt.Sprintf(msg, args...) @@ -209,32 +208,117 @@ func (i errorMsg) json(msg string, args ...interface{}) { if err != nil { panic(err) } - fmt.Println(string(logJSON)) + fmt.Fprintln(Output, string(logJSON)) } -func (i errorMsg) quiet(msg string, args ...interface{}) { +func (i errorMsg) quiet(msg string, args ...any) { i.pretty(msg, args...) } -func (i errorMsg) pretty(msg string, args ...interface{}) { +func (i errorMsg) pretty(msg string, args ...any) { if msg == "" { - c.Println(args...) + fmt.Fprintln(Output, args...) + } else { + fmt.Fprintf(Output, `ERRO: `+msg, args...) } - c.Printf(msg, args...) } // Error : -func Error(msg string, data ...interface{}) { - if DisableErrorLog { +func Error(msg string, data ...any) { + if DisableLog { return } - consoleLog(errorm, msg, data...) + consoleLog(errorMessage, msg, data...) } // Info : -func Info(msg string, data ...interface{}) { - if DisableErrorLog { +func Info(msg string, data ...any) { + if DisableLog { return } consoleLog(info, msg, data...) } + +// Startup : +func Startup(msg string, data ...any) { + if DisableLog { + return + } + consoleLog(startup, msg, data...) +} + +type startupMsg struct{} + +var startup startupMsg + +func (i startupMsg) json(msg string, args ...any) { + var message string + if msg != "" { + message = fmt.Sprintf(msg, args...) + } else { + message = fmt.Sprint(args...) 
+ } + logJSON, err := json.Marshal(&log.Entry{ + Level: InfoKind, + Message: message, + Time: time.Now().UTC(), + }) + if err != nil { + panic(err) + } + fmt.Fprintln(Output, string(logJSON)) +} + +func (i startupMsg) quiet(msg string, args ...any) { +} + +func (i startupMsg) pretty(msg string, args ...any) { + if msg == "" { + fmt.Fprintln(Output, args...) + } else { + fmt.Fprintf(Output, msg, args...) + } +} + +type warningMsg struct{} + +var warningMessage warningMsg + +func (i warningMsg) json(msg string, args ...any) { + var message string + if msg != "" { + message = fmt.Sprintf(msg, args...) + } else { + message = fmt.Sprint(args...) + } + logJSON, err := json.Marshal(&log.Entry{ + Level: WarningKind, + Message: message, + Time: time.Now().UTC(), + Trace: &log.Trace{Message: message, Source: []string{getSource(6)}}, + }) + if err != nil { + panic(err) + } + fmt.Fprintln(Output, string(logJSON)) +} + +func (i warningMsg) quiet(msg string, args ...any) { + i.pretty(msg, args...) +} + +func (i warningMsg) pretty(msg string, args ...any) { + if msg == "" { + fmt.Fprintln(Output, args...) + } else { + fmt.Fprintf(Output, `WARN: `+msg, args...) + } +} + +// Warning : +func Warning(msg string, data ...any) { + if DisableLog { + return + } + consoleLog(warningMessage, msg, data...) +} diff --git a/internal/logger/help.go b/internal/logger/help.go index b540774e799d4..cc489b99edc71 100644 --- a/internal/logger/help.go +++ b/internal/logger/help.go @@ -76,6 +76,24 @@ var ( Optional: true, Type: "string", }, + config.HelpKV{ + Key: MaxRetry, + Description: `maximum retry count before we start dropping logged event(s)`, + Optional: true, + Type: "number", + }, + config.HelpKV{ + Key: RetryInterval, + Description: `sleep between each retries, allowed maximum value is '1m' e.g. '10s'`, + Optional: true, + Type: "duration", + }, + config.HelpKV{ + Key: httpTimeout, + Description: `defines the maximum duration for each http request`, + Optional: true, + Type: "duration", + }, config.HelpKV{ Key: config.Comment, Description: config.DefaultComment, @@ -131,6 +149,24 @@ var ( Optional: true, Type: "string", }, + config.HelpKV{ + Key: MaxRetry, + Description: `maximum retry count before we start dropping audit event(s)`, + Optional: true, + Type: "number", + }, + config.HelpKV{ + Key: RetryInterval, + Description: `sleep between each retries, allowed maximum value is '1m' e.g. 
'10s'`, + Optional: true, + Type: "duration", + }, + config.HelpKV{ + Key: httpTimeout, + Description: `defines the maximum duration for each http request`, + Optional: true, + Type: "duration", + }, config.HelpKV{ Key: config.Comment, Description: config.DefaultComment, diff --git a/internal/logger/logger.go b/internal/logger/logger.go index 3ef2f8fb1a6af..09573ab49daeb 100644 --- a/internal/logger/logger.go +++ b/internal/logger/logger.go @@ -23,6 +23,8 @@ import ( "errors" "fmt" "go/build" + "io" + "os" "path/filepath" "reflect" "runtime" @@ -32,8 +34,9 @@ import ( "github.com/minio/highwayhash" "github.com/minio/madmin-go/v3" + "github.com/minio/madmin-go/v3/logger/log" + "github.com/minio/minio/internal/color" xhttp "github.com/minio/minio/internal/http" - "github.com/minio/pkg/v2/logger/message/log" ) // HighwayHash key for logging in anonymous mode @@ -49,8 +52,12 @@ const ( InfoKind = madmin.LogKindInfo ) -// DisableErrorLog avoids printing error/event/info kind of logs -var DisableErrorLog = false +var ( + // DisableLog avoids printing error/event/info kind of logs + DisableLog = false + // Output allows configuring custom writer, defaults to os.Stderr + Output io.Writer = os.Stderr +) var trimStrings []string @@ -68,16 +75,21 @@ var matchingFuncNames = [...]string{ var ( quietFlag, jsonFlag, anonFlag bool // Custom function to format error - errorFmtFunc func(string, error, bool) string + // can be registered by RegisterError + errorFmtFunc = func(introMsg string, err error, jsonFlag bool) string { + return fmt.Sprintf("msg: %s\n err:%s", introMsg, err) + } ) // EnableQuiet - turns quiet option on. func EnableQuiet() { + color.TurnOff() // no colored outputs necessary in quiet mode. quietFlag = true } // EnableJSON - outputs logs in json format. func EnableJSON() { + color.TurnOff() // no colored outputs necessary in JSON mode. jsonFlag = true quietFlag = true } @@ -242,27 +254,26 @@ func HashString(input string) string { // LogAlwaysIf prints a detailed error message during // the execution of the server. -func LogAlwaysIf(ctx context.Context, err error, errKind ...interface{}) { +func LogAlwaysIf(ctx context.Context, subsystem string, err error, errKind ...any) { if err == nil { return } - - logIf(ctx, err, errKind...) + logIf(ctx, subsystem, err, errKind...) } // LogIf prints a detailed error message during // the execution of the server, if it is not an // ignored error. -func LogIf(ctx context.Context, err error, errKind ...interface{}) { +func LogIf(ctx context.Context, subsystem string, err error, errKind ...any) { if logIgnoreError(err) { return } - logIf(ctx, err, errKind...) + logIf(ctx, subsystem, err, errKind...) } // LogIfNot prints a detailed error message during // the execution of the server, if it is not an ignored error (either internal or given). -func LogIfNot(ctx context.Context, err error, ignored ...error) { +func LogIfNot(ctx context.Context, subsystem string, err error, ignored ...error) { if logIgnoreError(err) { return } @@ -271,24 +282,24 @@ func LogIfNot(ctx context.Context, err error, ignored ...error) { return } } - logIf(ctx, err) + logIf(ctx, subsystem, err) } -func errToEntry(ctx context.Context, err error, errKind ...interface{}) log.Entry { +func errToEntry(ctx context.Context, subsystem string, err error, errKind ...any) log.Entry { var l string if anonFlag { l = reflect.TypeOf(err).String() } else { l = fmt.Sprintf("%v (%T)", err, err) } - return buildLogEntry(ctx, l, getTrace(3), errKind...) 
+ return buildLogEntry(ctx, subsystem, l, getTrace(3), errKind...) } -func logToEntry(ctx context.Context, message string, errKind ...interface{}) log.Entry { - return buildLogEntry(ctx, message, nil, errKind...) +func logToEntry(ctx context.Context, subsystem, message string, errKind ...any) log.Entry { + return buildLogEntry(ctx, subsystem, message, nil, errKind...) } -func buildLogEntry(ctx context.Context, message string, trace []string, errKind ...interface{}) log.Entry { +func buildLogEntry(ctx context.Context, subsystem, message string, trace []string, errKind ...any) log.Entry { logKind := madmin.LogKindError if len(errKind) > 0 { if ek, ok := errKind[0].(madmin.LogKind); ok { @@ -307,12 +318,15 @@ func buildLogEntry(ctx context.Context, message string, trace []string, errKind defer req.RUnlock() API := "SYSTEM" - if req.API != "" { + switch { + case req.API != "": API = req.API + case subsystem != "": + API += "." + subsystem } // Copy tags. We hold read lock already. - tags := make(map[string]interface{}, len(req.tags)) + tags := make(map[string]any, len(req.tags)) for _, entry := range req.tags { tags[entry.Key] = entry.Val } @@ -364,7 +378,9 @@ func buildLogEntry(ctx context.Context, message string, trace []string, errKind entry.API.Args.Bucket = HashString(entry.API.Args.Bucket) entry.API.Args.Object = HashString(entry.API.Args.Object) entry.RemoteHost = HashString(entry.RemoteHost) - entry.Trace.Variables = make(map[string]interface{}) + if entry.Trace != nil { + entry.Trace.Variables = make(map[string]any) + } } return entry @@ -372,28 +388,29 @@ func buildLogEntry(ctx context.Context, message string, trace []string, errKind // consoleLogIf prints a detailed error message during // the execution of the server. -func consoleLogIf(ctx context.Context, err error, errKind ...interface{}) { - if DisableErrorLog { +func consoleLogIf(ctx context.Context, subsystem string, err error, errKind ...any) { + if DisableLog { return } if err == nil { return } if consoleTgt != nil { - consoleTgt.Send(ctx, errToEntry(ctx, err, errKind...)) + entry := errToEntry(ctx, subsystem, err, errKind...) + consoleTgt.Send(ctx, entry) } } // logIf prints a detailed error message during // the execution of the server. -func logIf(ctx context.Context, err error, errKind ...interface{}) { - if DisableErrorLog { +func logIf(ctx context.Context, subsystem string, err error, errKind ...any) { + if DisableLog { return } if err == nil { return } - entry := errToEntry(ctx, err, errKind...) + entry := errToEntry(ctx, subsystem, err, errKind...) 
sendLog(ctx, entry) } @@ -407,19 +424,18 @@ func sendLog(ctx context.Context, entry log.Entry) { for _, t := range systemTgts { if err := t.Send(ctx, entry); err != nil { if consoleTgt != nil { // Sending to the console never fails - entry.Trace.Message = fmt.Sprintf("event(%#v) was not sent to Logger target (%#v): %#v", entry, t, err) - consoleTgt.Send(ctx, entry) + consoleTgt.Send(ctx, errToEntry(ctx, "logging", fmt.Errorf("unable to send log event to Logger target (%s): %v", t.String(), err), entry.Level)) } } } } // Event sends a event log to log targets -func Event(ctx context.Context, msg string, args ...interface{}) { - if DisableErrorLog { +func Event(ctx context.Context, subsystem, msg string, args ...any) { + if DisableLog { return } - entry := logToEntry(ctx, fmt.Sprintf(msg, args...), EventKind) + entry := logToEntry(ctx, subsystem, fmt.Sprintf(msg, args...), EventKind) sendLog(ctx, entry) } @@ -428,15 +444,15 @@ var ErrCritical struct{} // CriticalIf logs the provided error on the console. It fails the // current go-routine by causing a `panic(ErrCritical)`. -func CriticalIf(ctx context.Context, err error, errKind ...interface{}) { +func CriticalIf(ctx context.Context, err error, errKind ...any) { if err != nil { - LogIf(ctx, err, errKind...) + LogIf(ctx, "", err, errKind...) panic(ErrCritical) } } // FatalIf is similar to Fatal() but it ignores passed nil error -func FatalIf(err error, msg string, data ...interface{}) { +func FatalIf(err error, msg string, data ...any) { if err == nil { return } diff --git a/internal/logger/logonce.go b/internal/logger/logonce.go index 6cfa9998d8756..319e57b9a0690 100644 --- a/internal/logger/logonce.go +++ b/internal/logger/logonce.go @@ -25,7 +25,7 @@ import ( ) // LogOnce provides the function type for logger.LogOnceIf() function -type LogOnce func(ctx context.Context, err error, id string, errKind ...interface{}) +type LogOnce func(ctx context.Context, err error, id string, errKind ...any) type onceErr struct { Err error @@ -38,7 +38,7 @@ type logOnceType struct { sync.Mutex } -func (l *logOnceType) logOnceConsoleIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func (l *logOnceType) logOnceConsoleIf(ctx context.Context, subsystem string, err error, id string, errKind ...any) { if err == nil { return } @@ -61,7 +61,7 @@ func (l *logOnceType) logOnceConsoleIf(ctx context.Context, err error, id string l.Unlock() if shouldLog { - consoleLogIf(ctx, err, errKind...) + consoleLogIf(ctx, subsystem, err, errKind...) } } @@ -92,7 +92,7 @@ func unwrapErrs(err error) (leafErr error) { } // One log message per error. -func (l *logOnceType) logOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func (l *logOnceType) logOnceIf(ctx context.Context, subsystem string, err error, id string, errKind ...any) { if err == nil { return } @@ -115,7 +115,7 @@ func (l *logOnceType) logOnceIf(ctx context.Context, err error, id string, errKi l.Unlock() if shouldLog { - logIf(ctx, err, errKind...) + logIf(ctx, subsystem, err, errKind...) } } @@ -142,17 +142,17 @@ var logOnce = newLogOnceType() // LogOnceIf - Logs notification errors - once per error. // id is a unique identifier for related log messages, refer to cmd/notification.go // on how it is used. -func LogOnceIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func LogOnceIf(ctx context.Context, subsystem string, err error, id string, errKind ...any) { if logIgnoreError(err) { return } - logOnce.logOnceIf(ctx, err, id, errKind...) 
+ logOnce.logOnceIf(ctx, subsystem, err, id, errKind...) } // LogOnceConsoleIf - similar to LogOnceIf but exclusively only logs to console target. -func LogOnceConsoleIf(ctx context.Context, err error, id string, errKind ...interface{}) { +func LogOnceConsoleIf(ctx context.Context, subsystem string, err error, id string, errKind ...any) { if logIgnoreError(err) { return } - logOnce.logOnceConsoleIf(ctx, err, id, errKind...) + logOnce.logOnceConsoleIf(ctx, subsystem, err, id, errKind...) } diff --git a/internal/logger/logrotate.go b/internal/logger/logrotate.go new file mode 100644 index 0000000000000..0f47901c9a38e --- /dev/null +++ b/internal/logger/logrotate.go @@ -0,0 +1,239 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . + +package logger + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "time" + + "github.com/klauspost/compress/gzip" + "github.com/minio/madmin-go/v3/logger/log" + xioutil "github.com/minio/minio/internal/ioutil" +) + +func defaultFilenameFunc() string { + return fmt.Sprintf("minio-%s.log", fmt.Sprintf("%X", time.Now().UTC().UnixNano())) +} + +// Options define configuration options for Writer +type Options struct { + // Directory defines the directory where log files will be written to. + // If the directory does not exist, it will be created. + Directory string + + // MaximumFileSize defines the maximum size of each log file in bytes. + MaximumFileSize int64 + + // FileNameFunc specifies the name a new file will take. + // FileNameFunc must ensure collisions in filenames do not occur. + // Do not rely on timestamps to be unique, high throughput writes + // may fall on the same timestamp. + // Eg. + // 2020-03-28_15-00-945-.log + // When FileNameFunc is not specified, DefaultFilenameFunc will be used. + FileNameFunc func() string + + // Compress specify if you want the logs to be compressed after rotation. + Compress bool +} + +// Writer is a concurrency-safe writer with file rotation. +type Writer struct { + // opts are the configuration options for this Writer + opts Options + + // f is the currently open file used for appends. + // Writes to f are only synchronized once Close() is called, + // or when files are being rotated. + f *os.File + + pw *xioutil.PipeWriter + pr *xioutil.PipeReader +} + +// Write writes p into the current file, rotating if necessary. +// Write is non-blocking, if the writer's queue is not full. +// Write is blocking otherwise. +func (w *Writer) Write(p []byte) (n int, err error) { + return w.pw.Write(p) +} + +// Close closes the writer. +// Any accepted writes will be flushed. Any new writes will be rejected. +// Once Close() exits, files are synchronized to disk. 
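
For orientation, a minimal sketch of call sites after the signature changes to `LogIf`, `LogOnceIf` and `Event` earlier in this patch; the subsystem string surfaces in the entry's API field as `SYSTEM.<subsystem>` when no S3 API name is attached to the context. The `config` subsystem name and the error text below are illustrative only, not taken from the patch.

```go
// Hypothetical call sites after the subsystem parameter was added; the
// subsystem name "config" is an example, not one mandated by the patch.
package main

import (
	"context"
	"errors"

	"github.com/minio/minio/internal/logger"
)

func main() {
	ctx := context.Background()
	err := errors.New("unable to load configuration")

	// Tagged with the originating subsystem; reported as API=SYSTEM.config
	// when the context carries no S3 API name.
	logger.LogIf(ctx, "config", err)

	// Deduplicated variant: logged once per unique error for the given id.
	logger.LogOnceIf(ctx, "config", err, "config-load-failure")

	// Event-kind message routed to the configured log targets.
	logger.Event(ctx, "config", "configuration reloaded from %s", "config.env")
}
```
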
+func (w *Writer) Close() error { + w.pw.CloseWithError(nil) + + if w.f != nil { + if err := w.closeCurrentFile(); err != nil { + return err + } + } + + return nil +} + +var stdErrEnc = json.NewEncoder(os.Stderr) + +func (w *Writer) listen() { + for { + var r io.Reader = w.pr + if w.opts.MaximumFileSize > 0 { + r = io.LimitReader(w.pr, w.opts.MaximumFileSize) + } + if _, err := io.Copy(w.f, r); err != nil { + msg := fmt.Sprintf("unable to write to log file %v: %v", w.f.Name(), err) + stdErrEnc.Encode(&log.Entry{ + Level: ErrorKind, + Message: msg, + Time: time.Now().UTC(), + Trace: &log.Trace{Message: msg}, + }) + } + if err := w.rotate(); err != nil { + msg := fmt.Sprintf("unable to rotate log file %v: %v", w.f.Name(), err) + stdErrEnc.Encode(&log.Entry{ + Level: ErrorKind, + Message: msg, + Time: time.Now().UTC(), + Trace: &log.Trace{Message: msg}, + }) + } + } +} + +func (w *Writer) closeCurrentFile() error { + if err := w.f.Close(); err != nil { + return fmt.Errorf("unable to close current log file: %w", err) + } + + return nil +} + +func (w *Writer) compress() error { + if !w.opts.Compress { + return nil + } + + oldLgFile := w.f.Name() + r, err := os.Open(oldLgFile) + if err != nil { + return err + } + defer r.Close() + + gw, err := os.Create(oldLgFile + ".gz") + if err != nil { + return err + } + defer gw.Close() + + var wc io.WriteCloser = gzip.NewWriter(gw) + if _, err = io.Copy(wc, r); err != nil { + return err + } + + if err = wc.Close(); err != nil { + return err + } + + // Persist to disk any caches. + if err = gw.Sync(); err != nil { + return err + } + + // close everything before we delete. + if err = gw.Close(); err != nil { + return err + } + + if err = r.Close(); err != nil { + return err + } + + // Attempt to remove after all fd's are closed. + return os.Remove(oldLgFile) +} + +func (w *Writer) rotate() error { + if w.f != nil { + if err := w.closeCurrentFile(); err != nil { + return err + } + + // This function is a no-op if opts.Compress is false + // writes an error in JSON form to stderr, if we cannot + // compress. + if err := w.compress(); err != nil { + msg := fmt.Sprintf("unable to compress log file %v: %v, ignoring and moving on", w.f.Name(), err) + stdErrEnc.Encode(&log.Entry{ + Level: ErrorKind, + Message: msg, + Time: time.Now().UTC(), + Trace: &log.Trace{Message: msg}, + }) + } + } + + path := filepath.Join(w.opts.Directory, w.opts.FileNameFunc()) + f, err := newFile(path) + if err != nil { + return fmt.Errorf("unable to create new file at %v: %w", path, err) + } + + w.f = f + + return nil +} + +// NewDir creates a new concurrency safe Writer which performs log rotation. 
+func NewDir(opts Options) (io.WriteCloser, error) { + if err := os.MkdirAll(opts.Directory, os.ModePerm); err != nil { + return nil, fmt.Errorf("directory %v does not exist and could not be created: %w", opts.Directory, err) + } + + if opts.FileNameFunc == nil { + opts.FileNameFunc = defaultFilenameFunc + } + + pr, pw := xioutil.WaitPipe() + + w := &Writer{ + opts: opts, + pw: pw, + pr: pr, + } + + if w.f == nil { + if err := w.rotate(); err != nil { + return nil, fmt.Errorf("Failed to create log file: %w", err) + } + } + + go w.listen() + + return w, nil +} + +func newFile(path string) (*os.File, error) { + return os.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE|os.O_SYNC, 0o666) +} diff --git a/internal/logger/message/audit/entry.go b/internal/logger/message/audit/entry.go index 91b72e0850590..4ef635f24f27a 100644 --- a/internal/logger/message/audit/entry.go +++ b/internal/logger/message/audit/entry.go @@ -22,7 +22,7 @@ import ( "strings" "time" - "github.com/minio/pkg/v2/logger/message/audit" + "github.com/minio/madmin-go/v3/logger/audit" "github.com/minio/minio/internal/handlers" xhttp "github.com/minio/minio/internal/http" @@ -41,7 +41,7 @@ func NewEntry(deploymentID string) audit.Entry { } // ToEntry - constructs an audit entry from a http request -func ToEntry(w http.ResponseWriter, r *http.Request, reqClaims map[string]interface{}, deploymentID string) audit.Entry { +func ToEntry(w http.ResponseWriter, r *http.Request, reqClaims map[string]any, deploymentID string) audit.Entry { entry := NewEntry(deploymentID) entry.RemoteHost = handlers.GetSourceIP(r) diff --git a/internal/logger/reqinfo.go b/internal/logger/reqinfo.go index 10d71a5d74872..0280c8aee8b78 100644 --- a/internal/logger/reqinfo.go +++ b/internal/logger/reqinfo.go @@ -33,7 +33,7 @@ const contextLogKey = contextKeyType("miniolog") // KeyVal - appended to ReqInfo.Tags type KeyVal struct { Key string - Val interface{} + Val string } // ObjectVersion object version key/versionId @@ -77,7 +77,7 @@ func NewReqInfo(remoteHost, userAgent, deploymentID, requestID, api, bucket, obj } // AppendTags - appends key/val to ReqInfo.tags -func (r *ReqInfo) AppendTags(key string, val interface{}) *ReqInfo { +func (r *ReqInfo) AppendTags(key, val string) *ReqInfo { if r == nil { return nil } @@ -88,7 +88,7 @@ func (r *ReqInfo) AppendTags(key string, val interface{}) *ReqInfo { } // SetTags - sets key/val to ReqInfo.tags -func (r *ReqInfo) SetTags(key string, val interface{}) *ReqInfo { +func (r *ReqInfo) SetTags(key, val string) *ReqInfo { if r == nil { return nil } @@ -121,13 +121,13 @@ func (r *ReqInfo) GetTags() []KeyVal { } // GetTagsMap - returns the user defined tags in a map structure -func (r *ReqInfo) GetTagsMap() map[string]interface{} { +func (r *ReqInfo) GetTagsMap() map[string]string { if r == nil { return nil } r.RLock() defer r.RUnlock() - m := make(map[string]interface{}, len(r.tags)) + m := make(map[string]string, len(r.tags)) for _, t := range r.tags { m[t.Key] = t.Val } @@ -135,7 +135,7 @@ func (r *ReqInfo) GetTagsMap() map[string]interface{} { } // PopulateTagsMap - returns the user defined tags in a map structure -func (r *ReqInfo) PopulateTagsMap(tagsMap map[string]interface{}) { +func (r *ReqInfo) PopulateTagsMap(tagsMap map[string]string) { if r == nil { return } @@ -147,13 +147,12 @@ func (r *ReqInfo) PopulateTagsMap(tagsMap map[string]interface{}) { for _, t := range r.tags { tagsMap[t.Key] = t.Val } - return } // SetReqInfo sets ReqInfo in the context. 
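
As a usage sketch only (the wiring below is assumed, the patch itself does not show it), the rotating writer created by `NewDir` can be plugged into the package-level `Output` writer introduced in logger.go; the directory, size limit and compression settings are arbitrary example values.

```go
// Illustrative wiring of the new log rotation writer; NewDir, Options and
// Output come from the patch, the chosen values are examples.
package main

import "github.com/minio/minio/internal/logger"

func main() {
	w, err := logger.NewDir(logger.Options{
		Directory:       "/var/log/minio", // created if it does not exist
		MaximumFileSize: 64 << 20,         // rotate after roughly 64 MiB
		Compress:        true,             // gzip each rotated file
	})
	if err != nil {
		logger.Fatal(err, "unable to initialize log rotation")
	}
	defer w.Close()

	// Send console logging to the rotating writer instead of os.Stderr.
	logger.Output = w
	logger.Info("log rotation enabled under %s", "/var/log/minio")
}
```
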
func SetReqInfo(ctx context.Context, req *ReqInfo) context.Context { if ctx == nil { - LogIf(context.Background(), fmt.Errorf("context is nil")) + LogIf(context.Background(), "", fmt.Errorf("context is nil")) return nil } return context.WithValue(ctx, contextLogKey, req) diff --git a/internal/logger/target/console/console.go b/internal/logger/target/console/console.go index e96c373c2ce26..a3b6dce009250 100644 --- a/internal/logger/target/console/console.go +++ b/internal/logger/target/console/console.go @@ -20,18 +20,20 @@ package console import ( "encoding/json" "fmt" + "io" "strconv" "strings" + "github.com/minio/madmin-go/v3/logger/log" "github.com/minio/minio/internal/color" "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/v2/console" - "github.com/minio/pkg/v2/logger/message/log" ) // Target implements loggerTarget to send log // in plain or json format to the standard output. -type Target struct{} +type Target struct { + output io.Writer +} // Validate - validate if the tty can be written to func (c *Target) Validate() error { @@ -48,7 +50,7 @@ func (c *Target) String() string { } // Send log message 'e' to console -func (c *Target) Send(e interface{}) error { +func (c *Target) Send(e any) error { entry, ok := e.(log.Entry) if !ok { return fmt.Errorf("Uexpected log entry structure %#v", e) @@ -58,12 +60,12 @@ func (c *Target) Send(e interface{}) error { if err != nil { return err } - fmt.Println(string(logJSON)) + fmt.Fprintln(c.output, string(logJSON)) return nil } if entry.Level == logger.EventKind { - fmt.Println(entry.Message) + fmt.Fprintln(c.output, entry.Message) return nil } @@ -88,22 +90,25 @@ func (c *Target) Send(e interface{}) error { var apiString string if entry.API != nil { - apiString = "API: " + entry.API.Name + "(" + apiString = "API: " + entry.API.Name if entry.API.Args != nil { + args := "" if entry.API.Args.Bucket != "" { - apiString = apiString + "bucket=" + entry.API.Args.Bucket + args = args + "bucket=" + entry.API.Args.Bucket } if entry.API.Args.Object != "" { - apiString = apiString + ", object=" + entry.API.Args.Object + args = args + ", object=" + entry.API.Args.Object } if entry.API.Args.VersionID != "" { - apiString = apiString + ", versionId=" + entry.API.Args.VersionID + args = args + ", versionId=" + entry.API.Args.VersionID } if len(entry.API.Args.Objects) > 0 { - apiString = apiString + ", multiObject=true, numberOfObjects=" + strconv.Itoa(len(entry.API.Args.Objects)) + args = args + ", multiObject=true, numberOfObjects=" + strconv.Itoa(len(entry.API.Args.Objects)) + } + if len(args) > 0 { + apiString += "(" + args + ")" } } - apiString += ")" } else { apiString = "INTERNAL" } @@ -143,13 +148,13 @@ func (c *Target) Send(e interface{}) error { apiString, timeString, deploymentID, requestID, remoteHost, host, userAgent, msg, tagString, strings.Join(trace, "\n")) - console.Println(output) + fmt.Fprintln(c.output, output) return nil } // New initializes a new logger target // which prints log directly in the standard // output. 
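
A small sketch of request-context tagging after the ReqInfo changes above, where tag values are now plain strings; all identifiers and values below are made up for illustration.

```go
// Example request tagging with string-typed values; the request metadata is
// fictional.
package main

import (
	"context"

	"github.com/minio/minio/internal/logger"
)

func main() {
	reqInfo := logger.NewReqInfo(
		"10.0.0.1",        // remoteHost
		"aws-sdk-go/1.44", // userAgent
		"deployment-id",   // deploymentID
		"request-id",      // requestID
		"PutObject",       // api
		"mybucket",        // bucket
		"photos/1.jpg",    // object
	)

	// Tag values are plain strings now, no interface{} boxing.
	reqInfo.AppendTags("objectLocation", "us-east-1")

	// Attach the request info to the context used by downstream logger calls.
	ctx := logger.SetReqInfo(context.Background(), reqInfo)
	_ = ctx
}
```
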
-func New() *Target { - return &Target{} +func New(w io.Writer) *Target { + return &Target{output: w} } diff --git a/internal/logger/target/http/http.go b/internal/logger/target/http/http.go index 9dcc24f2ead1f..0b0277845bb22 100644 --- a/internal/logger/target/http/http.go +++ b/internal/logger/target/http/http.go @@ -20,15 +20,14 @@ package http import ( "bytes" "context" - "encoding/json" "errors" "fmt" - "math" - "math/rand" "net/http" "net/url" "os" "path/filepath" + "strconv" + "strings" "sync" "sync/atomic" "time" @@ -36,16 +35,14 @@ import ( jsoniter "github.com/json-iterator/go" xhttp "github.com/minio/minio/internal/http" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger/target/types" + types "github.com/minio/minio/internal/logger/target/loggertypes" "github.com/minio/minio/internal/once" "github.com/minio/minio/internal/store" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" "github.com/valyala/bytebufferpool" ) const ( - // Timeout for the webhook http call - webhookCallTimeout = 3 * time.Second // maxWorkers is the maximum number of concurrent http loggers maxWorkers = 16 @@ -63,23 +60,31 @@ const ( statusClosed ) +var ( + logChBuffers = make(map[string]chan any) + logChLock = sync.Mutex{} +) + // Config http logger target type Config struct { - Enabled bool `json:"enabled"` - Name string `json:"name"` - UserAgent string `json:"userAgent"` - Endpoint *xnet.URL `json:"endpoint"` - AuthToken string `json:"authToken"` - ClientCert string `json:"clientCert"` - ClientKey string `json:"clientKey"` - BatchSize int `json:"batchSize"` - QueueSize int `json:"queueSize"` - QueueDir string `json:"queueDir"` - Proxy string `json:"string"` - Transport http.RoundTripper `json:"-"` + Enabled bool `json:"enabled"` + Name string `json:"name"` + UserAgent string `json:"userAgent"` + Endpoint *xnet.URL `json:"endpoint"` + AuthToken string `json:"authToken"` + ClientCert string `json:"clientCert"` + ClientKey string `json:"clientKey"` + BatchSize int `json:"batchSize"` + QueueSize int `json:"queueSize"` + QueueDir string `json:"queueDir"` + MaxRetry int `json:"maxRetry"` + RetryIntvl time.Duration `json:"retryInterval"` + Proxy string `json:"string"` + Transport http.RoundTripper `json:"-"` + HTTPTimeout time.Duration `json:"httpTimeout"` // Custom logger - LogOnce func(ctx context.Context, err error, id string, errKind ...interface{}) `json:"-"` + LogOnceIf func(ctx context.Context, err error, id string, errKind ...any) `json:"-"` } // Target implements logger.Target and sends the json @@ -88,43 +93,52 @@ type Config struct { // buffer is full, new logs are just ignored and an error // is returned to the caller. type Target struct { - totalMessages int64 - failedMessages int64 - status int32 + totalMessages atomic.Int64 + failedMessages atomic.Int64 + status atomic.Int32 // Worker control - workers int64 - workerStartMu sync.Mutex - lastStarted time.Time + workers atomic.Int64 + maxWorkers int64 + + // workerStartMu sync.Mutex + lastStarted time.Time wg sync.WaitGroup // Channel of log entries. // Reading logCh must hold read lock on logChMu (to avoid read race) // Sending a value on logCh must hold read lock on logChMu (to avoid closing) - logCh chan interface{} + logCh chan any logChMu sync.RWMutex + // If this webhook is being re-configured we will + // assign the new webhook target to this field. 
+ // The Send() method will then re-direct entries + // to the new target when the current one + // has been set to status "statusClosed". + // Once the glogal target slice has been migrated + // the current target will stop receiving entries. + migrateTarget *Target + // Number of events per HTTP send to webhook target // this is ideally useful only if your endpoint can // support reading multiple events on a stream for example // like : Splunk HTTP Event collector, if you are unsure // set this to '1'. - batchSize int - - // If the first init fails, this starts a goroutine that - // will attempt to establish the connection. - revive sync.Once + batchSize int + payloadType string // store to persist and replay the logs to the target // to avoid missing events when the target is down. - store store.Store[interface{}] + store store.Store[any] storeCtxCancel context.CancelFunc - initQueueStoreOnce once.Init + initQueueOnce once.Init - config Config - client *http.Client + config Config + client *http.Client + httpTimeout time.Duration } // Name returns the name of the target @@ -132,6 +146,11 @@ func (h *Target) Name() string { return "minio-http-" + h.config.Name } +// Type - returns type of the target +func (h *Target) Type() types.TargetType { + return types.TargetHTTP +} + // Endpoint returns the backend endpoint func (h *Target) Endpoint() string { return h.config.Endpoint.String() @@ -143,20 +162,7 @@ func (h *Target) String() string { // IsOnline returns true if the target is reachable using a cached value func (h *Target) IsOnline(ctx context.Context) bool { - return atomic.LoadInt32(&h.status) == statusOnline -} - -// ping returns true if the target is reachable. -func (h *Target) ping(ctx context.Context) bool { - if err := h.send(ctx, []byte(`{}`), "application/json", webhookCallTimeout); err != nil { - return !xnet.IsNetworkOrHostDown(err, false) && !xnet.IsConnRefusedErr(err) - } - // We are online. - h.workerStartMu.Lock() - h.lastStarted = time.Now() - h.workerStartMu.Unlock() - go h.startHTTPLogger(ctx) - return true + return h.status.Load() == statusOnline } // Stats returns the target statistics. @@ -165,78 +171,73 @@ func (h *Target) Stats() types.TargetStats { queueLength := len(h.logCh) h.logChMu.RUnlock() stats := types.TargetStats{ - TotalMessages: atomic.LoadInt64(&h.totalMessages), - FailedMessages: atomic.LoadInt64(&h.failedMessages), + TotalMessages: h.totalMessages.Load(), + FailedMessages: h.failedMessages.Load(), QueueLength: queueLength, } return stats } +// AssignMigrateTarget assigns a target +// which will eventually replace the current target. 
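
A hedged construction example for the reworked webhook target, showing the new `MaxRetry`, `RetryIntvl` and `HTTPTimeout` fields that back the `max_retry`, `retry_interval` and `http_timeout` keys added in config.go earlier in this patch. The endpoint, batch and queue values are placeholders, and the no-op `LogOnceIf` stands in for the logger the server normally injects.

```go
// Sketch of building a webhook target with the new tuning knobs; values and
// the endpoint URL are illustrative.
package main

import (
	"context"
	"net/http"
	"time"

	loghttp "github.com/minio/minio/internal/logger/target/http"
	xnet "github.com/minio/pkg/v3/net"
)

func main() {
	u, err := xnet.ParseHTTPURL("https://logs.example.com/webhook")
	if err != nil {
		panic(err)
	}

	tgt, err := loghttp.New(loghttp.Config{
		Enabled:     true,
		Name:        "audit1",
		Endpoint:    u,
		BatchSize:   10,              // events per POST (payload type left empty when >1)
		QueueSize:   100000,          // in-memory queue capacity
		MaxRetry:    5,               // retries per batch before dropping it
		RetryIntvl:  3 * time.Second, // sleep between retries
		HTTPTimeout: 5 * time.Second, // per-request timeout
		Transport:   http.DefaultTransport,
		LogOnceIf:   func(context.Context, error, string, ...any) {},
	})
	if err != nil {
		panic(err)
	}
	_ = tgt.Init(context.Background())
}
```
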
+func (h *Target) AssignMigrateTarget(migrateTgt *Target) { + h.migrateTarget = migrateTgt +} + // Init validate and initialize the http target func (h *Target) Init(ctx context.Context) (err error) { if h.config.QueueDir != "" { - return h.initQueueStoreOnce.DoWithContext(ctx, h.initQueueStore) + return h.initQueueOnce.DoWithContext(ctx, h.initDiskStore) } - return h.init(ctx) + return h.initQueueOnce.DoWithContext(ctx, h.initMemoryStore) } -func (h *Target) initQueueStore(ctx context.Context) (err error) { - var queueStore store.Store[interface{}] - queueDir := filepath.Join(h.config.QueueDir, h.Name()) - queueStore = store.NewQueueStore[interface{}](queueDir, uint64(h.config.QueueSize), httpLoggerExtension) - if err = queueStore.Open(); err != nil { - return fmt.Errorf("unable to initialize the queue store of %s webhook: %w", h.Name(), err) - } +func (h *Target) initDiskStore(ctx context.Context) (err error) { ctx, cancel := context.WithCancel(ctx) - h.store = queueStore h.storeCtxCancel = cancel - store.StreamItems(h.store, h, ctx.Done(), h.config.LogOnce) - return -} + h.lastStarted = time.Now() + go h.startQueueProcessor(ctx, true) -func (h *Target) init(ctx context.Context) (err error) { - switch atomic.LoadInt32(&h.status) { - case statusOnline: - return nil - case statusClosed: - return errors.New("target is closed") - } + queueStore := store.NewQueueStore[any]( + filepath.Join(h.config.QueueDir, h.Name()), + uint64(h.config.QueueSize), + httpLoggerExtension, + ) - if !h.ping(ctx) { - // Start a goroutine that will continue to check if we can reach - h.revive.Do(func() { - go func() { - // Avoid stamping herd, add jitter. - t := time.NewTicker(time.Second + time.Duration(rand.Int63n(int64(5*time.Second)))) - defer t.Stop() - - for range t.C { - if atomic.LoadInt32(&h.status) != statusOffline { - return - } - if h.ping(ctx) { - return - } - } - }() - }) - return err + if err := queueStore.Open(); err != nil { + return fmt.Errorf("unable to initialize the queue store of %s webhook: %w", h.Name(), err) } + + h.store = queueStore + store.StreamItems(h.store, h, ctx.Done(), h.config.LogOnceIf) + + return nil +} + +func (h *Target) initMemoryStore(ctx context.Context) (err error) { + ctx, cancel := context.WithCancel(ctx) + h.storeCtxCancel = cancel + h.lastStarted = time.Now() + go h.startQueueProcessor(ctx, true) return nil } -func (h *Target) send(ctx context.Context, payload []byte, payloadType string, timeout time.Duration) (err error) { +func (h *Target) send(ctx context.Context, payload []byte, payloadCount int, payloadType string, timeout time.Duration) (err error) { defer func() { if err != nil { - atomic.StoreInt32(&h.status, statusOffline) + if xnet.IsNetworkOrHostDown(err, false) { + h.status.Store(statusOffline) + } + h.failedMessages.Add(int64(payloadCount)) } else { - atomic.StoreInt32(&h.status, statusOnline) + h.status.Store(statusOnline) } }() ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() + req, err := http.NewRequestWithContext(ctx, http.MethodPost, h.Endpoint(), bytes.NewReader(payload)) if err != nil { @@ -245,6 +246,7 @@ func (h *Target) send(ctx context.Context, payload []byte, payloadType string, t if payloadType != "" { req.Header.Set(xhttp.ContentType, payloadType) } + req.Header.Set(xhttp.WebhookEventPayloadCount, strconv.Itoa(payloadCount)) req.Header.Set(xhttp.MinIOVersion, xhttp.GlobalMinIOVersion) req.Header.Set(xhttp.MinioDeploymentID, xhttp.GlobalDeploymentID) @@ -264,154 +266,327 @@ func (h *Target) send(ctx context.Context, payload 
[]byte, payloadType string, t // Drain any response. xhttp.DrainBody(resp.Body) - switch resp.StatusCode { - case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent: + if resp.StatusCode >= 200 && resp.StatusCode <= 299 { // accepted HTTP status codes. return nil - case http.StatusForbidden: + } else if resp.StatusCode == http.StatusForbidden { return fmt.Errorf("%s returned '%s', please check if your auth token is correctly set", h.Endpoint(), resp.Status) - default: - return fmt.Errorf("%s returned '%s', please check your endpoint configuration", h.Endpoint(), resp.Status) } + return fmt.Errorf("%s returned '%s', please check your endpoint configuration", h.Endpoint(), resp.Status) } -func (h *Target) logEntry(ctx context.Context, payload []byte, payloadType string) { - const maxTries = 3 - tries := 0 - for tries < maxTries { - if atomic.LoadInt32(&h.status) == statusClosed { - // Don't retry when closing... +func (h *Target) startQueueProcessor(ctx context.Context, mainWorker bool) { + h.logChMu.RLock() + if h.logCh == nil { + h.logChMu.RUnlock() + return + } + h.logChMu.RUnlock() + + h.workers.Add(1) + defer h.workers.Add(-1) + + h.wg.Add(1) + defer h.wg.Done() + + entries := make([]any, 0) + name := h.Name() + + defer func() { + // re-load the global buffer pointer + // in case it was modified by a new target. + logChLock.Lock() + currentGlobalBuffer, ok := logChBuffers[name] + logChLock.Unlock() + if !ok { return } - // sleep = (tries+2) ^ 2 milliseconds. - sleep := time.Duration(math.Pow(float64(tries+2), 2)) * time.Millisecond - if sleep > time.Second { - sleep = time.Second - } - time.Sleep(sleep) - tries++ - err := h.send(ctx, payload, payloadType, webhookCallTimeout) - if err == nil { - return + + for _, v := range entries { + select { + case currentGlobalBuffer <- v: + default: + } } - h.config.LogOnce(ctx, err, h.Endpoint()) - } - if tries == maxTries { - // Even with multiple retries, count failed messages as only one. - atomic.AddInt64(&h.failedMessages, 1) - } -} -func (h *Target) startHTTPLogger(ctx context.Context) { - atomic.AddInt64(&h.workers, 1) - defer atomic.AddInt64(&h.workers, -1) + if mainWorker { + drain: + for { + select { + case v, ok := <-h.logCh: + if !ok { + break drain + } - h.logChMu.RLock() - logCh := h.logCh - if logCh != nil { - // We are not allowed to add when logCh is nil - h.wg.Add(1) - defer h.wg.Done() - } - h.logChMu.RUnlock() - if logCh == nil { - return - } + currentGlobalBuffer <- v + default: + break drain + } + } + } + }() + + lastBatchProcess := time.Now() buf := bytebufferpool.Get() + enc := jsoniter.ConfigCompatibleWithStandardLibrary.NewEncoder(buf) defer bytebufferpool.Put(buf) - json := jsoniter.ConfigCompatibleWithStandardLibrary - enc := json.NewEncoder(buf) - batchSize := h.batchSize - if batchSize <= 0 { - batchSize = 1 - } + isDirQueue := h.config.QueueDir != "" + + // globalBuffer is always created or adjusted + // before this method is launched. 
+ logChLock.Lock() + globalBuffer := logChBuffers[name] + logChLock.Unlock() + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + var count int + for { + var ( + ok bool + entry any + ) + + if count < h.batchSize { + tickered := false + select { + case <-ticker.C: + tickered = true + case entry = <-globalBuffer: + case entry, ok = <-h.logCh: + if !ok { + return + } + case <-ctx.Done(): + return + } - payloadType := "application/json" - if batchSize > 1 { - payloadType = "" - } + if !tickered { + h.totalMessages.Add(1) + if !isDirQueue { + if err := enc.Encode(&entry); err != nil { + h.config.LogOnceIf( + ctx, + fmt.Errorf("unable to encode webhook log entry, err '%w' entry: %v\n", err, entry), + h.Name(), + ) + h.failedMessages.Add(1) + continue + } + } else { + entries = append(entries, entry) + } + count++ + } + + if len(h.logCh) > 0 || len(globalBuffer) > 0 || count == 0 { + // there is something in the log queue + // process it first, even if we tickered + // first, or we have not received any events + // yet, still wait on it. + continue + } + + // If we are doing batching, we should wait + // at least for a second, before sending. + // Even if there is nothing in the queue. + if h.batchSize > 1 && time.Since(lastBatchProcess) < time.Second { + continue + } + } + + // if we have reached the count send at once + // or we have crossed last second before batch was sent, send at once + lastBatchProcess = time.Now() + + var retries int + retryIntvl := h.config.RetryIntvl + if retryIntvl <= 0 { + retryIntvl = 3 * time.Second + } + + maxRetries := h.config.MaxRetry + + retry: + // If the channel reaches above half capacity + // we spawn more workers. The workers spawned + // from this main worker routine will exit + // once the channel drops below half capacity + // and when it's been at least 30 seconds since + // we launched a new worker. + if mainWorker && len(h.logCh) > cap(h.logCh)/2 { + nWorkers := h.workers.Load() + if nWorkers < h.maxWorkers { + if time.Since(h.lastStarted).Milliseconds() > 10 { + h.lastStarted = time.Now() + go h.startQueueProcessor(ctx, false) + } + } + } + + var err error + if !isDirQueue { + err = h.send(ctx, buf.Bytes(), count, h.payloadType, h.httpTimeout) + } else { + _, err = h.store.PutMultiple(entries) + } - var nevents int - // Send messages until channel is closed. - for entry := range logCh { - atomic.AddInt64(&h.totalMessages, 1) - nevents++ - if err := enc.Encode(&entry); err != nil { - atomic.AddInt64(&h.failedMessages, 1) - nevents-- - continue + if err != nil { + if errors.Is(err, context.Canceled) { + return + } + + h.config.LogOnceIf( + context.Background(), + fmt.Errorf("unable to send audit/log entry(s) to '%s' err '%w': %d", name, err, count), + name, + ) + + time.Sleep(retryIntvl) + if maxRetries == 0 { + goto retry + } + retries++ + if retries <= maxRetries { + goto retry + } } - if (nevents == batchSize || len(logCh) == 0) && buf.Len() > 0 { - h.logEntry(ctx, buf.Bytes(), payloadType) + + entries = make([]any, 0) + count = 0 + if !isDirQueue { buf.Reset() - nevents = 0 + } + + if !mainWorker && len(h.logCh) < cap(h.logCh)/2 { + if time.Since(h.lastStarted).Seconds() > 30 { + return + } + } + } +} + +// CreateOrAdjustGlobalBuffer will create or adjust the global log entry buffers +// which are used to migrate log entries between old and new targets. 
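
To make the retry semantics of `startQueueProcessor` above explicit, here is a simplified, standalone model (not the server code): a `max_retry` of 0, the default, retries a failed batch indefinitely, while a positive value allows one initial attempt plus that many retries before the batch is dropped.

```go
// Simplified model of the per-batch retry policy; interval corresponds to
// retry_interval and maxRetry to max_retry from the configuration above.
package main

import "time"

func sendWithRetry(send func() error, maxRetry int, interval time.Duration) bool {
	for attempt := 0; ; attempt++ {
		if send() == nil {
			return true // batch delivered
		}
		if maxRetry > 0 && attempt >= maxRetry {
			return false // retries exhausted, batch dropped and counted as failed
		}
		time.Sleep(interval)
	}
}

func main() {
	// One attempt plus up to three retries, three seconds apart.
	_ = sendWithRetry(func() error { return nil }, 3, 3*time.Second)
}
```
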
+func CreateOrAdjustGlobalBuffer(currentTgt *Target, newTgt *Target) { + logChLock.Lock() + defer logChLock.Unlock() + + requiredCap := currentTgt.config.QueueSize + (currentTgt.config.BatchSize * int(currentTgt.maxWorkers)) + currentCap := 0 + name := newTgt.Name() + + currentBuff, ok := logChBuffers[name] + if !ok { + logChBuffers[name] = make(chan any, requiredCap) + currentCap = requiredCap + } else { + currentCap = cap(currentBuff) + requiredCap += len(currentBuff) + } + + if requiredCap > currentCap { + logChBuffers[name] = make(chan any, requiredCap) + + if len(currentBuff) > 0 { + drain: + for { + select { + case v, ok := <-currentBuff: + if !ok { + break drain + } + logChBuffers[newTgt.Name()] <- v + default: + break drain + } + } } } } // New initializes a new logger target which // sends log over http to the specified endpoint -func New(config Config) *Target { +func New(config Config) (*Target, error) { + maxWorkers := maxWorkers + if config.BatchSize > 100 { + maxWorkers = maxWorkersWithBatchEvents + } else if config.BatchSize <= 0 { + config.BatchSize = 1 + } + h := &Target{ - logCh: make(chan interface{}, config.QueueSize), - config: config, - status: statusOffline, - batchSize: config.BatchSize, + logCh: make(chan any, config.QueueSize), + config: config, + batchSize: config.BatchSize, + maxWorkers: int64(maxWorkers), + httpTimeout: config.HTTPTimeout, + } + h.status.Store(statusOffline) + + if config.BatchSize > 1 { + h.payloadType = "" + } else { + h.payloadType = "application/json" } // If proxy available, set the same if h.config.Proxy != "" { proxyURL, _ := url.Parse(h.config.Proxy) transport := h.config.Transport - ctransport := transport.(*http.Transport).Clone() - ctransport.Proxy = http.ProxyURL(proxyURL) - h.config.Transport = ctransport + if tr, ok := transport.(*http.Transport); ok { + ctransport := tr.Clone() + ctransport.Proxy = http.ProxyURL(proxyURL) + h.config.Transport = ctransport + } } - h.client = &http.Client{Transport: h.config.Transport} - return h + h.client = &http.Client{Transport: h.config.Transport} + return h, nil } // SendFromStore - reads the log from store and sends it to webhook. func (h *Target) SendFromStore(key store.Key) (err error) { - var eventData interface{} - eventData, err = h.store.Get(key.Name) + var eventData []byte + eventData, err = h.store.GetRaw(key) if err != nil { if os.IsNotExist(err) { return nil } return err } - atomic.AddInt64(&h.totalMessages, 1) - logJSON, err := json.Marshal(&eventData) - if err != nil { - atomic.AddInt64(&h.failedMessages, 1) - return - } - if err := h.send(context.Background(), logJSON, "application/json", webhookCallTimeout); err != nil { - atomic.AddInt64(&h.failedMessages, 1) - if xnet.IsNetworkOrHostDown(err, true) { - return store.ErrNotConnected + + count := 1 + v := strings.Split(key.Name, ":") + if len(v) == 2 { + count, err = strconv.Atoi(v[0]) + if err != nil { + return err } + } + + if err := h.send(context.Background(), eventData, count, h.payloadType, h.httpTimeout); err != nil { return err } + // Delete the event from store. - return h.store.Del(key.Name) + return h.store.Del(key) } // Send the log message 'entry' to the http target. // Messages are queued in the disk if the store is enabled // If Cancel has been called the message is ignored. 
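
The two helpers above exist to hand a live queue over when a webhook is reconfigured. A rough sketch of how they could be combined follows; the ordering and the `replaceWebhookTarget` helper are assumptions for illustration, not code from the patch.

```go
// Assumed reconfiguration flow: size the shared hand-over buffer for the new
// target, let the old target forward late entries, then retire it.
package migration

import (
	"context"

	loghttp "github.com/minio/minio/internal/logger/target/http"
)

func replaceWebhookTarget(ctx context.Context, oldTgt *loghttp.Target, newCfg loghttp.Config) (*loghttp.Target, error) {
	newTgt, err := loghttp.New(newCfg)
	if err != nil {
		return nil, err
	}

	// Create or grow the in-memory buffer used to migrate queued entries.
	loghttp.CreateOrAdjustGlobalBuffer(oldTgt, newTgt)

	// Entries sent to the old target after it closes are redirected here.
	oldTgt.AssignMigrateTarget(newTgt)
	oldTgt.Cancel()

	return newTgt, newTgt.Init(ctx)
}
```

Once the old target's queue processor exits, its deferred drain pushes any remaining entries into the shared buffer that the new target's processor reads from, so in-flight events are not lost during the swap.
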
-func (h *Target) Send(ctx context.Context, entry interface{}) error { - if atomic.LoadInt32(&h.status) == statusClosed { +func (h *Target) Send(ctx context.Context, entry any) error { + if h.status.Load() == statusClosed { + if h.migrateTarget != nil { + return h.migrateTarget.Send(ctx, entry) + } return nil } - if h.store != nil { - // save the entry to the queue store which will be replayed to the target. - return h.store.Put(entry) - } + h.logChMu.RLock() defer h.logChMu.RUnlock() if h.logCh == nil { @@ -419,15 +594,9 @@ func (h *Target) Send(ctx context.Context, entry interface{}) error { return nil } - mworkers := maxWorkers - if h.batchSize > 100 { - mworkers = maxWorkersWithBatchEvents - } - -retry: select { case h.logCh <- entry: - atomic.AddInt64(&h.totalMessages, 1) + h.totalMessages.Add(1) case <-ctx.Done(): // return error only for context timedout. if errors.Is(ctx.Err(), context.DeadlineExceeded) { @@ -435,20 +604,8 @@ retry: } return nil default: - nWorkers := atomic.LoadInt64(&h.workers) - if nWorkers < int64(mworkers) { - // Only have one try to start at the same time. - h.workerStartMu.Lock() - if time.Since(h.lastStarted) > time.Second { - h.lastStarted = time.Now() - go h.startHTTPLogger(ctx) - } - h.workerStartMu.Unlock() - - goto retry - } - atomic.AddInt64(&h.totalMessages, 1) - atomic.AddInt64(&h.failedMessages, 1) + h.totalMessages.Add(1) + h.failedMessages.Add(1) return errors.New("log buffer full") } @@ -459,13 +616,11 @@ retry: // All queued messages are flushed and the function returns afterwards. // All messages sent to the target after this function has been called will be dropped. func (h *Target) Cancel() { - atomic.StoreInt32(&h.status, statusClosed) + h.status.Store(statusClosed) + h.storeCtxCancel() - // If queuestore is configured, cancel it's context to - // stop the replay go-routine. - if h.store != nil { - h.storeCtxCancel() - } + // Wait for messages to be sent... + h.wg.Wait() // Set logch to nil and close it. // This will block all Send operations, @@ -475,12 +630,4 @@ func (h *Target) Cancel() { xioutil.SafeClose(h.logCh) h.logCh = nil h.logChMu.Unlock() - - // Wait for messages to be sent... - h.wg.Wait() -} - -// Type - returns type of the target -func (h *Target) Type() types.TargetType { - return types.TargetHTTP } diff --git a/internal/logger/target/kafka/kafka.go b/internal/logger/target/kafka/kafka.go index 4edc29b7c9315..4720f85d29f62 100644 --- a/internal/logger/target/kafka/kafka.go +++ b/internal/logger/target/kafka/kafka.go @@ -24,6 +24,7 @@ import ( "encoding/json" "errors" "fmt" + "log" "os" "path/filepath" "sync" @@ -34,10 +35,10 @@ import ( saramatls "github.com/IBM/sarama/tools/tls" xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger/target/types" + types "github.com/minio/minio/internal/logger/target/loggertypes" "github.com/minio/minio/internal/once" "github.com/minio/minio/internal/store" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" ) // the suffix for the configured queue dir where the logs will be persisted. @@ -74,7 +75,7 @@ type Config struct { QueueDir string `json:"queueDir"` // Custom logger - LogOnce func(ctx context.Context, err error, id string, errKind ...interface{}) `json:"-"` + LogOnce func(ctx context.Context, err error, id string, errKind ...any) `json:"-"` } // Target - Kafka target. @@ -89,12 +90,12 @@ type Target struct { // Channel of log entries. 
// Reading logCh must hold read lock on logChMu (to avoid read race) // Sending a value on logCh must hold read lock on logChMu (to avoid closing) - logCh chan interface{} + logCh chan any logChMu sync.RWMutex // store to persist and replay the logs to the target // to avoid missing events when the target is down. - store store.Store[interface{}] + store store.Store[any] storeCtxCancel context.CancelFunc initKafkaOnce once.Init @@ -168,9 +169,8 @@ func (h *Target) Init(ctx context.Context) error { } func (h *Target) initQueueStore(ctx context.Context) (err error) { - var queueStore store.Store[interface{}] queueDir := filepath.Join(h.kconfig.QueueDir, h.Name()) - queueStore = store.NewQueueStore[interface{}](queueDir, uint64(h.kconfig.QueueSize), kafkaLoggerExtension) + queueStore := store.NewQueueStore[any](queueDir, uint64(h.kconfig.QueueSize), kafkaLoggerExtension) if err = queueStore.Open(); err != nil { return fmt.Errorf("unable to initialize the queue store of %s webhook: %w", h.Name(), err) } @@ -178,7 +178,7 @@ func (h *Target) initQueueStore(ctx context.Context) (err error) { h.store = queueStore h.storeCtxCancel = cancel store.StreamItems(h.store, h, ctx.Done(), h.kconfig.LogOnce) - return + return err } func (h *Target) startKafkaLogger() { @@ -188,7 +188,6 @@ func (h *Target) startKafkaLogger() { // We are not allowed to add when logCh is nil h.wg.Add(1) defer h.wg.Done() - } h.logChMu.RUnlock() @@ -203,7 +202,7 @@ func (h *Target) startKafkaLogger() { } } -func (h *Target) logEntry(entry interface{}) { +func (h *Target) logEntry(entry any) { atomic.AddInt64(&h.totalMessages, 1) if err := h.send(entry); err != nil { atomic.AddInt64(&h.failedMessages, 1) @@ -211,7 +210,7 @@ func (h *Target) logEntry(entry interface{}) { } } -func (h *Target) send(entry interface{}) error { +func (h *Target) send(entry any) error { if err := h.initKafkaOnce.Do(h.init); err != nil { return err } @@ -234,6 +233,10 @@ func (h *Target) send(entry interface{}) error { // Init initialize kafka target func (h *Target) init() error { + if os.Getenv("_MINIO_KAFKA_DEBUG") != "" { + sarama.DebugLogger = log.Default() + } + sconfig := sarama.NewConfig() if h.kconfig.Version != "" { kafkaVersion, err := sarama.ParseKafkaVersion(h.kconfig.Version) @@ -308,10 +311,11 @@ func (h *Target) IsOnline(_ context.Context) bool { } // Send log message 'e' to kafka target. -func (h *Target) Send(ctx context.Context, entry interface{}) error { +func (h *Target) Send(ctx context.Context, entry any) error { if h.store != nil { // save the entry to the queue store which will be replayed to the target. - return h.store.Put(entry) + _, err := h.store.Put(entry) + return err } h.logChMu.RLock() defer h.logChMu.RUnlock() @@ -340,7 +344,7 @@ func (h *Target) Send(ctx context.Context, entry interface{}) error { // SendFromStore - reads the log from store and sends it to kafka. func (h *Target) SendFromStore(key store.Key) (err error) { - auditEntry, err := h.store.Get(key.Name) + auditEntry, err := h.store.Get(key) if err != nil { if os.IsNotExist(err) { return nil @@ -351,10 +355,10 @@ func (h *Target) SendFromStore(key store.Key) (err error) { err = h.send(auditEntry) if err != nil { atomic.AddInt64(&h.failedMessages, 1) - return + return err } // Delete the event from store. 
-	return h.store.Del(key.Name)
+	return h.store.Del(key)
 }
 
 // Cancel - cancels the target
@@ -387,7 +391,7 @@ func (h *Target) Cancel() {
 // sends log over http to the specified endpoint
 func New(config Config) *Target {
 	target := &Target{
-		logCh:   make(chan interface{}, config.QueueSize),
+		logCh:   make(chan any, config.QueueSize),
 		kconfig: config,
 		status:  statusOffline,
 	}
diff --git a/internal/logger/target/kafka/kafka_scram_client_contrib.go b/internal/logger/target/kafka/kafka_scram_client_contrib.go
index 3e11bea3c3613..1c90d40f27860 100644
--- a/internal/logger/target/kafka/kafka_scram_client_contrib.go
+++ b/internal/logger/target/kafka/kafka_scram_client_contrib.go
@@ -63,11 +63,11 @@ type XDGSCRAMClient struct {
 // and authzID via the SASLprep algorithm, as recommended by RFC-5802. If
 // SASLprep fails, the method returns an error.
 func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) {
-	x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID)
+	x.Client, err = x.NewClient(userName, password, authzID)
 	if err != nil {
 		return err
 	}
-	x.ClientConversation = x.Client.NewConversation()
+	x.ClientConversation = x.NewConversation()
 	return nil
 }
 
@@ -78,7 +78,7 @@ func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) {
 // completes is also an error.
 func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) {
 	response, err = x.ClientConversation.Step(challenge)
-	return
+	return response, err
 }
 
 // Done returns true if the conversation is completed or has errored.
diff --git a/internal/logger/target/types/targettype_string.go b/internal/logger/target/loggertypes/targettype_string.go
similarity index 97%
rename from internal/logger/target/types/targettype_string.go
rename to internal/logger/target/loggertypes/targettype_string.go
index 6aa5e3974b970..715a9fef142aa 100644
--- a/internal/logger/target/types/targettype_string.go
+++ b/internal/logger/target/loggertypes/targettype_string.go
@@ -1,6 +1,6 @@
 // Code generated by "stringer -type=TargetType -trimprefix=Target types.go"; DO NOT EDIT.
 
-package types
+package loggertypes
 
 import "strconv"
 
diff --git a/internal/logger/target/types/types.go b/internal/logger/target/loggertypes/types.go
similarity index 98%
rename from internal/logger/target/types/types.go
rename to internal/logger/target/loggertypes/types.go
index d20b0cc027604..8d8e710412088 100644
--- a/internal/logger/target/types/types.go
+++ b/internal/logger/target/loggertypes/types.go
@@ -15,7 +15,7 @@
 // You should have received a copy of the GNU Affero General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-package types
+package loggertypes
 
 // TargetType indicates type of the target e.g.
console, http, kafka type TargetType uint8 diff --git a/internal/logger/target/testlogger/testlogger.go b/internal/logger/target/testlogger/testlogger.go index 35f5b3da65a68..68f0f82dd7ddb 100644 --- a/internal/logger/target/testlogger/testlogger.go +++ b/internal/logger/target/testlogger/testlogger.go @@ -34,9 +34,9 @@ import ( "sync/atomic" "testing" + "github.com/minio/madmin-go/v3/logger/log" "github.com/minio/minio/internal/logger" - "github.com/minio/minio/internal/logger/target/types" - "github.com/minio/pkg/v2/logger/message/log" + types "github.com/minio/minio/internal/logger/target/loggertypes" ) const ( @@ -113,7 +113,7 @@ func (t *testLogger) Cancel() { t.current.Store(nil) } -func (t *testLogger) Send(ctx context.Context, entry interface{}) error { +func (t *testLogger) Send(ctx context.Context, entry any) error { tb := t.current.Load() var logf func(format string, args ...any) if tb != nil { diff --git a/internal/logger/targets.go b/internal/logger/targets.go index 893e06664effe..774237e1dd3e8 100644 --- a/internal/logger/targets.go +++ b/internal/logger/targets.go @@ -25,7 +25,7 @@ import ( "github.com/minio/minio/internal/logger/target/http" "github.com/minio/minio/internal/logger/target/kafka" - "github.com/minio/minio/internal/logger/target/types" + types "github.com/minio/minio/internal/logger/target/loggertypes" ) // Target is the entity that we will receive @@ -39,18 +39,47 @@ type Target interface { Init(ctx context.Context) error IsOnline(ctx context.Context) bool Cancel() - Send(ctx context.Context, entry interface{}) error + Send(ctx context.Context, entry any) error Type() types.TargetType } +type targetsList struct { + list []Target + mu sync.RWMutex +} + +func newTargetsList() *targetsList { + return &targetsList{} +} + +func (tl *targetsList) get() []Target { + tl.mu.RLock() + defer tl.mu.RUnlock() + + return tl.list +} + +func (tl *targetsList) add(t Target) { + tl.mu.Lock() + defer tl.mu.Unlock() + + tl.list = append(tl.list, t) +} + +func (tl *targetsList) set(tgts []Target) { + tl.mu.Lock() + defer tl.mu.Unlock() + + tl.list = tgts +} + var ( - swapAuditMuRW sync.RWMutex - swapSystemMuRW sync.RWMutex // systemTargets is the set of enabled loggers. - // Must be immutable at all times. - // Can be swapped to another while holding swapMu - systemTargets = []Target{} + systemTargets = newTargetsList() + + // auditTargets is the list of enabled audit loggers + auditTargets = newTargetsList() // This is always set represent /dev/console target consoleTgt Target @@ -59,21 +88,13 @@ var ( // SystemTargets returns active targets. // Returned slice may not be modified in any way. func SystemTargets() []Target { - swapSystemMuRW.RLock() - defer swapSystemMuRW.RUnlock() - - res := systemTargets - return res + return systemTargets.get() } // AuditTargets returns active audit targets. // Returned slice may not be modified in any way. func AuditTargets() []Target { - swapAuditMuRW.RLock() - defer swapAuditMuRW.RUnlock() - - res := auditTargets - return res + return auditTargets.get() } // CurrentStats returns the current statistics. @@ -103,13 +124,6 @@ func CurrentStats() map[string]types.TargetStats { return res } -// auditTargets is the list of enabled audit loggers -// Must be immutable at all times. 
-// Can be swapped to another while holding swapMu -var ( - auditTargets = []Target{} -) - // AddSystemTarget adds a new logger target to the // list of enabled loggers func AddSystemTarget(ctx context.Context, t Target) error { @@ -117,38 +131,16 @@ func AddSystemTarget(ctx context.Context, t Target) error { return err } - swapSystemMuRW.Lock() - defer swapSystemMuRW.Unlock() - if consoleTgt == nil { if t.Type() == types.TargetConsole { consoleTgt = t } } - updated := append(make([]Target, 0, len(systemTargets)+1), systemTargets...) - updated = append(updated, t) - systemTargets = updated + systemTargets.add(t) return nil } -func initSystemTargets(ctx context.Context, cfgMap map[string]http.Config) ([]Target, []error) { - tgts := []Target{} - errs := []error{} - for _, l := range cfgMap { - if l.Enabled { - t := http.New(l) - tgts = append(tgts, t) - - e := t.Init(ctx) - if e != nil { - errs = append(errs, e) - } - } - } - return tgts, errs -} - func initKafkaTargets(ctx context.Context, cfgMap map[string]kafka.Config) ([]Target, []error) { tgts := []Target{} errs := []error{} @@ -178,41 +170,70 @@ func splitTargets(targets []Target, t types.TargetType) (group1 []Target, group2 group2 = append(group2, target) } } - return + return group1, group2 } func cancelTargets(targets []Target) { for _, target := range targets { - target.Cancel() + go target.Cancel() } } -// UpdateSystemTargets swaps targets with newly loaded ones from the cfg -func UpdateSystemTargets(ctx context.Context, cfg Config) []error { - newTgts, errs := initSystemTargets(ctx, cfg.HTTP) - - swapSystemMuRW.Lock() - consoleTargets, otherTargets := splitTargets(systemTargets, types.TargetConsole) - newTgts = append(newTgts, consoleTargets...) - systemTargets = newTgts - swapSystemMuRW.Unlock() +// UpdateHTTPWebhooks swaps system webhook targets with newly loaded ones from the cfg +func UpdateHTTPWebhooks(ctx context.Context, cfgs map[string]http.Config) (errs []error) { + return updateHTTPTargets(ctx, cfgs, systemTargets) +} - cancelTargets(otherTargets) // cancel running targets - return errs +// UpdateAuditWebhooks swaps audit webhook targets with newly loaded ones from the cfg +func UpdateAuditWebhooks(ctx context.Context, cfgs map[string]http.Config) (errs []error) { + return updateHTTPTargets(ctx, cfgs, auditTargets) } -// UpdateAuditWebhookTargets swaps audit webhook targets with newly loaded ones from the cfg -func UpdateAuditWebhookTargets(ctx context.Context, cfg Config) []error { - newWebhookTgts, errs := initSystemTargets(ctx, cfg.AuditWebhook) +func updateHTTPTargets(ctx context.Context, cfgs map[string]http.Config, targetsList *targetsList) (errs []error) { + tgts := make([]*http.Target, 0) + newWebhooks := make([]Target, 0) + for _, cfg := range cfgs { + if cfg.Enabled { + t, err := http.New(cfg) + if err != nil { + errs = append(errs, err) + } + tgts = append(tgts, t) + newWebhooks = append(newWebhooks, t) + } + } + + oldTargets, others := splitTargets(targetsList.get(), types.TargetHTTP) + newWebhooks = append(newWebhooks, others...) 
+ + for i := range oldTargets { + currentTgt, ok := oldTargets[i].(*http.Target) + if !ok { + continue + } + var newTgt *http.Target + + for ii := range tgts { + if currentTgt.Name() == tgts[ii].Name() { + newTgt = tgts[ii] + currentTgt.AssignMigrateTarget(newTgt) + http.CreateOrAdjustGlobalBuffer(currentTgt, newTgt) + break + } + } + } + + for _, t := range tgts { + err := t.Init(ctx) + if err != nil { + errs = append(errs, err) + } + } + + targetsList.set(newWebhooks) - swapAuditMuRW.Lock() - // Retain kafka targets - oldWebhookTgts, otherTgts := splitTargets(auditTargets, types.TargetHTTP) - newWebhookTgts = append(newWebhookTgts, otherTgts...) - auditTargets = newWebhookTgts - swapAuditMuRW.Unlock() + cancelTargets(oldTargets) - cancelTargets(oldWebhookTgts) // cancel running targets return errs } @@ -220,12 +241,10 @@ func UpdateAuditWebhookTargets(ctx context.Context, cfg Config) []error { func UpdateAuditKafkaTargets(ctx context.Context, cfg Config) []error { newKafkaTgts, errs := initKafkaTargets(ctx, cfg.AuditKafka) - swapAuditMuRW.Lock() // Retain webhook targets - oldKafkaTgts, otherTgts := splitTargets(auditTargets, types.TargetKafka) + oldKafkaTgts, otherTgts := splitTargets(auditTargets.get(), types.TargetKafka) newKafkaTgts = append(newKafkaTgts, otherTgts...) - auditTargets = newKafkaTgts - swapAuditMuRW.Unlock() + auditTargets.set(newKafkaTgts) cancelTargets(oldKafkaTgts) // cancel running targets return errs diff --git a/internal/logger/utils.go b/internal/logger/utils.go index b0fdfdd2252db..0a10949cde824 100644 --- a/internal/logger/utils.go +++ b/internal/logger/utils.go @@ -31,7 +31,7 @@ import ( var ansiRE = regexp.MustCompile("(\x1b[^m]*m)") // Print ANSI Control escape -func ansiEscape(format string, args ...interface{}) { +func ansiEscape(format string, args ...any) { Esc := "\x1b" fmt.Printf("%s%s", Esc, fmt.Sprintf(format, args...)) } diff --git a/internal/lsync/lrwmutex_test.go b/internal/lsync/lrwmutex_test.go index 39bcebc4c462f..076c52ade0139 100644 --- a/internal/lsync/lrwmutex_test.go +++ b/internal/lsync/lrwmutex_test.go @@ -30,7 +30,7 @@ import ( ) func testSimpleWriteLock(t *testing.T, duration time.Duration) (locked bool) { - ctx := context.Background() + ctx := t.Context() lrwm := NewLRWMutex() if !lrwm.GetRLock(ctx, "", "object1", time.Second) { @@ -65,7 +65,7 @@ func testSimpleWriteLock(t *testing.T, duration time.Duration) (locked bool) { } else { t.Log("Write lock failed due to timeout") } - return + return locked } func TestSimpleWriteLockAcquired(t *testing.T) { @@ -87,7 +87,7 @@ func TestSimpleWriteLockTimedOut(t *testing.T) { } func testDualWriteLock(t *testing.T, duration time.Duration) (locked bool) { - ctx := context.Background() + ctx := t.Context() lrwm := NewLRWMutex() // fmt.Println("Getting initial write lock") @@ -111,7 +111,7 @@ func testDualWriteLock(t *testing.T, duration time.Duration) (locked bool) { } else { t.Log("2nd write lock failed due to timeout") } - return + return locked } func TestDualWriteLockAcquired(t *testing.T) { @@ -152,18 +152,18 @@ func doTestParallelReaders(numReaders, gomaxprocs int) { clocked := make(chan bool) cunlock := make(chan bool) cdone := make(chan bool) - for i := 0; i < numReaders; i++ { + for range numReaders { go parallelReader(context.Background(), m, clocked, cunlock, cdone) } // Wait for all parallel RLock()s to succeed. 
- for i := 0; i < numReaders; i++ { + for range numReaders { <-clocked } - for i := 0; i < numReaders; i++ { + for range numReaders { cunlock <- true } // Wait for the goroutines to finish. - for i := 0; i < numReaders; i++ { + for range numReaders { <-cdone } } @@ -178,13 +178,13 @@ func TestParallelReaders(t *testing.T) { // Borrowed from rwmutex_test.go func reader(rwm *LRWMutex, numIterations int, activity *int32, cdone chan bool) { - for i := 0; i < numIterations; i++ { + for range numIterations { if rwm.GetRLock(context.Background(), "", "", time.Second) { n := atomic.AddInt32(activity, 1) if n < 1 || n >= 10000 { panic(fmt.Sprintf("wlock(%d)\n", n)) } - for i := 0; i < 100; i++ { + for range 100 { } atomic.AddInt32(activity, -1) rwm.RUnlock() @@ -195,13 +195,13 @@ func reader(rwm *LRWMutex, numIterations int, activity *int32, cdone chan bool) // Borrowed from rwmutex_test.go func writer(rwm *LRWMutex, numIterations int, activity *int32, cdone chan bool) { - for i := 0; i < numIterations; i++ { + for range numIterations { if rwm.GetLock(context.Background(), "", "", time.Second) { n := atomic.AddInt32(activity, 10000) if n != 10000 { panic(fmt.Sprintf("wlock(%d)\n", n)) } - for i := 0; i < 100; i++ { + for range 100 { } atomic.AddInt32(activity, -10000) rwm.Unlock() @@ -260,7 +260,7 @@ func TestDRLocker(t *testing.T) { rl = wl.DRLocker() n := 10 go func() { - for i := 0; i < n; i++ { + for range n { rl.Lock() rl.Lock() rlocked <- true @@ -268,7 +268,7 @@ func TestDRLocker(t *testing.T) { wlocked <- true } }() - for i := 0; i < n; i++ { + for range n { <-rlocked rl.Unlock() select { diff --git a/internal/mountinfo/mountinfo_linux.go b/internal/mountinfo/mountinfo_linux.go index 4217a81a3cdd3..f489dca9e296a 100644 --- a/internal/mountinfo/mountinfo_linux.go +++ b/internal/mountinfo/mountinfo_linux.go @@ -56,13 +56,13 @@ func IsLikelyMountPoint(path string) bool { } // If the directory has a different device as parent, then it is a mountpoint. - if s1.Sys().(*syscall.Stat_t).Dev != s2.Sys().(*syscall.Stat_t).Dev { - // path/.. on a different device as path - return true - } - - // path/.. is the same i-node as path - this check is for bind mounts. - return s1.Sys().(*syscall.Stat_t).Ino == s2.Sys().(*syscall.Stat_t).Ino + ss1, ok1 := s1.Sys().(*syscall.Stat_t) + ss2, ok2 := s2.Sys().(*syscall.Stat_t) + return ok1 && ok2 && + // path/.. on a different device as path + (ss1.Dev != ss2.Dev || + // path/.. is the same i-node as path - this check is for bind mounts. + ss1.Ino == ss2.Ino) } // CheckCrossDevice - check if any list of paths has any sub-mounts at /proc/mounts. diff --git a/internal/mountinfo/mountinfo_windows.go b/internal/mountinfo/mountinfo_windows.go index 244da9aa924c8..a40a70d206e09 100644 --- a/internal/mountinfo/mountinfo_windows.go +++ b/internal/mountinfo/mountinfo_windows.go @@ -40,7 +40,9 @@ var mountPointCache sync.Map func IsLikelyMountPoint(path string) bool { path = filepath.Dir(path) if v, ok := mountPointCache.Load(path); ok { - return v.(bool) + if b, ok := v.(bool); ok { + return b + } } wpath, _ := windows.UTF16PtrFromString(path) wvolume := make([]uint16, len(path)+1) diff --git a/internal/once/singleton.go b/internal/once/singleton.go new file mode 100644 index 0000000000000..7d7f304504595 --- /dev/null +++ b/internal/once/singleton.go @@ -0,0 +1,46 @@ +package once + +// Singleton contains a pointer to T that must be set once. +// Until the value is set all Get() calls will block. 
+type Singleton[T any] struct { + v *T + set chan struct{} +} + +// NewSingleton creates a new unset singleton. +func NewSingleton[T any]() *Singleton[T] { + return &Singleton[T]{set: make(chan struct{}), v: nil} +} + +// Get will return the singleton value. +func (s *Singleton[T]) Get() *T { + <-s.set + return s.v +} + +// GetNonBlocking will return the singleton value or nil if not set yet. +func (s *Singleton[T]) GetNonBlocking() *T { + select { + case <-s.set: + return s.v + default: + return nil + } +} + +// IsSet will return whether the singleton has been set. +func (s *Singleton[T]) IsSet() bool { + select { + case <-s.set: + return true + default: + return false + } +} + +// Set the value and unblock all Get requests. +// This may only be called once, a second call will panic. +func (s *Singleton[T]) Set(v *T) { + s.v = v + close(s.set) +} diff --git a/internal/pubsub/pubsub_test.go b/internal/pubsub/pubsub_test.go index be5e9325df80b..c1ed6fb3fff18 100644 --- a/internal/pubsub/pubsub_test.go +++ b/internal/pubsub/pubsub_test.go @@ -18,7 +18,6 @@ package pubsub import ( - "fmt" "testing" "time" ) @@ -138,7 +137,7 @@ func TestPubSub(t *testing.T) { ps.Publish(val) msg := <-ch1 if msg != val { - t.Fatalf(fmt.Sprintf("expected %s , found %s", val, msg)) + t.Fatalf("expected %s , found %s", val, msg) } } @@ -160,7 +159,7 @@ func TestMultiPubSub(t *testing.T) { msg1 := <-ch1 msg2 := <-ch2 if msg1 != val && msg2 != val { - t.Fatalf(fmt.Sprintf("expected both subscribers to have%s , found %s and %s", val, msg1, msg2)) + t.Fatalf("expected both subscribers to have%s , found %s and %s", val, msg1, msg2) } } @@ -189,12 +188,12 @@ func TestMultiPubSubMask(t *testing.T) { msg1 := <-ch1 msg2 := <-ch2 if msg1 != val && msg2 != val { - t.Fatalf(fmt.Sprintf("expected both subscribers to have%s , found %s and %s", val, msg1, msg2)) + t.Fatalf("expected both subscribers to have%s , found %s and %s", val, msg1, msg2) } select { case msg := <-ch3: - t.Fatalf(fmt.Sprintf("unexpected msg, f got %s", msg)) + t.Fatalf("unexpected msg, f got %s", msg) default: } } diff --git a/internal/rest/client.go b/internal/rest/client.go index 1fb6d4b323b55..e115bdc57de5a 100644 --- a/internal/rest/client.go +++ b/internal/rest/client.go @@ -28,6 +28,7 @@ import ( "net/http/httputil" "net/url" "path" + "strconv" "strings" "sync" "sync/atomic" @@ -36,9 +37,11 @@ import ( xhttp "github.com/minio/minio/internal/http" "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/mcontext" - xnet "github.com/minio/pkg/v2/net" + xnet "github.com/minio/pkg/v3/net" ) +const logSubsys = "internodes" + // DefaultTimeout - default REST timeout is 10 seconds. const DefaultTimeout = 10 * time.Second @@ -93,9 +96,9 @@ type Client struct { // TraceOutput will print debug information on non-200 calls if set. TraceOutput io.Writer // Debug trace output - httpClient *http.Client - url *url.URL - newAuthToken func(audience string) string + httpClient *http.Client + url *url.URL + auth func() string sync.RWMutex // mutex for lastErr lastErr error @@ -126,13 +129,13 @@ func removeEmptyPort(host string) string { } // Copied from http.NewRequest but implemented to ensure we reuse `url.URL` instance. 
-func (c *Client) newRequest(ctx context.Context, u url.URL, body io.Reader) (*http.Request, error) { +func (c *Client) newRequest(ctx context.Context, method string, u url.URL, body io.Reader) (*http.Request, error) { rc, ok := body.(io.ReadCloser) if !ok && body != nil { rc = io.NopCloser(body) } req := &http.Request{ - Method: http.MethodPost, + Method: method, URL: &u, Proto: "HTTP/1.1", ProtoMajor: 1, @@ -186,10 +189,10 @@ func (c *Client) newRequest(ctx context.Context, u url.URL, body io.Reader) (*ht } } - if c.newAuthToken != nil { - req.Header.Set("Authorization", "Bearer "+c.newAuthToken(u.RawQuery)) + if c.auth != nil { + req.Header.Set("Authorization", "Bearer "+c.auth()) } - req.Header.Set("X-Minio-Time", time.Now().UTC().Format(time.RFC3339)) + req.Header.Set("X-Minio-Time", strconv.FormatInt(time.Now().UnixNano(), 10)) if tc, ok := ctx.Value(mcontext.ContextTraceKey).(*mcontext.TraceCtxt); ok { req.Header.Set(xhttp.AmzRequestID, tc.AmzReqID) @@ -207,13 +210,13 @@ type respBodyMonitor struct { func (r *respBodyMonitor) Read(p []byte) (n int, err error) { n, err = r.ReadCloser.Read(p) r.errorStatus(err) - return + return n, err } func (r *respBodyMonitor) Close() (err error) { err = r.ReadCloser.Close() r.errorStatus(err) - return + return err } func (r *respBodyMonitor) errorStatus(err error) { @@ -281,22 +284,32 @@ func (c *Client) dumpHTTP(req *http.Request, resp *http.Response) { } // Returns success. - return } -// Call - make a REST call with context. -func (c *Client) Call(ctx context.Context, method string, values url.Values, body io.Reader, length int64) (reply io.ReadCloser, err error) { - if !c.IsOnline() { +// ErrClientClosed returned when *Client is closed. +var ErrClientClosed = errors.New("rest client is closed") + +// CallWithHTTPMethod - make a REST call with context, using a custom HTTP method. +func (c *Client) CallWithHTTPMethod(ctx context.Context, httpMethod, rpcMethod string, values url.Values, body io.Reader, length int64) (reply io.ReadCloser, err error) { + switch atomic.LoadInt32(&c.connected) { + case closed: + // client closed, this is usually a manual process + // so return a local error as client is closed + return nil, &NetworkError{Err: ErrClientClosed} + case offline: + // client offline, return last error captured. return nil, &NetworkError{Err: c.LastError()} } + // client is still connected, attempt the request. + // Shallow copy. We don't modify the *UserInfo, if set. // All other fields are copied. u := *c.url - u.Path = path.Join(u.Path, method) + u.Path = path.Join(u.Path, rpcMethod) u.RawQuery = values.Encode() - req, err := c.newRequest(ctx, u, body) + req, err := c.newRequest(ctx, httpMethod, u, body) if err != nil { return nil, &NetworkError{Err: err} } @@ -316,7 +329,7 @@ func (c *Client) Call(ctx context.Context, method string, values url.Values, bod atomic.AddUint64(&globalStats.errs, 1) } if c.MarkOffline(err) { - logger.LogOnceIf(ctx, fmt.Errorf("Marking %s offline temporarily; caused by %w", c.url.Host, err), c.url.Host) + logger.LogOnceIf(ctx, logSubsys, fmt.Errorf("Marking %s offline temporarily; caused by %w", c.url.Host, err), c.url.Host) } } return nil, &NetworkError{err} @@ -340,7 +353,7 @@ func (c *Client) Call(ctx context.Context, method string, values url.Values, bod // instead, see cmd/storage-rest-server.go for ideas. 
if c.HealthCheckFn != nil && resp.StatusCode == http.StatusPreconditionFailed { err = fmt.Errorf("Marking %s offline temporarily; caused by PreconditionFailed with drive ID mismatch", c.url.Host) - logger.LogOnceIf(ctx, err, c.url.Host) + logger.LogOnceIf(ctx, logSubsys, err, c.url.Host) c.MarkOffline(err) } defer xhttp.DrainBody(resp.Body) @@ -352,7 +365,7 @@ func (c *Client) Call(ctx context.Context, method string, values url.Values, bod atomic.AddUint64(&globalStats.errs, 1) } if c.MarkOffline(err) { - logger.LogOnceIf(ctx, fmt.Errorf("Marking %s offline temporarily; caused by %w", c.url.Host, err), c.url.Host) + logger.LogOnceIf(ctx, logSubsys, fmt.Errorf("Marking %s offline temporarily; caused by %w", c.url.Host, err), c.url.Host) } } return nil, err @@ -368,13 +381,18 @@ func (c *Client) Call(ctx context.Context, method string, values url.Values, bod return resp.Body, nil } +// Call - make a REST call with context. +func (c *Client) Call(ctx context.Context, rpcMethod string, values url.Values, body io.Reader, length int64) (reply io.ReadCloser, err error) { + return c.CallWithHTTPMethod(ctx, http.MethodPost, rpcMethod, values, body, length) +} + // Close closes all idle connections of the underlying http client func (c *Client) Close() { atomic.StoreInt32(&c.connected, closed) } // NewClient - returns new REST client. -func NewClient(uu *url.URL, tr http.RoundTripper, newAuthToken func(aud string) string) *Client { +func NewClient(uu *url.URL, tr http.RoundTripper, auth func() string) *Client { connected := int32(online) urlStr := uu.String() u, err := url.Parse(urlStr) @@ -391,15 +409,18 @@ func NewClient(uu *url.URL, tr http.RoundTripper, newAuthToken func(aud string) clnt := &Client{ httpClient: &http.Client{Transport: tr}, url: u, - lastErr: err, - lastErrTime: time.Now(), - newAuthToken: newAuthToken, + auth: auth, connected: connected, lastConn: time.Now().UnixNano(), MaxErrResponseSize: 4096, HealthCheckReconnectUnit: 200 * time.Millisecond, HealthCheckTimeout: time.Second, } + if err != nil { + clnt.lastErr = err + clnt.lastErrTime = time.Now() + } + if clnt.HealthCheckFn != nil { // make connection pre-emptively. 
go clnt.HealthCheckFn() @@ -426,7 +447,7 @@ func (c *Client) LastError() error { // computes the exponential backoff duration according to // https://www.awsarchitectureblog.com/2015/03/backoff.html -func exponentialBackoffWait(r *rand.Rand, unit, cap time.Duration) func(uint) time.Duration { +func exponentialBackoffWait(r *rand.Rand, unit, maxSleep time.Duration) func(uint) time.Duration { if unit > time.Hour { // Protect against integer overflow panic("unit cannot exceed one hour") @@ -437,10 +458,7 @@ func exponentialBackoffWait(r *rand.Rand, unit, cap time.Duration) func(uint) ti attempt = 16 } // sleep = random_between(unit, min(cap, base * 2 ** attempt)) - sleep := unit * time.Duration(1< cap { - sleep = cap - } + sleep := min(unit*time.Duration(1< 0 { s.DialAvgDuration = atomic.LoadUint64(&globalStats.tcpDialTotalDur) / v + s.TTFBAvgDuration = atomic.LoadUint64(&globalStats.tcpTimeForFirstByteTotalDur) / v } return s } @@ -55,8 +58,11 @@ func GetRPCStats() RPCStats { // Return a function which update the global stats related to tcp connections func setupReqStatsUpdate(req *http.Request) (*http.Request, func()) { var dialStart, dialEnd int64 - + start := time.Now() trace := &httptrace.ClientTrace{ + GotFirstResponseByte: func() { + atomic.AddUint64(&globalStats.tcpTimeForFirstByteTotalDur, uint64(time.Since(start))) + }, ConnectStart: func(network, addr string) { atomic.StoreInt64(&dialStart, time.Now().UnixNano()) }, diff --git a/internal/ringbuffer/LICENSE b/internal/ringbuffer/LICENSE new file mode 100644 index 0000000000000..c4852bb45969a --- /dev/null +++ b/internal/ringbuffer/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 smallnest + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
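Editor's note (not part of the patch): the two new files that follow vendor the smallnest/ringbuffer package as `internal/ringbuffer`. Its blocking mode behaves like a buffered `io.Pipe` between a writer and a reader goroutine. The sketch below is only an orientation for reviewers; it assumes the API exactly as added in the files that follow and the `github.com/minio/minio/internal/ringbuffer` import path, which is importable only from inside the minio module.

```go
package main

import (
	"fmt"
	"io"

	"github.com/minio/minio/internal/ringbuffer"
)

func main() {
	// Blocking mode turns the ring buffer into a buffered pipe:
	// reads wait for data, writes wait for free space.
	rb := ringbuffer.New(4 << 10).SetBlocking(true)

	// Writer side: produce a few chunks, then close the write side so
	// the reader observes io.EOF once the buffer is drained.
	go func() {
		for i := range 4 {
			fmt.Fprintf(rb, "chunk-%d\n", i)
		}
		rb.CloseWriter()
	}()

	// Reader side: blocking reads until EOF.
	buf := make([]byte, 64)
	for {
		n, err := rb.Read(buf)
		fmt.Print(string(buf[:n]))
		if err == io.EOF {
			return
		}
		if err != nil {
			panic(err)
		}
	}
}
```

In non-blocking mode (the default) the same `Read`/`Write` calls return `ErrIsEmpty`/`ErrIsFull` immediately instead of waiting.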
diff --git a/internal/ringbuffer/README.md b/internal/ringbuffer/README.md
new file mode 100644
index 0000000000000..83266952a5109
--- /dev/null
+++ b/internal/ringbuffer/README.md
@@ -0,0 +1,60 @@
+# ringbuffer
+
+[![License](https://img.shields.io/:license-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![GoDoc](https://godoc.org/github.com/smallnest/ringbuffer?status.png)](http://godoc.org/github.com/smallnest/ringbuffer) [![Go Report Card](https://goreportcard.com/badge/github.com/smallnest/ringbuffer)](https://goreportcard.com/report/github.com/smallnest/ringbuffer) [![coveralls](https://coveralls.io/repos/smallnest/ringbuffer/badge.svg?branch=master&service=github)](https://coveralls.io/github/smallnest/ringbuffer?branch=master)
+
+A circular buffer (ring buffer) in Go, implementing the io.ReadWriter interface
+
+[![wikipedia](Circular_Buffer_Animation.gif)](https://github.com/smallnest/ringbuffer)
+
+# Usage
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/smallnest/ringbuffer"
+)
+
+func main() {
+	rb := ringbuffer.New(1024)
+
+	// write
+	rb.Write([]byte("abcd"))
+	fmt.Println(rb.Length())
+	fmt.Println(rb.Free())
+
+	// read
+	buf := make([]byte, 4)
+	rb.Read(buf)
+	fmt.Println(string(buf))
+}
+```
+
+It is possible to use an existing buffer by replacing `New` with `NewBuffer`.
+
+
+# Blocking vs Non-blocking
+
+The default behavior of the ring buffer is non-blocking,
+meaning that reads and writes will return immediately with an error if the operation cannot be completed.
+If you want to block when reading or writing, you must enable it:
+
+```go
+	rb := ringbuffer.New(1024).SetBlocking(true)
+```
+
+Enabling blocking will cause the ring buffer to behave like a buffered [io.Pipe](https://pkg.go.dev/io#Pipe).
+
+Regular Reads will block until data is available, but not wait for a full buffer.
+Writes will block until there is space available and writes bigger than the buffer will wait for reads to make space.
+
+`TryRead` and `TryWrite` are still available for non-blocking reads and writes.
+
+To signify the end of the stream, close the ring buffer from the writer side with `rb.CloseWriter()`.
+
+Either side can use `rb.CloseWithError(err error)` to signal an error and close the ring buffer.
+Any reads or writes will return the error on the next call.
+
+In blocking mode errors are stateful and the same error will be returned until `rb.Reset()` is called.
\ No newline at end of file
diff --git a/internal/ringbuffer/ring_buffer.go b/internal/ringbuffer/ring_buffer.go
new file mode 100644
index 0000000000000..b8472af5cc097
--- /dev/null
+++ b/internal/ringbuffer/ring_buffer.go
@@ -0,0 +1,610 @@
+// Copyright 2019 smallnest. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package ringbuffer
+
+import (
+	"context"
+	"errors"
+	"io"
+	"sync"
+	"unsafe"
+)
+
+var (
+	// ErrTooMuchDataToWrite is returned when the data to write is more than the buffer size.
+	ErrTooMuchDataToWrite = errors.New("too much data to write")
+
+	// ErrIsFull is returned when the buffer is full and not blocking.
+	ErrIsFull = errors.New("ringbuffer is full")
+
+	// ErrIsEmpty is returned when the buffer is empty and not blocking.
+	ErrIsEmpty = errors.New("ringbuffer is empty")
+
+	// ErrIsNotEmpty is returned when the buffer is not empty and not blocking.
+	ErrIsNotEmpty = errors.New("ringbuffer is not empty")
+
+	// ErrAcquireLock is returned when the lock is not acquired on Try operations.
+ ErrAcquireLock = errors.New("unable to acquire lock") + + // ErrWriteOnClosed is returned when write on a closed ringbuffer. + ErrWriteOnClosed = errors.New("write on closed ringbuffer") +) + +// RingBuffer is a circular buffer that implement io.ReaderWriter interface. +// It operates like a buffered pipe, where data written to a RingBuffer +// and can be read back from another goroutine. +// It is safe to concurrently read and write RingBuffer. +type RingBuffer struct { + buf []byte + size int + r int // next position to read + w int // next position to write + isFull bool + err error + block bool + mu sync.Mutex + wg sync.WaitGroup + readCond *sync.Cond // Signaled when data has been read. + writeCond *sync.Cond // Signaled when data has been written. +} + +// New returns a new RingBuffer whose buffer has the given size. +func New(size int) *RingBuffer { + return &RingBuffer{ + buf: make([]byte, size), + size: size, + } +} + +// NewBuffer returns a new RingBuffer whose buffer is provided. +func NewBuffer(b []byte) *RingBuffer { + return &RingBuffer{ + buf: b, + size: len(b), + } +} + +// SetBlocking sets the blocking mode of the ring buffer. +// If block is true, Read and Write will block when there is no data to read or no space to write. +// If block is false, Read and Write will return ErrIsEmpty or ErrIsFull immediately. +// By default, the ring buffer is not blocking. +// This setting should be called before any Read or Write operation or after a Reset. +func (r *RingBuffer) SetBlocking(block bool) *RingBuffer { + r.block = block + if block { + r.readCond = sync.NewCond(&r.mu) + r.writeCond = sync.NewCond(&r.mu) + } + return r +} + +// WithCancel sets a context to cancel the ring buffer. +// When the context is canceled, the ring buffer will be closed with the context error. +// A goroutine will be started and run until the provided context is canceled. +func (r *RingBuffer) WithCancel(ctx context.Context) *RingBuffer { + go func() { + <-ctx.Done() + r.CloseWithError(ctx.Err()) + }() + return r +} + +func (r *RingBuffer) setErr(err error, locked bool) error { + if !locked { + r.mu.Lock() + defer r.mu.Unlock() + } + if r.err != nil && r.err != io.EOF { + return r.err + } + + switch err { + // Internal errors are transient + case nil, ErrIsEmpty, ErrIsFull, ErrAcquireLock, ErrTooMuchDataToWrite, ErrIsNotEmpty: + return err + default: + r.err = err + if r.block { + r.readCond.Broadcast() + r.writeCond.Broadcast() + } + } + return err +} + +func (r *RingBuffer) readErr(locked bool) error { + if !locked { + r.mu.Lock() + defer r.mu.Unlock() + } + if r.err != nil { + if r.err == io.EOF { + if r.w == r.r && !r.isFull { + return io.EOF + } + return nil + } + return r.err + } + return nil +} + +// Read reads up to len(p) bytes into p. It returns the number of bytes read (0 <= n <= len(p)) and any error encountered. +// Even if Read returns n < len(p), it may use all of p as scratch space during the call. +// If some data is available but not len(p) bytes, Read conventionally returns what is available instead of waiting for more. +// When Read encounters an error or end-of-file condition after successfully reading n > 0 bytes, it returns the number of bytes read. +// It may return the (non-nil) error from the same call or return the error (and n == 0) from a subsequent call. +// Callers should always process the n > 0 bytes returned before considering the error err. +// Doing so correctly handles I/O errors that happen after reading some bytes and also both of the allowed EOF behaviors. 
+func (r *RingBuffer) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, r.readErr(false) + } + + r.mu.Lock() + defer r.mu.Unlock() + if err := r.readErr(true); err != nil { + return 0, err + } + + r.wg.Add(1) + defer r.wg.Done() + n, err = r.read(p) + for err == ErrIsEmpty && r.block { + r.writeCond.Wait() + if err = r.readErr(true); err != nil { + break + } + n, err = r.read(p) + } + if r.block && n > 0 { + r.readCond.Broadcast() + } + return n, err +} + +// TryRead read up to len(p) bytes into p like Read but it is not blocking. +// If it has not succeeded to acquire the lock, it return 0 as n and ErrAcquireLock. +func (r *RingBuffer) TryRead(p []byte) (n int, err error) { + ok := r.mu.TryLock() + if !ok { + return 0, ErrAcquireLock + } + defer r.mu.Unlock() + if err := r.readErr(true); err != nil { + return 0, err + } + if len(p) == 0 { + return 0, r.readErr(true) + } + + n, err = r.read(p) + if r.block && n > 0 { + r.readCond.Broadcast() + } + return n, err +} + +func (r *RingBuffer) read(p []byte) (n int, err error) { + if r.w == r.r && !r.isFull { + return 0, ErrIsEmpty + } + + if r.w > r.r { + n = min(r.w-r.r, len(p)) + copy(p, r.buf[r.r:r.r+n]) + r.r = (r.r + n) % r.size + return n, err + } + + n = min(r.size-r.r+r.w, len(p)) + + if r.r+n <= r.size { + copy(p, r.buf[r.r:r.r+n]) + } else { + c1 := r.size - r.r + copy(p, r.buf[r.r:r.size]) + c2 := n - c1 + copy(p[c1:], r.buf[0:c2]) + } + r.r = (r.r + n) % r.size + + r.isFull = false + + return n, r.readErr(true) +} + +// ReadByte reads and returns the next byte from the input or ErrIsEmpty. +func (r *RingBuffer) ReadByte() (b byte, err error) { + r.mu.Lock() + defer r.mu.Unlock() + if err = r.readErr(true); err != nil { + return 0, err + } + for r.w == r.r && !r.isFull { + if r.block { + r.writeCond.Wait() + err = r.readErr(true) + if err != nil { + return 0, err + } + continue + } + return 0, ErrIsEmpty + } + b = r.buf[r.r] + r.r++ + if r.r == r.size { + r.r = 0 + } + + r.isFull = false + return b, r.readErr(true) +} + +// Write writes len(p) bytes from p to the underlying buf. +// It returns the number of bytes written from p (0 <= n <= len(p)) +// and any error encountered that caused the write to stop early. +// If blocking n < len(p) will be returned only if an error occurred. +// Write returns a non-nil error if it returns n < len(p). +// Write will not modify the slice data, even temporarily. +func (r *RingBuffer) Write(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, r.setErr(nil, false) + } + r.mu.Lock() + defer r.mu.Unlock() + if err := r.err; err != nil { + if err == io.EOF { + err = ErrWriteOnClosed + } + return 0, err + } + wrote := 0 + for len(p) > 0 { + n, err = r.write(p) + wrote += n + if !r.block || err == nil { + break + } + err = r.setErr(err, true) + if r.block && (err == ErrIsFull || err == ErrTooMuchDataToWrite) { + r.writeCond.Broadcast() + r.readCond.Wait() + p = p[n:] + err = nil + continue + } + break + } + if r.block && wrote > 0 { + r.writeCond.Broadcast() + } + + return wrote, r.setErr(err, true) +} + +// TryWrite writes len(p) bytes from p to the underlying buf like Write, but it is not blocking. +// If it has not succeeded to acquire the lock, it return 0 as n and ErrAcquireLock. 
+func (r *RingBuffer) TryWrite(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, r.setErr(nil, false) + } + ok := r.mu.TryLock() + if !ok { + return 0, ErrAcquireLock + } + defer r.mu.Unlock() + if err := r.err; err != nil { + if err == io.EOF { + err = ErrWriteOnClosed + } + return 0, err + } + + n, err = r.write(p) + if r.block && n > 0 { + r.writeCond.Broadcast() + } + return n, r.setErr(err, true) +} + +func (r *RingBuffer) write(p []byte) (n int, err error) { + if r.isFull { + return 0, ErrIsFull + } + + var avail int + if r.w >= r.r { + avail = r.size - r.w + r.r + } else { + avail = r.r - r.w + } + + if len(p) > avail { + err = ErrTooMuchDataToWrite + p = p[:avail] + } + n = len(p) + + if r.w >= r.r { + c1 := r.size - r.w + if c1 >= n { + copy(r.buf[r.w:], p) + r.w += n + } else { + copy(r.buf[r.w:], p[:c1]) + c2 := n - c1 + copy(r.buf[0:], p[c1:]) + r.w = c2 + } + } else { + copy(r.buf[r.w:], p) + r.w += n + } + + if r.w == r.size { + r.w = 0 + } + if r.w == r.r { + r.isFull = true + } + + return n, err +} + +// WriteByte writes one byte into buffer, and returns ErrIsFull if buffer is full. +func (r *RingBuffer) WriteByte(c byte) error { + r.mu.Lock() + defer r.mu.Unlock() + if err := r.err; err != nil { + if err == io.EOF { + err = ErrWriteOnClosed + } + return err + } + err := r.writeByte(c) + for err == ErrIsFull && r.block { + r.readCond.Wait() + err = r.setErr(r.writeByte(c), true) + } + if r.block && err == nil { + r.writeCond.Broadcast() + } + return err +} + +// TryWriteByte writes one byte into buffer without blocking. +// If it has not succeeded to acquire the lock, it return ErrAcquireLock. +func (r *RingBuffer) TryWriteByte(c byte) error { + ok := r.mu.TryLock() + if !ok { + return ErrAcquireLock + } + defer r.mu.Unlock() + if err := r.err; err != nil { + if err == io.EOF { + err = ErrWriteOnClosed + } + return err + } + + err := r.writeByte(c) + if err == nil && r.block { + r.writeCond.Broadcast() + } + return err +} + +func (r *RingBuffer) writeByte(c byte) error { + if r.w == r.r && r.isFull { + return ErrIsFull + } + r.buf[r.w] = c + r.w++ + + if r.w == r.size { + r.w = 0 + } + if r.w == r.r { + r.isFull = true + } + + return nil +} + +// Length return the length of available read bytes. +func (r *RingBuffer) Length() int { + r.mu.Lock() + defer r.mu.Unlock() + + if r.w == r.r { + if r.isFull { + return r.size + } + return 0 + } + + if r.w > r.r { + return r.w - r.r + } + + return r.size - r.r + r.w +} + +// Capacity returns the size of the underlying buffer. +func (r *RingBuffer) Capacity() int { + return r.size +} + +// Free returns the length of available bytes to write. +func (r *RingBuffer) Free() int { + r.mu.Lock() + defer r.mu.Unlock() + + if r.w == r.r { + if r.isFull { + return 0 + } + return r.size + } + + if r.w < r.r { + return r.r - r.w + } + + return r.size - r.w + r.r +} + +// WriteString writes the contents of the string s to buffer, which accepts a slice of bytes. +func (r *RingBuffer) WriteString(s string) (n int, err error) { + x := (*[2]uintptr)(unsafe.Pointer(&s)) + h := [3]uintptr{x[0], x[1], x[1]} + buf := *(*[]byte)(unsafe.Pointer(&h)) + return r.Write(buf) +} + +// Bytes returns all available read bytes. +// It does not move the read pointer and only copy the available data. +// If the dst is big enough it will be used as destination, +// otherwise a new buffer will be allocated. 
+func (r *RingBuffer) Bytes(dst []byte) []byte { + r.mu.Lock() + defer r.mu.Unlock() + getDst := func(n int) []byte { + if cap(dst) < n { + return make([]byte, n) + } + return dst[:n] + } + + if r.w == r.r { + if r.isFull { + buf := getDst(r.size) + copy(buf, r.buf[r.r:]) + copy(buf[r.size-r.r:], r.buf[:r.w]) + return buf + } + return nil + } + + if r.w > r.r { + buf := getDst(r.w - r.r) + copy(buf, r.buf[r.r:r.w]) + return buf + } + + n := r.size - r.r + r.w + buf := getDst(n) + + if r.r+n < r.size { + copy(buf, r.buf[r.r:r.r+n]) + } else { + c1 := r.size - r.r + copy(buf, r.buf[r.r:r.size]) + c2 := n - c1 + copy(buf[c1:], r.buf[0:c2]) + } + + return buf +} + +// IsFull returns this ringbuffer is full. +func (r *RingBuffer) IsFull() bool { + r.mu.Lock() + defer r.mu.Unlock() + + return r.isFull +} + +// IsEmpty returns this ringbuffer is empty. +func (r *RingBuffer) IsEmpty() bool { + r.mu.Lock() + defer r.mu.Unlock() + + return !r.isFull && r.w == r.r +} + +// CloseWithError closes the writer; reads will return +// no bytes and the error err, or EOF if err is nil. +// +// CloseWithError never overwrites the previous error if it exists +// and always returns nil. +func (r *RingBuffer) CloseWithError(err error) { + if err == nil { + err = io.EOF + } + r.setErr(err, false) +} + +// CloseWriter closes the writer. +// Reads will return any remaining bytes and io.EOF. +func (r *RingBuffer) CloseWriter() { + r.setErr(io.EOF, false) +} + +// Flush waits for the buffer to be empty and fully read. +// If not blocking ErrIsNotEmpty will be returned if the buffer still contains data. +func (r *RingBuffer) Flush() error { + r.mu.Lock() + defer r.mu.Unlock() + for r.w != r.r || r.isFull { + err := r.readErr(true) + if err != nil { + if err == io.EOF { + err = nil + } + return err + } + if !r.block { + return ErrIsNotEmpty + } + r.readCond.Wait() + } + + err := r.readErr(true) + if err == io.EOF { + return nil + } + return err +} + +// Reset the read pointer and writer pointer to zero. +func (r *RingBuffer) Reset() { + r.mu.Lock() + defer r.mu.Unlock() + + // Set error so any readers/writers will return immediately. + r.setErr(errors.New("reset called"), true) + if r.block { + r.readCond.Broadcast() + r.writeCond.Broadcast() + } + + // Unlock the mutex so readers/writers can finish. + r.mu.Unlock() + r.wg.Wait() + r.mu.Lock() + r.r = 0 + r.w = 0 + r.err = nil + r.isFull = false +} + +// WriteCloser returns a WriteCloser that writes to the ring buffer. +// When the returned WriteCloser is closed, it will wait for all data to be read before returning. +func (r *RingBuffer) WriteCloser() io.WriteCloser { + return &writeCloser{RingBuffer: r} +} + +type writeCloser struct { + *RingBuffer +} + +// Close provides a close method for the WriteCloser. +func (wc *writeCloser) Close() error { + wc.CloseWriter() + return wc.Flush() +} diff --git a/internal/ringbuffer/ring_buffer_benchmark_test.go b/internal/ringbuffer/ring_buffer_benchmark_test.go new file mode 100644 index 0000000000000..5de127addfa0b --- /dev/null +++ b/internal/ringbuffer/ring_buffer_benchmark_test.go @@ -0,0 +1,105 @@ +package ringbuffer + +import ( + "io" + "strings" + "testing" +) + +func BenchmarkRingBuffer_Sync(b *testing.B) { + rb := New(1024) + data := []byte(strings.Repeat("a", 512)) + buf := make([]byte, 512) + + for b.Loop() { + rb.Write(data) + rb.Read(buf) + } +} + +func BenchmarkRingBuffer_AsyncRead(b *testing.B) { + // Pretty useless benchmark, but it's here for completeness. 
+ rb := New(1024) + data := []byte(strings.Repeat("a", 512)) + buf := make([]byte, 512) + + go func() { + for { + rb.Read(buf) + } + }() + + for b.Loop() { + rb.Write(data) + } +} + +func BenchmarkRingBuffer_AsyncReadBlocking(b *testing.B) { + const sz = 512 + const buffers = 10 + rb := New(sz * buffers) + rb.SetBlocking(true) + data := []byte(strings.Repeat("a", sz)) + buf := make([]byte, sz) + + go func() { + for { + rb.Read(buf) + } + }() + + for b.Loop() { + rb.Write(data) + } +} + +func BenchmarkRingBuffer_AsyncWrite(b *testing.B) { + rb := New(1024) + data := []byte(strings.Repeat("a", 512)) + buf := make([]byte, 512) + + go func() { + for { + rb.Write(data) + } + }() + + for b.Loop() { + rb.Read(buf) + } +} + +func BenchmarkRingBuffer_AsyncWriteBlocking(b *testing.B) { + const sz = 512 + const buffers = 10 + rb := New(sz * buffers) + rb.SetBlocking(true) + data := []byte(strings.Repeat("a", sz)) + buf := make([]byte, sz) + + go func() { + for { + rb.Write(data) + } + }() + + for b.Loop() { + rb.Read(buf) + } +} + +func BenchmarkIoPipeReader(b *testing.B) { + pr, pw := io.Pipe() + data := []byte(strings.Repeat("a", 512)) + buf := make([]byte, 512) + + go func() { + for { + pw.Write(data) + } + }() + + for b.Loop() { + pr.Read(buf) + } +} diff --git a/internal/ringbuffer/ring_buffer_test.go b/internal/ringbuffer/ring_buffer_test.go new file mode 100644 index 0000000000000..72f1c5d1ac59d --- /dev/null +++ b/internal/ringbuffer/ring_buffer_test.go @@ -0,0 +1,1049 @@ +package ringbuffer + +import ( + "bytes" + "errors" + "fmt" + "hash/crc32" + "io" + "math/rand" + "os" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +func TestRingBuffer_interface(t *testing.T) { + rb := New(1) + var _ io.Writer = rb + var _ io.Reader = rb + // var _ io.StringWriter = rb + var _ io.ByteReader = rb + var _ io.ByteWriter = rb +} + +func TestRingBuffer_Write(t *testing.T) { + rb := New(64) + + // check empty or full + if !rb.IsEmpty() { + t.Fatalf("expect IsEmpty is true but got false") + } + if rb.IsFull() { + t.Fatalf("expect IsFull is false but got true") + } + if rb.Length() != 0 { + t.Fatalf("expect len 0 bytes but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 64 { + t.Fatalf("expect free 64 bytes but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + + // write 4 * 4 = 16 bytes + n, err := rb.Write([]byte(strings.Repeat("abcd", 4))) + if err != nil { + t.Fatalf("write failed: %v", err) + } + if n != 16 { + t.Fatalf("expect write 16 bytes but got %d", n) + } + if rb.Length() != 16 { + t.Fatalf("expect len 16 bytes but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 48 { + t.Fatalf("expect free 48 bytes but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + if !bytes.Equal(rb.Bytes(nil), []byte(strings.Repeat("abcd", 4))) { + t.Fatalf("expect 4 abcd but got %s. r.w=%d, r.r=%d", rb.Bytes(nil), rb.w, rb.r) + } + + // check empty or full + if rb.IsEmpty() { + t.Fatalf("expect IsEmpty is false but got true") + } + if rb.IsFull() { + t.Fatalf("expect IsFull is false but got true") + } + + // write 48 bytes, should full + n, err = rb.Write([]byte(strings.Repeat("abcd", 12))) + if err != nil { + t.Fatalf("write failed: %v", err) + } + if n != 48 { + t.Fatalf("expect write 48 bytes but got %d", n) + } + if rb.Length() != 64 { + t.Fatalf("expect len 64 bytes but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 0 { + t.Fatalf("expect free 0 bytes but got %d. 
r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + if rb.w != 0 { + t.Fatalf("expect r.w=0 but got %d. r.r=%d", rb.w, rb.r) + } + if !bytes.Equal(rb.Bytes(nil), []byte(strings.Repeat("abcd", 16))) { + t.Fatalf("expect 16 abcd but got %s. r.w=%d, r.r=%d", rb.Bytes(nil), rb.w, rb.r) + } + + // check empty or full + if rb.IsEmpty() { + t.Fatalf("expect IsEmpty is false but got true") + } + if !rb.IsFull() { + t.Fatalf("expect IsFull is true but got false") + } + + // write more 4 bytes, should reject + n, err = rb.Write([]byte(strings.Repeat("abcd", 1))) + if err == nil { + t.Fatalf("expect an error but got nil. n=%d, r.w=%d, r.r=%d", n, rb.w, rb.r) + } + if err != ErrIsFull { + t.Fatalf("expect ErrIsFull but got nil") + } + if n != 0 { + t.Fatalf("expect write 0 bytes but got %d", n) + } + if rb.Length() != 64 { + t.Fatalf("expect len 64 bytes but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 0 { + t.Fatalf("expect free 0 bytes but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + + // check empty or full + if rb.IsEmpty() { + t.Fatalf("expect IsEmpty is false but got true") + } + if !rb.IsFull() { + t.Fatalf("expect IsFull is true but got false") + } + + // reset this ringbuffer and set a long slice + rb.Reset() + n, err = rb.Write([]byte(strings.Repeat("abcd", 20))) + if err == nil { + t.Fatalf("expect ErrTooManyDataToWrite but got nil") + } + if n != 64 { + t.Fatalf("expect write 64 bytes but got %d", n) + } + if rb.Length() != 64 { + t.Fatalf("expect len 64 bytes but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 0 { + t.Fatalf("expect free 0 bytes but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + if rb.w != 0 { + t.Fatalf("expect r.w=0 but got %d. r.r=%d", rb.w, rb.r) + } + + // check empty or full + if rb.IsEmpty() { + t.Fatalf("expect IsEmpty is false but got true") + } + if !rb.IsFull() { + t.Fatalf("expect IsFull is true but got false") + } + + if !bytes.Equal(rb.Bytes(nil), []byte(strings.Repeat("abcd", 16))) { + t.Fatalf("expect 16 abcd but got %s. r.w=%d, r.r=%d", rb.Bytes(nil), rb.w, rb.r) + } + + rb.Reset() + // write 4 * 2 = 8 bytes + n, err = rb.Write([]byte(strings.Repeat("abcd", 2))) + if err != nil { + t.Fatalf("write failed: %v", err) + } + if n != 8 { + t.Fatalf("expect write 16 bytes but got %d", n) + } + if rb.Length() != 8 { + t.Fatalf("expect len 16 bytes but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 56 { + t.Fatalf("expect free 48 bytes but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + buf := make([]byte, 5) + rb.Read(buf) + if rb.Length() != 3 { + t.Fatalf("expect len 3 bytes but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + rb.Write([]byte(strings.Repeat("abcd", 15))) + + if !bytes.Equal(rb.Bytes(nil), []byte("bcd"+strings.Repeat("abcd", 15))) { + t.Fatalf("expect 63 ... but got %s. r.w=%d, r.r=%d", rb.Bytes(nil), rb.w, rb.r) + } + + rb.Reset() + n, err = rb.Write([]byte(strings.Repeat("abcd", 16))) + if err != nil { + t.Fatalf("write failed: %v", err) + } + if n != 64 { + t.Fatalf("expect write 64 bytes but got %d", n) + } + if rb.Free() != 0 { + t.Fatalf("expect free 0 bytes but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + buf = make([]byte, 16) + rb.Read(buf) + n, err = rb.Write([]byte(strings.Repeat("1234", 4))) + if err != nil { + t.Fatalf("write failed: %v", err) + } + if n != 16 { + t.Fatalf("expect write 16 bytes but got %d", n) + } + if rb.Free() != 0 { + t.Fatalf("expect free 0 bytes but got %d. 
r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + if !bytes.Equal(append(buf, rb.Bytes(nil)...), []byte(strings.Repeat("abcd", 16)+strings.Repeat("1234", 4))) { + t.Fatalf("expect 16 abcd and 4 1234 but got %s. r.w=%d, r.r=%d", rb.Bytes(nil), rb.w, rb.r) + } +} + +func TestRingBuffer_WriteBlocking(t *testing.T) { + rb := New(64).SetBlocking(true) + + // check empty or full + if !rb.IsEmpty() { + t.Fatalf("expect IsEmpty is true but got false") + } + if rb.IsFull() { + t.Fatalf("expect IsFull is false but got true") + } + if rb.Length() != 0 { + t.Fatalf("expect len 0 bytes but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 64 { + t.Fatalf("expect free 64 bytes but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + + // write 4 * 4 = 16 bytes + n, err := rb.Write([]byte(strings.Repeat("abcd", 4))) + if err != nil { + t.Fatalf("write failed: %v", err) + } + if n != 16 { + t.Fatalf("expect write 16 bytes but got %d", n) + } + if rb.Length() != 16 { + t.Fatalf("expect len 16 bytes but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 48 { + t.Fatalf("expect free 48 bytes but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + if !bytes.Equal(rb.Bytes(nil), []byte(strings.Repeat("abcd", 4))) { + t.Fatalf("expect 4 abcd but got %s. r.w=%d, r.r=%d", rb.Bytes(nil), rb.w, rb.r) + } + + // check empty or full + if rb.IsEmpty() { + t.Fatalf("expect IsEmpty is false but got true") + } + if rb.IsFull() { + t.Fatalf("expect IsFull is false but got true") + } + + // write 48 bytes, should full + n, err = rb.Write([]byte(strings.Repeat("abcd", 12))) + if err != nil { + t.Fatalf("write failed: %v", err) + } + if n != 48 { + t.Fatalf("expect write 48 bytes but got %d", n) + } + if rb.Length() != 64 { + t.Fatalf("expect len 64 bytes but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 0 { + t.Fatalf("expect free 0 bytes but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + if rb.w != 0 { + t.Fatalf("expect r.w=0 but got %d. r.r=%d", rb.w, rb.r) + } + if !bytes.Equal(rb.Bytes(nil), []byte(strings.Repeat("abcd", 16))) { + t.Fatalf("expect 16 abcd but got %s. r.w=%d, r.r=%d", rb.Bytes(nil), rb.w, rb.r) + } + + // check empty or full + if rb.IsEmpty() { + t.Fatalf("expect IsEmpty is false but got true") + } + if !rb.IsFull() { + t.Fatalf("expect IsFull is true but got false") + } + + rb.Reset() + // write 4 * 2 = 8 bytes + n, err = rb.Write([]byte(strings.Repeat("abcd", 2))) + if err != nil { + t.Fatalf("write failed: %v", err) + } + if n != 8 { + t.Fatalf("expect write 16 bytes but got %d", n) + } + if rb.Length() != 8 { + t.Fatalf("expect len 16 bytes but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 56 { + t.Fatalf("expect free 48 bytes but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + buf := make([]byte, 5) + rb.Read(buf) + if rb.Length() != 3 { + t.Fatalf("expect len 3 bytes but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + rb.Write([]byte(strings.Repeat("abcd", 15))) + + if !bytes.Equal(rb.Bytes(nil), []byte("bcd"+strings.Repeat("abcd", 15))) { + t.Fatalf("expect 63 ... but got %s. r.w=%d, r.r=%d", rb.Bytes(nil), rb.w, rb.r) + } + + rb.Reset() + n, err = rb.Write([]byte(strings.Repeat("abcd", 16))) + if err != nil { + t.Fatalf("write failed: %v", err) + } + if n != 64 { + t.Fatalf("expect write 64 bytes but got %d", n) + } + if rb.Free() != 0 { + t.Fatalf("expect free 0 bytes but got %d. 
r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + buf = make([]byte, 16) + rb.Read(buf) + n, err = rb.Write([]byte(strings.Repeat("1234", 4))) + if err != nil { + t.Fatalf("write failed: %v", err) + } + if n != 16 { + t.Fatalf("expect write 16 bytes but got %d", n) + } + if rb.Free() != 0 { + t.Fatalf("expect free 0 bytes but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + if !bytes.Equal(append(buf, rb.Bytes(nil)...), []byte(strings.Repeat("abcd", 16)+strings.Repeat("1234", 4))) { + t.Fatalf("expect 16 abcd and 4 1234 but got %s. r.w=%d, r.r=%d", rb.Bytes(nil), rb.w, rb.r) + } +} + +func TestRingBuffer_Read(t *testing.T) { + defer timeout(5 * time.Second)() + rb := New(64) + + // check empty or full + if !rb.IsEmpty() { + t.Fatalf("expect IsEmpty is true but got false") + } + if rb.IsFull() { + t.Fatalf("expect IsFull is false but got true") + } + if rb.Length() != 0 { + t.Fatalf("expect len 0 bytes but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 64 { + t.Fatalf("expect free 64 bytes but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + + // read empty + buf := make([]byte, 1024) + n, err := rb.Read(buf) + if err == nil { + t.Fatalf("expect an error but got nil") + } + if err != ErrIsEmpty { + t.Fatalf("expect ErrIsEmpty but got nil") + } + if n != 0 { + t.Fatalf("expect read 0 bytes but got %d", n) + } + if rb.Length() != 0 { + t.Fatalf("expect len 0 bytes but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 64 { + t.Fatalf("expect free 64 bytes but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + if rb.r != 0 { + t.Fatalf("expect r.r=0 but got %d. r.w=%d", rb.r, rb.w) + } + + // write 16 bytes to read + rb.Write([]byte(strings.Repeat("abcd", 4))) + n, err = rb.Read(buf) + if err != nil { + t.Fatalf("read failed: %v", err) + } + if n != 16 { + t.Fatalf("expect read 16 bytes but got %d", n) + } + if rb.Length() != 0 { + t.Fatalf("expect len 0 bytes but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 64 { + t.Fatalf("expect free 64 bytes but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + if rb.r != 16 { + t.Fatalf("expect r.r=16 but got %d. r.w=%d", rb.r, rb.w) + } + + // write long slice to read + rb.Write([]byte(strings.Repeat("abcd", 20))) + n, err = rb.Read(buf) + if err != nil { + t.Fatalf("read failed: %v", err) + } + if n != 64 { + t.Fatalf("expect read 64 bytes but got %d", n) + } + if rb.Length() != 0 { + t.Fatalf("expect len 0 bytes but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 64 { + t.Fatalf("expect free 64 bytes but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + if rb.r != 16 { + t.Fatalf("expect r.r=16 but got %d. r.w=%d", rb.r, rb.w) + } +} + +func TestRingBuffer_Blocking(t *testing.T) { + // Typical runtime is ~5-10s. + defer timeout(60 * time.Second)() + const debug = false + + var readBytes int + var wroteBytes int + var readBuf bytes.Buffer + var wroteBuf bytes.Buffer + readHash := crc32.NewIEEE() + wroteHash := crc32.NewIEEE() + read := io.Writer(readHash) + wrote := io.Writer(wroteHash) + if debug { + read = io.MultiWriter(read, &readBuf) + wrote = io.MultiWriter(wrote, &wroteBuf) + } + debugln := func(args ...any) { + if debug { + fmt.Println(args...) + } + } + // Inject random reader/writer sleeps. 
+ const maxSleep = int(1 * time.Millisecond) + doSleep := !testing.Short() + rb := New(4 << 10).SetBlocking(true) + + // Reader + var readErr error + var wg sync.WaitGroup + wg.Add(1) + go func() { + readRng := rand.New(rand.NewSource(1)) + defer wg.Done() + defer rb.CloseWithError(readErr) + buf := make([]byte, 1024) + for { + // Read + n, err := rb.Read(buf[:readRng.Intn(len(buf))]) + readBytes += n + read.Write(buf[:n]) + debugln("READ 1\t", n, readBytes) + if err != nil { + readErr = err + break + } + + // ReadByte + b, err := rb.ReadByte() + if err != nil { + readErr = err + break + } + readBytes++ + read.Write([]byte{b}) + debugln("READ 2\t", 1, readBytes) + + // TryRead + n, err = rb.TryRead(buf[:readRng.Intn(len(buf))]) + readBytes += n + read.Write(buf[:n]) + debugln("READ 3\t", n, readBytes) + if err != nil && err != ErrAcquireLock && err != ErrIsEmpty { + readErr = err + break + } + if doSleep && readRng.Intn(20) == 0 { + time.Sleep(time.Duration(readRng.Intn(maxSleep))) + } + } + }() + + // Writer + { + buf := make([]byte, 1024) + writeRng := rand.New(rand.NewSource(2)) + for range 2500 { + writeRng.Read(buf) + // Write + n, err := rb.Write(buf[:writeRng.Intn(len(buf))]) + if err != nil { + t.Fatalf("write failed: %v", err) + } + wroteBytes += n + wrote.Write(buf[:n]) + debugln("WRITE 1\t", n, wroteBytes) + + // WriteString + n, err = rb.WriteString(string(buf[:writeRng.Intn(len(buf))])) + if err != nil { + t.Fatalf("write failed: %v", err) + } + wroteBytes += n + wrote.Write(buf[:n]) + debugln("WRITE 2\t", writeRng.Intn(len(buf)), wroteBytes) + + // WriteByte + err = rb.WriteByte(buf[0]) + if err != nil { + t.Fatalf("write failed: %v", err) + } + wroteBytes++ + wrote.Write(buf[:1]) + debugln("WRITE 3\t", 1, wroteBytes) + + // TryWrite + n, err = rb.TryWrite(buf[:writeRng.Intn(len(buf))]) + if err != nil && err != ErrAcquireLock && err != ErrTooMuchDataToWrite && err != ErrIsFull { + t.Fatalf("write failed: %v", err) + } + wroteBytes += n + wrote.Write(buf[:n]) + debugln("WRITE 4\t", n, wroteBytes) + + // TryWriteByte + err = rb.TryWriteByte(buf[0]) + if err != nil && err != ErrAcquireLock && err != ErrTooMuchDataToWrite && err != ErrIsFull { + t.Fatalf("write failed: %v", err) + } + if err == nil { + wroteBytes++ + wrote.Write(buf[:1]) + debugln("WRITE 5\t", 1, wroteBytes) + } + if doSleep && writeRng.Intn(10) == 0 { + time.Sleep(time.Duration(writeRng.Intn(maxSleep))) + } + } + if err := rb.Flush(); err != nil { + t.Fatalf("flush failed: %v", err) + } + rb.CloseWriter() + } + wg.Wait() + if !errors.Is(readErr, io.EOF) { + t.Fatalf("expect io.EOF but got %v", readErr) + } + if readBytes != wroteBytes { + a, b := readBuf.Bytes(), wroteBuf.Bytes() + if debug && !bytes.Equal(a, b) { + common := len(a) + for i := range a { + if a[i] != b[i] { + common = i + break + } + } + a, b = a[common:], b[common:] + if len(a) > 64 { + a = a[:64] + } + if len(b) > 64 { + b = b[:64] + } + t.Errorf("after %d common bytes, difference \nread: %x\nwrote:%x", common, a, b) + } + t.Fatalf("expect read %d bytes but got %d", wroteBytes, readBytes) + } + if readHash.Sum32() != wroteHash.Sum32() { + t.Fatalf("expect read hash 0x%08x but got 0x%08x", readHash.Sum32(), wroteHash.Sum32()) + } +} + +func TestRingBuffer_BlockingBig(t *testing.T) { + // Typical runtime is ~5-10s. 
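The close-with-error semantics asserted by TestRingBufferCloseError and TestRingBufferCloseErrorUnblocks further down can be summarized the same way. The following hypothetical companion test is a sketch of the behaviour those tests check, not code from this change set.

```go
// Hypothetical companion test: CloseWithError wakes any blocked Read or Write,
// and the first error supplied is the one every later call keeps returning.
func TestCloseWithErrorSketch(t *testing.T) {
	rb := New(64).SetBlocking(true)

	done := make(chan error, 1)
	go func() {
		buf := make([]byte, 8)
		_, err := rb.Read(buf) // blocks: nothing has been written yet
		done <- err
	}()

	cause := errors.New("upstream failed")
	rb.CloseWithError(cause) // unblocks the pending Read with cause

	if err := <-done; err != cause {
		t.Fatalf("expected %v, got %v", cause, err)
	}
	// The first error sticks; subsequent writes see it too.
	if _, err := rb.Write([]byte("x")); err != cause {
		t.Fatalf("expected %v, got %v", cause, err)
	}
}
```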
+ defer timeout(60 * time.Second)() + const debug = false + + var readBytes int + var wroteBytes int + readHash := crc32.NewIEEE() + wroteHash := crc32.NewIEEE() + var readBuf bytes.Buffer + var wroteBuf bytes.Buffer + read := io.Writer(readHash) + wrote := io.Writer(wroteHash) + if debug { + read = io.MultiWriter(read, &readBuf) + wrote = io.MultiWriter(wrote, &wroteBuf) + } + debugln := func(args ...any) { + if debug { + fmt.Println(args...) + } + } + // Inject random reader/writer sleeps. + const maxSleep = int(1 * time.Millisecond) + doSleep := !testing.Short() + rb := New(4 << 10).SetBlocking(true) + + // Reader + var readErr error + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + defer rb.CloseWithError(readErr) + readRng := rand.New(rand.NewSource(1)) + buf := make([]byte, 64<<10) + for { + // Read + n, err := rb.Read(buf[:readRng.Intn(len(buf))]) + readBytes += n + read.Write(buf[:n]) + if err != nil { + readErr = err + break + } + debugln("READ 1\t", n, readBytes) + + // ReadByte + b, err := rb.ReadByte() + if err != nil { + readErr = err + break + } + readBytes++ + read.Write([]byte{b}) + debugln("READ 2\t", 1, readBytes) + + // TryRead + n, err = rb.TryRead(buf[:readRng.Intn(len(buf))]) + readBytes += n + read.Write(buf[:n]) + if err != nil && err != ErrAcquireLock && err != ErrIsEmpty { + readErr = err + break + } + debugln("READ 3\t", n, readBytes) + if doSleep && readRng.Intn(20) == 0 { + time.Sleep(time.Duration(readRng.Intn(maxSleep))) + } + } + }() + + // Writer + { + writeRng := rand.New(rand.NewSource(2)) + buf := make([]byte, 64<<10) + for range 500 { + writeRng.Read(buf) + // Write + n, err := rb.Write(buf[:writeRng.Intn(len(buf))]) + if err != nil { + t.Fatalf("write failed: %v", err) + } + wroteBytes += n + wrote.Write(buf[:n]) + debugln("WRITE 1\t", n, wroteBytes) + + // WriteString + n, err = rb.WriteString(string(buf[:writeRng.Intn(len(buf))])) + if err != nil { + t.Fatalf("write failed: %v", err) + } + wroteBytes += n + wrote.Write(buf[:n]) + debugln("WRITE 2\t", writeRng.Intn(len(buf)), wroteBytes) + + // WriteByte + err = rb.WriteByte(buf[0]) + if err != nil { + t.Fatalf("write failed: %v", err) + } + wroteBytes++ + wrote.Write(buf[:1]) + debugln("WRITE 3\t", 1, wroteBytes) + + // TryWrite + n, err = rb.TryWrite(buf[:writeRng.Intn(len(buf))]) + if err != nil && err != ErrAcquireLock && err != ErrTooMuchDataToWrite && err != ErrIsFull { + t.Fatalf("write failed: %v", err) + } + wroteBytes += n + wrote.Write(buf[:n]) + debugln("WRITE 4\t", n, wroteBytes) + + // TryWriteByte + err = rb.TryWriteByte(buf[0]) + if err != nil && err != ErrAcquireLock && err != ErrTooMuchDataToWrite && err != ErrIsFull { + t.Fatalf("write failed: %v", err) + } + if err == nil { + wroteBytes++ + wrote.Write(buf[:1]) + debugln("WRITE 5\t", 1, wroteBytes) + } + if doSleep && writeRng.Intn(10) == 0 { + time.Sleep(time.Duration(writeRng.Intn(maxSleep))) + } + } + if err := rb.Flush(); err != nil { + t.Fatalf("flush failed: %v", err) + } + rb.CloseWriter() + } + wg.Wait() + if !errors.Is(readErr, io.EOF) { + t.Fatalf("expect io.EOF but got %v", readErr) + } + if readBytes != wroteBytes { + a, b := readBuf.Bytes(), wroteBuf.Bytes() + if debug && !bytes.Equal(a, b) { + common := len(a) + for i := range a { + if a[i] != b[i] { + t.Errorf("%x != %x", a[i], b[i]) + common = i + break + } + } + a, b = a[common:], b[common:] + if len(a) > 64 { + a = a[:64] + } + if len(b) > 64 { + b = b[:64] + } + t.Errorf("after %d common bytes, difference \nread: %x\nwrote:%x", common, a, b) + 
} + t.Fatalf("expect read %d bytes but got %d", wroteBytes, readBytes) + } + if readHash.Sum32() != wroteHash.Sum32() { + t.Fatalf("expect read hash 0x%08x but got 0x%08x", readHash.Sum32(), wroteHash.Sum32()) + } +} + +func TestRingBuffer_ByteInterface(t *testing.T) { + defer timeout(5 * time.Second)() + rb := New(2) + + // write one + err := rb.WriteByte('a') + if err != nil { + t.Fatalf("WriteByte failed: %v", err) + } + if rb.Length() != 1 { + t.Fatalf("expect len 1 byte but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 1 { + t.Fatalf("expect free 1 byte but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + if !bytes.Equal(rb.Bytes(nil), []byte{'a'}) { + t.Fatalf("expect a but got %s. r.w=%d, r.r=%d", rb.Bytes(nil), rb.w, rb.r) + } + // check empty or full + if rb.IsEmpty() { + t.Fatalf("expect IsEmpty is false but got true") + } + if rb.IsFull() { + t.Fatalf("expect IsFull is false but got true") + } + + // write to, isFull + err = rb.WriteByte('b') + if err != nil { + t.Fatalf("WriteByte failed: %v", err) + } + if rb.Length() != 2 { + t.Fatalf("expect len 2 bytes but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 0 { + t.Fatalf("expect free 0 byte but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + if !bytes.Equal(rb.Bytes(nil), []byte{'a', 'b'}) { + t.Fatalf("expect a but got %s. r.w=%d, r.r=%d", rb.Bytes(nil), rb.w, rb.r) + } + // check empty or full + if rb.IsEmpty() { + t.Fatalf("expect IsEmpty is false but got true") + } + if !rb.IsFull() { + t.Fatalf("expect IsFull is true but got false") + } + + // write + err = rb.WriteByte('c') + if err == nil { + t.Fatalf("expect ErrIsFull but got nil") + } + if rb.Length() != 2 { + t.Fatalf("expect len 2 bytes but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 0 { + t.Fatalf("expect free 0 byte but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + if !bytes.Equal(rb.Bytes(nil), []byte{'a', 'b'}) { + t.Fatalf("expect a but got %s. r.w=%d, r.r=%d", rb.Bytes(nil), rb.w, rb.r) + } + // check empty or full + if rb.IsEmpty() { + t.Fatalf("expect IsEmpty is false but got true") + } + if !rb.IsFull() { + t.Fatalf("expect IsFull is true but got false") + } + + // read one + b, err := rb.ReadByte() + if err != nil { + t.Fatalf("ReadByte failed: %v", err) + } + if b != 'a' { + t.Fatalf("expect a but got %c. r.w=%d, r.r=%d", b, rb.w, rb.r) + } + if rb.Length() != 1 { + t.Fatalf("expect len 1 byte but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 1 { + t.Fatalf("expect free 1 byte but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + if !bytes.Equal(rb.Bytes(nil), []byte{'b'}) { + t.Fatalf("expect a but got %s. r.w=%d, r.r=%d", rb.Bytes(nil), rb.w, rb.r) + } + // check empty or full + if rb.IsEmpty() { + t.Fatalf("expect IsEmpty is false but got true") + } + if rb.IsFull() { + t.Fatalf("expect IsFull is false but got true") + } + + // read two, empty + b, err = rb.ReadByte() + if err != nil { + t.Fatalf("ReadByte failed: %v", err) + } + if b != 'b' { + t.Fatalf("expect b but got %c. r.w=%d, r.r=%d", b, rb.w, rb.r) + } + if rb.Length() != 0 { + t.Fatalf("expect len 0 byte but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 2 { + t.Fatalf("expect free 2 byte but got %d. 
r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + // check empty or full + if !rb.IsEmpty() { + t.Fatalf("expect IsEmpty is true but got false") + } + if rb.IsFull() { + t.Fatalf("expect IsFull is false but got true") + } + + // read three, error + _, err = rb.ReadByte() + if err == nil { + t.Fatalf("expect ErrIsEmpty but got nil") + } + if rb.Length() != 0 { + t.Fatalf("expect len 0 byte but got %d. r.w=%d, r.r=%d", rb.Length(), rb.w, rb.r) + } + if rb.Free() != 2 { + t.Fatalf("expect free 2 byte but got %d. r.w=%d, r.r=%d", rb.Free(), rb.w, rb.r) + } + // check empty or full + if !rb.IsEmpty() { + t.Fatalf("expect IsEmpty is true but got false") + } + if rb.IsFull() { + t.Fatalf("expect IsFull is false but got true") + } +} + +func TestRingBufferCloseError(t *testing.T) { + type testError1 struct{ error } + type testError2 struct{ error } + + rb := New(100) + rb.CloseWithError(testError1{}) + if _, err := rb.Write(nil); err != (testError1{}) { + t.Errorf("Write error: got %T, want testError1", err) + } + if _, err := rb.Write([]byte{1}); err != (testError1{}) { + t.Errorf("Write error: got %T, want testError1", err) + } + if err := rb.WriteByte(0); err != (testError1{}) { + t.Errorf("Write error: got %T, want testError1", err) + } + if _, err := rb.TryWrite(nil); err != (testError1{}) { + t.Errorf("Write error: got %T, want testError1", err) + } + if _, err := rb.TryWrite([]byte{1}); err != (testError1{}) { + t.Errorf("Write error: got %T, want testError1", err) + } + if err := rb.TryWriteByte(0); err != (testError1{}) { + t.Errorf("Write error: got %T, want testError1", err) + } + if err := rb.Flush(); err != (testError1{}) { + t.Errorf("Write error: got %T, want testError1", err) + } + + rb.CloseWithError(testError2{}) + if _, err := rb.Write(nil); err != (testError1{}) { + t.Errorf("Write error: got %T, want testError1", err) + } + + rb.Reset() + rb.CloseWithError(testError1{}) + if _, err := rb.Read(nil); err != (testError1{}) { + t.Errorf("Read error: got %T, want testError1", err) + } + if _, err := rb.Read([]byte{0}); err != (testError1{}) { + t.Errorf("Read error: got %T, want testError1", err) + } + if _, err := rb.ReadByte(); err != (testError1{}) { + t.Errorf("Read error: got %T, want testError1", err) + } + if _, err := rb.TryRead(nil); err != (testError1{}) { + t.Errorf("Read error: got %T, want testError1", err) + } + if _, err := rb.TryRead([]byte{0}); err != (testError1{}) { + t.Errorf("Read error: got %T, want testError1", err) + } + rb.CloseWithError(testError2{}) + if _, err := rb.Read(nil); err != (testError1{}) { + t.Errorf("Read error: got %T, want testError1", err) + } + if _, err := rb.Read([]byte{0}); err != (testError1{}) { + t.Errorf("Read error: got %T, want testError1", err) + } + if _, err := rb.ReadByte(); err != (testError1{}) { + t.Errorf("Read error: got %T, want testError1", err) + } + if _, err := rb.TryRead(nil); err != (testError1{}) { + t.Errorf("Read error: got %T, want testError1", err) + } + if _, err := rb.TryRead([]byte{0}); err != (testError1{}) { + t.Errorf("Read error: got %T, want testError1", err) + } +} + +func TestRingBufferCloseErrorUnblocks(t *testing.T) { + const sz = 100 + rb := New(sz).SetBlocking(true) + + testCancel := func(fn func()) { + t.Helper() + defer timeout(5 * time.Second)() + rb.Reset() + done := make(chan struct{}) + go func() { + defer close(done) + time.Sleep(10 * time.Millisecond) + fn() + }() + rb.CloseWithError(errors.New("test error")) + <-done + + rb.Reset() + done = make(chan struct{}) + go func() { + defer 
close(done) + fn() + }() + time.Sleep(10 * time.Millisecond) + rb.CloseWithError(errors.New("test error")) + <-done + } + testCancel(func() { + rb.Write([]byte{sz + 5: 1}) + }) + testCancel(func() { + rb.Write(make([]byte, sz)) + rb.WriteByte(0) + }) + testCancel(func() { + rb.Read([]byte{10: 1}) + }) + testCancel(func() { + rb.ReadByte() + }) + testCancel(func() { + rb.Write(make([]byte, sz)) + rb.Flush() + }) +} + +func TestWriteAfterWriterClose(t *testing.T) { + rb := New(100).SetBlocking(true) + + done := make(chan error) + go func() { + defer close(done) + _, err := rb.Write([]byte("hello")) + if err != nil { + t.Errorf("got error: %q; expected none", err) + } + rb.CloseWriter() + _, err = rb.Write([]byte("world")) + done <- err + err = rb.WriteByte(0) + done <- err + _, err = rb.TryWrite([]byte("world")) + done <- err + err = rb.TryWriteByte(0) + done <- err + }() + + buf := make([]byte, 100) + n, err := io.ReadFull(rb, buf) + if err != nil && err != io.ErrUnexpectedEOF { + t.Fatalf("got: %q; want: %q", err, io.ErrUnexpectedEOF) + } + for writeErr := range done { + if writeErr != ErrWriteOnClosed { + t.Errorf("got: %q; want: %q", writeErr, ErrWriteOnClosed) + } else { + t.Log("ok") + } + } + result := string(buf[0:n]) + if result != "hello" { + t.Errorf("got: %q; want: %q", result, "hello") + } +} + +func timeout(after time.Duration) (cancel func()) { + c := time.After(after) + cc := make(chan struct{}) + go func() { + select { + case <-cc: + return + case <-c: + buf := make([]byte, 1<<20) + stacklen := runtime.Stack(buf, true) + fmt.Printf("=== Timeout, assuming deadlock ===\n*** goroutine dump...\n%s\n*** end\n", string(buf[:stacklen])) + os.Exit(2) + } + }() + return func() { + close(cc) + } +} diff --git a/internal/s3select/csv/args.go b/internal/s3select/csv/args.go index 68e5a3a850254..d7e90210d2783 100644 --- a/internal/s3select/csv/args.go +++ b/internal/s3select/csv/args.go @@ -27,8 +27,9 @@ import ( ) const ( - none = "none" - use = "use" + none = "none" + use = "use" + ignore = "ignore" defaultRecordDelimiter = "\n" defaultFieldDelimiter = "," @@ -92,11 +93,22 @@ func (args *ReaderArgs) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (er } switch tagName { case "FileHeaderInfo": - args.FileHeaderInfo = strings.ToLower(s) + s = strings.ToLower(s) + if len(s) != 0 { + if s != none && s != use && s != ignore { + return errors.New("unsupported FileHeaderInfo") + } + args.FileHeaderInfo = s + } + case "RecordDelimiter": - args.RecordDelimiter = s + if len(s) != 0 { + args.RecordDelimiter = s + } case "FieldDelimiter": - args.FieldDelimiter = s + if len(s) != 0 { + args.FieldDelimiter = s + } case "QuoteCharacter": if utf8.RuneCountInString(s) > 1 { return fmt.Errorf("unsupported QuoteCharacter '%v'", s) @@ -112,7 +124,9 @@ func (args *ReaderArgs) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (er return fmt.Errorf("unsupported QuoteEscapeCharacter '%v'", s) } case "Comments": - args.CommentCharacter = s + if len(s) != 0 { + args.CommentCharacter = s + } default: return errors.New("unrecognized option") } diff --git a/internal/s3select/csv/reader.go b/internal/s3select/csv/reader.go index 032a9b0c1e4db..66110983027a7 100644 --- a/internal/s3select/csv/reader.go +++ b/internal/s3select/csv/reader.go @@ -27,25 +27,26 @@ import ( "unicode/utf8" csv "github.com/minio/csvparser" + "github.com/minio/minio/internal/bpool" "github.com/minio/minio/internal/s3select/sql" ) // Reader - CSV record reader for S3Select. 
type Reader struct { args *ReaderArgs - readCloser io.ReadCloser // raw input - buf *bufio.Reader // input to the splitter - columnNames []string // names of columns - nameIndexMap map[string]int64 // name to column index - current [][]string // current block of results to be returned - recordsRead int // number of records read in current slice - input chan *queueItem // input for workers - queue chan *queueItem // output from workers in order - err error // global error state, only touched by Reader.Read - bufferPool sync.Pool // pool of []byte objects for input - csvDstPool sync.Pool // pool of [][]string used for output - close chan struct{} // used for shutting down the splitter before end of stream - readerWg sync.WaitGroup // used to keep track of async reader. + readCloser io.ReadCloser // raw input + buf *bufio.Reader // input to the splitter + columnNames []string // names of columns + nameIndexMap map[string]int64 // name to column index + current [][]string // current block of results to be returned + recordsRead int // number of records read in current slice + input chan *queueItem // input for workers + queue chan *queueItem // output from workers in order + err error // global error state, only touched by Reader.Read + bufferPool bpool.Pool[[]byte] // pool of []byte objects for input + csvDstPool bpool.Pool[[][]string] // pool of [][]string used for output + close chan struct{} // used for shutting down the splitter before end of stream + readerWg sync.WaitGroup // used to keep track of async reader. } // queueItem is an item in the queue. @@ -69,7 +70,7 @@ func (r *Reader) Read(dst sql.Record) (sql.Record, error) { r.err = io.EOF return nil, r.err } - //nolint:staticcheck // SA6002 Using pointer would allocate more since we would have to copy slice header before taking a pointer. + r.csvDstPool.Put(r.current) r.current = <-item.dst r.err = item.err @@ -182,12 +183,12 @@ func (r *Reader) startReaders(newReader func(io.Reader) *csv.Reader) error { } } - r.bufferPool.New = func() interface{} { + r.bufferPool.New = func() []byte { return make([]byte, csvSplitSize+1024) } // Return first block - next, nextErr := r.nextSplit(csvSplitSize, r.bufferPool.Get().([]byte)) + next, nextErr := r.nextSplit(csvSplitSize, r.bufferPool.Get()) // Check if first block is valid. if !utf8.Valid(next) { return errInvalidTextEncodingError() @@ -224,20 +225,20 @@ func (r *Reader) startReaders(newReader func(io.Reader) *csv.Reader) error { // Exit on any error. 
return } - next, nextErr = r.nextSplit(csvSplitSize, r.bufferPool.Get().([]byte)) + next, nextErr = r.nextSplit(csvSplitSize, r.bufferPool.Get()) } }() // Start parsers - for i := 0; i < runtime.GOMAXPROCS(0); i++ { + for range runtime.GOMAXPROCS(0) { go func() { for in := range r.input { if len(in.input) == 0 { in.dst <- nil continue } - dst, ok := r.csvDstPool.Get().([][]string) - if !ok { + dst := r.csvDstPool.Get() + if len(dst) < 1000 { dst = make([][]string, 0, 1000) } diff --git a/internal/s3select/csv/reader_contrib_test.go b/internal/s3select/csv/reader_contrib_test.go index 16c412e714a0d..f2262f5e83e1e 100644 --- a/internal/s3select/csv/reader_contrib_test.go +++ b/internal/s3select/csv/reader_contrib_test.go @@ -84,7 +84,7 @@ func TestRead(t *testing.T) { } type tester interface { - Fatal(...interface{}) + Fatal(...any) } func openTestFile(t tester, file string) []byte { @@ -508,10 +508,10 @@ func BenchmarkReaderBasic(b *testing.B) { } defer r.Close() b.ReportAllocs() - b.ResetTimer() + b.SetBytes(int64(len(f))) var record sql.Record - for i := 0; i < b.N; i++ { + for b.Loop() { r, err = NewReader(io.NopCloser(bytes.NewBuffer(f)), &args) if err != nil { b.Fatalf("Reading init failed with %s", err) @@ -537,7 +537,7 @@ func BenchmarkReaderHuge(b *testing.B) { AllowQuotedRecordDelimiter: false, unmarshaled: true, } - for n := 0; n < 11; n++ { + for n := range 11 { f := openTestFile(b, "nyc-taxi-data-100k.csv") want := 309 for i := 0; i < n; i++ { @@ -549,7 +549,7 @@ func BenchmarkReaderHuge(b *testing.B) { b.SetBytes(int64(len(f))) b.ResetTimer() var record sql.Record - for i := 0; i < b.N; i++ { + for b.Loop() { r, err := NewReader(io.NopCloser(bytes.NewBuffer(f)), &args) if err != nil { b.Fatalf("Reading init failed with %s", err) @@ -590,10 +590,10 @@ func BenchmarkReaderReplace(b *testing.B) { } defer r.Close() b.ReportAllocs() - b.ResetTimer() + b.SetBytes(int64(len(f))) var record sql.Record - for i := 0; i < b.N; i++ { + for b.Loop() { r, err = NewReader(io.NopCloser(bytes.NewBuffer(f)), &args) if err != nil { b.Fatalf("Reading init failed with %s", err) @@ -627,10 +627,10 @@ func BenchmarkReaderReplaceTwo(b *testing.B) { } defer r.Close() b.ReportAllocs() - b.ResetTimer() + b.SetBytes(int64(len(f))) var record sql.Record - for i := 0; i < b.N; i++ { + for b.Loop() { r, err = NewReader(io.NopCloser(bytes.NewBuffer(f)), &args) if err != nil { b.Fatalf("Reading init failed with %s", err) diff --git a/internal/s3select/csv/record.go b/internal/s3select/csv/record.go index 18e467834bfa7..57d4c96382291 100644 --- a/internal/s3select/csv/record.go +++ b/internal/s3select/csv/record.go @@ -25,8 +25,8 @@ import ( "strconv" "strings" - "github.com/bcicen/jstream" csv "github.com/minio/csvparser" + "github.com/minio/minio/internal/s3select/jstream" "github.com/minio/minio/internal/s3select/sql" ) @@ -46,8 +46,8 @@ func (r *Record) Get(name string) (*sql.Value, error) { index, found := r.nameIndexMap[name] if !found { // Check if index. - if strings.HasPrefix(name, "_") { - idx, err := strconv.Atoi(strings.TrimPrefix(name, "_")) + if after, ok := strings.CutPrefix(name, "_"); ok { + idx, err := strconv.Atoi(after) if err != nil { return nil, fmt.Errorf("column %v not found", name) } @@ -87,9 +87,7 @@ func (r *Record) Reset() { if len(r.csvRecord) > 0 { r.csvRecord = r.csvRecord[:0] } - for k := range r.nameIndexMap { - delete(r.nameIndexMap, k) - } + clear(r.nameIndexMap) } // Clone the record. 
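Both here in the CSV reader and in the JSON preader below, the untyped sync.Pool fields (and their //nolint:staticcheck SA6002 annotations) are replaced by a generic internal/bpool pool. The pool's own code is not part of these hunks, so the following is only a plausible minimal shape inferred from the call sites (p.New = func() T {...}, p.Get() T, p.Put(v T)), not the actual internal/bpool implementation.

```go
// Hypothetical sketch of a typed pool consistent with the call sites in this
// diff; the real internal/bpool implementation may differ.
package bpool

import "sync"

// Pool is a typed wrapper around sync.Pool; the zero value is ready to use.
type Pool[T any] struct {
	New  func() T
	pool sync.Pool
}

// Get returns a pooled value, falls back to New, and finally to the zero value
// of T, which is why the parser goroutines above re-check the slice they get back.
func (p *Pool[T]) Get() T {
	if v, ok := p.pool.Get().(T); ok {
		return v
	}
	if p.New != nil {
		return p.New()
	}
	var zero T
	return zero
}

// Put places v back into the pool for reuse.
func (p *Pool[T]) Put(v T) {
	p.pool.Put(v)
}
```

Whatever the real implementation looks like, the visible effect in this diff is that Get returns a concrete []byte or [][]string with no type assertion at the call site.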
@@ -135,12 +133,12 @@ func (r *Record) WriteJSON(writer io.Writer) error { } // Raw - returns the underlying data with format info. -func (r *Record) Raw() (sql.SelectObjectFormat, interface{}) { +func (r *Record) Raw() (sql.SelectObjectFormat, any) { return sql.SelectFmtCSV, r } // Replace - is not supported for CSV -func (r *Record) Replace(_ interface{}) error { +func (r *Record) Replace(_ any) error { return errors.New("Replace is not supported for CSV") } diff --git a/internal/s3select/json/preader.go b/internal/s3select/json/preader.go index fa2cf84814a96..a9ef1343585f2 100644 --- a/internal/s3select/json/preader.go +++ b/internal/s3select/json/preader.go @@ -24,7 +24,8 @@ import ( "runtime" "sync" - "github.com/bcicen/jstream" + "github.com/minio/minio/internal/bpool" + "github.com/minio/minio/internal/s3select/jstream" "github.com/minio/minio/internal/s3select/sql" ) @@ -32,17 +33,17 @@ import ( // Operates concurrently on line-delimited JSON. type PReader struct { args *ReaderArgs - readCloser io.ReadCloser // raw input - buf *bufio.Reader // input to the splitter - current []jstream.KVS // current block of results to be returned - recordsRead int // number of records read in current slice - input chan *queueItem // input for workers - queue chan *queueItem // output from workers in order - err error // global error state, only touched by Reader.Read - bufferPool sync.Pool // pool of []byte objects for input - kvDstPool sync.Pool // pool of []jstream.KV used for output - close chan struct{} // used for shutting down the splitter before end of stream - readerWg sync.WaitGroup // used to keep track of async reader. + readCloser io.ReadCloser // raw input + buf *bufio.Reader // input to the splitter + current []jstream.KVS // current block of results to be returned + recordsRead int // number of records read in current slice + input chan *queueItem // input for workers + queue chan *queueItem // output from workers in order + err error // global error state, only touched by Reader.Read + bufferPool bpool.Pool[[]byte] // pool of []byte objects for input + kvDstPool bpool.Pool[[]jstream.KVS] // pool of []jstream.KVS used for output + close chan struct{} // used for shutting down the splitter before end of stream + readerWg sync.WaitGroup // used to keep track of async reader. } // queueItem is an item in the queue. @@ -66,7 +67,6 @@ func (r *PReader) Read(dst sql.Record) (sql.Record, error) { r.err = io.EOF return nil, r.err } - //nolint:staticcheck // SA6002 Using pointer would allocate more since we would have to copy slice header before taking a pointer. r.kvDstPool.Put(r.current) r.current = <-item.dst r.err = item.err @@ -133,7 +133,7 @@ const jsonSplitSize = 128 << 10 // and a number of workers based on GOMAXPROCS. // If an error is returned no goroutines have been started and r.err will have been set. 
func (r *PReader) startReaders() { - r.bufferPool.New = func() interface{} { + r.bufferPool.New = func() []byte { return make([]byte, jsonSplitSize+1024) } @@ -148,7 +148,7 @@ func (r *PReader) startReaders() { defer close(r.queue) defer r.readerWg.Done() for { - next, err := r.nextSplit(jsonSplitSize, r.bufferPool.Get().([]byte)) + next, err := r.nextSplit(jsonSplitSize, r.bufferPool.Get()) q := queueItem{ input: next, dst: make(chan []jstream.KVS, 1), @@ -173,19 +173,19 @@ func (r *PReader) startReaders() { }() // Start parsers - for i := 0; i < runtime.GOMAXPROCS(0); i++ { + for range runtime.GOMAXPROCS(0) { go func() { for in := range r.input { if len(in.input) == 0 { in.dst <- nil continue } - dst, ok := r.kvDstPool.Get().([]jstream.KVS) - if !ok { + dst := r.kvDstPool.Get() + if len(dst) < 1000 { dst = make([]jstream.KVS, 0, 1000) } - d := jstream.NewDecoder(bytes.NewBuffer(in.input), 0).ObjectAsKVS() + d := jstream.NewDecoder(bytes.NewBuffer(in.input), 0).ObjectAsKVS().MaxDepth(100) stream := d.Stream() all := dst[:0] for mv := range stream { @@ -193,7 +193,7 @@ func (r *PReader) startReaders() { if mv.ValueType == jstream.Object { // This is a JSON object type (that preserves key // order) - kvs = mv.Value.(jstream.KVS) + kvs, _ = mv.Value.(jstream.KVS) } else { // To be AWS S3 compatible Select for JSON needs to // output non-object JSON as single column value diff --git a/internal/s3select/json/preader_test.go b/internal/s3select/json/preader_test.go index fcdb5afc5a6de..200befe8631c5 100644 --- a/internal/s3select/json/preader_test.go +++ b/internal/s3select/json/preader_test.go @@ -88,7 +88,7 @@ func BenchmarkPReader(b *testing.B) { b.ReportAllocs() b.ResetTimer() var record sql.Record - for i := 0; i < b.N; i++ { + for b.Loop() { r := NewPReader(io.NopCloser(bytes.NewBuffer(f)), &ReaderArgs{}) for { record, err = r.Read(record) diff --git a/internal/s3select/json/reader.go b/internal/s3select/json/reader.go index 4285c23fa5211..780a1a972132c 100644 --- a/internal/s3select/json/reader.go +++ b/internal/s3select/json/reader.go @@ -21,11 +21,14 @@ import ( "io" "sync" + "github.com/minio/minio/internal/s3select/jstream" "github.com/minio/minio/internal/s3select/sql" - - "github.com/bcicen/jstream" ) +// Limit single document size to 10MiB, 10x the AWS limit: +// https://docs.aws.amazon.com/AmazonS3/latest/userguide/selecting-content-from-objects.html +const maxDocumentSize = 10 << 20 + // Reader - JSON record reader for S3Select. type Reader struct { args *ReaderArgs @@ -48,7 +51,7 @@ func (r *Reader) Read(dst sql.Record) (sql.Record, error) { if v.ValueType == jstream.Object { // This is a JSON object type (that preserves key // order) - kvs = v.Value.(jstream.KVS) + kvs, _ = v.Value.(jstream.KVS) } else { // To be AWS S3 compatible Select for JSON needs to // output non-object JSON as single column value @@ -80,7 +83,7 @@ func (r *Reader) Close() error { // NewReader - creates new JSON reader using readCloser. 
func NewReader(readCloser io.ReadCloser, args *ReaderArgs) *Reader { readCloser = &syncReadCloser{rc: readCloser} - d := jstream.NewDecoder(readCloser, 0).ObjectAsKVS() + d := jstream.NewDecoder(io.LimitReader(readCloser, maxDocumentSize), 0).ObjectAsKVS().MaxDepth(100) return &Reader{ args: args, decoder: d, diff --git a/internal/s3select/json/reader_test.go b/internal/s3select/json/reader_test.go index 3a98fc5003508..6840cd18ed92c 100644 --- a/internal/s3select/json/reader_test.go +++ b/internal/s3select/json/reader_test.go @@ -88,7 +88,7 @@ func BenchmarkReader(b *testing.B) { b.ReportAllocs() b.ResetTimer() var record sql.Record - for i := 0; i < b.N; i++ { + for b.Loop() { r := NewReader(io.NopCloser(bytes.NewBuffer(f)), &ReaderArgs{}) for { record, err = r.Read(record) diff --git a/internal/s3select/json/record.go b/internal/s3select/json/record.go index 7b6ddad76cd40..80b7019f9c1db 100644 --- a/internal/s3select/json/record.go +++ b/internal/s3select/json/record.go @@ -26,8 +26,8 @@ import ( "strconv" "strings" - "github.com/bcicen/jstream" csv "github.com/minio/csvparser" + "github.com/minio/minio/internal/s3select/jstream" "github.com/minio/minio/internal/s3select/sql" ) @@ -76,7 +76,7 @@ func (r *Record) Clone(dst sql.Record) sql.Record { // Set - sets the value for a column name. func (r *Record) Set(name string, value *sql.Value) (sql.Record, error) { - var v interface{} + var v any if b, ok := value.ToBool(); ok { v = b } else if f, ok := value.ToFloat(); ok { @@ -126,7 +126,7 @@ func (r *Record) WriteCSV(writer io.Writer, opts sql.WriteCSVOpts) error { columnValue = "" case RawJSON: columnValue = string([]byte(val)) - case []interface{}: + case []any: b, err := json.Marshal(val) if err != nil { return err @@ -151,7 +151,7 @@ func (r *Record) WriteCSV(writer io.Writer, opts sql.WriteCSVOpts) error { } // Raw - returns the underlying representation. -func (r *Record) Raw() (sql.SelectObjectFormat, interface{}) { +func (r *Record) Raw() (sql.SelectObjectFormat, any) { return r.SelectFormat, r.KVS } @@ -161,7 +161,7 @@ func (r *Record) WriteJSON(writer io.Writer) error { } // Replace the underlying buffer of json data. -func (r *Record) Replace(k interface{}) error { +func (r *Record) Replace(k any) error { v, ok := k.(jstream.KVS) if !ok { return fmt.Errorf("cannot replace internal data in json record with type %T", k) diff --git a/internal/s3select/jstream/LICENSE b/internal/s3select/jstream/LICENSE new file mode 100644 index 0000000000000..1c5d82df625ab --- /dev/null +++ b/internal/s3select/jstream/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2018 Bradley Cicenas + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/internal/s3select/jstream/README.md b/internal/s3select/jstream/README.md new file mode 100644 index 0000000000000..2797b3ba6348c --- /dev/null +++ b/internal/s3select/jstream/README.md @@ -0,0 +1,116 @@ +

[jstream logo]

+ +# + +[![GoDoc](https://godoc.org/github.com/bcicen/jstream?status.svg)](https://godoc.org/github.com/bcicen/jstream) + + +`jstream` is a streaming JSON parser and value extraction library for Go. + +Unlike most JSON parsers, `jstream` is document position- and depth-aware -- this enables the extraction of values at a specified depth, eliminating the overhead of allocating encompassing arrays or objects; e.g: + +Using the below example document: +jstream + +we can choose to extract and act only the objects within the top-level array: +```go +f, _ := os.Open("input.json") +decoder := jstream.NewDecoder(f, 1) // extract JSON values at a depth level of 1 +for mv := range decoder.Stream() { + fmt.Printf("%v\n ", mv.Value) +} +``` + +output: +``` +map[desc:RGB colors:[red green blue]] +map[desc:CMYK colors:[cyan magenta yellow black]] +``` + +likewise, increasing depth level to `3` yields: +``` +red +green +blue +cyan +magenta +yellow +black +``` + +optionally, kev:value pairs can be emitted as an individual struct: +```go +decoder := jstream.NewDecoder(f, 2).EmitKV() // enable KV streaming at a depth level of 2 +``` + +``` +jstream.KV{desc RGB} +jstream.KV{colors [red green blue]} +jstream.KV{desc CMYK} +jstream.KV{colors [cyan magenta yellow black]} +``` + +## Installing + +```bash +go get github.com/bcicen/jstream +``` + +## Commandline + +`jstream` comes with a cli tool for quick viewing of parsed values from JSON input: + +```bash +jstream -d 1 < input.json +``` + +```json +{"colors":["red","green","blue"],"desc":"RGB"} +{"colors":["cyan","magenta","yellow","black"],"desc":"CMYK"} +``` + +detailed output with `-v` option: +```bash +cat input.json | jstream -v -d -1 + +depth start end type | value +2 018 023 string | "RGB" +3 041 046 string | "red" +3 048 055 string | "green" +3 057 063 string | "blue" +2 039 065 array | ["red","green","blue"] +1 004 069 object | {"colors":["red","green","blue"],"desc":"RGB"} +2 087 093 string | "CMYK" +3 111 117 string | "cyan" +3 119 128 string | "magenta" +3 130 138 string | "yellow" +3 140 147 string | "black" +2 109 149 array | ["cyan","magenta","yellow","black"] +1 073 153 object | {"colors":["cyan","magenta","yellow","black"],"desc":"CMYK"} +0 000 155 array | [{"colors":["red","green","blue"],"desc":"RGB"},{"colors":["cyan","magenta","yellow","black"],"desc":"CMYK"}] +``` + +### Options + +Opt | Description +--- | --- +-d \ | emit values at depth n. if n < 0, all values will be emitted +-kv | output inner key value pairs as newly formed objects +-v | output depth and offset details for each value +-h | display help dialog + +## Benchmarks + +Obligatory benchmarks performed on files with arrays of objects, where the decoded objects are to be extracted. 
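An aside specific to this PR rather than to the upstream README: the s3select readers in this change construct the decoder as ObjectAsKVS().MaxDepth(100), so objects arrive as ordered jstream.KVS key/value pairs rather than maps, and recursion depth is bounded. A small sketch of that mode, written in the same fragment style as the README examples above and reusing the same input.json:

```go
// Sketch of the decoder configuration used by the s3select readers in this PR:
// ObjectAsKVS preserves object key order, MaxDepth(100) bounds recursion.
f, _ := os.Open("input.json")
decoder := jstream.NewDecoder(f, 1).ObjectAsKVS().MaxDepth(100)
for mv := range decoder.Stream() {
	if kvs, ok := mv.Value.(jstream.KVS); ok {
		b, _ := json.Marshal(kvs) // goes through KVS.MarshalJSON, keeping input order
		fmt.Println(string(b))
	}
}
```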
+ +Two file sizes are used -- regular (1.6mb, 1000 objects) and large (128mb, 100000 objects) + +input size | lib | MB/s | Allocated +--- | --- | --- | --- +regular | standard | 97 | 3.6MB +regular | jstream | 175 | 2.1MB +large | standard | 92 | 305MB +large | jstream | 404 | 69MB + +In a real world scenario, including initialization and reader overhead from varying blob sizes, performance can be expected as below: +jstream diff --git a/internal/s3select/jstream/decoder.go b/internal/s3select/jstream/decoder.go new file mode 100644 index 0000000000000..4b21fcf4e5e2f --- /dev/null +++ b/internal/s3select/jstream/decoder.go @@ -0,0 +1,675 @@ +package jstream + +import ( + "bytes" + "encoding/json" + "io" + "strconv" + "sync/atomic" + "unicode/utf16" +) + +// ValueType - defines the type of each JSON value +type ValueType int + +// Different types of JSON value +const ( + Unknown ValueType = iota + Null + String + Number + Boolean + Array + Object +) + +// MetaValue wraps a decoded interface value with the document +// position and depth at which the value was parsed +type MetaValue struct { + Offset int + Length int + Depth int + Value any + ValueType ValueType +} + +// KV contains a key and value pair parsed from a decoded object +type KV struct { + Key string `json:"key"` + Value any `json:"value"` +} + +// KVS - represents key values in an JSON object +type KVS []KV + +// MarshalJSON - implements converting a KVS datastructure into a JSON +// object with multiple keys and values. +func (kvs KVS) MarshalJSON() ([]byte, error) { + b := new(bytes.Buffer) + b.Write([]byte("{")) + for i, kv := range kvs { + b.Write([]byte("\"" + kv.Key + "\"" + ":")) + valBuf, err := json.Marshal(kv.Value) + if err != nil { + return nil, err + } + b.Write(valBuf) + if i < len(kvs)-1 { + b.Write([]byte(",")) + } + } + b.Write([]byte("}")) + return b.Bytes(), nil +} + +// Decoder wraps an io.Reader to provide incremental decoding of +// JSON values +type Decoder struct { + *scanner + emitDepth int + maxDepth int + emitKV bool + emitRecursive bool + objectAsKVS bool + + depth int + scratch *scratch + metaCh chan *MetaValue + err error + + // follow line position to add context to errors + lineNo int + lineStart int64 +} + +// NewDecoder creates new Decoder to read JSON values at the provided +// emitDepth from the provider io.Reader. +// If emitDepth is < 0, values at every depth will be emitted. +func NewDecoder(r io.Reader, emitDepth int) *Decoder { + d := &Decoder{ + scanner: newScanner(r), + emitDepth: emitDepth, + scratch: &scratch{data: make([]byte, 1024)}, + metaCh: make(chan *MetaValue, 128), + } + if emitDepth < 0 { + d.emitDepth = 0 + d.emitRecursive = true + } + return d +} + +// ObjectAsKVS - by default JSON returns map[string]interface{} this +// is usually fine in most cases, but when you need to preserve the +// input order its not a right data structure. To preserve input +// order please use this option. +func (d *Decoder) ObjectAsKVS() *Decoder { + d.objectAsKVS = true + return d +} + +// EmitKV enables emitting a jstream.KV struct when the items(s) parsed +// at configured emit depth are within a JSON object. By default, only +// the object values are emitted. +func (d *Decoder) EmitKV() *Decoder { + d.emitKV = true + return d +} + +// Recursive enables emitting all values at a depth higher than the +// configured emit depth; e.g. if an array is found at emit depth, all +// values within the array are emitted to the stream, then the array +// containing those values is emitted. 
+func (d *Decoder) Recursive() *Decoder { + d.emitRecursive = true + return d +} + +// Stream begins decoding from the underlying reader and returns a +// streaming MetaValue channel for JSON values at the configured emitDepth. +func (d *Decoder) Stream() chan *MetaValue { + go d.decode() + return d.metaCh +} + +// Pos returns the number of bytes consumed from the underlying reader +func (d *Decoder) Pos() int { return int(d.pos) } + +// Err returns the most recent decoder error if any, or nil +func (d *Decoder) Err() error { return d.err } + +// MaxDepth will set the maximum recursion depth. +// If the maximum depth is exceeded, ErrMaxDepth is returned. +// Less than or 0 means no limit (default). +func (d *Decoder) MaxDepth(n int) *Decoder { + d.maxDepth = n + return d +} + +// Decode parses the JSON-encoded data and returns an interface value +func (d *Decoder) decode() { + defer close(d.metaCh) + d.skipSpaces() + for d.remaining() > 0 { + _, err := d.emitAny() + if err != nil { + d.err = err + break + } + d.skipSpaces() + } +} + +func (d *Decoder) emitAny() (any, error) { + if d.pos >= atomic.LoadInt64(&d.end) { + return nil, d.mkError(ErrUnexpectedEOF) + } + offset := d.pos - 1 + i, t, err := d.any() + if d.willEmit() { + d.metaCh <- &MetaValue{ + Offset: int(offset), + Length: int(d.pos - offset), + Depth: d.depth, + Value: i, + ValueType: t, + } + } + return i, err +} + +// return whether, at the current depth, the value being decoded will +// be emitted to stream +func (d *Decoder) willEmit() bool { + if d.emitRecursive { + return d.depth >= d.emitDepth + } + return d.depth == d.emitDepth +} + +// any used to decode any valid JSON value, and returns an +// interface{} that holds the actual data +func (d *Decoder) any() (any, ValueType, error) { + c := d.cur() + + switch c { + case '"': + i, err := d.string() + return i, String, err + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + i, err := d.number() + return i, Number, err + case '-': + if c = d.next(); c < '0' || c > '9' { + return nil, Unknown, d.mkError(ErrSyntax, "in negative numeric literal") + } + n, err := d.number() + if err != nil { + return nil, Unknown, err + } + return -n, Number, nil + case 'f': + if d.remaining() < 4 { + return nil, Unknown, d.mkError(ErrUnexpectedEOF) + } + //nolint:gocritic + if d.next() == 'a' && d.next() == 'l' && d.next() == 's' && d.next() == 'e' { + return false, Boolean, nil + } + return nil, Unknown, d.mkError(ErrSyntax, "in literal false") + case 't': + if d.remaining() < 3 { + return nil, Unknown, d.mkError(ErrUnexpectedEOF) + } + //nolint:gocritic + if d.next() == 'r' && d.next() == 'u' && d.next() == 'e' { + return true, Boolean, nil + } + return nil, Unknown, d.mkError(ErrSyntax, "in literal true") + case 'n': + if d.remaining() < 3 { + return nil, Unknown, d.mkError(ErrUnexpectedEOF) + } + //nolint:gocritic + if d.next() == 'u' && d.next() == 'l' && d.next() == 'l' { + return nil, Null, nil + } + return nil, Unknown, d.mkError(ErrSyntax, "in literal null") + case '[': + i, err := d.array() + return i, Array, err + case '{': + var i any + var err error + if d.objectAsKVS { + i, err = d.objectOrdered() + } else { + i, err = d.object() + } + return i, Object, err + default: + return nil, Unknown, d.mkError(ErrSyntax, "looking for beginning of value") + } +} + +// string called by `any` or `object`(for map keys) after reading `"` +func (d *Decoder) string() (string, error) { + d.scratch.reset() + c := d.next() + +scan: + for { + switch { + case c == '"': + return 
string(d.scratch.bytes()), nil + case c == '\\': + c = d.next() + goto scan_esc + case c < 0x20: + return "", d.mkError(ErrSyntax, "in string literal") + // Coerce to well-formed UTF-8. + default: + d.scratch.add(c) + if d.remaining() == 0 { + return "", d.mkError(ErrSyntax, "in string literal") + } + c = d.next() + } + } + +scan_esc: + switch c { + case '"', '\\', '/', '\'': + d.scratch.add(c) + case 'u': + goto scan_u + case 'b': + d.scratch.add('\b') + case 'f': + d.scratch.add('\f') + case 'n': + d.scratch.add('\n') + case 'r': + d.scratch.add('\r') + case 't': + d.scratch.add('\t') + default: + return "", d.mkError(ErrSyntax, "in string escape code") + } + c = d.next() + goto scan + +scan_u: + r := d.u4() + if r < 0 { + return "", d.mkError(ErrSyntax, "in unicode escape sequence") + } + + // check for proceeding surrogate pair + c = d.next() + if !utf16.IsSurrogate(r) || c != '\\' { + d.scratch.addRune(r) + goto scan + } + if c = d.next(); c != 'u' { + d.scratch.addRune(r) + goto scan_esc + } + + r2 := d.u4() + if r2 < 0 { + return "", d.mkError(ErrSyntax, "in unicode escape sequence") + } + + // write surrogate pair + d.scratch.addRune(utf16.DecodeRune(r, r2)) + c = d.next() + goto scan +} + +// u4 reads four bytes following a \u escape +func (d *Decoder) u4() rune { + // logic taken from: + // github.com/buger/jsonparser/blob/master/escape.go#L20 + var h [4]int + for i := range 4 { + c := d.next() + switch { + case c >= '0' && c <= '9': + h[i] = int(c - '0') + case c >= 'A' && c <= 'F': + h[i] = int(c - 'A' + 10) + case c >= 'a' && c <= 'f': + h[i] = int(c - 'a' + 10) + default: + return -1 + } + } + return rune(h[0]<<12 + h[1]<<8 + h[2]<<4 + h[3]) +} + +// number called by `any` after reading number between 0 to 9 +func (d *Decoder) number() (float64, error) { + d.scratch.reset() + + var ( + c = d.cur() + n float64 + isFloat bool + ) + + // digits first + switch { + case c == '0': + d.scratch.add(c) + c = d.next() + case '1' <= c && c <= '9': + for ; c >= '0' && c <= '9'; c = d.next() { + n = 10*n + float64(c-'0') + d.scratch.add(c) + } + } + + // . followed by 1 or more digits + if c == '.' { + isFloat = true + d.scratch.add(c) + + // first char following must be digit + if c = d.next(); c < '0' || c > '9' { + return 0, d.mkError(ErrSyntax, "after decimal point in numeric literal") + } + d.scratch.add(c) + + for { + if d.remaining() == 0 { + return 0, d.mkError(ErrUnexpectedEOF) + } + if c = d.next(); c < '0' || c > '9' { + break + } + d.scratch.add(c) + } + } + + // e or E followed by an optional - or + and + // 1 or more digits. + if c == 'e' || c == 'E' { + isFloat = true + d.scratch.add(c) + + if c = d.next(); c == '+' || c == '-' { + d.scratch.add(c) + if c = d.next(); c < '0' || c > '9' { + return 0, d.mkError(ErrSyntax, "in exponent of numeric literal") + } + d.scratch.add(c) + } + for ; c >= '0' && c <= '9'; c = d.next() { + d.scratch.add(c) + } + } + + if isFloat { + var ( + err error + sn string + ) + sn = string(d.scratch.bytes()) + if n, err = strconv.ParseFloat(sn, 64); err != nil { + return 0, err + } + } + + d.back() + return n, nil +} + +// array accept valid JSON array value +func (d *Decoder) array() ([]any, error) { + d.depth++ + if d.maxDepth > 0 && d.depth > d.maxDepth { + return nil, ErrMaxDepth + } + + var ( + c byte + v any + err error + array = make([]any, 0) + ) + + // look ahead for ] - if the array is empty. 
+ if c = d.skipSpaces(); c == ']' { + goto out + } + +scan: + if v, err = d.emitAny(); err != nil { + goto out + } + + if d.depth > d.emitDepth { // skip alloc for array if it won't be emitted + array = append(array, v) + } + + // next token must be ',' or ']' + switch c = d.skipSpaces(); c { + case ',': + d.skipSpaces() + goto scan + case ']': + goto out + default: + err = d.mkError(ErrSyntax, "after array element") + } + +out: + d.depth-- + return array, err +} + +// object accept valid JSON array value +func (d *Decoder) object() (map[string]any, error) { + d.depth++ + if d.maxDepth > 0 && d.depth > d.maxDepth { + return nil, ErrMaxDepth + } + + var ( + c byte + k string + v any + t ValueType + err error + obj map[string]any + ) + + // skip allocating map if it will not be emitted + if d.depth > d.emitDepth { + obj = make(map[string]any) + } + + // if the object has no keys + if c = d.skipSpaces(); c == '}' { + goto out + } + +scan: + for { + offset := d.pos - 1 + + // read string key + if c != '"' { + err = d.mkError(ErrSyntax, "looking for beginning of object key string") + break + } + if k, err = d.string(); err != nil { + break + } + + // read colon before value + if c = d.skipSpaces(); c != ':' { + err = d.mkError(ErrSyntax, "after object key") + break + } + + // read value + d.skipSpaces() + if d.emitKV { + if v, t, err = d.any(); err != nil { + break + } + if d.willEmit() { + d.metaCh <- &MetaValue{ + Offset: int(offset), + Length: int(d.pos - offset), + Depth: d.depth, + Value: KV{k, v}, + ValueType: t, + } + } + } else { + if v, err = d.emitAny(); err != nil { + break + } + } + + if obj != nil { + obj[k] = v + } + + // next token must be ',' or '}' + switch c = d.skipSpaces(); c { + case '}': + goto out + case ',': + c = d.skipSpaces() + goto scan + default: + err = d.mkError(ErrSyntax, "after object key:value pair") + goto out + } + } + +out: + d.depth-- + return obj, err +} + +// object (ordered) accept valid JSON array value +func (d *Decoder) objectOrdered() (KVS, error) { + d.depth++ + if d.maxDepth > 0 && d.depth > d.maxDepth { + return nil, ErrMaxDepth + } + + var ( + c byte + k string + v any + t ValueType + err error + obj KVS + ) + + // skip allocating map if it will not be emitted + if d.depth > d.emitDepth { + obj = make(KVS, 0) + } + + // if the object has no keys + if c = d.skipSpaces(); c == '}' { + goto out + } + +scan: + for { + offset := d.pos - 1 + + // read string key + if c != '"' { + err = d.mkError(ErrSyntax, "looking for beginning of object key string") + break + } + if k, err = d.string(); err != nil { + break + } + + // read colon before value + if c = d.skipSpaces(); c != ':' { + err = d.mkError(ErrSyntax, "after object key") + break + } + + // read value + d.skipSpaces() + if d.emitKV { + if v, t, err = d.any(); err != nil { + break + } + if d.willEmit() { + d.metaCh <- &MetaValue{ + Offset: int(offset), + Length: int(d.pos - offset), + Depth: d.depth, + Value: KV{k, v}, + ValueType: t, + } + } + } else { + if v, err = d.emitAny(); err != nil { + break + } + } + + if obj != nil { + obj = append(obj, KV{k, v}) + } + + // next token must be ',' or '}' + switch c = d.skipSpaces(); c { + case '}': + goto out + case ',': + c = d.skipSpaces() + goto scan + default: + err = d.mkError(ErrSyntax, "after object key:value pair") + goto out + } + } + +out: + d.depth-- + return obj, err +} + +// returns the next char after white spaces +func (d *Decoder) skipSpaces() byte { + for d.pos < atomic.LoadInt64(&d.end) { + switch c := d.next(); c { + case '\n': + 
d.lineStart = d.pos + d.lineNo++ + continue + case ' ', '\t', '\r': + continue + default: + return c + } + } + return 0 +} + +// create syntax errors at current position, with optional context +func (d *Decoder) mkError(err DecoderError, context ...string) error { + if len(context) > 0 { + err.context = context[0] + } + err.atChar = d.cur() + err.pos[0] = d.lineNo + 1 + err.pos[1] = int(d.pos - d.lineStart) + err.readerErr = d.readerErr + return err +} diff --git a/internal/s3select/jstream/decoder_test.go b/internal/s3select/jstream/decoder_test.go new file mode 100644 index 0000000000000..8f6fd7aa10007 --- /dev/null +++ b/internal/s3select/jstream/decoder_test.go @@ -0,0 +1,276 @@ +package jstream + +import ( + "bytes" + "testing" +) + +func mkReader(s string) *bytes.Reader { return bytes.NewReader([]byte(s)) } + +func TestDecoderSimple(t *testing.T) { + var ( + counter int + mv *MetaValue + body = `[{"bio":"bada bing bada boom","id":1,"name":"Charles","falseVal":false}]` + ) + + decoder := NewDecoder(mkReader(body), 1) + + for mv = range decoder.Stream() { + counter++ + t.Logf("depth=%d offset=%d len=%d (%v)", mv.Depth, mv.Offset, mv.Length, mv.Value) + } + + if err := decoder.Err(); err != nil { + t.Fatalf("decoder error: %s", err) + } +} + +func TestDecoderNested(t *testing.T) { + var ( + counter int + mv *MetaValue + body = `{ + "1": { + "bio": "bada bing bada boom", + "id": 0, + "name": "Roberto", + "nested1": { + "bio": "utf16 surrogate (\ud834\udcb2)\n\u201cutf 8\u201d", + "id": 1.5, + "name": "Roberto*Maestro", + "nested2": { "nested2arr": [0,1,2], "nested3": { + "nested4": { "depth": "recursion" }} + } + } + }, + "2": { + "nullfield": null, + "id": -2 + } +}` + ) + + decoder := NewDecoder(mkReader(body), 2) + + for mv = range decoder.Stream() { + counter++ + t.Logf("depth=%d offset=%d len=%d (%v)", mv.Depth, mv.Offset, mv.Length, mv.Value) + } + + if err := decoder.Err(); err != nil { + t.Fatalf("decoder error: %s", err) + } +} + +func TestDecoderFlat(t *testing.T) { + var ( + counter int + mv *MetaValue + body = `[ + "1st test string", + "Roberto*Maestro", "Charles", + 0, null, false, + 1, 2.5 +]` + expected = []struct { + Value any + ValueType ValueType + }{ + { + "1st test string", + String, + }, + { + "Roberto*Maestro", + String, + }, + { + "Charles", + String, + }, + { + 0.0, + Number, + }, + { + nil, + Null, + }, + { + false, + Boolean, + }, + { + 1.0, + Number, + }, + { + 2.5, + Number, + }, + } + ) + + decoder := NewDecoder(mkReader(body), 1) + + for mv = range decoder.Stream() { + if mv.Value != expected[counter].Value { + t.Fatalf("got %v, expected: %v", mv.Value, expected[counter]) + } + if mv.ValueType != expected[counter].ValueType { + t.Fatalf("got %v value type, expected: %v value type", mv.ValueType, expected[counter].ValueType) + } + counter++ + t.Logf("depth=%d offset=%d len=%d (%v)", mv.Depth, mv.Offset, mv.Length, mv.Value) + } + + if err := decoder.Err(); err != nil { + t.Fatalf("decoder error: %s", err) + } +} + +func TestDecoderMultiDoc(t *testing.T) { + var ( + counter int + mv *MetaValue + body = `{ "bio": "bada bing bada boom", "id": 1, "name": "Charles" } +{ "bio": "bada bing bada boom", "id": 2, "name": "Charles" } +{ "bio": "bada bing bada boom", "id": 3, "name": "Charles" } +{ "bio": "bada bing bada boom", "id": 4, "name": "Charles" } +{ "bio": "bada bing bada boom", "id": 5, "name": "Charles" } +` + ) + + decoder := NewDecoder(mkReader(body), 0) + + for mv = range decoder.Stream() { + if mv.ValueType != Object { + t.Fatalf("got %v value type, 
expected: Object value type", mv.ValueType) + } + counter++ + t.Logf("depth=%d offset=%d len=%d (%v)", mv.Depth, mv.Offset, mv.Length, mv.Value) + } + if err := decoder.Err(); err != nil { + t.Fatalf("decoder error: %s", err) + } + if counter != 5 { + t.Fatalf("expected 5 items, got %d", counter) + } + + // test at depth level 1 + counter = 0 + kvcounter := 0 + decoder = NewDecoder(mkReader(body), 1) + + for mv = range decoder.Stream() { + switch mv.Value.(type) { + case KV: + kvcounter++ + default: + counter++ + } + t.Logf("depth=%d offset=%d len=%d (%v)", mv.Depth, mv.Offset, mv.Length, mv.Value) + } + if err := decoder.Err(); err != nil { + t.Fatalf("decoder error: %s", err) + } + if kvcounter != 0 { + t.Fatalf("expected 0 keyvalue items, got %d", kvcounter) + } + if counter != 15 { + t.Fatalf("expected 15 items, got %d", counter) + } + + // test at depth level 1 w/ emitKV + counter = 0 + kvcounter = 0 + decoder = NewDecoder(mkReader(body), 1).EmitKV() + + for mv = range decoder.Stream() { + switch mv.Value.(type) { + case KV: + kvcounter++ + default: + counter++ + } + t.Logf("depth=%d offset=%d len=%d (%v)", mv.Depth, mv.Offset, mv.Length, mv.Value) + } + if err := decoder.Err(); err != nil { + t.Fatalf("decoder error: %s", err) + } + if kvcounter != 15 { + t.Fatalf("expected 15 keyvalue items, got %d", kvcounter) + } + if counter != 0 { + t.Fatalf("expected 0 items, got %d", counter) + } +} + +func TestDecoderReaderFailure(t *testing.T) { + var ( + failAfter = 900 + mockData = byte('[') + ) + + r := newMockReader(failAfter, mockData) + decoder := NewDecoder(r, -1) + + for mv := range decoder.Stream() { + t.Logf("depth=%d offset=%d len=%d (%v)", mv.Depth, mv.Offset, mv.Length, mv.Value) + } + + err := decoder.Err() + t.Logf("got error: %s", err) + if err == nil { + t.Fatalf("missing expected decoder error") + } + + derr, ok := err.(DecoderError) + if !ok { + t.Fatalf("expected error of type DecoderError, got %T", err) + } + + if derr.ReaderErr() == nil { + t.Fatalf("missing expected underlying reader error") + } +} + +func TestDecoderMaxDepth(t *testing.T) { + tests := []struct { + input string + maxDepth int + mustFail bool + }{ + // No limit + {input: `[{"bio":"bada bing bada boom","id":1,"name":"Charles","falseVal":false}]`, maxDepth: 0, mustFail: false}, + // Array + object = depth 2 = false + {input: `[{"bio":"bada bing bada boom","id":1,"name":"Charles","falseVal":false}]`, maxDepth: 1, mustFail: true}, + // Depth 2 = ok + {input: `[{"bio":"bada bing bada boom","id":1,"name":"Charles","falseVal":false}]`, maxDepth: 2, mustFail: false}, + // Arrays: + {input: `[[[[[[[[[[[[[[[[[[[[[["ok"]]]]]]]]]]]]]]]]]]]]]]`, maxDepth: 2, mustFail: true}, + {input: `[[[[[[[[[[[[[[[[[[[[[["ok"]]]]]]]]]]]]]]]]]]]]]]`, maxDepth: 10, mustFail: true}, + {input: `[[[[[[[[[[[[[[[[[[[[[["ok"]]]]]]]]]]]]]]]]]]]]]]`, maxDepth: 100, mustFail: false}, + // Objects: + {input: `{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"ok":false}}}}}}}}}}}}}}}}}}}}}}`, maxDepth: 2, mustFail: true}, + {input: `{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"ok":false}}}}}}}}}}}}}}}}}}}}}}`, maxDepth: 10, mustFail: true}, + {input: `{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"a":{"ok":false}}}}}}}}}}}}}}}}}}}}}}`, maxDepth: 100, mustFail: false}, + } + + for _, test := range tests { + decoder := NewDecoder(mkReader(test.input), 
0).MaxDepth(test.maxDepth) + var mv *MetaValue + for mv = range decoder.Stream() { + t.Logf("depth=%d offset=%d len=%d (%v)", mv.Depth, mv.Offset, mv.Length, mv.Value) + } + + err := decoder.Err() + if test.mustFail && err != ErrMaxDepth { + t.Fatalf("missing expected decoder error, got %q", err) + } + if !test.mustFail && err != nil { + t.Fatalf("unexpected error: %q", err) + } + } +} diff --git a/internal/s3select/jstream/errors.go b/internal/s3select/jstream/errors.go new file mode 100644 index 0000000000000..52a0e5f6292e0 --- /dev/null +++ b/internal/s3select/jstream/errors.go @@ -0,0 +1,52 @@ +package jstream + +import ( + "fmt" + "strconv" +) + +// Predefined errors +var ( + ErrSyntax = DecoderError{msg: "invalid character"} + ErrUnexpectedEOF = DecoderError{msg: "unexpected end of JSON input"} + ErrMaxDepth = DecoderError{msg: "maximum recursion depth exceeded"} +) + +type errPos [2]int // line number, byte offset where error occurred + +// DecoderError contains a detailed decoding error. +type DecoderError struct { + msg string // description of error + context string // additional error context + pos errPos + atChar byte + readerErr error // underlying reader error, if any +} + +// ReaderErr returns the underlying error. +func (e DecoderError) ReaderErr() error { return e.readerErr } + +// Error returns a string representation of the error. +func (e DecoderError) Error() string { + loc := fmt.Sprintf("%s [%d,%d]", quoteChar(e.atChar), e.pos[0], e.pos[1]) + s := fmt.Sprintf("%s %s: %s", e.msg, e.context, loc) + if e.readerErr != nil { + s += "\nreader error: " + e.readerErr.Error() + } + return s +} + +// quoteChar formats c as a quoted character literal +func quoteChar(c byte) string { + // special cases - different from quoted strings + if c == '\'' { + return `'\''` + } + if c == '"' { + return `'"'` + } + + // use quoted string with different quotation marks + s := strconv.Quote(string(c)) + return "'" + s[1:len(s)-1] + "'" +} diff --git a/internal/s3select/jstream/scanner.go b/internal/s3select/jstream/scanner.go new file mode 100644 index 0000000000000..a8e5be7db5f7d --- /dev/null +++ b/internal/s3select/jstream/scanner.go @@ -0,0 +1,114 @@ +package jstream + +import ( + "io" + "sync/atomic" +) + +const ( + chunk = 4095 // ~4k + maxUint = ^uint(0) + maxInt = int64(maxUint >> 1) + nullByte = byte(0) +) + +type scanner struct { + pos int64 // position in reader + ipos int64 // internal buffer position + ifill int64 // internal buffer fill + end int64 + buf [chunk + 1]byte // internal buffer (with a lookback size of 1) + nbuf [chunk]byte // next internal buffer + fillReq chan struct{} + fillReady chan int64 + readerErr error // underlying reader error, if any +} + +func newScanner(r io.Reader) *scanner { + sr := &scanner{ + end: maxInt, + fillReq: make(chan struct{}), + fillReady: make(chan int64), + } + + go func() { + var rpos int64 // total bytes read into buffer + + defer func() { + atomic.StoreInt64(&sr.end, rpos) + close(sr.fillReady) + }() + + for range sr.fillReq { + scan: + n, err := r.Read(sr.nbuf[:]) + + if n == 0 { + switch err { + case io.EOF: // reader is exhausted + return + case nil: // no data and no error, retry fill + goto scan + default: // unexpected reader error + sr.readerErr = err + return + } + } + + rpos += int64(n) + sr.fillReady <- int64(n) + } + }() + + sr.fillReq <- struct{}{} // initial fill + + return sr +} + +// remaining returns the number of unread bytes +// if EOF for the underlying reader has not yet been found, +// maximum possible integer 
value will be returned +func (s *scanner) remaining() int64 { + if atomic.LoadInt64(&s.end) == maxInt { + return maxInt + } + return atomic.LoadInt64(&s.end) - s.pos +} + +// read byte at current position (without advancing) +func (s *scanner) cur() byte { return s.buf[s.ipos] } + +// read next byte +func (s *scanner) next() byte { + if s.pos >= atomic.LoadInt64(&s.end) { + return nullByte + } + s.ipos++ + + if s.ipos > s.ifill { // internal buffer is exhausted + s.ifill = <-s.fillReady + + s.buf[0] = s.buf[len(s.buf)-1] // copy current last item to guarantee lookback + copy(s.buf[1:], s.nbuf[:]) // copy contents of pre-filled next buffer + s.ipos = 1 // move to beginning of internal buffer + + // request next fill to be prepared + if s.end == maxInt { + s.fillReq <- struct{}{} + } + } + + s.pos++ + return s.buf[s.ipos] +} + +// back undoes a previous call to next(), moving backward one byte in the internal buffer. +// as we only guarantee a lookback buffer size of one, any subsequent calls to back() +// before calling next() may panic +func (s *scanner) back() { + if s.ipos <= 0 { + panic("back buffer exhausted") + } + s.ipos-- + s.pos-- +} diff --git a/internal/s3select/jstream/scanner_test.go b/internal/s3select/jstream/scanner_test.go new file mode 100644 index 0000000000000..374a75dfe81d8 --- /dev/null +++ b/internal/s3select/jstream/scanner_test.go @@ -0,0 +1,171 @@ +package jstream + +import ( + "bufio" + "bytes" + "fmt" + "io" + "sync/atomic" + "testing" +) + +var ( + smallInput = make([]byte, 1024*12) // 12K + mediumInput = make([]byte, 1024*1024*12) // 12MB + largeInput = make([]byte, 1024*1024*128) // 128MB +) + +func TestScanner(t *testing.T) { + t.Skip("Unstable test") + data := []byte("abcdefghijklmnopqrstuvwxyz0123456789") + + var i int + r := bytes.NewReader(data) + scanner := newScanner(r) + for scanner.pos < atomic.LoadInt64(&scanner.end) { + c := scanner.next() + if scanner.readerErr != nil { + t.Fatal(scanner.readerErr) + } + if c != data[i] { + t.Fatalf("expected %s, got %s", string(data[i]), string(c)) + } + t.Logf("pos=%d remaining=%d (%s)", i, r.Len(), string(c)) + i++ + } +} + +type mockReader struct { + pos int + mockData byte + failAfter int +} + +func newMockReader(failAfter int, data byte) *mockReader { + return &mockReader{0, data, failAfter} +} + +func (r *mockReader) Read(p []byte) (n int, err error) { + if r.pos >= r.failAfter { + return 0, fmt.Errorf("intentionally unexpected reader error") + } + r.pos++ + p[0] = r.mockData + return 1, nil +} + +func TestScannerFailure(t *testing.T) { + var ( + i int + failAfter = 900 + mockData = byte(32) + ) + + r := newMockReader(failAfter, mockData) + scanner := newScanner(r) + + for i < 1000 { + c := scanner.next() + if c == byte(0) { + break + } + if c != mockData { + t.Fatalf("expected \"%s\", got \"%s\"", string(mockData), string(c)) + } + i++ + } + c := scanner.next() + if scanner.readerErr == nil { + t.Fatalf("failed to receive expected error after %d bytes", failAfter) + } + if c != byte(0) { + t.Fatalf("expected null byte, got %v", c) + } +} + +func BenchmarkBufioScanner(b *testing.B) { + b.Run("small", func(b *testing.B) { + for b.Loop() { + benchmarkBufioScanner(smallInput) + } + }) + b.Run("medium", func(b *testing.B) { + for b.Loop() { + benchmarkBufioScanner(mediumInput) + } + }) + b.Run("large", func(b *testing.B) { + for b.Loop() { + benchmarkBufioScanner(largeInput) + } + }) +} + +func benchmarkBufioScanner(b []byte) { + s := bufio.NewScanner(bytes.NewReader(b)) + s.Split(bufio.ScanBytes) + for s.Scan() 
{ + s.Bytes() + } +} + +func BenchmarkBufioReader(b *testing.B) { + b.Run("small", func(b *testing.B) { + for b.Loop() { + benchmarkBufioReader(smallInput) + } + }) + b.Run("medium", func(b *testing.B) { + for b.Loop() { + benchmarkBufioReader(mediumInput) + } + }) + b.Run("large", func(b *testing.B) { + for b.Loop() { + benchmarkBufioReader(largeInput) + } + }) +} + +func benchmarkBufioReader(b []byte) { + br := bufio.NewReader(bytes.NewReader(b)) +loop: + for { + _, err := br.ReadByte() + switch err { + case nil: + continue loop + case io.EOF: + break loop + default: + panic(err) + } + } +} + +func BenchmarkScanner(b *testing.B) { + b.Run("small", func(b *testing.B) { + for b.Loop() { + benchmarkScanner(smallInput) + } + }) + b.Run("medium", func(b *testing.B) { + for b.Loop() { + benchmarkScanner(mediumInput) + } + }) + b.Run("large", func(b *testing.B) { + for b.Loop() { + benchmarkScanner(largeInput) + } + }) +} + +func benchmarkScanner(b []byte) { + r := bytes.NewReader(b) + + scanner := newScanner(r) + for scanner.remaining() > 0 { + scanner.next() + } +} diff --git a/internal/s3select/jstream/scratch.go b/internal/s3select/jstream/scratch.go new file mode 100644 index 0000000000000..75bc6c435acfe --- /dev/null +++ b/internal/s3select/jstream/scratch.go @@ -0,0 +1,44 @@ +package jstream + +import ( + "unicode/utf8" +) + +type scratch struct { + data []byte + fill int +} + +// reset scratch buffer +func (s *scratch) reset() { s.fill = 0 } + +// bytes returns the written contents of scratch buffer +func (s *scratch) bytes() []byte { return s.data[0:s.fill] } + +// grow scratch buffer +func (s *scratch) grow() { + ndata := make([]byte, cap(s.data)*2) + copy(ndata, s.data) + s.data = ndata +} + +// append single byte to scratch buffer +func (s *scratch) add(c byte) { + if s.fill+1 >= cap(s.data) { + s.grow() + } + + s.data[s.fill] = c + s.fill++ +} + +// append encoded rune to scratch buffer +func (s *scratch) addRune(r rune) int { + if s.fill+utf8.UTFMax >= cap(s.data) { + s.grow() + } + + n := utf8.EncodeRune(s.data[s.fill:], r) + s.fill += n + return n +} diff --git a/internal/s3select/message.go b/internal/s3select/message.go index 0f931dc92e846..e2ed15945d484 100644 --- a/internal/s3select/message.go +++ b/internal/s3select/message.go @@ -26,6 +26,8 @@ import ( "strconv" "sync/atomic" "time" + + xhttp "github.com/minio/minio/internal/http" ) // A message is in the format specified in @@ -262,7 +264,7 @@ func (writer *messageWriter) write(data []byte) bool { return false } - writer.writer.(http.Flusher).Flush() + xhttp.Flush(writer.writer) return true } diff --git a/internal/s3select/parquet/reader.go b/internal/s3select/parquet/reader.go index 7d27c3a35ca0d..29357ddc6d8e1 100644 --- a/internal/s3select/parquet/reader.go +++ b/internal/s3select/parquet/reader.go @@ -22,10 +22,10 @@ import ( "io" "time" - "github.com/bcicen/jstream" parquetgo "github.com/fraugster/parquet-go" parquettypes "github.com/fraugster/parquet-go/parquet" jsonfmt "github.com/minio/minio/internal/s3select/json" + "github.com/minio/minio/internal/s3select/jstream" "github.com/minio/minio/internal/s3select/sql" ) @@ -56,8 +56,7 @@ func (pr *Reader) Read(dst sql.Record) (rec sql.Record, rerr error) { kvs := jstream.KVS{} for _, col := range pr.r.Columns() { - - var value interface{} + var value any if v, ok := nextRow[col.FlatName()]; ok { value, err = convertFromAnnotation(col.Element(), v) if err != nil { @@ -81,12 +80,12 @@ func (pr *Reader) Read(dst sql.Record) (rec sql.Record, rerr error) { // annotations. 
LogicalType annotations if present override the deprecated // ConvertedType annotations. Ref: // https://github.com/apache/parquet-format/blob/master/LogicalTypes.md -func convertFromAnnotation(se *parquettypes.SchemaElement, v interface{}) (interface{}, error) { +func convertFromAnnotation(se *parquettypes.SchemaElement, v any) (any, error) { if se == nil { return v, nil } - var value interface{} + var value any switch val := v.(type) { case []byte: // TODO: only strings are supported in s3select output (not diff --git a/internal/s3select/progress.go b/internal/s3select/progress.go index a777b429f70e2..e052ef7f16cc6 100644 --- a/internal/s3select/progress.go +++ b/internal/s3select/progress.go @@ -30,7 +30,7 @@ import ( "github.com/klauspost/compress/s2" "github.com/klauspost/compress/zstd" gzip "github.com/klauspost/pgzip" - "github.com/pierrec/lz4" + "github.com/pierrec/lz4/v4" ) type countUpReader struct { diff --git a/internal/s3select/select.go b/internal/s3select/select.go index 404022d6190dd..a5dfc7b17cffb 100644 --- a/internal/s3select/select.go +++ b/internal/s3select/select.go @@ -32,6 +32,7 @@ import ( "github.com/klauspost/compress/s2" "github.com/klauspost/compress/zstd" gzip "github.com/klauspost/pgzip" + "github.com/minio/minio/internal/bpool" "github.com/minio/minio/internal/config" xioutil "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/s3select/csv" @@ -39,9 +40,9 @@ import ( "github.com/minio/minio/internal/s3select/parquet" "github.com/minio/minio/internal/s3select/simdj" "github.com/minio/minio/internal/s3select/sql" - "github.com/minio/pkg/v2/env" + "github.com/minio/pkg/v3/env" "github.com/minio/simdjson-go" - "github.com/pierrec/lz4" + "github.com/pierrec/lz4/v4" ) type recordReader interface { @@ -81,15 +82,15 @@ func init() { parquetSupport = env.Get("MINIO_API_SELECT_PARQUET", config.EnableOff) == config.EnableOn } -var bufPool = sync.Pool{ - New: func() interface{} { +var bufPool = bpool.Pool[*bytes.Buffer]{ + New: func() *bytes.Buffer { // make a buffer with a reasonable capacity. return bytes.NewBuffer(make([]byte, 0, maxRecordSize)) }, } -var bufioWriterPool = sync.Pool{ - New: func() interface{} { +var bufioWriterPool = bpool.Pool[*bufio.Writer]{ + New: func() *bufio.Writer { // io.Discard is just used to create the writer. Actual destination // writer is set later by Reset() before using it. return bufio.NewWriter(xioutil.Discard) @@ -409,7 +410,8 @@ func (s3Select *S3Select) Open(rsc io.ReadSeekCloser) error { gzip.ErrHeader, gzip.ErrChecksum, s2.ErrCorrupt, s2.ErrUnsupported, s2.ErrCRC, zstd.ErrBlockTooSmall, zstd.ErrMagicMismatch, zstd.ErrWindowSizeExceeded, zstd.ErrUnknownDictionary, zstd.ErrWindowSizeTooSmall, - lz4.ErrInvalid, lz4.ErrBlockDependency, + lz4.ErrInvalidFrame, lz4.ErrInvalidBlockChecksum, lz4.ErrInvalidFrameChecksum, + lz4.ErrInvalidHeaderChecksum, lz4.ErrInvalidSourceShortBuffer, lz4.ErrInternalUnhandledState, } for _, e := range errs { if errors.Is(err, e) { @@ -442,6 +444,7 @@ func (s3Select *S3Select) Open(rsc io.ReadSeekCloser) error { s3Select.recordReader = json.NewPReader(s3Select.progressReader, &s3Select.Input.JSONArgs) } } else { + // Document mode. s3Select.recordReader = json.NewReader(s3Select.progressReader, &s3Select.Input.JSONArgs) } @@ -466,7 +469,7 @@ func (s3Select *S3Select) marshal(buf *bytes.Buffer, record sql.Record) error { switch s3Select.Output.format { case csvFormat: // Use bufio Writer to prevent csv.Writer from allocating a new buffer.
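// Illustrative sketch, not part of this patch: the bufPool/bufioWriterPool changes above
// replace sync.Pool (which returns interface{} and forces a type assertion at every Get)
// with a generic, typed pool. The real internal/bpool implementation is not shown here;
// one plausible minimal shape, assuming it simply wraps sync.Pool, would be:
//
//	import "sync"
//
//	// Pool is a typed wrapper around sync.Pool; New must be provided by the caller.
//	type Pool[T any] struct {
//		New func() T
//		p   sync.Pool
//	}
//
//	// Get returns a pooled value, or a freshly constructed one when the pool is empty.
//	func (p *Pool[T]) Get() T {
//		if v, ok := p.p.Get().(T); ok {
//			return v
//		}
//		return p.New()
//	}
//
//	// Put hands a value back to the pool for reuse.
//	func (p *Pool[T]) Put(v T) { p.p.Put(v) }
//
// With a shape like this, call sites such as bufPool.Get() below need no *bytes.Buffer assertion.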
- bufioWriter := bufioWriterPool.Get().(*bufio.Writer) + bufioWriter := bufioWriterPool.Get() defer func() { bufioWriter.Reset(xioutil.Discard) bufioWriterPool.Put(bufioWriter) @@ -528,7 +531,7 @@ func (s3Select *S3Select) Evaluate(w http.ResponseWriter) { } var err error sendRecord := func() bool { - buf := bufPool.Get().(*bytes.Buffer) + buf := bufPool.Get() buf.Reset() for _, outputRecord := range outputQueue { diff --git a/internal/s3select/select_benchmark_test.go b/internal/s3select/select_benchmark_test.go index d21c8325a1677..43e19e26895c8 100644 --- a/internal/s3select/select_benchmark_test.go +++ b/internal/s3select/select_benchmark_test.go @@ -46,7 +46,7 @@ func genSampleCSVData(count int) []byte { csvWriter := csv.NewWriter(buf) csvWriter.Write([]string{"id", "name", "age", "city"}) - for i := 0; i < count; i++ { + for i := range count { csvWriter.Write([]string{ strconv.Itoa(i), newRandString(10), diff --git a/internal/s3select/select_test.go b/internal/s3select/select_test.go index 1f0ef4d969d57..7a623900a0ed7 100644 --- a/internal/s3select/select_test.go +++ b/internal/s3select/select_test.go @@ -630,7 +630,7 @@ func TestJSONQueries(t *testing.T) { if len(testReq) == 0 { var escaped bytes.Buffer xml.EscapeText(&escaped, []byte(testCase.query)) - testReq = []byte(fmt.Sprintf(defRequest, escaped.String())) + testReq = fmt.Appendf(nil, defRequest, escaped.String()) } s3Select, err := NewS3Select(bytes.NewReader(testReq)) if err != nil { @@ -676,7 +676,7 @@ func TestJSONQueries(t *testing.T) { if len(testReq) == 0 { var escaped bytes.Buffer xml.EscapeText(&escaped, []byte(testCase.query)) - testReq = []byte(fmt.Sprintf(defRequest, escaped.String())) + testReq = fmt.Appendf(nil, defRequest, escaped.String()) } s3Select, err := NewS3Select(bytes.NewReader(testReq)) if err != nil { @@ -761,7 +761,7 @@ func TestCSVQueries(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { testReq := testCase.requestXML if len(testReq) == 0 { - testReq = []byte(fmt.Sprintf(defRequest, testCase.query)) + testReq = fmt.Appendf(nil, defRequest, testCase.query) } s3Select, err := NewS3Select(bytes.NewReader(testReq)) if err != nil { @@ -944,7 +944,7 @@ func TestCSVQueries2(t *testing.T) { t.Run(testCase.name, func(t *testing.T) { testReq := testCase.requestXML if len(testReq) == 0 { - testReq = []byte(fmt.Sprintf(defRequest, testCase.query)) + testReq = fmt.Appendf(nil, defRequest, testCase.query) } s3Select, err := NewS3Select(bytes.NewReader(testReq)) if err != nil { @@ -1088,7 +1088,7 @@ true`, t.Run(testCase.name, func(t *testing.T) { testReq := testCase.requestXML if len(testReq) == 0 { - testReq = []byte(fmt.Sprintf(defRequest, testCase.query)) + testReq = fmt.Appendf(nil, defRequest, testCase.query) } s3Select, err := NewS3Select(bytes.NewReader(testReq)) if err != nil { diff --git a/internal/s3select/simdj/reader_amd64_test.go b/internal/s3select/simdj/reader_amd64_test.go index f9a2bca49380c..8028496aff5ce 100644 --- a/internal/s3select/simdj/reader_amd64_test.go +++ b/internal/s3select/simdj/reader_amd64_test.go @@ -31,7 +31,7 @@ import ( ) type tester interface { - Fatal(args ...interface{}) + Fatal(args ...any) } func loadCompressed(t tester, file string) (js []byte) { diff --git a/internal/s3select/simdj/record.go b/internal/s3select/simdj/record.go index 3cf91de6f8c9b..6adb404c6ceb0 100644 --- a/internal/s3select/simdj/record.go +++ b/internal/s3select/simdj/record.go @@ -21,9 +21,9 @@ import ( "fmt" "io" - "github.com/bcicen/jstream" csv "github.com/minio/csvparser" 
"github.com/minio/minio/internal/s3select/json" + "github.com/minio/minio/internal/s3select/jstream" "github.com/minio/minio/internal/s3select/sql" "github.com/minio/simdjson-go" ) @@ -185,7 +185,7 @@ allElems: } // Raw - returns the underlying representation. -func (r *Record) Raw() (sql.SelectObjectFormat, interface{}) { +func (r *Record) Raw() (sql.SelectObjectFormat, any) { return sql.SelectFmtSIMDJSON, r.object } @@ -211,7 +211,7 @@ func (r *Record) WriteJSON(writer io.Writer) error { } // Replace the underlying buffer of json data. -func (r *Record) Replace(k interface{}) error { +func (r *Record) Replace(k any) error { v, ok := k.(simdjson.Object) if !ok { return fmt.Errorf("cannot replace internal data in simd json record with type %T", k) diff --git a/internal/s3select/sql/analysis.go b/internal/s3select/sql/analysis.go index 537fc729226a0..881d4e28906b0 100644 --- a/internal/s3select/sql/analysis.go +++ b/internal/s3select/sql/analysis.go @@ -79,7 +79,7 @@ func (e *SelectExpression) analyze(s *Select) (result qProp) { for _, ex := range e.Expressions { result.combine(ex.analyze(s)) } - return + return result } func (e *AliasedExpression) analyze(s *Select) qProp { @@ -90,14 +90,14 @@ func (e *Expression) analyze(s *Select) (result qProp) { for _, ac := range e.And { result.combine(ac.analyze(s)) } - return + return result } func (e *AndCondition) analyze(s *Select) (result qProp) { for _, ac := range e.Condition { result.combine(ac.analyze(s)) } - return + return result } func (e *Condition) analyze(s *Select) (result qProp) { @@ -106,14 +106,14 @@ func (e *Condition) analyze(s *Select) (result qProp) { } else { result = e.Not.analyze(s) } - return + return result } func (e *ListExpr) analyze(s *Select) (result qProp) { for _, ac := range e.Elements { result.combine(ac.analyze(s)) } - return + return result } func (e *ConditionOperand) analyze(s *Select) (result qProp) { @@ -123,7 +123,7 @@ func (e *ConditionOperand) analyze(s *Select) (result qProp) { result.combine(e.Operand.analyze(s)) result.combine(e.ConditionRHS.analyze(s)) } - return + return result } func (e *ConditionRHS) analyze(s *Select) (result qProp) { @@ -143,7 +143,7 @@ func (e *ConditionRHS) analyze(s *Select) (result qProp) { default: result = qProp{err: errUnexpectedInvalidNode} } - return + return result } func (e *In) analyze(s *Select) (result qProp) { @@ -153,7 +153,7 @@ func (e *In) analyze(s *Select) (result qProp) { if len(e.JPathExpr.PathExpr) > 0 { if e.JPathExpr.BaseKey.String() != s.From.As && !strings.EqualFold(e.JPathExpr.BaseKey.String(), baseTableName) { result = qProp{err: errInvalidKeypath} - return + return result } } result = qProp{isRowFunc: true} @@ -162,7 +162,7 @@ func (e *In) analyze(s *Select) (result qProp) { default: result = qProp{err: errUnexpectedInvalidNode} } - return + return result } func (e *Operand) analyze(s *Select) (result qProp) { @@ -170,7 +170,7 @@ func (e *Operand) analyze(s *Select) (result qProp) { for _, r := range e.Right { result.combine(r.Right.analyze(s)) } - return + return result } func (e *MultOp) analyze(s *Select) (result qProp) { @@ -178,7 +178,7 @@ func (e *MultOp) analyze(s *Select) (result qProp) { for _, r := range e.Right { result.combine(r.Right.analyze(s)) } - return + return result } func (e *UnaryTerm) analyze(s *Select) (result qProp) { @@ -187,7 +187,7 @@ func (e *UnaryTerm) analyze(s *Select) (result qProp) { } else { result = e.Primary.analyze(s) } - return + return result } func (e *PrimaryTerm) analyze(s *Select) (result qProp) { @@ -200,7 
+200,7 @@ func (e *PrimaryTerm) analyze(s *Select) (result qProp) { if len(e.JPathExpr.PathExpr) > 0 { if e.JPathExpr.BaseKey.String() != s.From.As && !strings.EqualFold(e.JPathExpr.BaseKey.String(), baseTableName) { result = qProp{err: errInvalidKeypath} - return + return result } } result = qProp{isRowFunc: true} @@ -217,7 +217,7 @@ func (e *PrimaryTerm) analyze(s *Select) (result qProp) { default: result = qProp{err: errUnexpectedInvalidNode} } - return + return result } func (e *FuncExpr) analyze(s *Select) (result qProp) { diff --git a/internal/s3select/sql/evaluate.go b/internal/s3select/sql/evaluate.go index b09be3b5611d9..1cae1e92da6de 100644 --- a/internal/s3select/sql/evaluate.go +++ b/internal/s3select/sql/evaluate.go @@ -24,7 +24,7 @@ import ( "math" "strings" - "github.com/bcicen/jstream" + "github.com/minio/minio/internal/s3select/jstream" "github.com/minio/simdjson-go" ) @@ -413,7 +413,7 @@ func (e *JSONPath) evalNode(r Record, tableAlias string) (*Value, error) { } // jsonToValue will convert the json value to an internal value. -func jsonToValue(result interface{}) (*Value, error) { +func jsonToValue(result any) (*Value, error) { switch rval := result.(type) { case string: return FromString(rval), nil @@ -434,7 +434,7 @@ func jsonToValue(result interface{}) (*Value, error) { return nil, err } return FromBytes(bs), nil - case []interface{}: + case []any: dst := make([]Value, len(rval)) for i := range rval { v, err := jsonToValue(rval[i]) diff --git a/internal/s3select/sql/funceval.go b/internal/s3select/sql/funceval.go index 26294abf0bca3..974130cc0feed 100644 --- a/internal/s3select/sql/funceval.go +++ b/internal/s3select/sql/funceval.go @@ -91,7 +91,7 @@ func (e *FuncExpr) evalSQLFnNode(r Record, tableAlias string) (res *Value, err e case sqlFnCast: expr := e.Cast.Expr res, err = expr.castTo(r, strings.ToUpper(e.Cast.CastType), tableAlias) - return + return res, err case sqlFnSubstring: return handleSQLSubstring(r, e.Substring, tableAlias) @@ -107,7 +107,6 @@ func (e *FuncExpr) evalSQLFnNode(r Record, tableAlias string) (res *Value, err e case sqlFnDateDiff: return handleDateDiff(r, e.DateDiff, tableAlias) - } // For all simple argument functions, we evaluate the arguments here diff --git a/internal/s3select/sql/jsonpath.go b/internal/s3select/sql/jsonpath.go index 7e20c4584e267..3b18f47a4c048 100644 --- a/internal/s3select/sql/jsonpath.go +++ b/internal/s3select/sql/jsonpath.go @@ -20,7 +20,7 @@ package sql import ( "errors" - "github.com/bcicen/jstream" + "github.com/minio/minio/internal/s3select/jstream" "github.com/minio/simdjson-go" ) @@ -34,7 +34,7 @@ var ( // jsonpathEval evaluates a JSON path and returns the value at the path. // If the value should be considered flat (from wildcards) any array returned should be considered individual values. 
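// Illustrative example, not part of this patch: for a document such as
//	{"a": [{"x": 1}, {"x": 2}]}
// evaluating the path a[*].x yields roughly []any{1.0, 2.0} with flat=true, i.e. the
// wildcard results are reported as individual values (numbers decode as float64 here)
// rather than as one nested array value.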
-func jsonpathEval(p []*JSONPathElement, v interface{}) (r interface{}, flat bool, err error) { +func jsonpathEval(p []*JSONPathElement, v any) (r any, flat bool, err error) { // fmt.Printf("JPATHexpr: %v jsonobj: %v\n\n", p, v) if len(p) == 0 || v == nil { return v, false, nil @@ -71,7 +71,7 @@ func jsonpathEval(p []*JSONPathElement, v interface{}) (r interface{}, flat bool case p[0].Index != nil: idx := *p[0].Index - arr, ok := v.([]interface{}) + arr, ok := v.([]any) if !ok { return nil, false, errIndexLookup } @@ -100,14 +100,14 @@ func jsonpathEval(p []*JSONPathElement, v interface{}) (r interface{}, flat bool } case p[0].ArrayWildcard: - arr, ok := v.([]interface{}) + arr, ok := v.([]any) if !ok { return nil, false, errWildcardArrayLookup } // Lookup remainder of path in each array element and // make result array. - var result []interface{} + var result []any for _, a := range arr { rval, flatten, err := jsonpathEval(p[1:], a) if err != nil { @@ -116,7 +116,7 @@ func jsonpathEval(p []*JSONPathElement, v interface{}) (r interface{}, flat bool if flatten { // Flatten if array. - if arr, ok := rval.([]interface{}); ok { + if arr, ok := rval.([]any); ok { result = append(result, arr...) continue } diff --git a/internal/s3select/sql/jsonpath_test.go b/internal/s3select/sql/jsonpath_test.go index b043613456422..bbe9ea68596da 100644 --- a/internal/s3select/sql/jsonpath_test.go +++ b/internal/s3select/sql/jsonpath_test.go @@ -27,12 +27,12 @@ import ( "testing" "github.com/alecthomas/participle" - "github.com/bcicen/jstream" + "github.com/minio/minio/internal/s3select/jstream" ) -func getJSONStructs(b []byte) ([]interface{}, error) { - dec := jstream.NewDecoder(bytes.NewBuffer(b), 0).ObjectAsKVS() - var result []interface{} +func getJSONStructs(b []byte) ([]any, error) { + dec := jstream.NewDecoder(bytes.NewBuffer(b), 0).ObjectAsKVS().MaxDepth(100) + var result []any for parsedVal := range dec.Stream() { result = append(result, parsedVal.Value) } @@ -60,13 +60,13 @@ func TestJsonpathEval(t *testing.T) { ) cases := []struct { str string - res []interface{} + res []any }{ - {"s.title", []interface{}{"Murder on the Orient Express", "The Robots of Dawn", "Pigs Have Wings"}}, - {"s.authorInfo.yearRange", []interface{}{[]interface{}{1890.0, 1976.0}, []interface{}{1920.0, 1992.0}, []interface{}{1881.0, 1975.0}}}, - {"s.authorInfo.name", []interface{}{"Agatha Christie", "Isaac Asimov", "P. G. Wodehouse"}}, - {"s.authorInfo.yearRange[0]", []interface{}{1890.0, 1920.0, 1881.0}}, - {"s.publicationHistory[0].pages", []interface{}{256.0, 336.0, Missing{}}}, + {"s.title", []any{"Murder on the Orient Express", "The Robots of Dawn", "Pigs Have Wings"}}, + {"s.authorInfo.yearRange", []any{[]any{1890.0, 1976.0}, []any{1920.0, 1992.0}, []any{1881.0, 1975.0}}}, + {"s.authorInfo.name", []any{"Agatha Christie", "Isaac Asimov", "P. G. 
Wodehouse"}}, + {"s.authorInfo.yearRange[0]", []any{1890.0, 1920.0, 1881.0}}, + {"s.publicationHistory[0].pages", []any{256.0, 336.0, Missing{}}}, } for i, tc := range cases { t.Run(tc.str, func(t *testing.T) { diff --git a/internal/s3select/sql/record.go b/internal/s3select/sql/record.go index 925b6e95ea0de..2bc25df460120 100644 --- a/internal/s3select/sql/record.go +++ b/internal/s3select/sql/record.go @@ -63,16 +63,16 @@ type Record interface { Reset() // Returns underlying representation - Raw() (SelectObjectFormat, interface{}) + Raw() (SelectObjectFormat, any) // Replaces the underlying data - Replace(k interface{}) error + Replace(k any) error } // IterToValue converts a simdjson Iter to its underlying value. // Objects are returned as simdjson.Object // Arrays are returned as []interface{} with parsed values. -func IterToValue(iter simdjson.Iter) (interface{}, error) { +func IterToValue(iter simdjson.Iter) (any, error) { switch iter.Type() { case simdjson.TypeString: v, err := iter.String() @@ -118,7 +118,7 @@ func IterToValue(iter simdjson.Iter) (interface{}, error) { return nil, err } iter := arr.Iter() - var dst []interface{} + var dst []any var next simdjson.Iter for { typ, err := iter.AdvanceIter(&next) diff --git a/internal/s3select/sql/statement.go b/internal/s3select/sql/statement.go index ce8bfd8b8dc32..fd08626cdce8e 100644 --- a/internal/s3select/sql/statement.go +++ b/internal/s3select/sql/statement.go @@ -22,7 +22,7 @@ import ( "fmt" "strings" - "github.com/bcicen/jstream" + "github.com/minio/minio/internal/s3select/jstream" "github.com/minio/simdjson-go" ) @@ -57,7 +57,7 @@ func ParseSelectStatement(s string) (stmt SelectStatement, err error) { err = SQLParser.ParseString(s, &selectAST) if err != nil { err = errQueryParseFailure(err) - return + return stmt, err } // Check if select is "SELECT s.* from S3Object s" @@ -80,7 +80,7 @@ func ParseSelectStatement(s string) (stmt SelectStatement, err error) { stmt.limitValue, err = parseLimit(selectAST.Limit) if err != nil { err = errQueryAnalysisFailure(err) - return + return stmt, err } // Analyze where clause @@ -88,19 +88,19 @@ func ParseSelectStatement(s string) (stmt SelectStatement, err error) { whereQProp := selectAST.Where.analyze(&selectAST) if whereQProp.err != nil { err = errQueryAnalysisFailure(fmt.Errorf("Where clause error: %w", whereQProp.err)) - return + return stmt, err } if whereQProp.isAggregation { err = errQueryAnalysisFailure(errors.New("WHERE clause cannot have an aggregation")) - return + return stmt, err } } // Validate table name err = validateTableName(selectAST.From) if err != nil { - return + return stmt, err } // Analyze main select expression @@ -120,7 +120,7 @@ func ParseSelectStatement(s string) (stmt SelectStatement, err error) { } } } - return + return stmt, err } func validateTableName(from *TableExpression) error { @@ -174,7 +174,7 @@ func (e *SelectStatement) EvalFrom(format string, input Record) ([]*Record, erro case jstream.KVS: kvs = v - case []interface{}: + case []any: recs := make([]*Record, len(v)) for i, val := range v { tmpRec := input.Clone(nil) @@ -207,7 +207,7 @@ func (e *SelectStatement) EvalFrom(format string, input Record) ([]*Record, erro return nil, err } - case []interface{}: + case []any: recs := make([]*Record, len(v)) for i, val := range v { tmpRec := input.Clone(nil) diff --git a/internal/s3select/sql/stringfuncs.go b/internal/s3select/sql/stringfuncs.go index b6d24f5e8f35a..28abdf6afd51f 100644 --- a/internal/s3select/sql/stringfuncs.go +++ 
b/internal/s3select/sql/stringfuncs.go @@ -107,7 +107,6 @@ func evalSQLLike(text, pattern string, escape rune) (match bool, err error) { default: s = append(s, r) } - } if hasLeadingPercent { return strings.HasSuffix(text, string(s)), nil diff --git a/internal/s3select/sql/timestampfuncs.go b/internal/s3select/sql/timestampfuncs.go index 4622f992a6668..ef7022f3a94ad 100644 --- a/internal/s3select/sql/timestampfuncs.go +++ b/internal/s3select/sql/timestampfuncs.go @@ -46,7 +46,7 @@ func parseSQLTimestamp(s string) (t time.Time, err error) { break } } - return + return t, err } // FormatSQLTimestamp - returns the a string representation of the @@ -175,7 +175,6 @@ func dateDiff(timePart string, ts1, ts2 time.Time) (*Value, error) { seconds := duration / time.Second return FromInt(int64(seconds)), nil default: - } return nil, errNotImplemented } diff --git a/internal/s3select/sql/value.go b/internal/s3select/sql/value.go index 8a778bcf11ffc..b2f341cd2f2ef 100644 --- a/internal/s3select/sql/value.go +++ b/internal/s3select/sql/value.go @@ -46,7 +46,7 @@ var ( // the type may not be determined yet. In these cases, a byte-slice is // used. type Value struct { - value interface{} + value any } // Missing is used to indicate a non-existing value. @@ -175,13 +175,13 @@ func (v Value) ToFloat() (val float64, ok bool) { // ToInt returns the value if int. func (v Value) ToInt() (val int64, ok bool) { val, ok = v.value.(int64) - return + return val, ok } // ToString returns the value if string. func (v Value) ToString() (val string, ok bool) { val, ok = v.value.(string) - return + return val, ok } // Equals returns whether the values strictly match. @@ -220,25 +220,25 @@ func (v Value) SameTypeAs(b Value) (ok bool) { // conversion succeeded. func (v Value) ToBool() (val bool, ok bool) { val, ok = v.value.(bool) - return + return val, ok } // ToTimestamp returns the timestamp value if present. func (v Value) ToTimestamp() (t time.Time, ok bool) { t, ok = v.value.(time.Time) - return + return t, ok } // ToBytes returns the value if byte-slice. func (v Value) ToBytes() (val []byte, ok bool) { val, ok = v.value.([]byte) - return + return val, ok } // ToArray returns the value if it is a slice of values. func (v Value) ToArray() (val []Value, ok bool) { val, ok = v.value.([]Value) - return + return val, ok } // IsNull - checks if value is missing. @@ -393,7 +393,7 @@ func (v *Value) InferBytesType() (err error) { } // Fallback to string v.setString(asString) - return + return err } // When numeric types are compared, type promotions could happen. 
If @@ -663,8 +663,13 @@ func inferTypeForArithOp(a *Value) error { a.setFloat(f) return nil } - - err := fmt.Errorf("Could not convert %q to a number", string(a.value.([]byte))) + var s string + if v, ok := a.value.([]byte); ok { + s = string(v) + } else { + s = fmt.Sprint(a.value) + } + err := fmt.Errorf("Could not convert %q to a number", s) return errInvalidDataType(err) } diff --git a/internal/s3select/sql/value_test.go b/internal/s3select/sql/value_test.go index 4d77ad2abf96e..416d7409e549e 100644 --- a/internal/s3select/sql/value_test.go +++ b/internal/s3select/sql/value_test.go @@ -217,7 +217,7 @@ func TestValue_CSVString(t *testing.T) { func TestValue_bytesToInt(t *testing.T) { type fields struct { - value interface{} + value any } tests := []struct { name string @@ -367,7 +367,7 @@ func TestValue_bytesToInt(t *testing.T) { func TestValue_bytesToFloat(t *testing.T) { type fields struct { - value interface{} + value any } tests := []struct { name string @@ -569,7 +569,7 @@ func TestValue_bytesToFloat(t *testing.T) { func TestValue_bytesToBool(t *testing.T) { type fields struct { - value interface{} + value any } tests := []struct { name string diff --git a/internal/store/batch.go b/internal/store/batch.go index cba034f4b62d0..bb31135c9adfe 100644 --- a/internal/store/batch.go +++ b/internal/store/batch.go @@ -18,100 +18,123 @@ package store import ( + "context" "errors" - "fmt" "sync" + "time" ) // ErrBatchFull indicates that the batch is full var ErrBatchFull = errors.New("batch is full") -type key interface { - string | int | int64 -} +const defaultCommitTimeout = 30 * time.Second // Batch represents an ordered batch -type Batch[K key, T any] struct { - keys []K - items map[K]T - limit uint32 +type Batch[I any] struct { + items []I + limit uint32 + store Store[I] + quitCh chan struct{} sync.Mutex } +// BatchConfig represents the batch config +type BatchConfig[I any] struct { + Limit uint32 + Store Store[I] + CommitTimeout time.Duration + Log logger +} + // Add adds the item to the batch -func (b *Batch[K, T]) Add(key K, item T) error { +func (b *Batch[I]) Add(item I) error { b.Lock() defer b.Unlock() if b.isFull() { - return ErrBatchFull - } - - if _, ok := b.items[key]; !ok { - b.keys = append(b.keys, key) + if b.store == nil { + return ErrBatchFull + } + // commit batch to store + if err := b.commit(); err != nil { + return err + } } - b.items[key] = item + b.items = append(b.items, item) return nil } -// GetAll fetches the items and resets the batch -// Returned items are not referenced by the batch -func (b *Batch[K, T]) GetAll() (orderedKeys []K, orderedItems []T, err error) { +// Len returns the no of items in the batch +func (b *Batch[_]) Len() int { b.Lock() defer b.Unlock() - orderedKeys = append([]K(nil), b.keys...) 
- for _, key := range orderedKeys { - item, ok := b.items[key] - if !ok { - err = fmt.Errorf("item not found for the key: %v; should not happen;", key) - return - } - orderedItems = append(orderedItems, item) - delete(b.items, key) - } - - b.keys = b.keys[:0] - - return + return len(b.items) } -// GetByKey will get the batch item by the provided key -func (b *Batch[K, T]) GetByKey(key K) (T, bool) { - b.Lock() - defer b.Unlock() - - item, ok := b.items[key] - return item, ok +func (b *Batch[_]) isFull() bool { + return len(b.items) >= int(b.limit) } -// Len returns the no of items in the batch -func (b *Batch[K, T]) Len() int { - b.Lock() - defer b.Unlock() - - return len(b.keys) +func (b *Batch[I]) commit() error { + switch len(b.items) { + case 0: + return nil + case 1: + _, err := b.store.Put(b.items[0]) + return err + default: + } + if _, err := b.store.PutMultiple(b.items); err != nil { + return err + } + b.items = make([]I, 0, b.limit) + return nil } -// IsFull checks if the batch is full or not -func (b *Batch[K, T]) IsFull() bool { +// Close commits the pending items and quits the goroutines +func (b *Batch[I]) Close() error { + defer func() { + close(b.quitCh) + }() + b.Lock() defer b.Unlock() - - return b.isFull() -} - -func (b *Batch[K, T]) isFull() bool { - return len(b.items) >= int(b.limit) + return b.commit() } // NewBatch creates a new batch -func NewBatch[K key, T any](limit uint32) *Batch[K, T] { - return &Batch[K, T]{ - keys: make([]K, 0, limit), - items: make(map[K]T, limit), - limit: limit, +func NewBatch[I any](config BatchConfig[I]) *Batch[I] { + if config.CommitTimeout == 0 { + config.CommitTimeout = defaultCommitTimeout + } + quitCh := make(chan struct{}) + batch := &Batch[I]{ + items: make([]I, 0, config.Limit), + limit: config.Limit, + store: config.Store, + quitCh: quitCh, + } + if batch.store != nil { + go func() { + commitTicker := time.NewTicker(config.CommitTimeout) + defer commitTicker.Stop() + for { + select { + case <-commitTicker.C: + case <-batch.quitCh: + return + } + batch.Lock() + err := batch.commit() + batch.Unlock() + if err != nil { + config.Log(context.Background(), err, "") + } + } + }() } + return batch } diff --git a/internal/store/batch_test.go b/internal/store/batch_test.go index db8b95dc8f1f3..4754e9a93744f 100644 --- a/internal/store/batch_test.go +++ b/internal/store/batch_test.go @@ -18,109 +18,202 @@ package store import ( - "errors" + "context" "sync" "testing" + "time" ) -func TestBatch(t *testing.T) { +func TestBatchCommit(t *testing.T) { + defer func() { + if err := tearDownQueueStore(); err != nil { + t.Fatalf("Failed to tear down store; %v", err) + } + }() + store, err := setUpQueueStore(queueDir, 100) + if err != nil { + t.Fatalf("Failed to create a queue store; %v", err) + } + var limit uint32 = 100 - batch := NewBatch[int, int](limit) + + batch := NewBatch[TestItem](BatchConfig[TestItem]{ + Limit: limit, + Store: store, + CommitTimeout: 5 * time.Minute, + Log: func(ctx context.Context, err error, id string, errKind ...any) { + t.Log(err) + }, + }) + defer batch.Close() + for i := 0; i < int(limit); i++ { - if err := batch.Add(i, i); err != nil { + if err := batch.Add(testItem); err != nil { t.Fatalf("failed to add %v; %v", i, err) } - if _, ok := batch.GetByKey(i); !ok { - t.Fatalf("failed to get the item by key %v after adding", i) - } - } - err := batch.Add(101, 101) - if err == nil || !errors.Is(err, ErrBatchFull) { - t.Fatalf("Expected err %v but got %v", ErrBatchFull, err) - } - if !batch.IsFull() { - t.Fatal("Expected 
batch.IsFull to be true but got false") } + batchLen := batch.Len() if batchLen != int(limit) { - t.Fatalf("expected batch length to be %v but got %v", limit, batchLen) - } - keys, items, err := batch.GetAll() - if err != nil { - t.Fatalf("unable to get the items from the batch; %v", err) + t.Fatalf("Expected batch.Len() %v; but got %v", limit, batchLen) } - if len(items) != int(limit) { - t.Fatalf("Expected length of the batch items to be %v but got %v", limit, len(items)) + + keys := store.List() + if len(keys) > 0 { + t.Fatalf("Expected empty store list but got len(list) %v", len(keys)) } - if len(keys) != int(limit) { - t.Fatalf("Expected length of the batch keys to be %v but got %v", limit, len(items)) + if err := batch.Add(testItem); err != nil { + t.Fatalf("unable to add to the batch; %v", err) } batchLen = batch.Len() - if batchLen != 0 { - t.Fatalf("expected batch to be empty but still left with %d items", batchLen) + if batchLen != 1 { + t.Fatalf("expected batch length to be 1 but got %v", batchLen) } - // Add duplicate entries - for i := 0; i < 10; i++ { - if err := batch.Add(99, 99); err != nil { - t.Fatalf("failed to add duplicate item %v to batch after Get; %v", i, err) - } + keys = store.List() + if len(keys) != 1 { + t.Fatalf("expected len(store.List())=1; but got %v", len(keys)) } - if _, ok := batch.GetByKey(99); !ok { - t.Fatal("failed to get the duplicxate item by key '99' after adding") + key := keys[0] + if !key.Compress { + t.Fatal("expected key.Compress=true; but got false") } - keys, items, err = batch.GetAll() + if key.ItemCount != int(limit) { + t.Fatalf("expected key.ItemCount=%d; but got %v", limit, key.ItemCount) + } + items, err := store.GetMultiple(key) if err != nil { - t.Fatalf("unable to get the items from the batch; %v", err) + t.Fatalf("unable to read key %v; %v", key.String(), err) } - if len(items) != 1 { - t.Fatalf("Expected length of the batch items to be 1 but got %v", len(items)) + if len(items) != int(limit) { + t.Fatalf("expected len(items)=%d; but got %v", limit, len(items)) } - if len(keys) != 1 { - t.Fatalf("Expected length of the batch keys to be 1 but got %v", len(items)) +} + +func TestBatchCommitOnExit(t *testing.T) { + defer func() { + if err := tearDownQueueStore(); err != nil { + t.Fatalf("Failed to tear down store; %v", err) + } + }() + store, err := setUpQueueStore(queueDir, 100) + if err != nil { + t.Fatalf("Failed to create a queue store; %v", err) } - // try adding again after Get. + + var limit uint32 = 100 + + batch := NewBatch[TestItem](BatchConfig[TestItem]{ + Limit: limit, + Store: store, + CommitTimeout: 5 * time.Minute, + Log: func(ctx context.Context, err error, id string, errKind ...any) { + t.Log([]any{err, id, errKind}...) 
+ }, + }) + for i := 0; i < int(limit); i++ { - if err := batch.Add(i, i); err != nil { - t.Fatalf("failed to add item %v to batch after Get; %v", i, err) - } - if _, ok := batch.GetByKey(i); !ok { - t.Fatalf("failed to get the item by key %v after adding", i) + if err := batch.Add(testItem); err != nil { + t.Fatalf("failed to add %v; %v", i, err) } } + + batch.Close() + time.Sleep(1 * time.Second) + + batchLen := batch.Len() + if batchLen != 0 { + t.Fatalf("Expected batch.Len()=0; but got %v", batchLen) + } + + keys := store.List() + if len(keys) != 1 { + t.Fatalf("expected len(store.List())=1; but got %v", len(keys)) + } + + key := keys[0] + if !key.Compress { + t.Fatal("expected key.Compress=true; but got false") + } + if key.ItemCount != int(limit) { + t.Fatalf("expected key.ItemCount=%d; but got %v", limit, key.ItemCount) + } + items, err := store.GetMultiple(key) + if err != nil { + t.Fatalf("unable to read key %v; %v", key.String(), err) + } + if len(items) != int(limit) { + t.Fatalf("expected len(items)=%d; but got %v", limit, len(items)) + } } func TestBatchWithConcurrency(t *testing.T) { + defer func() { + if err := tearDownQueueStore(); err != nil { + t.Fatalf("Failed to tear down store; %v", err) + } + }() + store, err := setUpQueueStore(queueDir, 100) + if err != nil { + t.Fatalf("Failed to create a queue store; %v", err) + } + var limit uint32 = 100 - batch := NewBatch[int, int](limit) + + batch := NewBatch[TestItem](BatchConfig[TestItem]{ + Limit: limit, + Store: store, + CommitTimeout: 5 * time.Minute, + Log: func(ctx context.Context, err error, id string, errKind ...any) { + t.Log(err) + }, + }) + defer batch.Close() var wg sync.WaitGroup for i := 0; i < int(limit); i++ { wg.Add(1) - go func(item int) { + go func(key int) { defer wg.Done() - if err := batch.Add(item, item); err != nil { - t.Errorf("failed to add item %v; %v", item, err) + if err := batch.Add(testItem); err != nil { + t.Errorf("failed to add item %v; %v", key, err) return } - if _, ok := batch.GetByKey(item); !ok { - t.Errorf("failed to get the item by key %v after adding", item) - } }(i) } wg.Wait() - keys, items, err := batch.GetAll() - if err != nil { - t.Fatalf("unable to get the items from the batch; %v", err) + batchLen := batch.Len() + if batchLen != int(limit) { + t.Fatalf("Expected batch.Len() %v; but got %v", limit, batchLen) } - if len(items) != int(limit) { - t.Fatalf("expected batch length %v but got %v", limit, len(items)) + + keys := store.List() + if len(keys) > 0 { + t.Fatalf("Expected empty store list but got len(list) %v", len(keys)) } - if len(keys) != int(limit) { - t.Fatalf("Expected length of the batch keys to be %v but got %v", limit, len(items)) + if err := batch.Add(testItem); err != nil { + t.Fatalf("unable to add to the batch; %v", err) } - batchLen := batch.Len() - if batchLen != 0 { - t.Fatalf("expected batch to be empty but still left with %d items", batchLen) + batchLen = batch.Len() + if batchLen != 1 { + t.Fatalf("expected batch length to be 1 but got %v", batchLen) + } + keys = store.List() + if len(keys) != 1 { + t.Fatalf("expected len(store.List())=1; but got %v", len(keys)) + } + key := keys[0] + if !key.Compress { + t.Fatal("expected key.Compress=true; but got false") + } + if key.ItemCount != int(limit) { + t.Fatalf("expected key.ItemCount=%d; but got %v", limit, key.ItemCount) + } + items, err := store.GetMultiple(key) + if err != nil { + t.Fatalf("unable to read key %v; %v", key.String(), err) + } + if len(items) != int(limit) { + t.Fatalf("expected len(items)=%d; 
but got %v", limit, len(items)) } } diff --git a/internal/store/queuestore.go b/internal/store/queuestore.go index a9aa79bfca59e..673e19c629dd9 100644 --- a/internal/store/queuestore.go +++ b/internal/store/queuestore.go @@ -18,21 +18,25 @@ package store import ( + "bytes" "encoding/json" "errors" "os" "path/filepath" "sort" - "strings" "sync" "time" "github.com/google/uuid" + jsoniter "github.com/json-iterator/go" + "github.com/klauspost/compress/s2" + "github.com/valyala/bytebufferpool" ) const ( defaultLimit = 100000 // Default store limit. defaultExt = ".unknown" + compressExt = ".snappy" ) // errLimitExceeded error is sent when the maximum limit is reached. @@ -80,106 +84,204 @@ func (store *QueueStore[_]) Open() error { return err } - // Truncate entries. - if uint64(len(files)) > store.entryLimit { - files = files[:store.entryLimit] - } for _, file := range files { if file.IsDir() { continue } - key := strings.TrimSuffix(file.Name(), store.fileExt) if fi, err := file.Info(); err == nil { - store.entries[key] = fi.ModTime().UnixNano() + store.entries[file.Name()] = fi.ModTime().UnixNano() } } return nil } +// Delete - Remove the store directory from disk +func (store *QueueStore[_]) Delete() error { + return os.Remove(store.directory) +} + +// PutMultiple - puts an item to the store. +func (store *QueueStore[I]) PutMultiple(items []I) (Key, error) { + // Generate a new UUID for the key. + uid, err := uuid.NewRandom() + if err != nil { + return Key{}, err + } + + store.Lock() + defer store.Unlock() + if uint64(len(store.entries)) >= store.entryLimit { + return Key{}, errLimitExceeded + } + key := Key{ + Name: uid.String(), + ItemCount: len(items), + Compress: true, + Extension: store.fileExt, + } + return key, store.multiWrite(key, items) +} + +// multiWrite - writes an item to the directory. +func (store *QueueStore[I]) multiWrite(key Key, items []I) (err error) { + buf := bytebufferpool.Get() + defer bytebufferpool.Put(buf) + + enc := jsoniter.ConfigCompatibleWithStandardLibrary.NewEncoder(buf) + + for i := range items { + if err = enc.Encode(items[i]); err != nil { + return err + } + } + + path := filepath.Join(store.directory, key.String()) + if key.Compress { + err = os.WriteFile(path, s2.Encode(nil, buf.Bytes()), os.FileMode(0o770)) + } else { + err = os.WriteFile(path, buf.Bytes(), os.FileMode(0o770)) + } + + buf.Reset() + if err != nil { + return err + } + + // Increment the item count. + store.entries[key.String()] = time.Now().UnixNano() + + return err +} + // write - writes an item to the directory. -func (store *QueueStore[I]) write(key string, item I) error { - // Marshalls the item. +func (store *QueueStore[I]) write(key Key, item I) error { + // Marshals the item. eventData, err := json.Marshal(item) if err != nil { return err } + return store.writeBytes(key, eventData) +} - path := filepath.Join(store.directory, key+store.fileExt) - if err := os.WriteFile(path, eventData, os.FileMode(0o770)); err != nil { - return err +// writeBytes - writes bytes to the directory. +func (store *QueueStore[I]) writeBytes(key Key, b []byte) (err error) { + path := filepath.Join(store.directory, key.String()) + + if key.Compress { + err = os.WriteFile(path, s2.Encode(nil, b), os.FileMode(0o770)) + } else { + err = os.WriteFile(path, b, os.FileMode(0o770)) } + if err != nil { + return err + } // Increment the item count. - store.entries[key] = time.Now().UnixNano() - + store.entries[key.String()] = time.Now().UnixNano() return nil } // Put - puts an item to the store. 
-func (store *QueueStore[I]) Put(item I) error { +func (store *QueueStore[I]) Put(item I) (Key, error) { store.Lock() defer store.Unlock() if uint64(len(store.entries)) >= store.entryLimit { - return errLimitExceeded + return Key{}, errLimitExceeded } // Generate a new UUID for the key. - key, err := uuid.NewRandom() + uid, err := uuid.NewRandom() if err != nil { - return err + return Key{}, err } - return store.write(key.String(), item) + key := Key{ + Name: uid.String(), + Extension: store.fileExt, + ItemCount: 1, + } + return key, store.write(key, item) } -// Get - gets an item from the store. -func (store *QueueStore[I]) Get(key string) (item I, err error) { +// PutRaw - puts the raw bytes to the store +func (store *QueueStore[I]) PutRaw(b []byte) (Key, error) { + store.Lock() + defer store.Unlock() + if uint64(len(store.entries)) >= store.entryLimit { + return Key{}, errLimitExceeded + } + // Generate a new UUID for the key. + uid, err := uuid.NewRandom() + if err != nil { + return Key{}, err + } + key := Key{ + Name: uid.String(), + Extension: store.fileExt, + } + return key, store.writeBytes(key, b) +} + +// GetRaw - gets an item from the store. +func (store *QueueStore[I]) GetRaw(key Key) (raw []byte, err error) { store.RLock() defer func(store *QueueStore[I]) { store.RUnlock() - if err != nil { + if err != nil && !os.IsNotExist(err) { // Upon error we remove the entry. store.Del(key) } }(store) - var eventData []byte - eventData, err = os.ReadFile(filepath.Join(store.directory, key+store.fileExt)) + raw, err = os.ReadFile(filepath.Join(store.directory, key.String())) if err != nil { - return item, err + return raw, err } - if len(eventData) == 0 { - return item, os.ErrNotExist + if len(raw) == 0 { + return raw, os.ErrNotExist } - if err = json.Unmarshal(eventData, &item); err != nil { - return item, err + if key.Compress { + raw, err = s2.Decode(nil, raw) } - return item, nil + return raw, err } -// Del - Deletes an entry from the store. -func (store *QueueStore[_]) Del(key string) error { - store.Lock() - defer store.Unlock() - return store.del(key) +// Get - gets an item from the store. +func (store *QueueStore[I]) Get(key Key) (item I, err error) { + items, err := store.GetMultiple(key) + if err != nil { + return item, err + } + return items[0], nil } -// DelList - Deletes a list of entries from the store. -// Returns an error even if one key fails to be deleted. -func (store *QueueStore[_]) DelList(keys []string) error { - store.Lock() - defer store.Unlock() +// GetMultiple will read the multi payload file and fetch the items +func (store *QueueStore[I]) GetMultiple(key Key) (items []I, err error) { + raw, err := store.GetRaw(key) + if err != nil { + return nil, err + } - for _, key := range keys { - if err := store.del(key); err != nil { - return err + decoder := jsoniter.ConfigCompatibleWithStandardLibrary.NewDecoder(bytes.NewReader(raw)) + for decoder.More() { + var item I + if err := decoder.Decode(&item); err != nil { + return nil, err } + items = append(items, item) } - return nil + return items, err +} + +// Del - Deletes an entry from the store. +func (store *QueueStore[_]) Del(key Key) error { + store.Lock() + defer store.Unlock() + return store.del(key) } // Len returns the entry count. 
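// Illustrative sketch, not part of this patch: with the reworked API, callers no longer
// juggle raw file names and extensions; Put/PutMultiple hand back a Key, and
// List/GetMultiple/Del take that Key. A hypothetical consumer draining the store could
// look like this:
//
//	func drainStore[I any](store *QueueStore[I]) ([]I, error) {
//		var out []I
//		for _, key := range store.List() { // oldest entries first
//			items, err := store.GetMultiple(key) // works for single- and multi-item entries
//			if err != nil {
//				return nil, err
//			}
//			out = append(out, items...)
//			if err := store.Del(key); err != nil { // drop the entry once consumed
//				return nil, err
//			}
//		}
//		return out, nil
//	}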
@@ -191,30 +293,35 @@ func (store *QueueStore[_]) Len() int { } // lockless call -func (store *QueueStore[_]) del(key string) error { - err := os.Remove(filepath.Join(store.directory, key+store.fileExt)) +func (store *QueueStore[_]) del(key Key) error { + err := os.Remove(filepath.Join(store.directory, key.String())) // Delete as entry no matter the result - delete(store.entries, key) + delete(store.entries, key.String()) return err } // List - lists all files registered in the store. -func (store *QueueStore[_]) List() ([]string, error) { +func (store *QueueStore[_]) List() (keys []Key) { store.RLock() - l := make([]string, 0, len(store.entries)) - for k := range store.entries { - l = append(l, k) + defer store.RUnlock() + + entries := make([]string, 0, len(store.entries)) + for entry := range store.entries { + entries = append(entries, entry) } // Sort entries... - sort.Slice(l, func(i, j int) bool { - return store.entries[l[i]] < store.entries[l[j]] + sort.Slice(entries, func(i, j int) bool { + return store.entries[entries[i]] < store.entries[entries[j]] }) - store.RUnlock() - return l, nil + for i := range entries { + keys = append(keys, parseKey(entries[i])) + } + + return keys } // list will read all entries from disk. @@ -241,9 +348,3 @@ func (store *QueueStore[_]) list() ([]os.DirEntry, error) { return files, nil } - -// Extension will return the file extension used -// for the items stored in the queue. -func (store *QueueStore[_]) Extension() string { - return store.fileExt -} diff --git a/internal/store/queuestore_test.go b/internal/store/queuestore_test.go index 680cb177bc13a..5211520c133b8 100644 --- a/internal/store/queuestore_test.go +++ b/internal/store/queuestore_test.go @@ -18,11 +18,15 @@ package store import ( + "bytes" + "fmt" "os" "path/filepath" "reflect" - "strings" "testing" + + jsoniter "github.com/json-iterator/go" + "github.com/valyala/bytebufferpool" ) type TestItem struct { @@ -65,18 +69,15 @@ func TestQueueStorePut(t *testing.T) { t.Fatal("Failed to create a queue store ", err) } // Put 100 items. - for i := 0; i < 100; i++ { - if err := store.Put(testItem); err != nil { + for range 100 { + if _, err := store.Put(testItem); err != nil { t.Fatal("Failed to put to queue store ", err) } } // Count the items. - names, err := store.List() - if err != nil { - t.Fatal(err) - } - if len(names) != 100 { - t.Fatalf("List() Expected: 100, got %d", len(names)) + keys := store.List() + if len(keys) != 100 { + t.Fatalf("List() Expected: 100, got %d", len(keys)) } } @@ -92,19 +93,16 @@ func TestQueueStoreGet(t *testing.T) { t.Fatal("Failed to create a queue store ", err) } // Put 10 items - for i := 0; i < 10; i++ { - if err := store.Put(testItem); err != nil { + for range 10 { + if _, err := store.Put(testItem); err != nil { t.Fatal("Failed to put to queue store ", err) } } - itemKeys, err := store.List() - if err != nil { - t.Fatal(err) - } + itemKeys := store.List() // Get 10 items. if len(itemKeys) == 10 { for _, key := range itemKeys { - item, eErr := store.Get(strings.TrimSuffix(key, testItemExt)) + item, eErr := store.Get(key) if eErr != nil { t.Fatal("Failed to Get the item from the queue store ", eErr) } @@ -129,19 +127,16 @@ func TestQueueStoreDel(t *testing.T) { t.Fatal("Failed to create a queue store ", err) } // Put 20 items. 
- for i := 0; i < 20; i++ { - if err := store.Put(testItem); err != nil { + for range 20 { + if _, err := store.Put(testItem); err != nil { t.Fatal("Failed to put to queue store ", err) } } - itemKeys, err := store.List() - if err != nil { - t.Fatal(err) - } + itemKeys := store.List() // Remove all the items. if len(itemKeys) == 20 { for _, key := range itemKeys { - err := store.Del(strings.TrimSuffix(key, testItemExt)) + err := store.Del(key) if err != nil { t.Fatal("queue store Del failed with ", err) } @@ -150,12 +145,9 @@ func TestQueueStoreDel(t *testing.T) { t.Fatalf("List() Expected: 20, got %d", len(itemKeys)) } - names, err := store.List() - if err != nil { - t.Fatal(err) - } - if len(names) != 0 { - t.Fatalf("List() Expected: 0, got %d", len(names)) + keys := store.List() + if len(keys) != 0 { + t.Fatalf("List() Expected: 0, got %d", len(keys)) } } @@ -171,13 +163,13 @@ func TestQueueStoreLimit(t *testing.T) { if err != nil { t.Fatal("Failed to create a queue store ", err) } - for i := 0; i < 5; i++ { - if err := store.Put(testItem); err != nil { + for range 5 { + if _, err := store.Put(testItem); err != nil { t.Fatal("Failed to put to queue store ", err) } } // Should not allow 6th Put. - if err := store.Put(testItem); err == nil { + if _, err := store.Put(testItem); err == nil { t.Fatalf("Expected to fail with %s, but passes", errLimitExceeded) } } @@ -193,19 +185,16 @@ func TestQueueStoreListN(t *testing.T) { if err != nil { t.Fatal("Failed to create a queue store ", err) } - for i := 0; i < 10; i++ { - if err := store.Put(testItem); err != nil { + for range 10 { + if _, err := store.Put(testItem); err != nil { t.Fatal("Failed to put to queue store ", err) } } // Should return all the item keys in the store. - names, err := store.List() - if err != nil { - t.Fatal(err) - } + keys := store.List() - if len(names) != 10 { - t.Fatalf("List() Expected: 10, got %d", len(names)) + if len(keys) != 10 { + t.Fatalf("List() Expected: 10, got %d", len(keys)) } // re-open @@ -213,28 +202,220 @@ func TestQueueStoreListN(t *testing.T) { if err != nil { t.Fatal("Failed to create a queue store ", err) } - names, err = store.List() - if err != nil { - t.Fatal(err) - } + keys = store.List() - if len(names) != 10 { - t.Fatalf("List() Expected: 10, got %d", len(names)) + if len(keys) != 10 { + t.Fatalf("List() Expected: 10, got %d", len(keys)) } - if len(names) != store.Len() { - t.Fatalf("List() Expected: 10, got %d", len(names)) + if len(keys) != store.Len() { + t.Fatalf("List() Expected: 10, got %d", len(keys)) } // Delete all - for _, key := range names { + for _, key := range keys { err := store.Del(key) if err != nil { t.Fatal(err) } } // Re-list - lst, err := store.List() - if len(lst) > 0 || err != nil { - t.Fatalf("Expected List() to return empty list and no error, got %v err: %v", lst, err) + keys = store.List() + if len(keys) > 0 || err != nil { + t.Fatalf("Expected List() to return empty list and no error, got %v err: %v", keys, err) + } +} + +func TestMultiplePutGetRaw(t *testing.T) { + defer func() { + if err := tearDownQueueStore(); err != nil { + t.Fatalf("Failed to tear down store; %v", err) + } + }() + store, err := setUpQueueStore(queueDir, 10) + if err != nil { + t.Fatalf("Failed to create a queue store; %v", err) + } + // TestItem{Name: "test-item", Property: "property"} + var items []TestItem + for i := range 10 { + items = append(items, TestItem{ + Name: fmt.Sprintf("test-item-%d", i), + Property: "property", + }) + } + + buf := bytebufferpool.Get() + defer 
bytebufferpool.Put(buf) + + enc := jsoniter.ConfigCompatibleWithStandardLibrary.NewEncoder(buf) + for i := range items { + if err = enc.Encode(items[i]); err != nil { + t.Fatal(err) + } + } + + if _, err := store.PutMultiple(items); err != nil { + t.Fatalf("failed to put multiple; %v", err) + } + + keys := store.List() + if len(keys) != 1 { + t.Fatalf("expected len(keys)=1, but found %d", len(keys)) + } + + key := keys[0] + if !key.Compress { + t.Fatal("expected the item to be compressed") + } + if key.ItemCount != 10 { + t.Fatalf("expected itemcount=10 but found %v", key.ItemCount) + } + + raw, err := store.GetRaw(key) + if err != nil { + t.Fatalf("unable to get multiple items; %v", err) + } + + if !bytes.Equal(buf.Bytes(), raw) { + t.Fatalf("expected bytes: %d vs read bytes is wrong %d", len(buf.Bytes()), len(raw)) + } + + if err := store.Del(key); err != nil { + t.Fatalf("unable to Del; %v", err) + } + + // Re-list + keys = store.List() + if len(keys) > 0 || err != nil { + t.Fatalf("Expected List() to return empty list and no error, got %v err: %v", keys, err) + } +} + +func TestMultiplePutGets(t *testing.T) { + defer func() { + if err := tearDownQueueStore(); err != nil { + t.Fatalf("Failed to tear down store; %v", err) + } + }() + store, err := setUpQueueStore(queueDir, 10) + if err != nil { + t.Fatalf("Failed to create a queue store; %v", err) + } + // TestItem{Name: "test-item", Property: "property"} + var items []TestItem + for i := range 10 { + items = append(items, TestItem{ + Name: fmt.Sprintf("test-item-%d", i), + Property: "property", + }) + } + + if _, err := store.PutMultiple(items); err != nil { + t.Fatalf("failed to put multiple; %v", err) + } + + keys := store.List() + if len(keys) != 1 { + t.Fatalf("expected len(keys)=1, but found %d", len(keys)) + } + + key := keys[0] + if !key.Compress { + t.Fatal("expected the item to be compressed") + } + if key.ItemCount != 10 { + t.Fatalf("expected itemcount=10 but found %v", key.ItemCount) + } + + resultItems, err := store.GetMultiple(key) + if err != nil { + t.Fatalf("unable to get multiple items; %v", err) + } + + if !reflect.DeepEqual(resultItems, items) { + t.Fatalf("expected item list: %v; but got %v", items, resultItems) + } + + if err := store.Del(key); err != nil { + t.Fatalf("unable to Del; %v", err) + } + + // Re-list + keys = store.List() + if len(keys) > 0 || err != nil { + t.Fatalf("Expected List() to return empty list and no error, got %v err: %v", keys, err) + } +} + +func TestMixedPutGets(t *testing.T) { + defer func() { + if err := tearDownQueueStore(); err != nil { + t.Fatalf("Failed to tear down store; %v", err) + } + }() + store, err := setUpQueueStore(queueDir, 10) + if err != nil { + t.Fatalf("Failed to create a queue store; %v", err) + } + // TestItem{Name: "test-item", Property: "property"} + var items []TestItem + for i := range 5 { + items = append(items, TestItem{ + Name: fmt.Sprintf("test-item-%d", i), + Property: "property", + }) + } + if _, err := store.PutMultiple(items); err != nil { + t.Fatalf("failed to put multiple; %v", err) + } + + for i := 5; i < 10; i++ { + item := TestItem{ + Name: fmt.Sprintf("test-item-%d", i), + Property: "property", + } + if _, err := store.Put(item); err != nil { + t.Fatalf("unable to store.Put(); %v", err) + } + items = append(items, item) + } + + keys := store.List() + if len(keys) != 6 { + // 1 multiple + 5 single PUTs + t.Fatalf("expected len(keys)=6, but found %d", len(keys)) + } + + var resultItems []TestItem + for _, key := range keys { + if key.ItemCount > 1 { + 
items, err := store.GetMultiple(key) + if err != nil { + t.Fatalf("unable to get multiple items; %v", err) + } + resultItems = append(resultItems, items...) + continue + } + item, err := store.Get(key) + if err != nil { + t.Fatalf("unable to get item; %v", err) + } + resultItems = append(resultItems, item) + } + + if !reflect.DeepEqual(resultItems, items) { + t.Fatalf("expected item list: %v; but got %v", items, resultItems) + } + + // Delete all + for _, key := range keys { + if err := store.Del(key); err != nil { + t.Fatalf("unable to Del; %v", err) + } + } + // Re-list + keys = store.List() + if len(keys) > 0 || err != nil { + t.Fatalf("Expected List() to return empty list and no error, got %v err: %v", keys, err) } } diff --git a/internal/store/store.go b/internal/store/store.go index 4f5b6a60e800c..eab1c875e075d 100644 --- a/internal/store/store.go +++ b/internal/store/store.go @@ -21,18 +21,18 @@ import ( "context" "errors" "fmt" + "strconv" "strings" "time" xioutil "github.com/minio/minio/internal/ioutil" - xnet "github.com/minio/pkg/v2/net" ) const ( retryInterval = 3 * time.Second ) -type logger = func(ctx context.Context, err error, id string, errKind ...interface{}) +type logger = func(ctx context.Context, err error, id string, errKind ...any) // ErrNotConnected - indicates that the target connection is not active. var ErrNotConnected = errors.New("not connected to target server/service") @@ -45,20 +45,64 @@ type Target interface { // Store - Used to persist items. type Store[I any] interface { - Put(item I) error - Get(key string) (I, error) + Put(item I) (Key, error) + PutMultiple(item []I) (Key, error) + Get(key Key) (I, error) + GetMultiple(key Key) ([]I, error) + GetRaw(key Key) ([]byte, error) + PutRaw(b []byte) (Key, error) Len() int - List() ([]string, error) - Del(key string) error - DelList(key []string) error + List() []Key + Del(key Key) error Open() error - Extension() string + Delete() error } // Key denotes the key present in the store. type Key struct { - Name string - IsLast bool + Name string + Compress bool + Extension string + ItemCount int +} + +// String returns the filepath name +func (k Key) String() string { + keyStr := k.Name + if k.ItemCount > 1 { + keyStr = fmt.Sprintf("%d:%s", k.ItemCount, k.Name) + } + return keyStr + k.Extension + func() string { + if k.Compress { + return compressExt + } + return "" + }() +} + +func getItemCount(k string) (count int, err error) { + count = 1 + v := strings.Split(k, ":") + if len(v) == 2 { + return strconv.Atoi(v[0]) + } + return count, err +} + +func parseKey(k string) (key Key) { + key.Name = k + if strings.HasSuffix(k, compressExt) { + key.Compress = true + key.Name = strings.TrimSuffix(key.Name, compressExt) + } + if key.ItemCount, _ = getItemCount(k); key.ItemCount > 1 { + key.Name = strings.TrimPrefix(key.Name, fmt.Sprintf("%d:", key.ItemCount)) + } + if vals := strings.Split(key.Name, "."); len(vals) == 2 { + key.Extension = "." + vals[1] + key.Name = strings.TrimSuffix(key.Name, key.Extension) + } + return key } // replayItems - Reads the items from the store and replays. @@ -72,18 +116,12 @@ func replayItems[I any](store Store[I], doneCh <-chan struct{}, log logger, id s defer retryTicker.Stop() for { - names, err := store.List() - if err != nil { - log(context.Background(), fmt.Errorf("store.List() failed with: %w", err), id) - } else { - keyCount := len(names) - for i, name := range names { - select { - case keyCh <- Key{strings.TrimSuffix(name, store.Extension()), keyCount == i+1}: - // Get next key. 
- case <-doneCh: - return - } + for _, key := range store.List() { + select { + case keyCh <- key: + // Get next key. + case <-doneCh: + return } } @@ -110,15 +148,14 @@ func sendItems(target Target, keyCh <-chan Key, doneCh <-chan struct{}, logger l break } - if err != ErrNotConnected && !xnet.IsConnResetErr(err) { - logger(context.Background(), - fmt.Errorf("target.SendFromStore() failed with '%w'", err), - target.Name()) - } - - // Retrying after 3secs back-off + logger( + context.Background(), + fmt.Errorf("unable to send log entry to '%s' err '%w'", target.Name(), err), + target.Name(), + ) select { + // Retrying after 3secs back-off case <-retryTicker.C: case <-doneCh: return false @@ -131,7 +168,6 @@ func sendItems(target Target, keyCh <-chan Key, doneCh <-chan struct{}, logger l select { case key, ok := <-keyCh: if !ok { - // closed channel. return } @@ -147,9 +183,7 @@ func sendItems(target Target, keyCh <-chan Key, doneCh <-chan struct{}, logger l // StreamItems reads the keys from the store and replays the corresponding item to the target. func StreamItems[I any](store Store[I], target Target, doneCh <-chan struct{}, logger logger) { go func() { - // Replays the items from the store. keyCh := replayItems(store, doneCh, logger, target.Name()) - // Send items from the store. sendItems(target, keyCh, doneCh, logger) }() } diff --git a/internal/store/store_test.go b/internal/store/store_test.go new file mode 100644 index 0000000000000..4c05f3d73b13e --- /dev/null +++ b/internal/store/store_test.go @@ -0,0 +1,146 @@ +// Copyright (c) 2015-2024 MinIO, Inc. +// +// This file is part of MinIO Object Storage stack +// +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// This program is distributed in the hope that it will be useful +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with this program. If not, see . 
+ +package store + +import ( + "testing" +) + +func TestKeyString(t *testing.T) { + testCases := []struct { + key Key + expectedString string + }{ + { + key: Key{ + Name: "01894394-d046-4783-ba0d-f1c6885790dc", + Extension: ".event", + }, + expectedString: "01894394-d046-4783-ba0d-f1c6885790dc.event", + }, + { + key: Key{ + Name: "01894394-d046-4783-ba0d-f1c6885790dc", + Compress: true, + Extension: ".event", + ItemCount: 100, + }, + expectedString: "100:01894394-d046-4783-ba0d-f1c6885790dc.event.snappy", + }, + { + key: Key{ + Name: "01894394-d046-4783-ba0d-f1c6885790dc", + Extension: ".event", + ItemCount: 100, + }, + expectedString: "100:01894394-d046-4783-ba0d-f1c6885790dc.event", + }, + { + key: Key{ + Name: "01894394-d046-4783-ba0d-f1c6885790dc", + Compress: true, + Extension: ".event", + ItemCount: 1, + }, + expectedString: "01894394-d046-4783-ba0d-f1c6885790dc.event.snappy", + }, + { + key: Key{ + Name: "01894394-d046-4783-ba0d-f1c6885790dc", + Extension: ".event", + ItemCount: 1, + }, + expectedString: "01894394-d046-4783-ba0d-f1c6885790dc.event", + }, + } + + for i, testCase := range testCases { + if testCase.expectedString != testCase.key.String() { + t.Fatalf("case[%v]: key.String() Expected: %s, got %s", i, testCase.expectedString, testCase.key.String()) + } + } +} + +func TestParseKey(t *testing.T) { + testCases := []struct { + str string + expectedKey Key + }{ + { + str: "01894394-d046-4783-ba0d-f1c6885790dc.event", + expectedKey: Key{ + Name: "01894394-d046-4783-ba0d-f1c6885790dc", + Extension: ".event", + ItemCount: 1, + }, + }, + { + str: "100:01894394-d046-4783-ba0d-f1c6885790dc.event.snappy", + expectedKey: Key{ + Name: "01894394-d046-4783-ba0d-f1c6885790dc", + Compress: true, + Extension: ".event", + ItemCount: 100, + }, + }, + { + str: "100:01894394-d046-4783-ba0d-f1c6885790dc.event", + expectedKey: Key{ + Name: "01894394-d046-4783-ba0d-f1c6885790dc", + Extension: ".event", + ItemCount: 100, + }, + }, + { + str: "01894394-d046-4783-ba0d-f1c6885790dc.event.snappy", + expectedKey: Key{ + Name: "01894394-d046-4783-ba0d-f1c6885790dc", + Compress: true, + Extension: ".event", + ItemCount: 1, + }, + }, + { + str: "01894394-d046-4783-ba0d-f1c6885790dc.event", + expectedKey: Key{ + Name: "01894394-d046-4783-ba0d-f1c6885790dc", + Extension: ".event", + ItemCount: 1, + }, + }, + } + + for i, testCase := range testCases { + key := parseKey(testCase.str) + if testCase.expectedKey.Name != key.Name { + t.Fatalf("case[%v]: Expected key.Name: %v, got %v", i, testCase.expectedKey.Name, key.Name) + } + if testCase.expectedKey.Compress != key.Compress { + t.Fatalf("case[%v]: Expected key.Compress: %v, got %v", i, testCase.expectedKey.Compress, key.Compress) + } + if testCase.expectedKey.Extension != key.Extension { + t.Fatalf("case[%v]: Expected key.Extension: %v, got %v", i, testCase.expectedKey.Extension, key.Extension) + } + if testCase.expectedKey.ItemCount != key.ItemCount { + t.Fatalf("case[%v]: Expected key.ItemCount: %v, got %v", i, testCase.expectedKey.ItemCount, key.ItemCount) + } + if testCase.expectedKey.String() != key.String() { + t.Fatalf("case[%v]: Expected key.String(): %v, got %v", i, testCase.expectedKey.String(), key.String()) + } + } +} diff --git a/main.go b/main.go index 54fb454909b63..27b694d985323 100644 --- a/main.go +++ b/main.go @@ -17,6 +17,8 @@ package main // import "github.com/minio/minio" +//go:generate go install tool + import ( "os"
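Reviewer's note on the new on-disk naming scheme: the Key type added in internal/store/store.go encodes the batch size and compression state directly in the queue file name (`<count>:<name><extension>[.snappy]`), and parseKey recovers them when List() scans the directory. The standalone sketch below restates that round trip outside the patch so it can be compiled and run in isolation; package main, the main function, and the compressExt = ".snappy" constant (its value inferred from the expectations in store_test.go) are scaffolding added only for this example, and String() is written in an equivalent but slightly restructured form rather than copied verbatim.

// Standalone illustration of the key naming convention introduced in
// internal/store/store.go. Key and parseKey mirror the patch above; the
// rest is example scaffolding, not part of the change itself.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// compressExt is assumed to be ".snappy", matching the suffix the new
// store_test.go cases expect for compressed entries.
const compressExt = ".snappy"

type Key struct {
	Name      string
	Compress  bool
	Extension string
	ItemCount int
}

// String builds the file name: an item-count prefix for batched entries,
// the extension, and a trailing compression suffix when applicable.
func (k Key) String() string {
	keyStr := k.Name
	if k.ItemCount > 1 {
		keyStr = fmt.Sprintf("%d:%s", k.ItemCount, k.Name)
	}
	if k.Compress {
		return keyStr + k.Extension + compressExt
	}
	return keyStr + k.Extension
}

// getItemCount reads the optional "<count>:" prefix; absent prefix means 1.
func getItemCount(k string) (int, error) {
	if v := strings.Split(k, ":"); len(v) == 2 {
		return strconv.Atoi(v[0])
	}
	return 1, nil
}

// parseKey reverses String(), recovering batch size, compression flag,
// extension and the bare name from a directory entry.
func parseKey(k string) (key Key) {
	key.Name = k
	if strings.HasSuffix(k, compressExt) {
		key.Compress = true
		key.Name = strings.TrimSuffix(key.Name, compressExt)
	}
	if key.ItemCount, _ = getItemCount(k); key.ItemCount > 1 {
		key.Name = strings.TrimPrefix(key.Name, fmt.Sprintf("%d:", key.ItemCount))
	}
	if vals := strings.Split(key.Name, "."); len(vals) == 2 {
		key.Extension = "." + vals[1]
		key.Name = strings.TrimSuffix(key.Name, key.Extension)
	}
	return key
}

func main() {
	// A batch of 100 compressed events written by PutMultiple becomes one file:
	k := Key{Name: "01894394-d046-4783-ba0d-f1c6885790dc", Compress: true, Extension: ".event", ItemCount: 100}
	name := k.String()
	fmt.Println(name) // 100:01894394-d046-4783-ba0d-f1c6885790dc.event.snappy

	// List() reverses the encoding when scanning the queue directory:
	back := parseKey(name)
	fmt.Printf("%+v\n", back) // {Name:01894394-... Compress:true Extension:.event ItemCount:100}
}

Keeping this metadata in the file name lets List() report batch size and compression state without opening or decompressing each entry, which appears to be the motivation for extending Key beyond the plain name string used previously.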