From 8f4a7e1f92975fbd1536a569d7aad6800809ef4e Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 31 Jan 2023 05:27:19 +0800 Subject: [PATCH 001/439] update the relate date of v1.3.7 Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.3.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG/CHANGELOG-1.3.md b/CHANGELOG/CHANGELOG-1.3.md index d0026b376..c13749df4 100644 --- a/CHANGELOG/CHANGELOG-1.3.md +++ b/CHANGELOG/CHANGELOG-1.3.md @@ -2,7 +2,7 @@ Note that we start to track changes starting from v1.3.7.
-## v1.3.7(TBD) +## v1.3.7(2023-01-31) ### BoltDB - Add [recursive checker to confirm database consistency](https://github.com/etcd-io/bbolt/pull/225). @@ -39,4 +39,4 @@ Other changes focused on defense-in-depth ([#358](https://github.com/etcd-io/bbo `bbolt` command line tool was expanded to: - allow fixing simple corruptions by `bbolt surgery` ([#370](https://github.com/etcd-io/bbolt/pull/370)) - be flexible about output formatting ([#306](https://github.com/etcd-io/bbolt/pull/306), [#359](https://github.com/etcd-io/bbolt/pull/359)) -- allow accessing data in subbuckets ([#295](https://github.com/etcd-io/bbolt/pull/295)) \ No newline at end of file +- allow accessing data in subbuckets ([#295](https://github.com/etcd-io/bbolt/pull/295)) From 287049ea83a2bfee3f3a83e1368b2f3719996fe1 Mon Sep 17 00:00:00 2001 From: missinglink Date: Wed, 1 Feb 2023 14:26:24 +0100 Subject: [PATCH 002/439] compact: add cli flags to enable NoSync option Signed-off-by: missinglink --- cmd/bbolt/main.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index f76cd82c4..38be33de3 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -1572,6 +1572,7 @@ type compactCommand struct { SrcPath string DstPath string TxMaxSize int64 + DstNoSync bool } // newCompactCommand returns a CompactCommand. @@ -1588,6 +1589,7 @@ func (cmd *compactCommand) Run(args ...string) (err error) { fs.SetOutput(io.Discard) fs.StringVar(&cmd.DstPath, "o", "", "") fs.Int64Var(&cmd.TxMaxSize, "tx-max-size", 65536, "") + fs.BoolVar(&cmd.DstNoSync, "no-sync", false, "") if err := fs.Parse(args); err == flag.ErrHelp { fmt.Fprintln(cmd.Stderr, cmd.Usage()) return ErrUsage @@ -1620,7 +1622,7 @@ func (cmd *compactCommand) Run(args ...string) (err error) { defer src.Close() // Open destination database. 
- dst, err := bolt.Open(cmd.DstPath, fi.Mode(), nil) + dst, err := bolt.Open(cmd.DstPath, fi.Mode(), &bolt.Options{NoSync: cmd.DstNoSync}) if err != nil { return err } @@ -1658,6 +1660,10 @@ Additional options include: -tx-max-size NUM Specifies the maximum size of individual transactions. Defaults to 64KB. + + -no-sync BOOL + Skip fsync() calls after each commit (fast but unsafe) + Defaults to false `, "\n") } From 505fc0f7af3c9bba93a80fc33918c90c1b0517ad Mon Sep 17 00:00:00 2001 From: caojiamingalan Date: Fri, 10 Feb 2023 09:21:59 -0600 Subject: [PATCH 003/439] complete all cleanup operations in db.close() even if there is an error in the middle Signed-off-by: caojiamingalan --- db.go | 11 ++++++++--- tests/failpoint/db_failpoint_test.go | 23 +++++++++++++++++++++++ 2 files changed, 31 insertions(+), 3 deletions(-) diff --git a/db.go b/db.go index c9422127e..5f45d966e 100644 --- a/db.go +++ b/db.go @@ -649,9 +649,10 @@ func (db *DB) close() error { // Clear ops. db.ops.writeAt = nil + var errs []error // Close the mmap. if err := db.munmap(); err != nil { - return err + errs = append(errs, err) } // Close file handles. @@ -660,18 +661,22 @@ func (db *DB) close() error { if !db.readOnly { // Unlock the file. if err := funlock(db); err != nil { - return fmt.Errorf("bolt.Close(): funlock error: %w", err) + errs = append(errs, fmt.Errorf("bolt.Close(): funlock error: %w", err)) } } // Close the file descriptor. 
if err := db.file.Close(); err != nil { - return fmt.Errorf("db file close: %s", err) + errs = append(errs, fmt.Errorf("db file close: %w", err)) } db.file = nil } db.path = "" + + if len(errs) > 0 { + return errs[0] + } return nil } diff --git a/tests/failpoint/db_failpoint_test.go b/tests/failpoint/db_failpoint_test.go index 798c6b9fd..ae900b229 100644 --- a/tests/failpoint/db_failpoint_test.go +++ b/tests/failpoint/db_failpoint_test.go @@ -3,6 +3,7 @@ package failpoint import ( "path/filepath" "testing" + "time" "github.com/stretchr/testify/require" @@ -23,3 +24,25 @@ func TestFailpoint_MapFail(t *testing.T) { require.Error(t, err) require.ErrorContains(t, err, "map somehow failed") } + +// ensures when munmap fails, the flock is unlocked +func TestFailpoint_UnmapFail_DbClose(t *testing.T) { + //unmap error on db close + //we need to open the db first, and then enable the error. + //otherwise the db cannot be opened. + f := filepath.Join(t.TempDir(), "db") + + err := gofail.Enable("unmapError", `return("unmap somehow failed")`) + require.NoError(t, err) + _, err = bolt.Open(f, 0666, nil) + require.Error(t, err) + require.ErrorContains(t, err, "unmap somehow failed") + //disable the error, and try to reopen the db + err = gofail.Disable("unmapError") + require.NoError(t, err) + + db, err := bolt.Open(f, 0666, &bolt.Options{Timeout: 30 * time.Second}) + require.NoError(t, err) + err = db.Close() + require.NoError(t, err) +} From 46437cea06b7abd9479371a7872ba5a1c948637e Mon Sep 17 00:00:00 2001 From: Josh Rickmar Date: Sat, 11 Feb 2023 18:34:36 +0000 Subject: [PATCH 004/439] Avoid syscall.Syscall use on OpenBSD Syscall numbers are not stable on OpenBSD, and hardcoding the msync syscall number will break bbolt on future versions of OpenBSD. Use the libc wrapper provided by golang.org/x/sys/unix instead. 
Signed-off-by: Josh Rickmar --- bolt_openbsd.go | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/bolt_openbsd.go b/bolt_openbsd.go index d7f50358e..bf47aa1a6 100644 --- a/bolt_openbsd.go +++ b/bolt_openbsd.go @@ -1,22 +1,11 @@ package bbolt import ( - "syscall" - "unsafe" -) - -const ( - msAsync = 1 << iota // perform asynchronous writes - msSync // perform synchronous writes - msInvalidate // invalidate cached data + "golang.org/x/sys/unix" ) func msync(db *DB) error { - _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) - if errno != 0 { - return errno - } - return nil + return unix.Msync(db.data[:db.datasz], unix.MS_INVALIDATE) } func fdatasync(db *DB) error { From 340246337cc36f9c486d6d139248881f1aac0145 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Feb 2023 15:11:49 +0000 Subject: [PATCH 005/439] Bump golang.org/x/sys from 0.4.0 to 0.5.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.4.0 to 0.5.0. - [Release notes](https://github.com/golang/sys/releases) - [Commits](https://github.com/golang/sys/compare/v0.4.0...v0.5.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 511a392a0..643fb1401 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.17 require ( github.com/stretchr/testify v1.8.1 go.etcd.io/gofail v0.1.0 - golang.org/x/sys v0.4.0 + golang.org/x/sys v0.5.0 ) require ( diff --git a/go.sum b/go.sum index f0f96bf31..ead7195bb 100644 --- a/go.sum +++ b/go.sum @@ -12,8 +12,8 @@ github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKs github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From 578b94665bacd7842d3d851986990267b0ce5704 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 16 Feb 2023 16:04:42 +0800 Subject: [PATCH 006/439] update the usage of surgery command Signed-off-by: Benjamin Wang --- cmd/bbolt/surgery_commands.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/bbolt/surgery_commands.go b/cmd/bbolt/surgery_commands.go index 5553c8fa7..9685d3aa7 100644 --- a/cmd/bbolt/surgery_commands.go +++ b/cmd/bbolt/surgery_commands.go @@ -126,8 +126,9 @@ Usage: bbolt surgery command [arguments] The commands are: - copy-page copy 
page from source pageid to target pageid help print this screen + clear-page clear all elements at the given pageId + copy-page copy page from source pageId to target pageId revert-meta-page revert the meta page change made by the last transaction Use "bbolt surgery [command] -h" for more information about a command. From eb39e4c93eb4d169130c1d4b36a65a0ca23d2183 Mon Sep 17 00:00:00 2001 From: Marek Siarkowicz Date: Fri, 17 Feb 2023 14:23:03 +0100 Subject: [PATCH 007/439] Fix redacted format Signed-off-by: Marek Siarkowicz --- cmd/bbolt/main.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 38be33de3..96661b67a 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -536,7 +536,9 @@ func formatBytes(b []byte, format string) (string, error) { case "auto": return bytesToAsciiOrHex(b), nil case "redacted": - return fmt.Sprintf("", len(b), sha256.New().Sum(b)), nil + hash := sha256.New() + hash.Write(b) + return fmt.Sprintf("", len(b), hash.Sum(nil)), nil default: return "", fmt.Errorf("formatBytes: unsupported format: %s", format) } From 2e25261ce0543bf0a3246e490a71dd48711b09d8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Feb 2023 15:12:40 +0000 Subject: [PATCH 008/439] Bump github.com/stretchr/testify from 1.8.1 to 1.8.2 Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.8.1 to 1.8.2. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.8.1...v1.8.2) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 643fb1401..80ce15aaf 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module go.etcd.io/bbolt go 1.17 require ( - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.2 go.etcd.io/gofail v0.1.0 golang.org/x/sys v0.5.0 ) diff --git a/go.sum b/go.sum index ead7195bb..f8c867bf7 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,9 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= From 34595e723161a3d00f0ca1e83b8f57536b01873f Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 17 Feb 2023 13:58:25 +0800 Subject: [PATCH 009/439] create a common package Points: 1. There are lots of duplicated definitions between bolt and guts_cli, which is definitely not good. 2. The implementation in guts_cli also has issue, please refer to https://github.com/etcd-io/bbolt/issues/391. This refactoring can fix the issue. 
Signed-off-by: Benjamin Wang --- internal/common/bucket.go | 54 +++++ internal/common/errors.go | 78 ++++++++ internal/common/meta.go | 147 ++++++++++++++ internal/common/page.go | 374 +++++++++++++++++++++++++++++++++++ internal/common/page_test.go | 72 +++++++ internal/common/types.go | 50 +++++ internal/common/unsafe.go | 39 ++++ internal/common/utils.go | 25 +++ 8 files changed, 839 insertions(+) create mode 100644 internal/common/bucket.go create mode 100644 internal/common/errors.go create mode 100644 internal/common/meta.go create mode 100644 internal/common/page.go create mode 100644 internal/common/page_test.go create mode 100644 internal/common/types.go create mode 100644 internal/common/unsafe.go create mode 100644 internal/common/utils.go diff --git a/internal/common/bucket.go b/internal/common/bucket.go new file mode 100644 index 000000000..2b4ab1453 --- /dev/null +++ b/internal/common/bucket.go @@ -0,0 +1,54 @@ +package common + +import ( + "fmt" + "unsafe" +) + +const BucketHeaderSize = int(unsafe.Sizeof(InBucket{})) + +// InBucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. +type InBucket struct { + root Pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +func NewInBucket(root Pgid, seq uint64) InBucket { + return InBucket{ + root: root, + sequence: seq, + } +} + +func (b *InBucket) RootPage() Pgid { + return b.root +} + +func (b *InBucket) SetRootPage(id Pgid) { + b.root = id +} + +// InSequence returns the sequence. 
The reason why not naming it `Sequence` +// is to avoid duplicated name as `(*Bucket) Sequence()` +func (b *InBucket) InSequence() uint64 { + return b.sequence +} + +func (b *InBucket) SetInSequence(v uint64) { + b.sequence = v +} + +func (b *InBucket) IncSequence() { + b.sequence++ +} + +func (b *InBucket) InlinePage(v []byte) *Page { + return (*Page)(unsafe.Pointer(&v[BucketHeaderSize])) +} + +func (b *InBucket) String() string { + return fmt.Sprintf("", b.root, b.sequence) +} diff --git a/internal/common/errors.go b/internal/common/errors.go new file mode 100644 index 000000000..fd1d3541c --- /dev/null +++ b/internal/common/errors.go @@ -0,0 +1,78 @@ +package common + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + ErrDatabaseNotOpen = errors.New("database not open") + + // ErrDatabaseOpen is returned when opening a database that is + // already open. + ErrDatabaseOpen = errors.New("database already open") + + // ErrInvalid is returned when both meta pages on a database are invalid. + // This typically occurs when a file is not a bolt database. + ErrInvalid = errors.New("invalid database") + + // ErrInvalidMapping is returned when the database file fails to get mapped. + ErrInvalidMapping = errors.New("database isn't correctly mapped") + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + ErrVersionMismatch = errors.New("version mismatch") + + // ErrChecksum is returned when either meta page checksum does not match. + ErrChecksum = errors.New("checksum error") + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). + ErrTimeout = errors.New("timeout") +) + +// These errors can occur when beginning or committing a Tx. 
+var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + ErrDatabaseReadOnly = errors.New("database is in read-only mode") + + // ErrFreePagesNotLoaded is returned when a readonly transaction without + // preloading the free pages is trying to access the free pages. + ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded") +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. + ErrBucketNotFound = errors.New("bucket not found") + + // ErrBucketExists is returned when creating a bucket that already exists. + ErrBucketExists = errors.New("bucket already exists") + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. + ErrBucketNameRequired = errors.New("bucket name required") + + // ErrKeyRequired is returned when inserting a zero-length key. + ErrKeyRequired = errors.New("key required") + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. + ErrKeyTooLarge = errors.New("key too large") + + // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. + ErrValueTooLarge = errors.New("value too large") + + // ErrIncompatibleValue is returned when trying create or delete a bucket + // on an existing non-bucket key or when trying to create or delete a + // non-bucket key on an existing bucket key. 
+ ErrIncompatibleValue = errors.New("incompatible value") +) diff --git a/internal/common/meta.go b/internal/common/meta.go new file mode 100644 index 000000000..7769ccc61 --- /dev/null +++ b/internal/common/meta.go @@ -0,0 +1,147 @@ +package common + +import ( + "fmt" + "hash/fnv" + "io" + "unsafe" +) + +type Meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root InBucket + freelist Pgid + pgid Pgid + txid Txid + checksum uint64 +} + +// Validate checks the marker bytes and version of the meta page to ensure it matches this binary. +func (m *Meta) Validate() error { + if m.magic != Magic { + return ErrInvalid + } else if m.version != Version { + return ErrVersionMismatch + } else if m.checksum != m.Sum64() { + return ErrChecksum + } + return nil +} + +// Copy copies one meta object to another. +func (m *Meta) Copy(dest *Meta) { + *dest = *m +} + +// Write writes the meta onto a page. +func (m *Meta) Write(p *Page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid && m.freelist != PgidNoFreelist { + // TODO: reject pgidNoFreeList if !NoFreelistSync + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. + p.id = Pgid(m.txid % 2) + p.flags |= MetaPageFlag + + // Calculate the checksum. + m.checksum = m.Sum64() + + m.Copy(p.Meta()) +} + +// Sum64 generates the checksum for the meta. 
+func (m *Meta) Sum64() uint64 { + var h = fnv.New64a() + _, _ = h.Write((*[unsafe.Offsetof(Meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) + return h.Sum64() +} + +func (m *Meta) Magic() uint32 { + return m.magic +} + +func (m *Meta) SetMagic(v uint32) { + m.magic = v +} + +func (m *Meta) SetVersion(v uint32) { + m.version = v +} + +func (m *Meta) PageSize() uint32 { + return m.pageSize +} + +func (m *Meta) SetPageSize(v uint32) { + m.pageSize = v +} + +func (m *Meta) Flags() uint32 { + return m.flags +} + +func (m *Meta) SetFlags(v uint32) { + m.flags = v +} + +func (m *Meta) SetRootBucket(b InBucket) { + m.root = b +} + +func (m *Meta) RootBucket() *InBucket { + return &m.root +} + +func (m *Meta) Freelist() Pgid { + return m.freelist +} + +func (m *Meta) SetFreelist(v Pgid) { + m.freelist = v +} + +func (m *Meta) Pgid() Pgid { + return m.pgid +} + +func (m *Meta) SetPgid(id Pgid) { + m.pgid = id +} + +func (m *Meta) Txid() Txid { + return m.txid +} + +func (m *Meta) SetTxid(id Txid) { + m.txid = id +} + +func (m *Meta) IncTxid() { + m.txid += 1 +} + +func (m *Meta) DecTxid() { + m.txid -= 1 +} + +func (m *Meta) SetChecksum(v uint64) { + m.checksum = v +} + +func (m *Meta) Print(w io.Writer) { + fmt.Fprintf(w, "Version: %d\n", m.version) + fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize) + fmt.Fprintf(w, "Flags: %08x\n", m.flags) + fmt.Fprintf(w, "Root: \n", m.root.root) + fmt.Fprintf(w, "Freelist: \n", m.freelist) + fmt.Fprintf(w, "HWM: \n", m.pgid) + fmt.Fprintf(w, "Txn ID: %d\n", m.txid) + fmt.Fprintf(w, "Checksum: %016x\n", m.checksum) + fmt.Fprintf(w, "\n") +} diff --git a/internal/common/page.go b/internal/common/page.go new file mode 100644 index 000000000..0975f738d --- /dev/null +++ b/internal/common/page.go @@ -0,0 +1,374 @@ +package common + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const PageHeaderSize = unsafe.Sizeof(Page{}) + +const MinKeysPerPage = 2 + +const BranchPageElementSize = unsafe.Sizeof(branchPageElement{}) +const 
LeafPageElementSize = unsafe.Sizeof(leafPageElement{}) + +const ( + BranchPageFlag = 0x01 + LeafPageFlag = 0x02 + MetaPageFlag = 0x04 + FreelistPageFlag = 0x10 +) + +const ( + BucketLeafFlag = 0x01 +) + +type Pgid uint64 + +type Page struct { + id Pgid + flags uint16 + count uint16 + overflow uint32 +} + +func NewPage(id Pgid, flags, count uint16, overflow uint32) *Page { + return &Page{ + id: id, + flags: flags, + count: count, + overflow: overflow, + } +} + +// Typ returns a human-readable page type string used for debugging. +func (p *Page) Typ() string { + if (p.flags & BranchPageFlag) != 0 { + return "branch" + } else if (p.flags & LeafPageFlag) != 0 { + return "leaf" + } else if (p.flags & MetaPageFlag) != 0 { + return "meta" + } else if (p.flags & FreelistPageFlag) != 0 { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +// Meta returns a pointer to the metadata section of the page. +func (p *Page) Meta() *Meta { + return (*Meta)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) +} + +func (p *Page) FastCheck(id Pgid) { + Assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id) + // Only one flag of page-type can be set. + Assert(p.flags == BranchPageFlag || + p.flags == LeafPageFlag || + p.flags == MetaPageFlag || + p.flags == FreelistPageFlag, + "page %v: has unexpected type/flags: %x", p.id, p.flags) +} + +// LeafPageElement retrieves the leaf node by index +func (p *Page) LeafPageElement(index uint16) *leafPageElement { + return (*leafPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), + LeafPageElementSize, int(index))) +} + +// LeafPageElements retrieves a list of leaf nodes. 
+func (p *Page) LeafPageElements() []leafPageElement { + if p.count == 0 { + return nil + } + var elems []leafPageElement + data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + UnsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) + return elems +} + +// BranchPageElement retrieves the branch node by index +func (p *Page) BranchPageElement(index uint16) *branchPageElement { + return (*branchPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), + unsafe.Sizeof(branchPageElement{}), int(index))) +} + +// BranchPageElements retrieves a list of branch nodes. +func (p *Page) BranchPageElements() []branchPageElement { + if p.count == 0 { + return nil + } + var elems []branchPageElement + data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + UnsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) + return elems +} + +func (p *Page) FreelistPageCount() (int, int) { + Assert(p.flags == FreelistPageFlag, fmt.Sprintf("can't get freelist page count from a non-freelist page: %2x", p.flags)) + + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + var idx, count = 0, int(p.count) + if count == 0xFFFF { + idx = 1 + c := *(*Pgid)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) + count = int(c) + if count < 0 { + panic(fmt.Sprintf("leading element count %d overflows int", c)) + } + } + + return idx, count +} + +func (p *Page) FreelistPageIds() []Pgid { + Assert(p.flags == FreelistPageFlag, fmt.Sprintf("can't get freelist page IDs from a non-freelist page: %2x", p.flags)) + + idx, count := p.FreelistPageCount() + + if count == 0 { + return nil + } + + var ids []Pgid + data := UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx) + UnsafeSlice(unsafe.Pointer(&ids), data, count) + + return ids +} + +// dump writes n bytes of the page to STDERR as hex output. 
+func (p *Page) hexdump(n int) { + buf := UnsafeByteSlice(unsafe.Pointer(p), 0, 0, n) + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +func (p *Page) Id() Pgid { + return p.id +} + +func (p *Page) SetId(target Pgid) { + p.id = target +} + +func (p *Page) Flags() uint16 { + return p.flags +} + +func (p *Page) SetFlags(v uint16) { + p.flags = v +} + +func (p *Page) FlagsXOR(v uint16) { + p.flags |= v +} + +func (p *Page) Count() uint16 { + return p.count +} + +func (p *Page) SetCount(target uint16) { + p.count = target +} + +func (p *Page) Overflow() uint32 { + return p.overflow +} + +func (p *Page) SetOverflow(target uint32) { + p.overflow = target +} + +func (p *Page) String() string { + return fmt.Sprintf("ID: %d, Type: %s, count: %d, overflow: %d", p.id, p.Typ(), p.count, p.overflow) +} + +type Pages []*Page + +func (s Pages) Len() int { return len(s) } +func (s Pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s Pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid Pgid +} + +func (n *branchPageElement) Pos() uint32 { + return n.pos +} + +func (n *branchPageElement) SetPos(v uint32) { + n.pos = v +} + +func (n *branchPageElement) Ksize() uint32 { + return n.ksize +} + +func (n *branchPageElement) SetKsize(v uint32) { + n.ksize = v +} + +func (n *branchPageElement) Pgid() Pgid { + return n.pgid +} + +func (n *branchPageElement) SetPgid(v Pgid) { + n.pgid = v +} + +// Key returns a byte slice of the node key. +func (n *branchPageElement) Key() []byte { + return UnsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) +} + +// leafPageElement represents a node on a leaf page. 
+type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +func NewLeafPageElement(flags, pos, ksize, vsize uint32) *leafPageElement { + return &leafPageElement{ + flags: flags, + pos: pos, + ksize: ksize, + vsize: vsize, + } +} + +func (n *leafPageElement) Flags() uint32 { + return n.flags +} + +func (n *leafPageElement) SetFlags(v uint32) { + n.flags = v +} + +func (n *leafPageElement) Pos() uint32 { + return n.pos +} + +func (n *leafPageElement) SetPos(v uint32) { + n.pos = v +} + +func (n *leafPageElement) Ksize() uint32 { + return n.ksize +} + +func (n *leafPageElement) SetKsize(v uint32) { + n.ksize = v +} + +func (n *leafPageElement) Vsize() uint32 { + return n.vsize +} + +func (n *leafPageElement) SetVsize(v uint32) { + n.vsize = v +} + +// Key returns a byte slice of the node key. +func (n *leafPageElement) Key() []byte { + i := int(n.pos) + j := i + int(n.ksize) + return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j) +} + +// Value returns a byte slice of the node value. +func (n *leafPageElement) Value() []byte { + i := int(n.pos) + int(n.ksize) + j := i + int(n.vsize) + return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j) +} + +func (n *leafPageElement) IsBucketEntry() bool { + return n.flags&uint32(BucketLeafFlag) != 0 +} + +func (n *leafPageElement) Bucket() *InBucket { + if n.IsBucketEntry() { + return LoadBucket(n.Value()) + } else { + return nil + } +} + +// PageInfo represents human readable information about a page. +type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type Pgids []Pgid + +func (s Pgids) Len() int { return len(s) } +func (s Pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s Pgids) Less(i, j int) bool { return s[i] < s[j] } + +// Merge returns the sorted union of a and b. +func (a Pgids) Merge(b Pgids) Pgids { + // Return the opposite slice if one is nil. 
+ if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + merged := make(Pgids, len(a)+len(b)) + Mergepgids(merged, a, b) + return merged +} + +// Mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. +func Mergepgids(dst, a, b Pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } + + // Merged will hold all elements from both lists. + merged := dst[:0] + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + _ = append(merged, follow...) +} diff --git a/internal/common/page_test.go b/internal/common/page_test.go new file mode 100644 index 000000000..376ab6a6c --- /dev/null +++ b/internal/common/page_test.go @@ -0,0 +1,72 @@ +package common + +import ( + "reflect" + "sort" + "testing" + "testing/quick" +) + +// Ensure that the page type can be returned in human readable format. 
+func TestPage_typ(t *testing.T) { + if typ := (&Page{flags: BranchPageFlag}).Typ(); typ != "branch" { + t.Fatalf("exp=branch; got=%v", typ) + } + if typ := (&Page{flags: LeafPageFlag}).Typ(); typ != "leaf" { + t.Fatalf("exp=leaf; got=%v", typ) + } + if typ := (&Page{flags: MetaPageFlag}).Typ(); typ != "meta" { + t.Fatalf("exp=meta; got=%v", typ) + } + if typ := (&Page{flags: FreelistPageFlag}).Typ(); typ != "freelist" { + t.Fatalf("exp=freelist; got=%v", typ) + } + if typ := (&Page{flags: 20000}).Typ(); typ != "unknown<4e20>" { + t.Fatalf("exp=unknown<4e20>; got=%v", typ) + } +} + +// Ensure that the hexdump debugging function doesn't blow up. +func TestPage_dump(t *testing.T) { + (&Page{id: 256}).hexdump(16) +} + +func TestPgids_merge(t *testing.T) { + a := Pgids{4, 5, 6, 10, 11, 12, 13, 27} + b := Pgids{1, 3, 8, 9, 25, 30} + c := a.Merge(b) + if !reflect.DeepEqual(c, Pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}) { + t.Errorf("mismatch: %v", c) + } + + a = Pgids{4, 5, 6, 10, 11, 12, 13, 27, 35, 36} + b = Pgids{8, 9, 25, 30} + c = a.Merge(b) + if !reflect.DeepEqual(c, Pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}) { + t.Errorf("mismatch: %v", c) + } +} + +func TestPgids_merge_quick(t *testing.T) { + if err := quick.Check(func(a, b Pgids) bool { + // Sort incoming lists. + sort.Sort(a) + sort.Sort(b) + + // Merge the two lists together. + got := a.Merge(b) + + // The expected value should be the two lists combined and sorted. + exp := append(a, b...) + sort.Sort(exp) + + if !reflect.DeepEqual(exp, got) { + t.Errorf("\nexp=%+v\ngot=%+v\n", exp, got) + return false + } + + return true + }, nil); err != nil { + t.Fatal(err) + } +} diff --git a/internal/common/types.go b/internal/common/types.go new file mode 100644 index 000000000..e970e86a2 --- /dev/null +++ b/internal/common/types.go @@ -0,0 +1,50 @@ +package common + +import ( + "os" + "runtime" + "time" +) + +// MaxMmapStep is the largest step that can be taken when remapping the mmap. 
+const MaxMmapStep = 1 << 30 // 1GB + +// Version represents the data file format version. +const Version = 2 + +// Magic represents a marker value to indicate that a file is a Bolt DB. +const Magic uint32 = 0xED0CDAED + +const PgidNoFreelist Pgid = 0xffffffffffffffff + +// DO NOT EDIT. Copied from the "bolt" package. +const pageMaxAllocSize = 0xFFFFFFF + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronized using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 +) + +// DefaultPageSize is the default page size for db which is set to the OS page size. +var DefaultPageSize = os.Getpagesize() + +// FreelistType is the type of the freelist backend +type FreelistType string + +const ( + // FreelistArrayType indicates backend freelist type is array + FreelistArrayType = FreelistType("array") + // FreelistMapType indicates backend freelist type is hashmap + FreelistMapType = FreelistType("hashmap") +) + +// Txid represents the internal transaction identifier. 
+type Txid uint64 diff --git a/internal/common/unsafe.go b/internal/common/unsafe.go new file mode 100644 index 000000000..c1970ba3c --- /dev/null +++ b/internal/common/unsafe.go @@ -0,0 +1,39 @@ +package common + +import ( + "reflect" + "unsafe" +) + +func UnsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer { + return unsafe.Pointer(uintptr(base) + offset) +} + +func UnsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer { + return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz) +} + +func UnsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { + // See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices + // + // This memory is not allocated from C, but it is unmanaged by Go's + // garbage collector and should behave similarly, and the compiler + // should produce similar code. Note that this conversion allows a + // subslice to begin after the base address, with an optional offset, + // while the URL above does not cover this case and only slices from + // index 0. However, the wiki never says that the address must be to + // the beginning of a C allocation (or even that malloc was used at + // all), so this is believed to be correct. + return (*[pageMaxAllocSize]byte)(UnsafeAdd(base, offset))[i:j:j] +} + +// UnsafeSlice modifies the data, len, and cap of a slice variable pointed to by +// the slice parameter. This helper should be used over other direct +// manipulation of reflect.SliceHeader to prevent misuse, namely, converting +// from reflect.SliceHeader to a Go slice type. 
+func UnsafeSlice(slice, data unsafe.Pointer, len int) { + s := (*reflect.SliceHeader)(slice) + s.Data = uintptr(data) + s.Cap = len + s.Len = len +} diff --git a/internal/common/utils.go b/internal/common/utils.go new file mode 100644 index 000000000..8fca0a661 --- /dev/null +++ b/internal/common/utils.go @@ -0,0 +1,25 @@ +package common + +import ( + "fmt" + "unsafe" +) + +// Assert will panic with a given formatted message if the given condition is false. +func Assert(condition bool, msg string, v ...interface{}) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} + +func LoadBucket(buf []byte) *InBucket { + return (*InBucket)(unsafe.Pointer(&buf[0])) +} + +func LoadPage(buf []byte) *Page { + return (*Page)(unsafe.Pointer(&buf[0])) +} + +func LoadPageMeta(buf []byte) *Meta { + return (*Meta)(unsafe.Pointer(&buf[PageHeaderSize])) +} From ea511567eb216de0ef8539eacbd56bed8d1aa2a7 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sat, 28 Jan 2023 14:37:24 +0800 Subject: [PATCH 010/439] refactor both bolt and guts_cli based on the common package Signed-off-by: Benjamin Wang --- allocate_test.go | 10 +- bolt_unix.go | 4 +- bolt_windows.go | 6 +- bucket.go | 237 +++++++++++++------------- bucket_test.go | 31 ++-- cmd/bbolt/main.go | 43 +++-- cmd/bbolt/page_command.go | 22 +-- cmd/bbolt/surgery_commands.go | 6 +- cmd/bbolt/surgery_commands_test.go | 8 +- cursor.go | 86 +++++----- cursor_test.go | 3 +- db.go | 258 +++++++++------------------- db_test.go | 15 +- db_whitebox_test.go | 4 +- errors.go | 78 --------- freelist.go | 173 +++++++++---------- freelist_hmap.go | 50 +++--- freelist_test.go | 142 ++++++++-------- internal/btesting/btesting.go | 7 +- internal/guts_cli/guts_cli.go | 265 +++-------------------------- internal/surgeon/surgeon.go | 5 +- internal/surgeon/xray.go | 21 +-- node.go | 120 ++++++------- node_test.go | 43 +++-- page.go | 214 ----------------------- page_test.go | 72 -------- tx.go | 141 ++++++++------- tx_check.go | 
74 ++++----
 tx_test.go | 25 +--
 unsafe.go | 39 -----
 30 files changed, 749 insertions(+), 1453 deletions(-)
 delete mode 100644 errors.go
 delete mode 100644 page.go
 delete mode 100644 page_test.go
 delete mode 100644 unsafe.go

diff --git a/allocate_test.go b/allocate_test.go
index 94e9116d0..9f08be1cf 100644
--- a/allocate_test.go
+++ b/allocate_test.go
@@ -2,20 +2,22 @@ package bbolt

 import (
 	"testing"
+
+	"go.etcd.io/bbolt/internal/common"
 )

 func TestTx_allocatePageStats(t *testing.T) {
 	f := newTestFreelist()
-	ids := []pgid{2, 3}
+	ids := []common.Pgid{2, 3}
 	f.readIDs(ids)

 	tx := &Tx{
 		db: &DB{
 			freelist: f,
-			pageSize: defaultPageSize,
+			pageSize: common.DefaultPageSize,
 		},
-		meta:  &meta{},
-		pages: make(map[pgid]*page),
+		meta:  &common.Meta{},
+		pages: make(map[common.Pgid]*common.Page),
 	}

 	txStats := tx.Stats()
diff --git a/bolt_unix.go b/bolt_unix.go
index 757ae4d1a..e901e5643 100644
--- a/bolt_unix.go
+++ b/bolt_unix.go
@@ -10,6 +10,8 @@ import (
 	"unsafe"

 	"golang.org/x/sys/unix"
+
+	"go.etcd.io/bbolt/internal/common"
 )

 // flock acquires an advisory lock on a file descriptor.
@@ -36,7 +38,7 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error {

 		// If we timed out then return an error.
 		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
-			return ErrTimeout
+			return common.ErrTimeout
 		}

 		// Wait for a bit and try again.
diff --git a/bolt_windows.go b/bolt_windows.go
index e5dde2745..1981c64a3 100644
--- a/bolt_windows.go
+++ b/bolt_windows.go
@@ -8,6 +8,8 @@ import (
 	"unsafe"

 	"golang.org/x/sys/windows"
+
+	"go.etcd.io/bbolt/internal/common"
 )

 // fdatasync flushes written data to a file descriptor.
@@ -42,7 +44,7 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error {

 		// If we timed out then return an error.
 		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
-			return ErrTimeout
+			return common.ErrTimeout
 		}

 		// Wait for a bit and try again.
@@ -93,7 +95,7 @@ func mmap(db *DB, sz int) error { } // Convert to a byte array. - db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.data = (*[maxMapSize]byte)(unsafe.Pointer(addr)) db.datasz = sz return nil diff --git a/bucket.go b/bucket.go index 054467af3..0950f77ea 100644 --- a/bucket.go +++ b/bucket.go @@ -4,6 +4,8 @@ import ( "bytes" "fmt" "unsafe" + + "go.etcd.io/bbolt/internal/common" ) const ( @@ -14,8 +16,6 @@ const ( MaxValueSize = (1 << 31) - 2 ) -const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) - const ( minFillPercent = 0.1 maxFillPercent = 1.0 @@ -27,12 +27,12 @@ const DefaultFillPercent = 0.5 // Bucket represents a collection of key/value pairs inside the database. type Bucket struct { - *bucket - tx *Tx // the associated transaction - buckets map[string]*Bucket // subbucket cache - page *page // inline page reference - rootNode *node // materialized node for the root page. - nodes map[pgid]*node // node cache + *common.InBucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *common.Page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[common.Pgid]*node // node cache // Sets the threshold for filling nodes when they split. By default, // the bucket will fill to 50% but it can be useful to increase this @@ -42,21 +42,12 @@ type Bucket struct { FillPercent float64 } -// bucket represents the on-file representation of a bucket. -// This is stored as the "value" of a bucket key. If the bucket is small enough, -// then its root page can be stored inline in the "value", after the bucket -// header. In the case of inline buckets, the "root" will be 0. -type bucket struct { - root pgid // page id of the bucket's root-level page - sequence uint64 // monotonically incrementing, used by NextSequence() -} - // newBucket returns a new bucket associated with a transaction. 
func newBucket(tx *Tx) Bucket { var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} if tx.writable { b.buckets = make(map[string]*Bucket) - b.nodes = make(map[pgid]*node) + b.nodes = make(map[common.Pgid]*node) } return b } @@ -67,8 +58,8 @@ func (b *Bucket) Tx() *Tx { } // Root returns the root of the bucket. -func (b *Bucket) Root() pgid { - return b.root +func (b *Bucket) Root() common.Pgid { + return b.RootPage() } // Writable returns whether the bucket is writable. @@ -105,7 +96,7 @@ func (b *Bucket) Bucket(name []byte) *Bucket { k, v, flags := c.seek(name) // Return nil if the key doesn't exist or it is not a bucket. - if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + if !bytes.Equal(name, k) || (flags&common.BucketLeafFlag) == 0 { return nil } @@ -125,8 +116,8 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // Unaligned access requires a copy to be made. const unalignedMask = unsafe.Alignof(struct { - bucket - page + common.InBucket + common.Page }{}) - 1 unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0 if unaligned { @@ -136,15 +127,15 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // If this is a writable transaction then we need to copy the bucket entry. // Read-only transactions can point directly at the mmap entry. if b.tx.writable && !unaligned { - child.bucket = &bucket{} - *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + child.InBucket = &common.InBucket{} + *child.InBucket = *(*common.InBucket)(unsafe.Pointer(&value[0])) } else { - child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + child.InBucket = (*common.InBucket)(unsafe.Pointer(&value[0])) } // Save a reference to the inline page if the bucket is inline. 
- if child.root == 0 { - child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + if child.RootPage() == 0 { + child.page = (*common.Page)(unsafe.Pointer(&value[common.BucketHeaderSize])) } return &child @@ -155,11 +146,11 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // The bucket instance is only valid for the lifetime of the transaction. func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { if b.tx.db == nil { - return nil, ErrTxClosed + return nil, common.ErrTxClosed } else if !b.tx.writable { - return nil, ErrTxNotWritable + return nil, common.ErrTxNotWritable } else if len(key) == 0 { - return nil, ErrBucketNameRequired + return nil, common.ErrBucketNameRequired } // Move cursor to correct position. @@ -168,15 +159,15 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { // Return an error if there is an existing key. if bytes.Equal(key, k) { - if (flags & bucketLeafFlag) != 0 { - return nil, ErrBucketExists + if (flags & common.BucketLeafFlag) != 0 { + return nil, common.ErrBucketExists } - return nil, ErrIncompatibleValue + return nil, common.ErrIncompatibleValue } // Create empty, inline bucket. var bucket = Bucket{ - bucket: &bucket{}, + InBucket: &common.InBucket{}, rootNode: &node{isLeaf: true}, FillPercent: DefaultFillPercent, } @@ -184,7 +175,7 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { // Insert into node. key = cloneBytes(key) - c.node().put(key, key, value, 0, bucketLeafFlag) + c.node().put(key, key, value, 0, common.BucketLeafFlag) // Since subbuckets are not allowed on inline buckets, we need to // dereference the inline page, if it exists. This will cause the bucket @@ -199,7 +190,7 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { // The bucket instance is only valid for the lifetime of the transaction. 
func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { child, err := b.CreateBucket(key) - if err == ErrBucketExists { + if err == common.ErrBucketExists { return b.Bucket(key), nil } else if err != nil { return nil, err @@ -211,9 +202,9 @@ func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { // Returns an error if the bucket does not exist, or if the key represents a non-bucket value. func (b *Bucket) DeleteBucket(key []byte) error { if b.tx.db == nil { - return ErrTxClosed + return common.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return common.ErrTxNotWritable } // Move cursor to correct position. @@ -222,9 +213,9 @@ func (b *Bucket) DeleteBucket(key []byte) error { // Return an error if bucket doesn't exist or is not a bucket. if !bytes.Equal(key, k) { - return ErrBucketNotFound - } else if (flags & bucketLeafFlag) == 0 { - return ErrIncompatibleValue + return common.ErrBucketNotFound + } else if (flags & common.BucketLeafFlag) == 0 { + return common.ErrIncompatibleValue } // Recursively delete all child buckets. @@ -260,7 +251,7 @@ func (b *Bucket) Get(key []byte) []byte { k, v, flags := b.Cursor().seek(key) // Return nil if this is a bucket. - if (flags & bucketLeafFlag) != 0 { + if (flags & common.BucketLeafFlag) != 0 { return nil } @@ -277,15 +268,15 @@ func (b *Bucket) Get(key []byte) []byte { // Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. 
func (b *Bucket) Put(key []byte, value []byte) error { if b.tx.db == nil { - return ErrTxClosed + return common.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return common.ErrTxNotWritable } else if len(key) == 0 { - return ErrKeyRequired + return common.ErrKeyRequired } else if len(key) > MaxKeySize { - return ErrKeyTooLarge + return common.ErrKeyTooLarge } else if int64(len(value)) > MaxValueSize { - return ErrValueTooLarge + return common.ErrValueTooLarge } // Move cursor to correct position. @@ -293,8 +284,8 @@ func (b *Bucket) Put(key []byte, value []byte) error { k, _, flags := c.seek(key) // Return an error if there is an existing key with a bucket value. - if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 { - return ErrIncompatibleValue + if bytes.Equal(key, k) && (flags&common.BucketLeafFlag) != 0 { + return common.ErrIncompatibleValue } // Insert into node. @@ -309,9 +300,9 @@ func (b *Bucket) Put(key []byte, value []byte) error { // Returns an error if the bucket was created from a read-only transaction. func (b *Bucket) Delete(key []byte) error { if b.tx.db == nil { - return ErrTxClosed + return common.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return common.ErrTxNotWritable } // Move cursor to correct position. @@ -324,8 +315,8 @@ func (b *Bucket) Delete(key []byte) error { } // Return an error if there is already existing bucket value. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue + if (flags & common.BucketLeafFlag) != 0 { + return common.ErrIncompatibleValue } // Delete the node if we have a matching key. @@ -335,44 +326,46 @@ func (b *Bucket) Delete(key []byte) error { } // Sequence returns the current integer for the bucket without incrementing it. -func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } +func (b *Bucket) Sequence() uint64 { + return b.InSequence() +} // SetSequence updates the sequence number for the bucket. 
func (b *Bucket) SetSequence(v uint64) error { if b.tx.db == nil { - return ErrTxClosed + return common.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return common.ErrTxNotWritable } // Materialize the root node if it hasn't been already so that the // bucket will be saved during commit. if b.rootNode == nil { - _ = b.node(b.root, nil) + _ = b.node(b.RootPage(), nil) } // Set the sequence. - b.bucket.sequence = v + b.SetInSequence(v) return nil } // NextSequence returns an autoincrementing integer for the bucket. func (b *Bucket) NextSequence() (uint64, error) { if b.tx.db == nil { - return 0, ErrTxClosed + return 0, common.ErrTxClosed } else if !b.Writable() { - return 0, ErrTxNotWritable + return 0, common.ErrTxNotWritable } // Materialize the root node if it hasn't been already so that the // bucket will be saved during commit. if b.rootNode == nil { - _ = b.node(b.root, nil) + _ = b.node(b.RootPage(), nil) } // Increment and return the sequence. - b.bucket.sequence++ - return b.bucket.sequence, nil + b.IncSequence() + return b.Sequence(), nil } // ForEach executes a function for each key/value pair in a bucket. @@ -382,7 +375,7 @@ func (b *Bucket) NextSequence() (uint64, error) { // the bucket; this will result in undefined behavior. 
func (b *Bucket) ForEach(fn func(k, v []byte) error) error { if b.tx.db == nil { - return ErrTxClosed + return common.ErrTxClosed } c := b.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { @@ -395,11 +388,11 @@ func (b *Bucket) ForEach(fn func(k, v []byte) error) error { func (b *Bucket) ForEachBucket(fn func(k []byte) error) error { if b.tx.db == nil { - return ErrTxClosed + return common.ErrTxClosed } c := b.Cursor() for k, _, flags := c.first(); k != nil; k, _, flags = c.next() { - if flags&bucketLeafFlag != 0 { + if flags&common.BucketLeafFlag != 0 { if err := fn(k); err != nil { return err } @@ -413,64 +406,64 @@ func (b *Bucket) Stats() BucketStats { var s, subStats BucketStats pageSize := b.tx.db.pageSize s.BucketN += 1 - if b.root == 0 { + if b.RootPage() == 0 { s.InlineBucketN += 1 } - b.forEachPage(func(p *page, depth int, pgstack []pgid) { - if (p.flags & leafPageFlag) != 0 { - s.KeyN += int(p.count) + b.forEachPage(func(p *common.Page, depth int, pgstack []common.Pgid) { + if (p.Flags() & common.LeafPageFlag) != 0 { + s.KeyN += int(p.Count()) // used totals the used bytes for the page - used := pageHeaderSize + used := common.PageHeaderSize - if p.count != 0 { + if p.Count() != 0 { // If page has any elements, add all element headers. - used += leafPageElementSize * uintptr(p.count-1) + used += common.LeafPageElementSize * uintptr(p.Count()-1) // Add all element key, value sizes. // The computation takes advantage of the fact that the position // of the last element's key/value equals to the total of the sizes // of all previous elements' keys and values. // It also includes the last element's header. 
- lastElement := p.leafPageElement(p.count - 1) - used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize) + lastElement := p.LeafPageElement(p.Count() - 1) + used += uintptr(lastElement.Pos() + lastElement.Ksize() + lastElement.Vsize()) } - if b.root == 0 { + if b.RootPage() == 0 { // For inlined bucket just update the inline stats s.InlineBucketInuse += int(used) } else { // For non-inlined bucket update all the leaf stats s.LeafPageN++ s.LeafInuse += int(used) - s.LeafOverflowN += int(p.overflow) + s.LeafOverflowN += int(p.Overflow()) // Collect stats from sub-buckets. // Do that by iterating over all element headers // looking for the ones with the bucketLeafFlag. - for i := uint16(0); i < p.count; i++ { - e := p.leafPageElement(i) - if (e.flags & bucketLeafFlag) != 0 { + for i := uint16(0); i < p.Count(); i++ { + e := p.LeafPageElement(i) + if (e.Flags() & common.BucketLeafFlag) != 0 { // For any bucket element, open the element value // and recursively call Stats on the contained bucket. - subStats.Add(b.openBucket(e.value()).Stats()) + subStats.Add(b.openBucket(e.Value()).Stats()) } } } - } else if (p.flags & branchPageFlag) != 0 { + } else if (p.Flags() & common.BranchPageFlag) != 0 { s.BranchPageN++ - lastElement := p.branchPageElement(p.count - 1) + lastElement := p.BranchPageElement(p.Count() - 1) // used totals the used bytes for the page // Add header and all element headers. - used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1)) + used := common.PageHeaderSize + (common.BranchPageElementSize * uintptr(p.Count()-1)) // Add size of all keys and values. // Again, use the fact that last element's position equals to // the total of key, value sizes of all previous elements. 
- used += uintptr(lastElement.pos + lastElement.ksize) + used += uintptr(lastElement.Pos() + lastElement.Ksize()) s.BranchInuse += int(used) - s.BranchOverflowN += int(p.overflow) + s.BranchOverflowN += int(p.Overflow()) } // Keep track of maximum page depth. @@ -491,29 +484,29 @@ func (b *Bucket) Stats() BucketStats { } // forEachPage iterates over every page in a bucket, including inline pages. -func (b *Bucket) forEachPage(fn func(*page, int, []pgid)) { +func (b *Bucket) forEachPage(fn func(*common.Page, int, []common.Pgid)) { // If we have an inline page then just use that. if b.page != nil { - fn(b.page, 0, []pgid{b.root}) + fn(b.page, 0, []common.Pgid{b.RootPage()}) return } // Otherwise traverse the page hierarchy. - b.tx.forEachPage(b.root, fn) + b.tx.forEachPage(b.RootPage(), fn) } // forEachPageNode iterates over every page (or node) in a bucket. // This also includes inline pages. -func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { +func (b *Bucket) forEachPageNode(fn func(*common.Page, *node, int)) { // If we have an inline page or root node then just use that. if b.page != nil { fn(b.page, nil, 0) return } - b._forEachPageNode(b.root, 0, fn) + b._forEachPageNode(b.RootPage(), 0, fn) } -func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, int)) { +func (b *Bucket) _forEachPageNode(pgId common.Pgid, depth int, fn func(*common.Page, *node, int)) { var p, n = b.pageNode(pgId) // Execute function. @@ -521,10 +514,10 @@ func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, in // Recursively loop over children. 
if p != nil { - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - b._forEachPageNode(elem.pgid, depth+1, fn) + if (p.Flags() & common.BranchPageFlag) != 0 { + for i := 0; i < int(p.Count()); i++ { + elem := p.BranchPageElement(uint16(i)) + b._forEachPageNode(elem.Pgid(), depth+1, fn) } } } else { @@ -553,9 +546,9 @@ func (b *Bucket) spill() error { } // Update the child bucket header in this bucket. - value = make([]byte, unsafe.Sizeof(bucket{})) - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *child.bucket + value = make([]byte, unsafe.Sizeof(common.InBucket{})) + var bucket = (*common.InBucket)(unsafe.Pointer(&value[0])) + *bucket = *child.InBucket } // Skip writing the bucket if there are no materialized nodes. @@ -569,10 +562,10 @@ func (b *Bucket) spill() error { if !bytes.Equal([]byte(name), k) { panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) } - if flags&bucketLeafFlag == 0 { + if flags&common.BucketLeafFlag == 0 { panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) } - c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + c.node().put([]byte(name), []byte(name), value, 0, common.BucketLeafFlag) } // Ignore if there's not a materialized root node. @@ -587,16 +580,16 @@ func (b *Bucket) spill() error { b.rootNode = b.rootNode.root() // Update the root node for this bucket. - if b.rootNode.pgid >= b.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + if b.rootNode.pgid >= b.tx.meta.Pgid() { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.Pgid())) } - b.root = b.rootNode.pgid + b.SetRootPage(b.rootNode.pgid) return nil } // inlineable returns true if a bucket is small enough to be written inline -// and if it contains no subbuckets. Otherwise returns false. +// and if it contains no subbuckets. Otherwise, returns false. 
func (b *Bucket) inlineable() bool { var n = b.rootNode @@ -607,11 +600,11 @@ func (b *Bucket) inlineable() bool { // Bucket is not inlineable if it contains subbuckets or if it goes beyond // our threshold for inline bucket size. - var size = pageHeaderSize + var size = common.PageHeaderSize for _, inode := range n.inodes { - size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value)) + size += common.LeafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value)) - if inode.flags&bucketLeafFlag != 0 { + if inode.flags&common.BucketLeafFlag != 0 { return false } else if size > b.maxInlineBucketSize() { return false @@ -630,14 +623,14 @@ func (b *Bucket) maxInlineBucketSize() uintptr { func (b *Bucket) write() []byte { // Allocate the appropriate size. var n = b.rootNode - var value = make([]byte, bucketHeaderSize+n.size()) + var value = make([]byte, common.BucketHeaderSize+n.size()) // Write a bucket header. - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *b.bucket + var bucket = (*common.InBucket)(unsafe.Pointer(&value[0])) + *bucket = *b.InBucket // Convert byte slice to a fake page and write the root node. - var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + var p = (*common.Page)(unsafe.Pointer(&value[common.BucketHeaderSize])) n.write(p) return value @@ -654,8 +647,8 @@ func (b *Bucket) rebalance() { } // node creates a node from a page and associates it with a given parent. -func (b *Bucket) node(pgId pgid, parent *node) *node { - _assert(b.nodes != nil, "nodes map expected") +func (b *Bucket) node(pgId common.Pgid, parent *node) *node { + common.Assert(b.nodes != nil, "nodes map expected") // Retrieve node if it's already been created. if n := b.nodes[pgId]; n != nil { @@ -688,19 +681,19 @@ func (b *Bucket) node(pgId pgid, parent *node) *node { // free recursively frees all pages in the bucket. 
func (b *Bucket) free() { - if b.root == 0 { + if b.RootPage() == 0 { return } var tx = b.tx - b.forEachPageNode(func(p *page, n *node, _ int) { + b.forEachPageNode(func(p *common.Page, n *node, _ int) { if p != nil { - tx.db.freelist.free(tx.meta.txid, p) + tx.db.freelist.free(tx.meta.Txid(), p) } else { n.free() } }) - b.root = 0 + b.SetRootPage(0) } // dereference removes all references to the old mmap. @@ -715,11 +708,11 @@ func (b *Bucket) dereference() { } // pageNode returns the in-memory node, if it exists. -// Otherwise returns the underlying page. -func (b *Bucket) pageNode(id pgid) (*page, *node) { +// Otherwise, returns the underlying page. +func (b *Bucket) pageNode(id common.Pgid) (*common.Page, *node) { // Inline buckets have a fake page embedded in their value so treat them // differently. We'll return the rootNode (if available) or the fake page. - if b.root == 0 { + if b.RootPage() == 0 { if id != 0 { panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) } diff --git a/bucket_test.go b/bucket_test.go index 137061232..33ff149b7 100644 --- a/bucket_test.go +++ b/bucket_test.go @@ -18,6 +18,7 @@ import ( bolt "go.etcd.io/bbolt" "go.etcd.io/bbolt/internal/btesting" + "go.etcd.io/bbolt/internal/common" ) // Ensure that a bucket that gets a non-existent key returns nil. 
@@ -246,7 +247,7 @@ func TestBucket_Put_IncompatibleValue(t *testing.T) { if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil { t.Fatal(err) } - if err := b0.Put([]byte("foo"), []byte("bar")); err != bolt.ErrIncompatibleValue { + if err := b0.Put([]byte("foo"), []byte("bar")); err != common.ErrIncompatibleValue { t.Fatalf("unexpected error: %s", err) } return nil @@ -272,7 +273,7 @@ func TestBucket_Put_Closed(t *testing.T) { t.Fatal(err) } - if err := b.Put([]byte("foo"), []byte("bar")); err != bolt.ErrTxClosed { + if err := b.Put([]byte("foo"), []byte("bar")); err != common.ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } @@ -292,7 +293,7 @@ func TestBucket_Put_ReadOnly(t *testing.T) { if err := db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) - if err := b.Put([]byte("foo"), []byte("bar")); err != bolt.ErrTxNotWritable { + if err := b.Put([]byte("foo"), []byte("bar")); err != common.ErrTxNotWritable { t.Fatalf("unexpected error: %s", err) } return nil @@ -560,7 +561,7 @@ func TestBucket_Delete_Bucket(t *testing.T) { if _, err := b.CreateBucket([]byte("foo")); err != nil { t.Fatal(err) } - if err := b.Delete([]byte("foo")); err != bolt.ErrIncompatibleValue { + if err := b.Delete([]byte("foo")); err != common.ErrIncompatibleValue { t.Fatalf("unexpected error: %s", err) } return nil @@ -583,7 +584,7 @@ func TestBucket_Delete_ReadOnly(t *testing.T) { } if err := db.View(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")); err != bolt.ErrTxNotWritable { + if err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")); err != common.ErrTxNotWritable { t.Fatalf("unexpected error: %s", err) } return nil @@ -609,7 +610,7 @@ func TestBucket_Delete_Closed(t *testing.T) { if err := tx.Rollback(); err != nil { t.Fatal(err) } - if err := b.Delete([]byte("foo")); err != bolt.ErrTxClosed { + if err := b.Delete([]byte("foo")); err != common.ErrTxClosed { t.Fatalf("unexpected error: %s", 
err) } } @@ -780,7 +781,7 @@ func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) { if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { t.Fatal(err) } - if _, err := widgets.CreateBucket([]byte("foo")); err != bolt.ErrIncompatibleValue { + if _, err := widgets.CreateBucket([]byte("foo")); err != common.ErrIncompatibleValue { t.Fatalf("unexpected error: %s", err) } return nil @@ -801,7 +802,7 @@ func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) { if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { t.Fatal(err) } - if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != bolt.ErrIncompatibleValue { + if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != common.ErrIncompatibleValue { t.Fatalf("unexpected error: %s", err) } return nil @@ -943,7 +944,7 @@ func TestBucket_NextSequence_ReadOnly(t *testing.T) { if err := db.View(func(tx *bolt.Tx) error { _, err := tx.Bucket([]byte("widgets")).NextSequence() - if err != bolt.ErrTxNotWritable { + if err != common.ErrTxNotWritable { t.Fatalf("unexpected error: %s", err) } return nil @@ -966,7 +967,7 @@ func TestBucket_NextSequence_Closed(t *testing.T) { if err := tx.Rollback(); err != nil { t.Fatal(err) } - if _, err := b.NextSequence(); err != bolt.ErrTxClosed { + if _, err := b.NextSequence(); err != common.ErrTxClosed { t.Fatal(err) } } @@ -1158,7 +1159,7 @@ func TestBucket_ForEach_Closed(t *testing.T) { t.Fatal(err) } - if err := b.ForEach(func(k, v []byte) error { return nil }); err != bolt.ErrTxClosed { + if err := b.ForEach(func(k, v []byte) error { return nil }); err != common.ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } @@ -1172,10 +1173,10 @@ func TestBucket_Put_EmptyKey(t *testing.T) { if err != nil { t.Fatal(err) } - if err := b.Put([]byte(""), []byte("bar")); err != bolt.ErrKeyRequired { + if err := b.Put([]byte(""), []byte("bar")); err != common.ErrKeyRequired { t.Fatalf("unexpected error: %s", err) } - if err := 
b.Put(nil, []byte("bar")); err != bolt.ErrKeyRequired { + if err := b.Put(nil, []byte("bar")); err != common.ErrKeyRequired { t.Fatalf("unexpected error: %s", err) } return nil @@ -1192,7 +1193,7 @@ func TestBucket_Put_KeyTooLarge(t *testing.T) { if err != nil { t.Fatal(err) } - if err := b.Put(make([]byte, 32769), []byte("bar")); err != bolt.ErrKeyTooLarge { + if err := b.Put(make([]byte, 32769), []byte("bar")); err != common.ErrKeyTooLarge { t.Fatalf("unexpected error: %s", err) } return nil @@ -1215,7 +1216,7 @@ func TestBucket_Put_ValueTooLarge(t *testing.T) { if err != nil { t.Fatal(err) } - if err := b.Put([]byte("foo"), make([]byte, bolt.MaxValueSize+1)); err != bolt.ErrValueTooLarge { + if err := b.Put([]byte("foo"), make([]byte, bolt.MaxValueSize+1)); err != common.ErrValueTooLarge { t.Fatalf("unexpected error: %s", err) } return nil diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 96661b67a..a06dd0812 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -18,11 +18,10 @@ import ( "time" "unicode" "unicode/utf8" - "unsafe" - - "go.etcd.io/bbolt/internal/guts_cli" bolt "go.etcd.io/bbolt" + "go.etcd.io/bbolt/internal/common" + "go.etcd.io/bbolt/internal/guts_cli" ) var ( @@ -52,12 +51,6 @@ var ( // ErrBucketRequired is returned when a bucket is not specified. ErrBucketRequired = errors.New("bucket required") - // ErrBucketNotFound is returned when a bucket is not found. - ErrBucketNotFound = errors.New("bucket not found") - - // ErrKeyRequired is returned when a key is not specified. - ErrKeyRequired = errors.New("key required") - // ErrKeyNotFound is returned when a key is not found. ErrKeyNotFound = errors.New("key not found") ) @@ -509,16 +502,16 @@ func (cmd *pageItemCommand) Run(args ...string) error { return nil } -// leafPageElement retrieves a leaf page element. 
-func (cmd *pageItemCommand) leafPageElement(pageBytes []byte, index uint16) (*guts_cli.LeafPageElement, error) { - p := (*guts_cli.Page)(unsafe.Pointer(&pageBytes[0])) +func (cmd *pageItemCommand) validateLeafPage(pageBytes []byte, index uint16) (*common.Page, error) { + p := common.LoadPage(pageBytes) if index >= p.Count() { - return nil, fmt.Errorf("leafPageElement: expected item index less than %d, but got %d.", p.Count(), index) + return nil, fmt.Errorf("leafPageElement: expected item index less than %d, but got %d", p.Count(), index) } - if p.Type() != "leaf" { - return nil, fmt.Errorf("leafPageElement: expected page type of 'leaf', but got '%s'", p.Type()) + if p.Typ() != "leaf" { + return nil, fmt.Errorf("leafPageElement: expected page type of 'leaf', but got '%s'", p.Typ()) } - return p.LeafPageElement(index), nil + + return p, nil } const FORMAT_MODES = "auto|ascii-encoded|hex|bytes|redacted" @@ -568,19 +561,21 @@ func writelnBytes(w io.Writer, b []byte, format string) error { // PrintLeafItemKey writes the bytes of a leaf element's key. func (cmd *pageItemCommand) PrintLeafItemKey(w io.Writer, pageBytes []byte, index uint16, format string) error { - e, err := cmd.leafPageElement(pageBytes, index) + p, err := cmd.validateLeafPage(pageBytes, index) if err != nil { return err } + e := p.LeafPageElement(index) return writelnBytes(w, e.Key(), format) } -// PrintLeafItemKey writes the bytes of a leaf element's value. +// PrintLeafItemValue writes the bytes of a leaf element's value. func (cmd *pageItemCommand) PrintLeafItemValue(w io.Writer, pageBytes []byte, index uint16, format string) error { - e, err := cmd.leafPageElement(pageBytes, index) + p, err := cmd.validateLeafPage(pageBytes, index) if err != nil { return err } + e := p.LeafPageElement(index) return writelnBytes(w, e.Value(), format) } @@ -931,12 +926,12 @@ func (cmd *keysCommand) Run(args ...string) error { // Find bucket. 
var lastbucket *bolt.Bucket = tx.Bucket([]byte(buckets[0])) if lastbucket == nil { - return ErrBucketNotFound + return common.ErrBucketNotFound } for _, bucket := range buckets[1:] { lastbucket = lastbucket.Bucket([]byte(bucket)) if lastbucket == nil { - return ErrBucketNotFound + return common.ErrBucketNotFound } } @@ -1007,7 +1002,7 @@ func (cmd *getCommand) Run(args ...string) error { } else if len(buckets) == 0 { return ErrBucketRequired } else if len(key) == 0 { - return ErrKeyRequired + return common.ErrKeyRequired } // Open database. @@ -1022,12 +1017,12 @@ func (cmd *getCommand) Run(args ...string) error { // Find bucket. var lastbucket *bolt.Bucket = tx.Bucket([]byte(buckets[0])) if lastbucket == nil { - return ErrBucketNotFound + return common.ErrBucketNotFound } for _, bucket := range buckets[1:] { lastbucket = lastbucket.Bucket([]byte(bucket)) if lastbucket == nil { - return ErrBucketNotFound + return common.ErrBucketNotFound } } diff --git a/cmd/bbolt/page_command.go b/cmd/bbolt/page_command.go index 6789ba5da..c608d8460 100644 --- a/cmd/bbolt/page_command.go +++ b/cmd/bbolt/page_command.go @@ -8,6 +8,7 @@ import ( "os" "strings" + "go.etcd.io/bbolt/internal/common" "go.etcd.io/bbolt/internal/guts_cli" ) @@ -113,12 +114,12 @@ func (cmd *pageCommand) printPage(path string, pageID uint64, formatValue string // Print basic page info. fmt.Fprintf(cmd.Stdout, "Page ID: %d\n", p.Id()) - fmt.Fprintf(cmd.Stdout, "Page Type: %s\n", p.Type()) + fmt.Fprintf(cmd.Stdout, "Page Type: %s\n", p.Typ()) fmt.Fprintf(cmd.Stdout, "Total Size: %d bytes\n", len(buf)) fmt.Fprintf(cmd.Stdout, "Overflow pages: %d\n", p.Overflow()) // Print type-specific data. - switch p.Type() { + switch p.Typ() { case "meta": err = cmd.PrintMeta(cmd.Stdout, buf) case "leaf": @@ -136,14 +137,14 @@ func (cmd *pageCommand) printPage(path string, pageID uint64, formatValue string // PrintMeta prints the data from the meta page. 
func (cmd *pageCommand) PrintMeta(w io.Writer, buf []byte) error { - m := guts_cli.LoadPageMeta(buf) + m := common.LoadPageMeta(buf) m.Print(w) return nil } // PrintLeaf prints the data for a leaf page. func (cmd *pageCommand) PrintLeaf(w io.Writer, buf []byte, formatValue string) error { - p := guts_cli.LoadPage(buf) + p := common.LoadPage(buf) // Print number of items. fmt.Fprintf(w, "Item Count: %d\n", p.Count()) @@ -182,7 +183,7 @@ func (cmd *pageCommand) PrintLeaf(w io.Writer, buf []byte, formatValue string) e // PrintBranch prints the data for a leaf page. func (cmd *pageCommand) PrintBranch(w io.Writer, buf []byte) error { - p := guts_cli.LoadPage(buf) + p := common.LoadPage(buf) // Print number of items. fmt.Fprintf(w, "Item Count: %d\n", p.Count()) @@ -200,7 +201,7 @@ func (cmd *pageCommand) PrintBranch(w io.Writer, buf []byte) error { k = fmt.Sprintf("%x", string(e.Key())) } - fmt.Fprintf(w, "%s: \n", k, e.PgId()) + fmt.Fprintf(w, "%s: \n", k, e.Pgid()) } fmt.Fprintf(w, "\n") return nil @@ -208,16 +209,17 @@ func (cmd *pageCommand) PrintBranch(w io.Writer, buf []byte) error { // PrintFreelist prints the data for a freelist page. func (cmd *pageCommand) PrintFreelist(w io.Writer, buf []byte) error { - p := guts_cli.LoadPage(buf) + p := common.LoadPage(buf) // Print number of items. - fmt.Fprintf(w, "Item Count: %d\n", p.FreelistPageCount()) + _, cnt := p.FreelistPageCount() + fmt.Fprintf(w, "Item Count: %d\n", cnt) fmt.Fprintf(w, "Overflow: %d\n", p.Overflow()) fmt.Fprintf(w, "\n") // Print each page in the freelist. - ids := p.FreelistPagePages() + ids := p.FreelistPageIds() for _, ids := range ids { fmt.Fprintf(w, "%d\n", ids) } @@ -244,7 +246,7 @@ func (cmd *pageCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSi for offset := 0; offset < pageSize; offset += bytesPerLineN { // Retrieve current 16-byte line. 
line := buf[offset : offset+bytesPerLineN] - isLastLine := (offset == (pageSize - bytesPerLineN)) + isLastLine := offset == (pageSize - bytesPerLineN) // If it's the same as the previous line then print a skip. if bytes.Equal(line, prev) && !isLastLine { diff --git a/cmd/bbolt/surgery_commands.go b/cmd/bbolt/surgery_commands.go index 9685d3aa7..ace121fd6 100644 --- a/cmd/bbolt/surgery_commands.go +++ b/cmd/bbolt/surgery_commands.go @@ -9,7 +9,7 @@ import ( "strconv" "strings" - "go.etcd.io/bbolt/internal/guts_cli" + "go.etcd.io/bbolt/internal/common" "go.etcd.io/bbolt/internal/surgeon" ) @@ -224,7 +224,7 @@ func (cmd *copyPageCommand) Run(args ...string) error { } // copy the page - if err := surgeon.CopyPage(cmd.dstPath, guts_cli.Pgid(srcPageId), guts_cli.Pgid(dstPageId)); err != nil { + if err := surgeon.CopyPage(cmd.dstPath, common.Pgid(srcPageId), common.Pgid(dstPageId)); err != nil { return fmt.Errorf("copyPageCommand failed: %w", err) } @@ -279,7 +279,7 @@ func (cmd *clearPageCommand) Run(args ...string) error { return err } - if err := surgeon.ClearPage(cmd.dstPath, guts_cli.Pgid(pageId)); err != nil { + if err := surgeon.ClearPage(cmd.dstPath, common.Pgid(pageId)); err != nil { return fmt.Errorf("clearPageCommand failed: %w", err) } diff --git a/cmd/bbolt/surgery_commands_test.go b/cmd/bbolt/surgery_commands_test.go index 997836800..8d96eb326 100644 --- a/cmd/bbolt/surgery_commands_test.go +++ b/cmd/bbolt/surgery_commands_test.go @@ -11,7 +11,7 @@ import ( bolt "go.etcd.io/bbolt" "go.etcd.io/bbolt/internal/btesting" - "go.etcd.io/bbolt/internal/guts_cli" + "go.etcd.io/bbolt/internal/common" ) func TestSurgery_RevertMetaPage(t *testing.T) { @@ -28,8 +28,8 @@ func TestSurgery_RevertMetaPage(t *testing.T) { // Read both meta0 and meta1 from srcFile srcBuf0 := readPage(t, srcPath, 0, pageSize) srcBuf1 := readPage(t, srcPath, 1, pageSize) - meta0Page := guts_cli.LoadPageMeta(srcBuf0) - meta1Page := guts_cli.LoadPageMeta(srcBuf1) + meta0Page := 
common.LoadPageMeta(srcBuf0) + meta1Page := common.LoadPageMeta(srcBuf1) // Get the non-active meta page nonActiveSrcBuf := srcBuf0 @@ -115,7 +115,7 @@ func TestSurgery_ClearPage(t *testing.T) { t.Log("Verify result") dstPageId3Data := readPage(t, dstPath, 3, pageSize) - p := guts_cli.LoadPage(dstPageId3Data) + p := common.LoadPage(dstPageId3Data) assert.Equal(t, uint16(0), p.Count()) assert.Equal(t, uint32(0), p.Overflow()) } diff --git a/cursor.go b/cursor.go index 5dafb0cac..f08da545b 100644 --- a/cursor.go +++ b/cursor.go @@ -4,6 +4,8 @@ import ( "bytes" "fmt" "sort" + + "go.etcd.io/bbolt/internal/common" ) // Cursor represents an iterator that can traverse over all key/value pairs in a bucket @@ -30,9 +32,9 @@ func (c *Cursor) Bucket() *Bucket { // If the bucket is empty then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) First() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.first() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -40,7 +42,7 @@ func (c *Cursor) First() (key []byte, value []byte) { func (c *Cursor) first() (key []byte, value []byte, flags uint32) { c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) + p, n := c.bucket.pageNode(c.bucket.RootPage()) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) c.goToFirstElementOnTheStack() @@ -51,7 +53,7 @@ func (c *Cursor) first() (key []byte, value []byte, flags uint32) { } k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil, flags } return k, v, flags @@ -61,9 +63,9 @@ func (c *Cursor) first() (key []byte, value []byte, flags uint32) { // If the bucket is empty then a nil key and value are returned. 
// The returned key and value are only valid for the life of the transaction. func (c *Cursor) Last() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) + p, n := c.bucket.pageNode(c.bucket.RootPage()) ref := elemRef{page: p, node: n} ref.index = ref.count() - 1 c.stack = append(c.stack, ref) @@ -80,7 +82,7 @@ func (c *Cursor) Last() (key []byte, value []byte) { } k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -90,9 +92,9 @@ func (c *Cursor) Last() (key []byte, value []byte) { // If the cursor is at the end of the bucket then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Next() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.next() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -102,9 +104,9 @@ func (c *Cursor) Next() (key []byte, value []byte) { // If the cursor is at the beginning of the bucket then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Prev() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.prev() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -115,7 +117,7 @@ func (c *Cursor) Prev() (key []byte, value []byte) { // follow, a nil key is returned. // The returned key and value are only valid for the life of the transaction. 
func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.seek(seek) @@ -126,7 +128,7 @@ func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { if k == nil { return nil, nil - } else if (flags & uint32(bucketLeafFlag)) != 0 { + } else if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -136,15 +138,15 @@ func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { // Delete fails if current key/value is a bucket or if the transaction is not writable. func (c *Cursor) Delete() error { if c.bucket.tx.db == nil { - return ErrTxClosed + return common.ErrTxClosed } else if !c.bucket.Writable() { - return ErrTxNotWritable + return common.ErrTxNotWritable } key, _, flags := c.keyValue() // Return an error if current value is a bucket. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue + if (flags & common.BucketLeafFlag) != 0 { + return common.ErrIncompatibleValue } c.node().del(key) @@ -156,7 +158,7 @@ func (c *Cursor) Delete() error { func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { // Start from root page/node and traverse to correct page. c.stack = c.stack[:0] - c.search(seek, c.bucket.root) + c.search(seek, c.bucket.RootPage()) // If this is a bucket then return a nil value. return c.keyValue() @@ -172,11 +174,11 @@ func (c *Cursor) goToFirstElementOnTheStack() { } // Keep adding pages pointing to the first element to the stack. - var pgId pgid + var pgId common.Pgid if ref.node != nil { pgId = ref.node.inodes[ref.index].pgid } else { - pgId = ref.page.branchPageElement(uint16(ref.index)).pgid + pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid() } p, n := c.bucket.pageNode(pgId) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) @@ -193,11 +195,11 @@ func (c *Cursor) last() { } // Keep adding pages pointing to the last element in the stack. 
- var pgId pgid + var pgId common.Pgid if ref.node != nil { pgId = ref.node.inodes[ref.index].pgid } else { - pgId = ref.page.branchPageElement(uint16(ref.index)).pgid + pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid() } p, n := c.bucket.pageNode(pgId) @@ -268,10 +270,10 @@ func (c *Cursor) prev() (key []byte, value []byte, flags uint32) { } // search recursively performs a binary search against a given page/node until it finds a given key. -func (c *Cursor) search(key []byte, pgId pgid) { +func (c *Cursor) search(key []byte, pgId common.Pgid) { p, n := c.bucket.pageNode(pgId) - if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { - panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + if p != nil && (p.Flags()&(common.BranchPageFlag|common.LeafPageFlag)) == 0 { + panic(fmt.Sprintf("invalid page type: %d: %x", p.Id(), p.Flags())) } e := elemRef{page: p, node: n} c.stack = append(c.stack, e) @@ -309,15 +311,15 @@ func (c *Cursor) searchNode(key []byte, n *node) { c.search(key, n.inodes[index].pgid) } -func (c *Cursor) searchPage(key []byte, p *page) { +func (c *Cursor) searchPage(key []byte, p *common.Page) { // Binary search for the correct range. - inodes := p.branchPageElements() + inodes := p.BranchPageElements() var exact bool - index := sort.Search(int(p.count), func(i int) bool { + index := sort.Search(int(p.Count()), func(i int) bool { // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(inodes[i].key(), key) + ret := bytes.Compare(inodes[i].Key(), key) if ret == 0 { exact = true } @@ -329,7 +331,7 @@ func (c *Cursor) searchPage(key []byte, p *page) { c.stack[len(c.stack)-1].index = index // Recursively search to the next page. - c.search(key, inodes[index].pgid) + c.search(key, inodes[index].Pgid()) } // nsearch searches the leaf node on the top of the stack for a key. 
@@ -347,9 +349,9 @@ func (c *Cursor) nsearch(key []byte) { } // If we have a page then search its leaf elements. - inodes := p.leafPageElements() - index := sort.Search(int(p.count), func(i int) bool { - return bytes.Compare(inodes[i].key(), key) != -1 + inodes := p.LeafPageElements() + index := sort.Search(int(p.Count()), func(i int) bool { + return bytes.Compare(inodes[i].Key(), key) != -1 }) e.index = index } @@ -370,13 +372,13 @@ func (c *Cursor) keyValue() ([]byte, []byte, uint32) { } // Or retrieve value from page. - elem := ref.page.leafPageElement(uint16(ref.index)) - return elem.key(), elem.value(), elem.flags + elem := ref.page.LeafPageElement(uint16(ref.index)) + return elem.Key(), elem.Value(), elem.Flags() } // node returns the node that the cursor is currently positioned on. func (c *Cursor) node() *node { - _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + common.Assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") // If the top of the stack is a leaf node then just return it. if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { @@ -386,19 +388,19 @@ func (c *Cursor) node() *node { // Start from root and traverse down the hierarchy. var n = c.stack[0].node if n == nil { - n = c.bucket.node(c.stack[0].page.id, nil) + n = c.bucket.node(c.stack[0].page.Id(), nil) } for _, ref := range c.stack[:len(c.stack)-1] { - _assert(!n.isLeaf, "expected branch node") + common.Assert(!n.isLeaf, "expected branch node") n = n.childAt(ref.index) } - _assert(n.isLeaf, "expected leaf node") + common.Assert(n.isLeaf, "expected leaf node") return n } // elemRef represents a reference to an element on a given page/node. 
type elemRef struct { - page *page + page *common.Page node *node index int } @@ -408,7 +410,7 @@ func (r *elemRef) isLeaf() bool { if r.node != nil { return r.node.isLeaf } - return (r.page.flags & leafPageFlag) != 0 + return (r.page.Flags() & common.LeafPageFlag) != 0 } // count returns the number of inodes or page elements. @@ -416,5 +418,5 @@ func (r *elemRef) count() int { if r.node != nil { return len(r.node.inodes) } - return int(r.page.count) + return int(r.page.Count()) } diff --git a/cursor_test.go b/cursor_test.go index 8e112c14e..8fff82e95 100644 --- a/cursor_test.go +++ b/cursor_test.go @@ -13,6 +13,7 @@ import ( bolt "go.etcd.io/bbolt" "go.etcd.io/bbolt/internal/btesting" + "go.etcd.io/bbolt/internal/common" ) // Ensure that a cursor can return a reference to the bucket that created it. @@ -139,7 +140,7 @@ func TestCursor_Delete(t *testing.T) { } c.Seek([]byte("sub")) - if err := c.Delete(); err != bolt.ErrIncompatibleValue { + if err := c.Delete(); err != common.ErrIncompatibleValue { t.Fatalf("unexpected error: %s", err) } diff --git a/db.go b/db.go index 5f45d966e..5e125d64d 100644 --- a/db.go +++ b/db.go @@ -3,7 +3,6 @@ package bbolt import ( "errors" "fmt" - "hash/fnv" "io" "os" "runtime" @@ -11,48 +10,13 @@ import ( "sync" "time" "unsafe" -) - -// The largest step that can be taken when remapping the mmap. -const maxMmapStep = 1 << 30 // 1GB - -// The data file format version. -const version = 2 - -// Represents a marker value to indicate that a file is a Bolt DB. -const magic uint32 = 0xED0CDAED -const pgidNoFreelist pgid = 0xffffffffffffffff - -// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when -// syncing changes to a file. This is required as some operating systems, -// such as OpenBSD, do not have a unified buffer cache (UBC) and writes -// must be synchronized using the msync(2) syscall. -const IgnoreNoSync = runtime.GOOS == "openbsd" - -// Default values if not set in a DB instance. 
-const ( - DefaultMaxBatchSize int = 1000 - DefaultMaxBatchDelay = 10 * time.Millisecond - DefaultAllocSize = 16 * 1024 * 1024 + "go.etcd.io/bbolt/internal/common" ) -// default page size for db is set to the OS page size. -var defaultPageSize = os.Getpagesize() - // The time elapsed between consecutive file locking attempts. const flockRetryTimeout = 50 * time.Millisecond -// FreelistType is the type of the freelist backend -type FreelistType string - -const ( - // FreelistArrayType indicates backend freelist type is array - FreelistArrayType = FreelistType("array") - // FreelistMapType indicates backend freelist type is hashmap - FreelistMapType = FreelistType("hashmap") -) - // DB represents a collection of buckets persisted to a file on disk. // All data access is performed through transactions which can be obtained through the DB. // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. @@ -85,7 +49,7 @@ type DB struct { // The alternative one is using hashmap, it is faster in almost all circumstances // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. // The default type is array - FreelistType FreelistType + FreelistType common.FreelistType // When true, skips the truncate call when growing the database. // Setting this to true is only safe on non-ext3/ext4 systems. @@ -141,8 +105,8 @@ type DB struct { data *[maxMapSize]byte datasz int filesz int // current on disk file size - meta0 *meta - meta1 *meta + meta0 *common.Meta + meta1 *common.Meta pageSize int opened bool rwtx *Tx @@ -206,9 +170,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { db.Mlock = options.Mlock // Set default values for later DB operations. 
- db.MaxBatchSize = DefaultMaxBatchSize - db.MaxBatchDelay = DefaultMaxBatchDelay - db.AllocSize = DefaultAllocSize + db.MaxBatchSize = common.DefaultMaxBatchSize + db.MaxBatchDelay = common.DefaultMaxBatchDelay + db.AllocSize = common.DefaultAllocSize flag := os.O_RDWR if options.ReadOnly { @@ -249,7 +213,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { if db.pageSize = options.PageSize; db.pageSize == 0 { // Set the default page size to the OS page size. - db.pageSize = defaultPageSize + db.pageSize = common.DefaultPageSize } // Initialize the database if it doesn't exist. @@ -269,7 +233,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { db.pageSize = pgSize } else { _ = db.close() - return nil, ErrInvalid + return nil, common.ErrInvalid } } @@ -347,7 +311,7 @@ func (db *DB) getPageSize() (int, error) { return db.pageSize, nil } - return 0, ErrInvalid + return 0, common.ErrInvalid } // getPageSizeFromFirstMeta reads the pageSize from the first meta page @@ -356,11 +320,11 @@ func (db *DB) getPageSizeFromFirstMeta() (int, bool, error) { var metaCanRead bool if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { metaCanRead = true - if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { - return int(m.pageSize), metaCanRead, nil + if m := db.pageInBuffer(buf[:], 0).Meta(); m.Validate() == nil { + return int(m.PageSize()), metaCanRead, nil } } - return 0, metaCanRead, ErrInvalid + return 0, metaCanRead, common.ErrInvalid } // getPageSizeFromSecondMeta reads the pageSize from the second meta page @@ -392,13 +356,13 @@ func (db *DB) getPageSizeFromSecondMeta() (int, bool, error) { bw, err := db.file.ReadAt(buf[:], pos) if (err == nil && bw == len(buf)) || (err == io.EOF && int64(bw) == (fileSize-pos)) { metaCanRead = true - if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { - return int(m.pageSize), metaCanRead, nil + if m := db.pageInBuffer(buf[:], 0).Meta(); 
m.Validate() == nil { + return int(m.PageSize()), metaCanRead, nil } } } - return 0, metaCanRead, ErrInvalid + return 0, metaCanRead, common.ErrInvalid } // loadFreelist reads the freelist if it is synced, or reconstructs it @@ -412,14 +376,14 @@ func (db *DB) loadFreelist() { db.freelist.readIDs(db.freepages()) } else { // Read free list from freelist page. - db.freelist.read(db.page(db.meta().freelist)) + db.freelist.read(db.page(db.meta().Freelist())) } db.stats.FreePageN = db.freelist.free_count() }) } func (db *DB) hasSyncedFreelist() bool { - return db.meta().freelist != pgidNoFreelist + return db.meta().Freelist() != common.PgidNoFreelist } // mmap opens the underlying memory-mapped file and initializes the meta references. @@ -478,14 +442,14 @@ func (db *DB) mmap(minsz int) error { } // Save references to the meta pages. - db.meta0 = db.page(0).meta() - db.meta1 = db.page(1).meta() + db.meta0 = db.page(0).Meta() + db.meta1 = db.page(1).Meta() // Validate the meta pages. We only return an error if both meta pages fail // validation, since meta0 failing validation means that it wasn't saved // properly -- but we can recover using meta1. And vice-versa. - err0 := db.meta0.validate() - err1 := db.meta1.validate() + err0 := db.meta0.Validate() + err1 := db.meta1.Validate() if err0 != nil && err1 != nil { return err0 } @@ -533,8 +497,8 @@ func (db *DB) mmapSize(size int) (int, error) { // If larger than 1GB then grow by 1GB at a time. sz := int64(size) - if remainder := sz % int64(maxMmapStep); remainder > 0 { - sz += int64(maxMmapStep) - remainder + if remainder := sz % int64(common.MaxMmapStep); remainder > 0 { + sz += int64(common.MaxMmapStep) - remainder } // Ensure that the mmap size is a multiple of the page size. @@ -581,33 +545,33 @@ func (db *DB) init() error { // Create two meta pages on a buffer. 
buf := make([]byte, db.pageSize*4) for i := 0; i < 2; i++ { - p := db.pageInBuffer(buf, pgid(i)) - p.id = pgid(i) - p.flags = metaPageFlag + p := db.pageInBuffer(buf, common.Pgid(i)) + p.SetId(common.Pgid(i)) + p.SetFlags(common.MetaPageFlag) // Initialize the meta page. - m := p.meta() - m.magic = magic - m.version = version - m.pageSize = uint32(db.pageSize) - m.freelist = 2 - m.root = bucket{root: 3} - m.pgid = 4 - m.txid = txid(i) - m.checksum = m.sum64() + m := p.Meta() + m.SetMagic(common.Magic) + m.SetVersion(common.Version) + m.SetPageSize(uint32(db.pageSize)) + m.SetFreelist(2) + m.SetRootBucket(common.NewInBucket(3, 0)) + m.SetPgid(4) + m.SetTxid(common.Txid(i)) + m.SetChecksum(m.Sum64()) } // Write an empty freelist at page 3. - p := db.pageInBuffer(buf, pgid(2)) - p.id = pgid(2) - p.flags = freelistPageFlag - p.count = 0 + p := db.pageInBuffer(buf, common.Pgid(2)) + p.SetId(2) + p.SetFlags(common.FreelistPageFlag) + p.SetCount(0) // Write an empty leaf page at page 4. - p = db.pageInBuffer(buf, pgid(3)) - p.id = pgid(3) - p.flags = leafPageFlag - p.count = 0 + p = db.pageInBuffer(buf, common.Pgid(3)) + p.SetId(3) + p.SetFlags(common.LeafPageFlag) + p.SetCount(0) // Write the buffer to our data file. if _, err := db.ops.writeAt(buf, 0); err != nil { @@ -719,14 +683,14 @@ func (db *DB) beginTx() (*Tx, error) { if !db.opened { db.mmaplock.RUnlock() db.metalock.Unlock() - return nil, ErrDatabaseNotOpen + return nil, common.ErrDatabaseNotOpen } // Exit if the database is not correctly mapped. if db.data == nil { db.mmaplock.RUnlock() db.metalock.Unlock() - return nil, ErrInvalidMapping + return nil, common.ErrInvalidMapping } // Create a transaction associated with the database. @@ -752,7 +716,7 @@ func (db *DB) beginTx() (*Tx, error) { func (db *DB) beginRWTx() (*Tx, error) { // If the database was opened with Options.ReadOnly, return an error. 
if db.readOnly { - return nil, ErrDatabaseReadOnly + return nil, common.ErrDatabaseReadOnly } // Obtain writer lock. This is released by the transaction when it closes. @@ -767,13 +731,13 @@ func (db *DB) beginRWTx() (*Tx, error) { // Exit if the database is not open yet. if !db.opened { db.rwlock.Unlock() - return nil, ErrDatabaseNotOpen + return nil, common.ErrDatabaseNotOpen } // Exit if the database is not correctly mapped. if db.data == nil { db.rwlock.Unlock() - return nil, ErrInvalidMapping + return nil, common.ErrInvalidMapping } // Create a transaction associated with the database. @@ -788,19 +752,19 @@ func (db *DB) beginRWTx() (*Tx, error) { func (db *DB) freePages() { // Free all pending pages prior to earliest open transaction. sort.Sort(txsById(db.txs)) - minid := txid(0xFFFFFFFFFFFFFFFF) + minid := common.Txid(0xFFFFFFFFFFFFFFFF) if len(db.txs) > 0 { - minid = db.txs[0].meta.txid + minid = db.txs[0].meta.Txid() } if minid > 0 { db.freelist.release(minid - 1) } // Release unused txid extents. for _, t := range db.txs { - db.freelist.releaseRange(minid, t.meta.txid-1) - minid = t.meta.txid + 1 + db.freelist.releaseRange(minid, t.meta.Txid()-1) + minid = t.meta.Txid() + 1 } - db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) + db.freelist.releaseRange(minid, common.Txid(0xFFFFFFFFFFFFFFFF)) // Any page both allocated and freed in an extent is safe to release. } @@ -808,7 +772,7 @@ type txsById []*Tx func (t txsById) Len() int { return len(t) } func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } +func (t txsById) Less(i, j int) bool { return t[i].meta.Txid() < t[j].meta.Txid() } // removeTx removes a transaction from the database. func (db *DB) removeTx(tx *Tx) { @@ -1050,37 +1014,37 @@ func (db *DB) Stats() Stats { // This is for internal access to the raw data bytes from the C cursor, use // carefully, or not at all. 
func (db *DB) Info() *Info { - _assert(db.data != nil, "database file isn't correctly mapped") + common.Assert(db.data != nil, "database file isn't correctly mapped") return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} } // page retrieves a page reference from the mmap based on the current page size. -func (db *DB) page(id pgid) *page { - pos := id * pgid(db.pageSize) - return (*page)(unsafe.Pointer(&db.data[pos])) +func (db *DB) page(id common.Pgid) *common.Page { + pos := id * common.Pgid(db.pageSize) + return (*common.Page)(unsafe.Pointer(&db.data[pos])) } // pageInBuffer retrieves a page reference from a given byte array based on the current page size. -func (db *DB) pageInBuffer(b []byte, id pgid) *page { - return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +func (db *DB) pageInBuffer(b []byte, id common.Pgid) *common.Page { + return (*common.Page)(unsafe.Pointer(&b[id*common.Pgid(db.pageSize)])) } // meta retrieves the current meta page reference. -func (db *DB) meta() *meta { +func (db *DB) meta() *common.Meta { // We have to return the meta with the highest txid which doesn't fail // validation. Otherwise, we can cause errors when in fact the database is // in a consistent state. metaA is the one with the higher txid. metaA := db.meta0 metaB := db.meta1 - if db.meta1.txid > db.meta0.txid { + if db.meta1.Txid() > db.meta0.Txid() { metaA = db.meta1 metaB = db.meta0 } // Use higher meta page if valid. Otherwise, fallback to previous, if valid. - if err := metaA.validate(); err == nil { + if err := metaA.Validate(); err == nil { return metaA - } else if err := metaB.validate(); err == nil { + } else if err := metaB.Validate(); err == nil { return metaB } @@ -1090,7 +1054,7 @@ func (db *DB) meta() *meta { } // allocate returns a contiguous block of memory starting at a given page. 
-func (db *DB) allocate(txid txid, count int) (*page, error) { +func (db *DB) allocate(txid common.Txid, count int) (*common.Page, error) { // Allocate a temporary buffer for the page. var buf []byte if count == 1 { @@ -1098,17 +1062,18 @@ func (db *DB) allocate(txid txid, count int) (*page, error) { } else { buf = make([]byte, count*db.pageSize) } - p := (*page)(unsafe.Pointer(&buf[0])) - p.overflow = uint32(count - 1) + p := (*common.Page)(unsafe.Pointer(&buf[0])) + p.SetOverflow(uint32(count - 1)) // Use pages from the freelist if they are available. - if p.id = db.freelist.allocate(txid, count); p.id != 0 { + p.SetId(db.freelist.allocate(txid, count)) + if p.Id() != 0 { return p, nil } // Resize mmap() if we're at the end. - p.id = db.rwtx.meta.pgid - var minsz = int((p.id+pgid(count))+1) * db.pageSize + p.SetId(db.rwtx.meta.Pgid()) + var minsz = int((p.Id()+common.Pgid(count))+1) * db.pageSize if minsz >= db.datasz { if err := db.mmap(minsz); err != nil { return nil, fmt.Errorf("mmap allocate error: %s", err) @@ -1116,7 +1081,8 @@ func (db *DB) allocate(txid txid, count int) (*page, error) { } // Move the page id high water mark. - db.rwtx.meta.pgid += pgid(count) + curPgid := db.rwtx.meta.Pgid() + db.rwtx.meta.SetPgid(curPgid + common.Pgid(count)) return p, nil } @@ -1163,7 +1129,7 @@ func (db *DB) IsReadOnly() bool { return db.readOnly } -func (db *DB) freepages() []pgid { +func (db *DB) freepages() []common.Pgid { tx, err := db.beginTx() defer func() { err = tx.Rollback() @@ -1175,8 +1141,8 @@ func (db *DB) freepages() []pgid { panic("freepages: failed to open read only tx") } - reachable := make(map[pgid]*page) - nofreed := make(map[pgid]bool) + reachable := make(map[common.Pgid]*common.Page) + nofreed := make(map[common.Pgid]bool) ech := make(chan error) go func() { for e := range ech { @@ -1188,8 +1154,8 @@ func (db *DB) freepages() []pgid { // TODO: If check bucket reported any corruptions (ech) we shouldn't proceed to freeing the pages. 
- var fids []pgid - for i := pgid(2); i < db.meta().pgid; i++ { + var fids []common.Pgid + for i := common.Pgid(2); i < db.meta().Pgid(); i++ { if _, ok := reachable[i]; !ok { fids = append(fids, i) } @@ -1221,7 +1187,7 @@ type Options struct { // The alternative one is using hashmap, it is faster in almost all circumstances // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. // The default type is array - FreelistType FreelistType + FreelistType common.FreelistType // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to // grab a shared lock (UNIX). @@ -1263,7 +1229,7 @@ type Options struct { var DefaultOptions = &Options{ Timeout: 0, NoGrowSync: false, - FreelistType: FreelistArrayType, + FreelistType: common.FreelistArrayType, } // Stats represents statistics about the database. @@ -1302,65 +1268,3 @@ type Info struct { Data uintptr PageSize int } - -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// validate checks the marker bytes and version of the meta page to ensure it matches this binary. -func (m *meta) validate() error { - if m.magic != magic { - return ErrInvalid - } else if m.version != version { - return ErrVersionMismatch - } else if m.checksum != m.sum64() { - return ErrChecksum - } - return nil -} - -// copy copies one meta object to another. -func (m *meta) copy(dest *meta) { - *dest = *m -} - -// write writes the meta onto a page. 
-func (m *meta) write(p *page) { - if m.root.root >= m.pgid { - panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { - // TODO: reject pgidNoFreeList if !NoFreelistSync - panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) - } - - // Page id is either going to be 0 or 1 which we can determine by the transaction ID. - p.id = pgid(m.txid % 2) - p.flags |= metaPageFlag - - // Calculate the checksum. - m.checksum = m.sum64() - - m.copy(p.meta()) -} - -// generates the checksum for the meta. -func (m *meta) sum64() uint64 { - var h = fnv.New64a() - _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) - return h.Sum64() -} - -// _assert will panic with a given formatted message if the given condition is false. -func _assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assertion failed: "+msg, v...)) - } -} diff --git a/db_test.go b/db_test.go index 9f1076fd4..db7c61951 100644 --- a/db_test.go +++ b/db_test.go @@ -21,6 +21,7 @@ import ( bolt "go.etcd.io/bbolt" "go.etcd.io/bbolt/internal/btesting" + "go.etcd.io/bbolt/internal/common" ) // pageSize is the size of one page in the data file. @@ -136,7 +137,7 @@ func TestOpen_ErrInvalid(t *testing.T) { t.Fatal(err) } - if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrInvalid { + if _, err := bolt.Open(path, 0666, nil); err != common.ErrInvalid { t.Fatalf("unexpected error: %s", err) } } @@ -172,7 +173,7 @@ func TestOpen_ErrVersionMismatch(t *testing.T) { } // Reopen data file. - if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrVersionMismatch { + if _, err := bolt.Open(path, 0666, nil); err != common.ErrVersionMismatch { t.Fatalf("unexpected error: %s", err) } } @@ -208,7 +209,7 @@ func TestOpen_ErrChecksum(t *testing.T) { } // Reopen data file. 
- if _, err := bolt.Open(path, 0666, nil); err != bolt.ErrChecksum { + if _, err := bolt.Open(path, 0666, nil); err != common.ErrChecksum { t.Fatalf("unexpected error: %s", err) } } @@ -552,7 +553,7 @@ func TestDB_Open_ReadOnly(t *testing.T) { } // Can't launch read-write transaction. - if _, err := readOnlyDB.Begin(true); err != bolt.ErrDatabaseReadOnly { + if _, err := readOnlyDB.Begin(true); err != common.ErrDatabaseReadOnly { t.Fatalf("unexpected error: %s", err) } @@ -641,7 +642,7 @@ func TestOpen_RecoverFreeList(t *testing.T) { // Ensure that a database cannot open a transaction when it's not open. func TestDB_Begin_ErrDatabaseNotOpen(t *testing.T) { var db bolt.DB - if _, err := db.Begin(false); err != bolt.ErrDatabaseNotOpen { + if _, err := db.Begin(false); err != common.ErrDatabaseNotOpen { t.Fatalf("unexpected error: %s", err) } } @@ -727,7 +728,7 @@ func TestDB_Concurrent_WriteTo(t *testing.T) { // Ensure that opening a transaction while the DB is closed returns an error. func TestDB_BeginRW_Closed(t *testing.T) { var db bolt.DB - if _, err := db.Begin(true); err != bolt.ErrDatabaseNotOpen { + if _, err := db.Begin(true); err != common.ErrDatabaseNotOpen { t.Fatalf("unexpected error: %s", err) } } @@ -828,7 +829,7 @@ func TestDB_Update_Closed(t *testing.T) { t.Fatal(err) } return nil - }); err != bolt.ErrDatabaseNotOpen { + }); err != common.ErrDatabaseNotOpen { t.Fatalf("unexpected error: %s", err) } } diff --git a/db_whitebox_test.go b/db_whitebox_test.go index eb95155e5..8b195f723 100644 --- a/db_whitebox_test.go +++ b/db_whitebox_test.go @@ -6,6 +6,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "go.etcd.io/bbolt/internal/common" ) func TestOpenWithPreLoadFreelist(t *testing.T) { @@ -76,7 +78,7 @@ func TestMethodPage(t *testing.T) { name: "readonly mode without preloading free pages", readonly: true, preLoadFreePage: false, - expectedError: ErrFreePagesNotLoaded, + expectedError: 
common.ErrFreePagesNotLoaded, }, } diff --git a/errors.go b/errors.go deleted file mode 100644 index f2c3b20ed..000000000 --- a/errors.go +++ /dev/null @@ -1,78 +0,0 @@ -package bbolt - -import "errors" - -// These errors can be returned when opening or calling methods on a DB. -var ( - // ErrDatabaseNotOpen is returned when a DB instance is accessed before it - // is opened or after it is closed. - ErrDatabaseNotOpen = errors.New("database not open") - - // ErrDatabaseOpen is returned when opening a database that is - // already open. - ErrDatabaseOpen = errors.New("database already open") - - // ErrInvalid is returned when both meta pages on a database are invalid. - // This typically occurs when a file is not a bolt database. - ErrInvalid = errors.New("invalid database") - - // ErrInvalidMapping is returned when the database file fails to get mapped. - ErrInvalidMapping = errors.New("database isn't correctly mapped") - - // ErrVersionMismatch is returned when the data file was created with a - // different version of Bolt. - ErrVersionMismatch = errors.New("version mismatch") - - // ErrChecksum is returned when either meta page checksum does not match. - ErrChecksum = errors.New("checksum error") - - // ErrTimeout is returned when a database cannot obtain an exclusive lock - // on the data file after the timeout passed to Open(). - ErrTimeout = errors.New("timeout") -) - -// These errors can occur when beginning or committing a Tx. -var ( - // ErrTxNotWritable is returned when performing a write operation on a - // read-only transaction. - ErrTxNotWritable = errors.New("tx not writable") - - // ErrTxClosed is returned when committing or rolling back a transaction - // that has already been committed or rolled back. - ErrTxClosed = errors.New("tx closed") - - // ErrDatabaseReadOnly is returned when a mutating transaction is started on a - // read-only database. 
- ErrDatabaseReadOnly = errors.New("database is in read-only mode") - - // ErrFreePagesNotLoaded is returned when a readonly transaction without - // preloading the free pages is trying to access the free pages. - ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded") -) - -// These errors can occur when putting or deleting a value or a bucket. -var ( - // ErrBucketNotFound is returned when trying to access a bucket that has - // not been created yet. - ErrBucketNotFound = errors.New("bucket not found") - - // ErrBucketExists is returned when creating a bucket that already exists. - ErrBucketExists = errors.New("bucket already exists") - - // ErrBucketNameRequired is returned when creating a bucket with a blank name. - ErrBucketNameRequired = errors.New("bucket name required") - - // ErrKeyRequired is returned when inserting a zero-length key. - ErrKeyRequired = errors.New("key required") - - // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. - ErrKeyTooLarge = errors.New("key too large") - - // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. - ErrValueTooLarge = errors.New("value too large") - - // ErrIncompatibleValue is returned when trying create or delete a bucket - // on an existing non-bucket key or when trying to create or delete a - // non-bucket key on an existing bucket key. - ErrIncompatibleValue = errors.New("incompatible value") -) diff --git a/freelist.go b/freelist.go index 50f2d0e17..dfccc503b 100644 --- a/freelist.go +++ b/freelist.go @@ -4,50 +4,52 @@ import ( "fmt" "sort" "unsafe" + + "go.etcd.io/bbolt/internal/common" ) // txPending holds a list of pgids and corresponding allocation txns // that are pending to be freed. 
type txPending struct { - ids []pgid - alloctx []txid // txids allocating the ids - lastReleaseBegin txid // beginning txid of last matching releaseRange + ids []common.Pgid + alloctx []common.Txid // txids allocating the ids + lastReleaseBegin common.Txid // beginning txid of last matching releaseRange } // pidSet holds the set of starting pgids which have the same span size -type pidSet map[pgid]struct{} +type pidSet map[common.Pgid]struct{} // freelist represents a list of all pages that are available for allocation. // It also tracks pages that have been freed but are still in use by open transactions. type freelist struct { - freelistType FreelistType // freelist type - ids []pgid // all free and available free page ids. - allocs map[pgid]txid // mapping of txid that allocated a pgid. - pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. - cache map[pgid]struct{} // fast lookup of all free and pending page ids. - freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size - forwardMap map[pgid]uint64 // key is start pgid, value is its span size - backwardMap map[pgid]uint64 // key is end pgid, value is its span size - allocate func(txid txid, n int) pgid // the freelist allocate func - free_count func() int // the function which gives you free page number - mergeSpans func(ids pgids) // the mergeSpan func - getFreePageIDs func() []pgid // get free pgids func - readIDs func(pgids []pgid) // readIDs func reads list of pages and init the freelist + freelistType common.FreelistType // freelist type + ids []common.Pgid // all free and available free page ids. + allocs map[common.Pgid]common.Txid // mapping of Txid that allocated a pgid. + pending map[common.Txid]*txPending // mapping of soon-to-be free page ids by tx. + cache map[common.Pgid]struct{} // fast lookup of all free and pending page ids. 
+ freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size + forwardMap map[common.Pgid]uint64 // key is start pgid, value is its span size + backwardMap map[common.Pgid]uint64 // key is end pgid, value is its span size + allocate func(txid common.Txid, n int) common.Pgid // the freelist allocate func + free_count func() int // the function which gives you free page number + mergeSpans func(ids common.Pgids) // the mergeSpan func + getFreePageIDs func() []common.Pgid // get free pgids func + readIDs func(pgids []common.Pgid) // readIDs func reads list of pages and init the freelist } // newFreelist returns an empty, initialized freelist. -func newFreelist(freelistType FreelistType) *freelist { +func newFreelist(freelistType common.FreelistType) *freelist { f := &freelist{ freelistType: freelistType, - allocs: make(map[pgid]txid), - pending: make(map[txid]*txPending), - cache: make(map[pgid]struct{}), + allocs: make(map[common.Pgid]common.Txid), + pending: make(map[common.Txid]*txPending), + cache: make(map[common.Pgid]struct{}), freemaps: make(map[uint64]pidSet), - forwardMap: make(map[pgid]uint64), - backwardMap: make(map[pgid]uint64), + forwardMap: make(map[common.Pgid]uint64), + backwardMap: make(map[common.Pgid]uint64), } - if freelistType == FreelistMapType { + if freelistType == common.FreelistMapType { f.allocate = f.hashmapAllocate f.free_count = f.hashmapFreeCount f.mergeSpans = f.hashmapMergeSpans @@ -71,7 +73,7 @@ func (f *freelist) size() int { // The first element will be used to store the count. See freelist.write. n++ } - return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n) + return int(common.PageHeaderSize) + (int(unsafe.Sizeof(common.Pgid(0))) * n) } // count returns count of pages on the freelist @@ -95,23 +97,23 @@ func (f *freelist) pending_count() int { // copyall copies a list of all free ids and all pending ids in one sorted list. 
// f.count returns the minimum length required for dst. -func (f *freelist) copyall(dst []pgid) { - m := make(pgids, 0, f.pending_count()) +func (f *freelist) copyall(dst []common.Pgid) { + m := make(common.Pgids, 0, f.pending_count()) for _, txp := range f.pending { m = append(m, txp.ids...) } sort.Sort(m) - mergepgids(dst, f.getFreePageIDs(), m) + common.Mergepgids(dst, f.getFreePageIDs(), m) } // arrayAllocate returns the starting page id of a contiguous list of pages of a given size. // If a contiguous block cannot be found then 0 is returned. -func (f *freelist) arrayAllocate(txid txid, n int) pgid { +func (f *freelist) arrayAllocate(txid common.Txid, n int) common.Pgid { if len(f.ids) == 0 { return 0 } - var initial, previd pgid + var initial, previd common.Pgid for i, id := range f.ids { if id <= 1 { panic(fmt.Sprintf("invalid page allocation: %d", id)) @@ -123,7 +125,7 @@ func (f *freelist) arrayAllocate(txid txid, n int) pgid { } // If we found a contiguous block then remove it and return it. - if (id-initial)+1 == pgid(n) { + if (id-initial)+1 == common.Pgid(n) { // If we're allocating off the beginning then take the fast path // and just adjust the existing slice. This will use extra memory // temporarily but the append() in free() will realloc the slice @@ -136,7 +138,7 @@ func (f *freelist) arrayAllocate(txid txid, n int) pgid { } // Remove from the free cache. - for i := pgid(0); i < pgid(n); i++ { + for i := common.Pgid(0); i < common.Pgid(n); i++ { delete(f.cache, initial+i) } f.allocs[initial] = txid @@ -150,9 +152,9 @@ func (f *freelist) arrayAllocate(txid txid, n int) pgid { // free releases a page and its overflow for a given transaction id. // If the page is already free then a panic will occur. 
-func (f *freelist) free(txid txid, p *page) { - if p.id <= 1 { - panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) +func (f *freelist) free(txid common.Txid, p *common.Page) { + if p.Id() <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.Id())) } // Free page and all its overflow pages. @@ -161,15 +163,15 @@ func (f *freelist) free(txid txid, p *page) { txp = &txPending{} f.pending[txid] = txp } - allocTxid, ok := f.allocs[p.id] + allocTxid, ok := f.allocs[p.Id()] if ok { - delete(f.allocs, p.id) - } else if (p.flags & freelistPageFlag) != 0 { + delete(f.allocs, p.Id()) + } else if (p.Flags() & common.FreelistPageFlag) != 0 { // Freelist is always allocated by prior tx. allocTxid = txid - 1 } - for id := p.id; id <= p.id+pgid(p.overflow); id++ { + for id := p.Id(); id <= p.Id()+common.Pgid(p.Overflow()); id++ { // Verify that page is not already free. if _, ok := f.cache[id]; ok { panic(fmt.Sprintf("page %d already freed", id)) @@ -182,8 +184,8 @@ func (f *freelist) free(txid txid, p *page) { } // release moves all page ids for a transaction id (or older) to the freelist. -func (f *freelist) release(txid txid) { - m := make(pgids, 0) +func (f *freelist) release(txid common.Txid) { + m := make(common.Pgids, 0) for tid, txp := range f.pending { if tid <= txid { // Move transaction's pending pages to the available freelist. @@ -196,11 +198,11 @@ func (f *freelist) release(txid txid) { } // releaseRange moves pending pages allocated within an extent [begin,end] to the free list. -func (f *freelist) releaseRange(begin, end txid) { +func (f *freelist) releaseRange(begin, end common.Txid) { if begin > end { return } - var m pgids + var m common.Pgids for tid, txp := range f.pending { if tid < begin || tid > end { continue @@ -229,13 +231,13 @@ func (f *freelist) releaseRange(begin, end txid) { } // rollback removes the pages from a given pending tx. 
-func (f *freelist) rollback(txid txid) { +func (f *freelist) rollback(txid common.Txid) { // Remove page ids from cache. txp := f.pending[txid] if txp == nil { return } - var m pgids + var m common.Pgids for i, pgid := range txp.ids { delete(f.cache, pgid) tx := txp.alloctx[i] @@ -256,82 +258,69 @@ func (f *freelist) rollback(txid txid) { } // freed returns whether a given page is in the free list. -func (f *freelist) freed(pgId pgid) bool { +func (f *freelist) freed(pgId common.Pgid) bool { _, ok := f.cache[pgId] return ok } // read initializes the freelist from a freelist page. -func (f *freelist) read(p *page) { - if (p.flags & freelistPageFlag) == 0 { - panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) - } - // If the page.count is at the max uint16 value (64k) then it's considered - // an overflow and the size of the freelist is stored as the first element. - var idx, count = 0, int(p.count) - if count == 0xFFFF { - idx = 1 - c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) - count = int(c) - if count < 0 { - panic(fmt.Sprintf("leading element count %d overflows int", c)) - } +func (f *freelist) read(p *common.Page) { + if (p.Flags() & common.FreelistPageFlag) == 0 { + panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.Id(), p.Typ())) } + ids := p.FreelistPageIds() + // Copy the list of page ids from the freelist. - if count == 0 { + if len(ids) == 0 { f.ids = nil } else { - var ids []pgid - data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx) - unsafeSlice(unsafe.Pointer(&ids), data, count) - // copy the ids, so we don't modify on the freelist page directly - idsCopy := make([]pgid, count) + idsCopy := make([]common.Pgid, len(ids)) copy(idsCopy, ids) // Make sure they're sorted. - sort.Sort(pgids(idsCopy)) + sort.Sort(common.Pgids(idsCopy)) f.readIDs(idsCopy) } } // arrayReadIDs initializes the freelist from a given list of ids. 
-func (f *freelist) arrayReadIDs(ids []pgid) { +func (f *freelist) arrayReadIDs(ids []common.Pgid) { f.ids = ids f.reindex() } -func (f *freelist) arrayGetFreePageIDs() []pgid { +func (f *freelist) arrayGetFreePageIDs() []common.Pgid { return f.ids } // write writes the page ids onto a freelist page. All free and pending ids are // saved to disk since in the event of a program crash, all pending ids will // become free. -func (f *freelist) write(p *page) error { +func (f *freelist) write(p *common.Page) error { // Combine the old free pgids and pgids waiting on an open transaction. // Update the header flag. - p.flags |= freelistPageFlag + p.FlagsXOR(common.FreelistPageFlag) // The page.count can only hold up to 64k elements so if we overflow that // number then we handle it by putting the size in the first element. l := f.count() if l == 0 { - p.count = uint16(l) + p.SetCount(uint16(l)) } else if l < 0xFFFF { - p.count = uint16(l) - var ids []pgid - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&ids), data, l) + p.SetCount(uint16(l)) + var ids []common.Pgid + data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + common.UnsafeSlice(unsafe.Pointer(&ids), data, l) f.copyall(ids) } else { - p.count = 0xFFFF - var ids []pgid - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&ids), data, l+1) - ids[0] = pgid(l) + p.SetCount(0xFFFF) + var ids []common.Pgid + data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + common.UnsafeSlice(unsafe.Pointer(&ids), data, l+1) + ids[0] = common.Pgid(l) f.copyall(ids[1:]) } @@ -339,11 +328,11 @@ func (f *freelist) write(p *page) error { } // reload reads the freelist from a page and filters out pending items. -func (f *freelist) reload(p *page) { +func (f *freelist) reload(p *common.Page) { f.read(p) // Build a cache of only pending pages. 
- pcache := make(map[pgid]bool) + pcache := make(map[common.Pgid]bool) for _, txp := range f.pending { for _, pendingID := range txp.ids { pcache[pendingID] = true @@ -352,7 +341,7 @@ func (f *freelist) reload(p *page) { // Check each page in the freelist and build a new available freelist // with any pages not in the pending lists. - var a []pgid + var a []common.Pgid for _, id := range f.getFreePageIDs() { if !pcache[id] { a = append(a, id) @@ -362,10 +351,10 @@ func (f *freelist) reload(p *page) { f.readIDs(a) } -// noSyncReload reads the freelist from pgids and filters out pending items. -func (f *freelist) noSyncReload(pgids []pgid) { +// noSyncReload reads the freelist from Pgids and filters out pending items. +func (f *freelist) noSyncReload(Pgids []common.Pgid) { // Build a cache of only pending pages. - pcache := make(map[pgid]bool) + pcache := make(map[common.Pgid]bool) for _, txp := range f.pending { for _, pendingID := range txp.ids { pcache[pendingID] = true @@ -374,8 +363,8 @@ func (f *freelist) noSyncReload(pgids []pgid) { // Check each page in the freelist and build a new available freelist // with any pages not in the pending lists. - var a []pgid - for _, id := range pgids { + var a []common.Pgid + for _, id := range Pgids { if !pcache[id] { a = append(a, id) } @@ -387,7 +376,7 @@ func (f *freelist) noSyncReload(pgids []pgid) { // reindex rebuilds the free cache based on available and pending free lists. 
func (f *freelist) reindex() { ids := f.getFreePageIDs() - f.cache = make(map[pgid]struct{}, len(ids)) + f.cache = make(map[common.Pgid]struct{}, len(ids)) for _, id := range ids { f.cache[id] = struct{}{} } @@ -399,7 +388,7 @@ func (f *freelist) reindex() { } // arrayMergeSpans try to merge list of pages(represented by pgids) with existing spans but using array -func (f *freelist) arrayMergeSpans(ids pgids) { +func (f *freelist) arrayMergeSpans(ids common.Pgids) { sort.Sort(ids) - f.ids = pgids(f.ids).merge(ids) + f.ids = common.Pgids(f.ids).Merge(ids) } diff --git a/freelist_hmap.go b/freelist_hmap.go index dbd67a1e7..6e01bc116 100644 --- a/freelist_hmap.go +++ b/freelist_hmap.go @@ -1,6 +1,10 @@ package bbolt -import "sort" +import ( + "sort" + + "go.etcd.io/bbolt/internal/common" +) // hashmapFreeCount returns count of free pages(hashmap version) func (f *freelist) hashmapFreeCount() int { @@ -13,7 +17,7 @@ func (f *freelist) hashmapFreeCount() int { } // hashmapAllocate serves the same purpose as arrayAllocate, but use hashmap as backend -func (f *freelist) hashmapAllocate(txid txid, n int) pgid { +func (f *freelist) hashmapAllocate(txid common.Txid, n int) common.Pgid { if n == 0 { return 0 } @@ -26,7 +30,7 @@ func (f *freelist) hashmapAllocate(txid txid, n int) pgid { f.allocs[pid] = txid - for i := pgid(0); i < pgid(n); i++ { + for i := common.Pgid(0); i < common.Pgid(n); i++ { delete(f.cache, pid+i) } return pid @@ -48,9 +52,9 @@ func (f *freelist) hashmapAllocate(txid txid, n int) pgid { remain := size - uint64(n) // add remain span - f.addSpan(pid+pgid(n), remain) + f.addSpan(pid+common.Pgid(n), remain) - for i := pgid(0); i < pgid(n); i++ { + for i := common.Pgid(0); i < common.Pgid(n); i++ { delete(f.cache, pid+i) } return pid @@ -61,7 +65,7 @@ func (f *freelist) hashmapAllocate(txid txid, n int) pgid { } // hashmapReadIDs reads pgids as input an initial the freelist(hashmap version) -func (f *freelist) hashmapReadIDs(pgids []pgid) { +func (f 
*freelist) hashmapReadIDs(pgids []common.Pgid) { f.init(pgids) // Rebuild the page cache. @@ -69,25 +73,25 @@ func (f *freelist) hashmapReadIDs(pgids []pgid) { } // hashmapGetFreePageIDs returns the sorted free page ids -func (f *freelist) hashmapGetFreePageIDs() []pgid { +func (f *freelist) hashmapGetFreePageIDs() []common.Pgid { count := f.free_count() if count == 0 { return nil } - m := make([]pgid, 0, count) + m := make([]common.Pgid, 0, count) for start, size := range f.forwardMap { for i := 0; i < int(size); i++ { - m = append(m, start+pgid(i)) + m = append(m, start+common.Pgid(i)) } } - sort.Sort(pgids(m)) + sort.Sort(common.Pgids(m)) return m } // hashmapMergeSpans try to merge list of pages(represented by pgids) with existing spans -func (f *freelist) hashmapMergeSpans(ids pgids) { +func (f *freelist) hashmapMergeSpans(ids common.Pgids) { for _, id := range ids { // try to see if we can merge and update f.mergeWithExistingSpan(id) @@ -95,7 +99,7 @@ func (f *freelist) hashmapMergeSpans(ids pgids) { } // mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward -func (f *freelist) mergeWithExistingSpan(pid pgid) { +func (f *freelist) mergeWithExistingSpan(pid common.Pgid) { prev := pid - 1 next := pid + 1 @@ -106,10 +110,10 @@ func (f *freelist) mergeWithExistingSpan(pid pgid) { if mergeWithPrev { //merge with previous span - start := prev + 1 - pgid(preSize) + start := prev + 1 - common.Pgid(preSize) f.delSpan(start, preSize) - newStart -= pgid(preSize) + newStart -= common.Pgid(preSize) newSize += preSize } @@ -122,19 +126,19 @@ func (f *freelist) mergeWithExistingSpan(pid pgid) { f.addSpan(newStart, newSize) } -func (f *freelist) addSpan(start pgid, size uint64) { - f.backwardMap[start-1+pgid(size)] = size +func (f *freelist) addSpan(start common.Pgid, size uint64) { + f.backwardMap[start-1+common.Pgid(size)] = size f.forwardMap[start] = size if _, ok := f.freemaps[size]; !ok { - f.freemaps[size] = 
make(map[pgid]struct{}) + f.freemaps[size] = make(map[common.Pgid]struct{}) } f.freemaps[size][start] = struct{}{} } -func (f *freelist) delSpan(start pgid, size uint64) { +func (f *freelist) delSpan(start common.Pgid, size uint64) { delete(f.forwardMap, start) - delete(f.backwardMap, start+pgid(size-1)) + delete(f.backwardMap, start+common.Pgid(size-1)) delete(f.freemaps[size], start) if len(f.freemaps[size]) == 0 { delete(f.freemaps, size) @@ -143,7 +147,7 @@ func (f *freelist) delSpan(start pgid, size uint64) { // initial from pgids using when use hashmap version // pgids must be sorted -func (f *freelist) init(pgids []pgid) { +func (f *freelist) init(pgids []common.Pgid) { if len(pgids) == 0 { return } @@ -151,13 +155,13 @@ func (f *freelist) init(pgids []pgid) { size := uint64(1) start := pgids[0] - if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { + if !sort.SliceIsSorted([]common.Pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { panic("pgids not sorted") } f.freemaps = make(map[uint64]pidSet) - f.forwardMap = make(map[pgid]uint64) - f.backwardMap = make(map[pgid]uint64) + f.forwardMap = make(map[common.Pgid]uint64) + f.backwardMap = make(map[common.Pgid]uint64) for i := 1; i < len(pgids); i++ { // continuous page diff --git a/freelist_test.go b/freelist_test.go index 97656f4a2..1fffff2ff 100644 --- a/freelist_test.go +++ b/freelist_test.go @@ -7,6 +7,8 @@ import ( "sort" "testing" "unsafe" + + "go.etcd.io/bbolt/internal/common" ) // TestFreelistType is used as a env variable for test to indicate the backend type @@ -15,17 +17,17 @@ const TestFreelistType = "TEST_FREELIST_TYPE" // Ensure that a page is added to a transaction's freelist. 
func TestFreelist_free(t *testing.T) { f := newTestFreelist() - f.free(100, &page{id: 12}) - if !reflect.DeepEqual([]pgid{12}, f.pending[100].ids) { - t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100].ids) + f.free(100, common.NewPage(12, 0, 0, 0)) + if !reflect.DeepEqual([]common.Pgid{12}, f.pending[100].ids) { + t.Fatalf("exp=%v; got=%v", []common.Pgid{12}, f.pending[100].ids) } } // Ensure that a page and its overflow is added to a transaction's freelist. func TestFreelist_free_overflow(t *testing.T) { f := newTestFreelist() - f.free(100, &page{id: 12, overflow: 3}) - if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100].ids) { + f.free(100, common.NewPage(12, 0, 0, 3)) + if exp := []common.Pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100].ids) { t.Fatalf("exp=%v; got=%v", exp, f.pending[100].ids) } } @@ -33,17 +35,17 @@ func TestFreelist_free_overflow(t *testing.T) { // Ensure that a transaction's free pages can be released. func TestFreelist_release(t *testing.T) { f := newTestFreelist() - f.free(100, &page{id: 12, overflow: 1}) - f.free(100, &page{id: 9}) - f.free(102, &page{id: 39}) + f.free(100, common.NewPage(12, 0, 0, 1)) + f.free(100, common.NewPage(9, 0, 0, 0)) + f.free(102, common.NewPage(39, 0, 0, 0)) f.release(100) f.release(101) - if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.getFreePageIDs()) { + if exp := []common.Pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.getFreePageIDs()) { t.Fatalf("exp=%v; got=%v", exp, f.getFreePageIDs()) } f.release(102) - if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.getFreePageIDs()) { + if exp := []common.Pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.getFreePageIDs()) { t.Fatalf("exp=%v; got=%v", exp, f.getFreePageIDs()) } } @@ -51,33 +53,33 @@ func TestFreelist_release(t *testing.T) { // Ensure that releaseRange handles boundary conditions correctly func TestFreelist_releaseRange(t *testing.T) { type testRange struct { - begin, end txid + begin, end common.Txid } 
type testPage struct { - id pgid + id common.Pgid n int - allocTxn txid - freeTxn txid + allocTxn common.Txid + freeTxn common.Txid } var releaseRangeTests = []struct { title string pagesIn []testPage releaseRanges []testRange - wantFree []pgid + wantFree []common.Pgid }{ { title: "Single pending in range", pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, releaseRanges: []testRange{{1, 300}}, - wantFree: []pgid{3}, + wantFree: []common.Pgid{3}, }, { title: "Single pending with minimum end range", pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, releaseRanges: []testRange{{1, 200}}, - wantFree: []pgid{3}, + wantFree: []common.Pgid{3}, }, { title: "Single pending outsize minimum end range", @@ -89,7 +91,7 @@ func TestFreelist_releaseRange(t *testing.T) { title: "Single pending with minimum begin range", pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, releaseRanges: []testRange{{100, 300}}, - wantFree: []pgid{3}, + wantFree: []common.Pgid{3}, }, { title: "Single pending outside minimum begin range", @@ -101,7 +103,7 @@ func TestFreelist_releaseRange(t *testing.T) { title: "Single pending in minimum range", pagesIn: []testPage{{id: 3, n: 1, allocTxn: 199, freeTxn: 200}}, releaseRanges: []testRange{{199, 200}}, - wantFree: []pgid{3}, + wantFree: []common.Pgid{3}, }, { title: "Single pending and read transaction at 199", @@ -146,16 +148,16 @@ func TestFreelist_releaseRange(t *testing.T) { {id: 9, n: 2, allocTxn: 175, freeTxn: 200}, }, releaseRanges: []testRange{{50, 149}, {151, 300}}, - wantFree: []pgid{4, 9, 10}, + wantFree: []common.Pgid{4, 9, 10}, }, } for _, c := range releaseRangeTests { f := newTestFreelist() - var ids []pgid + var ids []common.Pgid for _, p := range c.pagesIn { for i := uint64(0); i < uint64(p.n); i++ { - ids = append(ids, pgid(uint64(p.id)+i)) + ids = append(ids, common.Pgid(uint64(p.id)+i)) } } f.readIDs(ids) @@ -164,7 +166,7 @@ func TestFreelist_releaseRange(t *testing.T) { } for _, p := range 
c.pagesIn { - f.free(p.freeTxn, &page{id: p.id, overflow: uint32(p.n - 1)}) + f.free(p.freeTxn, common.NewPage(p.id, 0, 0, uint32(p.n-1))) } for _, r := range c.releaseRanges { @@ -179,11 +181,11 @@ func TestFreelist_releaseRange(t *testing.T) { func TestFreelistHashmap_allocate(t *testing.T) { f := newTestFreelist() - if f.freelistType != FreelistMapType { + if f.freelistType != common.FreelistMapType { t.Skip() } - ids := []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18} + ids := []common.Pgid{3, 4, 5, 6, 7, 9, 12, 13, 18} f.readIDs(ids) f.allocate(1, 3) @@ -209,10 +211,10 @@ func TestFreelistHashmap_allocate(t *testing.T) { // Ensure that a freelist can find contiguous blocks of pages. func TestFreelistArray_allocate(t *testing.T) { f := newTestFreelist() - if f.freelistType != FreelistArrayType { + if f.freelistType != common.FreelistArrayType { t.Skip() } - ids := []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18} + ids := []common.Pgid{3, 4, 5, 6, 7, 9, 12, 13, 18} f.readIDs(ids) if id := int(f.allocate(1, 3)); id != 3 { t.Fatalf("exp=3; got=%v", id) @@ -235,7 +237,7 @@ func TestFreelistArray_allocate(t *testing.T) { if id := int(f.allocate(1, 0)); id != 0 { t.Fatalf("exp=0; got=%v", id) } - if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.getFreePageIDs()) { + if exp := []common.Pgid{9, 18}; !reflect.DeepEqual(exp, f.getFreePageIDs()) { t.Fatalf("exp=%v; got=%v", exp, f.getFreePageIDs()) } @@ -248,7 +250,7 @@ func TestFreelistArray_allocate(t *testing.T) { if id := int(f.allocate(1, 1)); id != 0 { t.Fatalf("exp=0; got=%v", id) } - if exp := []pgid{}; !reflect.DeepEqual(exp, f.getFreePageIDs()) { + if exp := []common.Pgid{}; !reflect.DeepEqual(exp, f.getFreePageIDs()) { t.Fatalf("exp=%v; got=%v", exp, f.getFreePageIDs()) } } @@ -257,12 +259,12 @@ func TestFreelistArray_allocate(t *testing.T) { func TestFreelist_read(t *testing.T) { // Create a page. 
var buf [4096]byte - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = freelistPageFlag - page.count = 2 + page := (*common.Page)(unsafe.Pointer(&buf[0])) + page.SetFlags(common.FreelistPageFlag) + page.SetCount(2) // Insert 2 page ids. - ids := (*[3]pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(page)) + unsafe.Sizeof(*page))) + ids := (*[3]common.Pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(page)) + unsafe.Sizeof(*page))) ids[0] = 23 ids[1] = 50 @@ -271,7 +273,7 @@ func TestFreelist_read(t *testing.T) { f.read(page) // Ensure that there are two page ids in the freelist. - if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.getFreePageIDs()) { + if exp := []common.Pgid{23, 50}; !reflect.DeepEqual(exp, f.getFreePageIDs()) { t.Fatalf("exp=%v; got=%v", exp, f.getFreePageIDs()) } } @@ -282,10 +284,10 @@ func TestFreelist_write(t *testing.T) { var buf [4096]byte f := newTestFreelist() - f.readIDs([]pgid{12, 39}) - f.pending[100] = &txPending{ids: []pgid{28, 11}} - f.pending[101] = &txPending{ids: []pgid{3}} - p := (*page)(unsafe.Pointer(&buf[0])) + f.readIDs([]common.Pgid{12, 39}) + f.pending[100] = &txPending{ids: []common.Pgid{28, 11}} + f.pending[101] = &txPending{ids: []common.Pgid{3}} + p := (*common.Page)(unsafe.Pointer(&buf[0])) if err := f.write(p); err != nil { t.Fatal(err) } @@ -296,7 +298,7 @@ func TestFreelist_write(t *testing.T) { // Ensure that the freelist is correct. // All pages should be present and in reverse order. 
- if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.getFreePageIDs()) { + if exp := []common.Pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.getFreePageIDs()) { t.Fatalf("exp=%v; got=%v", exp, f2.getFreePageIDs()) } } @@ -313,17 +315,17 @@ func benchmark_FreelistRelease(b *testing.B, size int) { for i := 0; i < b.N; i++ { txp := &txPending{ids: pending} f := newTestFreelist() - f.pending = map[txid]*txPending{1: txp} + f.pending = map[common.Txid]*txPending{1: txp} f.readIDs(ids) f.release(1) } } -func randomPgids(n int) []pgid { +func randomPgids(n int) []common.Pgid { rand.Seed(42) - pgids := make(pgids, n) + pgids := make(common.Pgids, n) for i := range pgids { - pgids[i] = pgid(rand.Int63()) + pgids[i] = common.Pgid(rand.Int63()) } sort.Sort(pgids) return pgids @@ -331,7 +333,7 @@ func randomPgids(n int) []pgid { func Test_freelist_ReadIDs_and_getFreePageIDs(t *testing.T) { f := newTestFreelist() - exp := []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18} + exp := []common.Pgid{3, 4, 5, 6, 7, 9, 12, 13, 18} f.readIDs(exp) @@ -340,7 +342,7 @@ func Test_freelist_ReadIDs_and_getFreePageIDs(t *testing.T) { } f2 := newTestFreelist() - var exp2 []pgid + var exp2 []common.Pgid f2.readIDs(exp2) if got2 := f2.getFreePageIDs(); !reflect.DeepEqual(got2, exp2) { @@ -355,53 +357,53 @@ func Test_freelist_mergeWithExist(t *testing.T) { bm2 := pidSet{5: struct{}{}} tests := []struct { name string - ids []pgid - pgid pgid - want []pgid - wantForwardmap map[pgid]uint64 - wantBackwardmap map[pgid]uint64 + ids []common.Pgid + pgid common.Pgid + want []common.Pgid + wantForwardmap map[common.Pgid]uint64 + wantBackwardmap map[common.Pgid]uint64 wantfreemap map[uint64]pidSet }{ { name: "test1", - ids: []pgid{1, 2, 4, 5, 6}, + ids: []common.Pgid{1, 2, 4, 5, 6}, pgid: 3, - want: []pgid{1, 2, 3, 4, 5, 6}, - wantForwardmap: map[pgid]uint64{1: 6}, - wantBackwardmap: map[pgid]uint64{6: 6}, + want: []common.Pgid{1, 2, 3, 4, 5, 6}, + wantForwardmap: map[common.Pgid]uint64{1: 6}, + 
wantBackwardmap: map[common.Pgid]uint64{6: 6}, wantfreemap: map[uint64]pidSet{6: bm1}, }, { name: "test2", - ids: []pgid{1, 2, 5, 6}, + ids: []common.Pgid{1, 2, 5, 6}, pgid: 3, - want: []pgid{1, 2, 3, 5, 6}, - wantForwardmap: map[pgid]uint64{1: 3, 5: 2}, - wantBackwardmap: map[pgid]uint64{6: 2, 3: 3}, + want: []common.Pgid{1, 2, 3, 5, 6}, + wantForwardmap: map[common.Pgid]uint64{1: 3, 5: 2}, + wantBackwardmap: map[common.Pgid]uint64{6: 2, 3: 3}, wantfreemap: map[uint64]pidSet{3: bm1, 2: bm2}, }, { name: "test3", - ids: []pgid{1, 2}, + ids: []common.Pgid{1, 2}, pgid: 3, - want: []pgid{1, 2, 3}, - wantForwardmap: map[pgid]uint64{1: 3}, - wantBackwardmap: map[pgid]uint64{3: 3}, + want: []common.Pgid{1, 2, 3}, + wantForwardmap: map[common.Pgid]uint64{1: 3}, + wantBackwardmap: map[common.Pgid]uint64{3: 3}, wantfreemap: map[uint64]pidSet{3: bm1}, }, { name: "test4", - ids: []pgid{2, 3}, + ids: []common.Pgid{2, 3}, pgid: 1, - want: []pgid{1, 2, 3}, - wantForwardmap: map[pgid]uint64{1: 3}, - wantBackwardmap: map[pgid]uint64{3: 3}, + want: []common.Pgid{1, 2, 3}, + wantForwardmap: map[common.Pgid]uint64{1: 3}, + wantBackwardmap: map[common.Pgid]uint64{3: 3}, wantfreemap: map[uint64]pidSet{3: bm1}, }, } for _, tt := range tests { f := newTestFreelist() - if f.freelistType == FreelistArrayType { + if f.freelistType == common.FreelistArrayType { t.Skip() } f.readIDs(tt.ids) @@ -425,9 +427,9 @@ func Test_freelist_mergeWithExist(t *testing.T) { // newTestFreelist get the freelist type from env and initial the freelist func newTestFreelist() *freelist { - freelistType := FreelistArrayType - if env := os.Getenv(TestFreelistType); env == string(FreelistMapType) { - freelistType = FreelistMapType + freelistType := common.FreelistArrayType + if env := os.Getenv(TestFreelistType); env == string(common.FreelistMapType) { + freelistType = common.FreelistMapType } return newFreelist(freelistType) diff --git a/internal/btesting/btesting.go b/internal/btesting/btesting.go index 
b30507234..b5b814526 100644 --- a/internal/btesting/btesting.go +++ b/internal/btesting/btesting.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" + "go.etcd.io/bbolt/internal/common" ) var statsFlag = flag.Bool("stats", false, "show performance stats") @@ -44,9 +45,9 @@ func MustOpenDBWithOption(t testing.TB, f string, o *bolt.Options) *DB { o = bolt.DefaultOptions } - freelistType := bolt.FreelistArrayType - if env := os.Getenv(TestFreelistType); env == string(bolt.FreelistMapType) { - freelistType = bolt.FreelistMapType + freelistType := common.FreelistArrayType + if env := os.Getenv(TestFreelistType); env == string(common.FreelistMapType) { + freelistType = common.FreelistMapType } o.FreelistType = freelistType diff --git a/internal/guts_cli/guts_cli.go b/internal/guts_cli/guts_cli.go index 30e55664d..891ddb7b4 100644 --- a/internal/guts_cli/guts_cli.go +++ b/internal/guts_cli/guts_cli.go @@ -2,14 +2,13 @@ package guts_cli // Low level access to pages / data-structures of the bbolt file. -// TODO(ptab): Merge with bbolt/page file that should get ported to internal. - import ( "errors" "fmt" "io" "os" - "unsafe" + + "go.etcd.io/bbolt/internal/common" ) var ( @@ -17,231 +16,9 @@ var ( ErrCorrupt = errors.New("invalid value") ) -// PageHeaderSize represents the size of the bolt.Page header. -const PageHeaderSize = 16 - -// Represents a marker value to indicate that a file (Meta Page) is a Bolt DB. -const magic uint32 = 0xED0CDAED - -// DO NOT EDIT. Copied from the "bolt" package. -const maxAllocSize = 0xFFFFFFF - -// DO NOT EDIT. Copied from the "bolt" package. -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -// DO NOT EDIT. Copied from the "bolt" package. -const bucketLeafFlag = 0x01 - -// DO NOT EDIT. Copied from the "bolt" package. -type Pgid uint64 - -// DO NOT EDIT. Copied from the "bolt" package. -type txid uint64 - -// DO NOT EDIT. 
Copied from the "bolt" package. -type Meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root Bucket - freelist Pgid - pgid Pgid // High Water Mark (id of next added Page if the file growths) - txid txid - checksum uint64 -} - -func LoadPageMeta(buf []byte) *Meta { - return (*Meta)(unsafe.Pointer(&buf[PageHeaderSize])) -} - -func (m *Meta) RootBucket() *Bucket { - return &m.root -} - -func (m *Meta) Txid() uint64 { - return uint64(m.txid) -} - -func (m *Meta) Print(w io.Writer) { - fmt.Fprintf(w, "Version: %d\n", m.version) - fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize) - fmt.Fprintf(w, "Flags: %08x\n", m.flags) - fmt.Fprintf(w, "Root: \n", m.root.root) - fmt.Fprintf(w, "Freelist: \n", m.freelist) - fmt.Fprintf(w, "HWM: \n", m.pgid) - fmt.Fprintf(w, "Txn ID: %d\n", m.txid) - fmt.Fprintf(w, "Checksum: %016x\n", m.checksum) - fmt.Fprintf(w, "\n") -} - -// DO NOT EDIT. Copied from the "bolt" package. -type Bucket struct { - root Pgid - sequence uint64 -} - -const bucketHeaderSize = int(unsafe.Sizeof(Bucket{})) - -func LoadBucket(buf []byte) *Bucket { - return (*Bucket)(unsafe.Pointer(&buf[0])) -} - -func (b *Bucket) String() string { - return fmt.Sprintf("", b.root, b.sequence) -} - -func (b *Bucket) RootPage() Pgid { - return b.root -} - -func (b *Bucket) InlinePage(v []byte) *Page { - return (*Page)(unsafe.Pointer(&v[bucketHeaderSize])) -} - -// DO NOT EDIT. Copied from the "bolt" package. -type Page struct { - id Pgid - flags uint16 - count uint16 - overflow uint32 - ptr uintptr -} - -func LoadPage(buf []byte) *Page { - return (*Page)(unsafe.Pointer(&buf[0])) -} - -func (p *Page) FreelistPageCount() int { - // Check for overflow and, if present, adjust actual element count. - if p.count == 0xFFFF { - return int(((*[maxAllocSize]Pgid)(unsafe.Pointer(&p.ptr)))[0]) - } else { - return int(p.count) - } -} - -func (p *Page) FreelistPagePages() []Pgid { - // Check for overflow and, if present, adjust starting index. 
- idx := 0 - if p.count == 0xFFFF { - idx = 1 - } - return (*[maxAllocSize]Pgid)(unsafe.Pointer(&p.ptr))[idx:p.FreelistPageCount()] -} - -func (p *Page) Overflow() uint32 { - return p.overflow -} - -func (p *Page) String() string { - return fmt.Sprintf("ID: %d, Type: %s, count: %d, overflow: %d", p.id, p.Type(), p.count, p.overflow) -} - -// DO NOT EDIT. Copied from the "bolt" package. - -// TODO(ptabor): Make the page-types an enum. -func (p *Page) Type() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -func (p *Page) Count() uint16 { - return p.count -} - -func (p *Page) Id() Pgid { - return p.id -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (p *Page) LeafPageElement(index uint16) *LeafPageElement { - n := &((*[0x7FFFFFF]LeafPageElement)(unsafe.Pointer(&p.ptr)))[index] - return n -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (p *Page) BranchPageElement(index uint16) *BranchPageElement { - return &((*[0x7FFFFFF]BranchPageElement)(unsafe.Pointer(&p.ptr)))[index] -} - -func (p *Page) SetId(target Pgid) { - p.id = target -} - -func (p *Page) SetCount(target uint16) { - p.count = target -} - -func (p *Page) SetOverflow(target uint32) { - p.overflow = target -} - -// DO NOT EDIT. Copied from the "bolt" package. -type BranchPageElement struct { - pos uint32 - ksize uint32 - pgid Pgid -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (n *BranchPageElement) Key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return buf[n.pos : n.pos+n.ksize] -} - -func (n *BranchPageElement) PgId() Pgid { - return n.pgid -} - -// DO NOT EDIT. Copied from the "bolt" package. 
-type LeafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (n *LeafPageElement) Key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return buf[n.pos : n.pos+n.ksize] -} - -// DO NOT EDIT. Copied from the "bolt" package. -func (n *LeafPageElement) Value() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return buf[n.pos+n.ksize : n.pos+n.ksize+n.vsize] -} - -func (n *LeafPageElement) IsBucketEntry() bool { - return n.flags&uint32(bucketLeafFlag) != 0 -} - -func (n *LeafPageElement) Bucket() *Bucket { - if n.IsBucketEntry() { - return LoadBucket(n.Value()) - } else { - return nil - } -} - // ReadPage reads Page info & full Page data from a path. // This is not transactionally safe. -func ReadPage(path string, pageID uint64) (*Page, []byte, error) { +func ReadPage(path string, pageID uint64) (*common.Page, []byte, error) { // Find Page size. pageSize, hwm, err := ReadPageAndHWMSize(path) if err != nil { @@ -264,11 +41,11 @@ func ReadPage(path string, pageID uint64) (*Page, []byte, error) { } // Determine total number of blocks. - p := LoadPage(buf) - if p.id != Pgid(pageID) { - return nil, nil, fmt.Errorf("error: %w due to unexpected Page id: %d != %d", ErrCorrupt, p.id, pageID) + p := common.LoadPage(buf) + if p.Id() != common.Pgid(pageID) { + return nil, nil, fmt.Errorf("error: %w due to unexpected Page id: %d != %d", ErrCorrupt, p.Id(), pageID) } - overflowN := p.overflow + overflowN := p.Overflow() if overflowN >= uint32(hwm)-3 { // we exclude 2 Meta pages and the current Page. return nil, nil, fmt.Errorf("error: %w, Page claims to have %d overflow pages (>=hwm=%d). 
Interrupting to avoid risky OOM", ErrCorrupt, overflowN, hwm) } @@ -280,16 +57,16 @@ func ReadPage(path string, pageID uint64) (*Page, []byte, error) { } else if n != len(buf) { return nil, nil, io.ErrUnexpectedEOF } - p = LoadPage(buf) - if p.id != Pgid(pageID) { - return nil, nil, fmt.Errorf("error: %w due to unexpected Page id: %d != %d", ErrCorrupt, p.id, pageID) + p = common.LoadPage(buf) + if p.Id() != common.Pgid(pageID) { + return nil, nil, fmt.Errorf("error: %w due to unexpected Page id: %d != %d", ErrCorrupt, p.Id(), pageID) } return p, buf, nil } func WritePage(path string, pageBuf []byte) error { - page := LoadPage(pageBuf) + page := common.LoadPage(pageBuf) pageSize, _, err := ReadPageAndHWMSize(path) if err != nil { return err @@ -309,7 +86,7 @@ func WritePage(path string, pageBuf []byte) error { // ReadPageAndHWMSize reads Page size and HWM (id of the last+1 Page). // This is not transactionally safe. -func ReadPageAndHWMSize(path string) (uint64, Pgid, error) { +func ReadPageAndHWMSize(path string) (uint64, common.Pgid, error) { // Open database file. f, err := os.Open(path) if err != nil { @@ -324,28 +101,28 @@ func ReadPageAndHWMSize(path string) (uint64, Pgid, error) { } // Read Page size from metadata. - m := LoadPageMeta(buf) - if m.magic != magic { + m := common.LoadPageMeta(buf) + if m.Magic() != common.Magic { return 0, 0, fmt.Errorf("the Meta Page has wrong (unexpected) magic") } - return uint64(m.pageSize), Pgid(m.pgid), nil + return uint64(m.PageSize()), common.Pgid(m.Pgid()), nil } // GetRootPage returns the root-page (according to the most recent transaction). 
-func GetRootPage(path string) (root Pgid, activeMeta Pgid, err error) { +func GetRootPage(path string) (root common.Pgid, activeMeta common.Pgid, err error) { _, buf0, err0 := ReadPage(path, 0) if err0 != nil { return 0, 0, err0 } - m0 := LoadPageMeta(buf0) + m0 := common.LoadPageMeta(buf0) _, buf1, err1 := ReadPage(path, 1) if err1 != nil { return 0, 1, err1 } - m1 := LoadPageMeta(buf1) - if m0.txid < m1.txid { - return m1.root.root, 1, nil + m1 := common.LoadPageMeta(buf1) + if m0.Txid() < m1.Txid() { + return m1.RootBucket().RootPage(), 1, nil } else { - return m0.root.root, 0, nil + return m0.RootBucket().RootPage(), 0, nil } } diff --git a/internal/surgeon/surgeon.go b/internal/surgeon/surgeon.go index 763583705..d2220a276 100644 --- a/internal/surgeon/surgeon.go +++ b/internal/surgeon/surgeon.go @@ -2,10 +2,11 @@ package surgeon import ( "fmt" + "go.etcd.io/bbolt/internal/common" "go.etcd.io/bbolt/internal/guts_cli" ) -func CopyPage(path string, srcPage guts_cli.Pgid, target guts_cli.Pgid) error { +func CopyPage(path string, srcPage common.Pgid, target common.Pgid) error { p1, d1, err1 := guts_cli.ReadPage(path, uint64(srcPage)) if err1 != nil { return err1 @@ -14,7 +15,7 @@ func CopyPage(path string, srcPage guts_cli.Pgid, target guts_cli.Pgid) error { return guts_cli.WritePage(path, d1) } -func ClearPage(path string, pgId guts_cli.Pgid) error { +func ClearPage(path string, pgId common.Pgid) error { // Read the page p, buf, err := guts_cli.ReadPage(path, uint64(pgId)) if err != nil { diff --git a/internal/surgeon/xray.go b/internal/surgeon/xray.go index 446934131..b3f4e2bb1 100644 --- a/internal/surgeon/xray.go +++ b/internal/surgeon/xray.go @@ -9,6 +9,7 @@ import ( "bytes" "fmt" + "go.etcd.io/bbolt/internal/common" "go.etcd.io/bbolt/internal/guts_cli" ) @@ -20,7 +21,7 @@ func NewXRay(path string) XRay { return XRay{path} } -func (n XRay) traverse(stack []guts_cli.Pgid, callback func(page *guts_cli.Page, stack []guts_cli.Pgid) error) error { +func (n XRay) 
traverse(stack []common.Pgid, callback func(page *common.Page, stack []common.Pgid) error) error { p, data, err := guts_cli.ReadPage(n.path, uint64(stack[len(stack)-1])) if err != nil { return fmt.Errorf("failed reading page (stack %v): %w", stack, err) @@ -29,10 +30,10 @@ func (n XRay) traverse(stack []guts_cli.Pgid, callback func(page *guts_cli.Page, if err != nil { return fmt.Errorf("failed callback for page (stack %v): %w", stack, err) } - switch p.Type() { + switch p.Typ() { case "meta": { - m := guts_cli.LoadPageMeta(data) + m := common.LoadPageMeta(data) r := m.RootBucket().RootPage() return n.traverse(append(stack, r), callback) } @@ -40,7 +41,7 @@ func (n XRay) traverse(stack []guts_cli.Pgid, callback func(page *guts_cli.Page, { for i := uint16(0); i < p.Count(); i++ { bpe := p.BranchPageElement(i) - if err := n.traverse(append(stack, bpe.PgId()), callback); err != nil { + if err := n.traverse(append(stack, bpe.Pgid()), callback); err != nil { return err } } @@ -73,19 +74,19 @@ func (n XRay) traverse(stack []guts_cli.Pgid, callback func(page *guts_cli.Page, // As it traverses multiple buckets, so in theory there might be multiple keys with the given name. // Note: For simplicity it's currently implemented as traversing of the whole reachable tree. // If key is a bucket name, a page-path referencing the key will be returned as well. 
-func (n XRay) FindPathsToKey(key []byte) ([][]guts_cli.Pgid, error) { - var found [][]guts_cli.Pgid +func (n XRay) FindPathsToKey(key []byte) ([][]common.Pgid, error) { + var found [][]common.Pgid rootPage, _, err := guts_cli.GetRootPage(n.path) if err != nil { return nil, err } - err = n.traverse([]guts_cli.Pgid{rootPage}, - func(page *guts_cli.Page, stack []guts_cli.Pgid) error { - if page.Type() == "leaf" { + err = n.traverse([]common.Pgid{rootPage}, + func(page *common.Page, stack []common.Pgid) error { + if page.Typ() == "leaf" { for i := uint16(0); i < page.Count(); i++ { if bytes.Equal(page.LeafPageElement(i).Key(), key) { - var copyPath []guts_cli.Pgid + var copyPath []common.Pgid copyPath = append(copyPath, stack...) found = append(found, copyPath) } diff --git a/node.go b/node.go index 9c56150d8..976934504 100644 --- a/node.go +++ b/node.go @@ -5,6 +5,8 @@ import ( "fmt" "sort" "unsafe" + + "go.etcd.io/bbolt/internal/common" ) // node represents an in-memory, deserialized page. @@ -14,7 +16,7 @@ type node struct { unbalanced bool spilled bool key []byte - pgid pgid + pgid common.Pgid parent *node children nodes inodes inodes @@ -38,7 +40,7 @@ func (n *node) minKeys() int { // size returns the size of the node after serialization. func (n *node) size() int { - sz, elsz := pageHeaderSize, n.pageElementSize() + sz, elsz := common.PageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) @@ -50,7 +52,7 @@ func (n *node) size() int { // This is an optimization to avoid calculating a large node when we only need // to know if it fits inside a certain page size. 
func (n *node) sizeLessThan(v uintptr) bool { - sz, elsz := pageHeaderSize, n.pageElementSize() + sz, elsz := common.PageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) @@ -64,9 +66,9 @@ func (n *node) sizeLessThan(v uintptr) bool { // pageElementSize returns the size of each page element based on the type of node. func (n *node) pageElementSize() uintptr { if n.isLeaf { - return leafPageElementSize + return common.LeafPageElementSize } - return branchPageElementSize + return common.BranchPageElementSize } // childAt returns the child node at a given index. @@ -113,9 +115,9 @@ func (n *node) prevSibling() *node { } // put inserts a key/value. -func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) { - if pgId >= n.bucket.tx.meta.pgid { - panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.pgid)) +func (n *node) put(oldKey, newKey, value []byte, pgId common.Pgid, flags uint32) { + if pgId >= n.bucket.tx.meta.Pgid() { + panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.Pgid())) } else if len(oldKey) <= 0 { panic("put: zero-length old key") } else if len(newKey) <= 0 { @@ -126,7 +128,7 @@ func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) { index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) // Add capacity and shift nodes if we don't have an exact match and need to insert. 
- exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + exact := len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey) if !exact { n.inodes = append(n.inodes, inode{}) copy(n.inodes[index+1:], n.inodes[index:]) @@ -137,7 +139,7 @@ func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) { inode.key = newKey inode.value = value inode.pgid = pgId - _assert(len(inode.key) > 0, "put: zero-length inode key") + common.Assert(len(inode.key) > 0, "put: zero-length inode key") } // del removes a key from the node. @@ -158,30 +160,30 @@ func (n *node) del(key []byte) { } // read initializes the node from a page. -func (n *node) read(p *page) { - n.pgid = p.id - n.isLeaf = ((p.flags & leafPageFlag) != 0) - n.inodes = make(inodes, int(p.count)) +func (n *node) read(p *common.Page) { + n.pgid = p.Id() + n.isLeaf = (p.Flags() & common.LeafPageFlag) != 0 + n.inodes = make(inodes, int(p.Count())) - for i := 0; i < int(p.count); i++ { + for i := 0; i < int(p.Count()); i++ { inode := &n.inodes[i] if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - inode.flags = elem.flags - inode.key = elem.key() - inode.value = elem.value() + elem := p.LeafPageElement(uint16(i)) + inode.flags = elem.Flags() + inode.key = elem.Key() + inode.value = elem.Value() } else { - elem := p.branchPageElement(uint16(i)) - inode.pgid = elem.pgid - inode.key = elem.key() + elem := p.BranchPageElement(uint16(i)) + inode.pgid = elem.Pgid() + inode.key = elem.Key() } - _assert(len(inode.key) > 0, "read: zero-length inode key") + common.Assert(len(inode.key) > 0, "read: zero-length inode key") } - // Save first key so we can find the node in the parent when we spill. + // Save first key, so we can find the node in the parent when we spill. 
if len(n.inodes) > 0 { n.key = n.inodes[0].key - _assert(len(n.key) > 0, "read: zero-length node key") + common.Assert(len(n.key) > 0, "read: zero-length node key") } else { n.key = nil } @@ -190,23 +192,23 @@ func (n *node) read(p *page) { // write writes the items onto one or more pages. // The page should have p.id (might be 0 for meta or bucket-inline page) and p.overflow set // and the rest should be zeroed. -func (n *node) write(p *page) { - _assert(p.count == 0 && p.flags == 0, "node cannot be written into a not empty page") +func (n *node) write(p *common.Page) { + common.Assert(p.Count() == 0 && p.Flags() == 0, "node cannot be written into a not empty page") // Initialize page. if n.isLeaf { - p.flags = leafPageFlag + p.SetFlags(common.LeafPageFlag) } else { - p.flags = branchPageFlag + p.SetFlags(common.BranchPageFlag) } if len(n.inodes) >= 0xFFFF { - panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.Id())) } - p.count = uint16(len(n.inodes)) + p.SetCount(uint16(len(n.inodes))) // Stop here if there are no items to write. - if p.count == 0 { + if p.Count() == 0 { return } @@ -214,27 +216,27 @@ func (n *node) write(p *page) { // off tracks the offset into p of the start of the next data. off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes)) for i, item := range n.inodes { - _assert(len(item.key) > 0, "write: zero-length inode key") + common.Assert(len(item.key) > 0, "write: zero-length inode key") // Create a slice to write into of needed size and advance // byte pointer for next iteration. sz := len(item.key) + len(item.value) - b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz) + b := common.UnsafeByteSlice(unsafe.Pointer(p), off, 0, sz) off += uintptr(sz) // Write the page element. 
if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.flags = item.flags - elem.ksize = uint32(len(item.key)) - elem.vsize = uint32(len(item.value)) + elem := p.LeafPageElement(uint16(i)) + elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))) + elem.SetFlags(item.flags) + elem.SetKsize(uint32(len(item.key))) + elem.SetVsize(uint32(len(item.value))) } else { - elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.ksize = uint32(len(item.key)) - elem.pgid = item.pgid - _assert(elem.pgid != p.id, "write: circular dependency occurred") + elem := p.BranchPageElement(uint16(i)) + elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))) + elem.SetKsize(uint32(len(item.key))) + elem.SetPgid(item.pgid) + common.Assert(elem.Pgid() != p.Id(), "write: circular dependency occurred") } // Write data for the element to the end of the page. @@ -273,7 +275,7 @@ func (n *node) split(pageSize uintptr) []*node { func (n *node) splitTwo(pageSize uintptr) (*node, *node) { // Ignore the split if the page doesn't have at least enough nodes for // two pages or if the nodes can fit in a single page. - if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { + if len(n.inodes) <= (common.MinKeysPerPage*2) || n.sizeLessThan(pageSize) { return n, nil } @@ -313,17 +315,17 @@ func (n *node) splitTwo(pageSize uintptr) (*node, *node) { // It returns the index as well as the size of the first page. // This is only be called from split(). func (n *node) splitIndex(threshold int) (index, sz uintptr) { - sz = pageHeaderSize + sz = common.PageHeaderSize // Loop until we only have the minimum number of keys required for the second page. 
- for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { + for i := 0; i < len(n.inodes)-common.MinKeysPerPage; i++ { index = uintptr(i) inode := n.inodes[i] elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value)) // If we have at least the minimum number of keys and adding another // node would put us over the threshold then exit and return. - if index >= minKeysPerPage && sz+elsize > uintptr(threshold) { + if index >= common.MinKeysPerPage && sz+elsize > uintptr(threshold) { break } @@ -360,7 +362,7 @@ func (n *node) spill() error { for _, node := range nodes { // Add node's page to the freelist if it's not new. if node.pgid > 0 { - tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + tx.db.freelist.free(tx.meta.Txid(), tx.page(node.pgid)) node.pgid = 0 } @@ -371,10 +373,10 @@ func (n *node) spill() error { } // Write the node. - if p.id >= tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + if p.Id() >= tx.meta.Pgid() { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.Id(), tx.meta.Pgid())) } - node.pgid = p.id + node.pgid = p.Id() node.write(p) node.spilled = true @@ -387,7 +389,7 @@ func (n *node) spill() error { node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) node.key = node.inodes[0].key - _assert(len(node.key) > 0, "spill: zero-length node key") + common.Assert(len(node.key) > 0, "spill: zero-length node key") } // Update the statistics. @@ -457,11 +459,11 @@ func (n *node) rebalance() { return } - _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + common.Assert(n.parent.numChildren() > 1, "parent must have at least 2 children") // Destination node is right sibling if idx == 0, otherwise left sibling. 
var target *node - var useNextSibling = (n.parent.childIndex(n) == 0) + var useNextSibling = n.parent.childIndex(n) == 0 if useNextSibling { target = n.nextSibling() } else { @@ -525,7 +527,7 @@ func (n *node) dereference() { key := make([]byte, len(n.key)) copy(key, n.key) n.key = key - _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + common.Assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") } for i := range n.inodes { @@ -534,7 +536,7 @@ func (n *node) dereference() { key := make([]byte, len(inode.key)) copy(key, inode.key) inode.key = key - _assert(len(inode.key) > 0, "dereference: zero-length inode key") + common.Assert(len(inode.key) > 0, "dereference: zero-length inode key") value := make([]byte, len(inode.value)) copy(value, inode.value) @@ -553,7 +555,7 @@ func (n *node) dereference() { // free adds the node's underlying page to the freelist. func (n *node) free() { if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.bucket.tx.db.freelist.free(n.bucket.tx.meta.Txid(), n.bucket.tx.page(n.pgid)) n.pgid = 0 } } @@ -602,7 +604,7 @@ func (s nodes) Less(i, j int) bool { // to an element which hasn't been added to a page yet. type inode struct { flags uint32 - pgid pgid + pgid common.Pgid key []byte value []byte } diff --git a/node_test.go b/node_test.go index eea4d2582..6d286e91b 100644 --- a/node_test.go +++ b/node_test.go @@ -3,15 +3,19 @@ package bbolt import ( "testing" "unsafe" + + "go.etcd.io/bbolt/internal/common" ) // Ensure that a node can insert a key/value. 
func TestNode_put(t *testing.T) { - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{meta: &meta{pgid: 1}}}} + m := &common.Meta{} + m.SetPgid(1) + n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{meta: m}}} n.put([]byte("baz"), []byte("baz"), []byte("2"), 0, 0) n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0) n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0) - n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag) + n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, common.LeafPageFlag) if len(n.inodes) != 3 { t.Fatalf("exp=3; got=%d", len(n.inodes)) @@ -25,7 +29,7 @@ func TestNode_put(t *testing.T) { if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" { t.Fatalf("exp=; got=<%s,%s>", k, v) } - if n.inodes[2].flags != uint32(leafPageFlag) { + if n.inodes[2].flags != uint32(common.LeafPageFlag) { t.Fatalf("not a leaf: %d", n.inodes[2].flags) } } @@ -34,18 +38,19 @@ func TestNode_put(t *testing.T) { func TestNode_read_LeafPage(t *testing.T) { // Create a page. var buf [4096]byte - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = leafPageFlag - page.count = 2 + page := (*common.Page)(unsafe.Pointer(&buf[0])) + page.SetFlags(common.LeafPageFlag) + page.SetCount(2) // Insert 2 elements at the beginning. 
sizeof(leafPageElement) == 16 - nodes := (*[3]leafPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(page)) + unsafe.Sizeof(*page))) - nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4} // pos = sizeof(leafPageElement) * 2 - nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3} // pos = sizeof(leafPageElement) + 3 + 4 + nodes := page.LeafPageElements() + //nodes := (*[3]leafPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(page)) + unsafe.Sizeof(*page))) + nodes[0] = *common.NewLeafPageElement(0, 32, 3, 4) // pos = sizeof(leafPageElement) * 2 + nodes[1] = *common.NewLeafPageElement(0, 23, 10, 3) // pos = sizeof(leafPageElement) + 3 + 4 // Write data for the nodes at the end. const s = "barfoozhelloworldbye" - data := unsafeByteSlice(unsafe.Pointer(&nodes[2]), 0, 0, len(s)) + data := common.UnsafeByteSlice(unsafe.Pointer(uintptr(unsafe.Pointer(page))+unsafe.Sizeof(*page)+common.LeafPageElementSize*2), 0, 0, len(s)) copy(data, s) // Deserialize page into a leaf. @@ -70,14 +75,16 @@ func TestNode_read_LeafPage(t *testing.T) { // Ensure that a node can serialize into a leaf page. func TestNode_write_LeafPage(t *testing.T) { // Create a node. - n := &node{isLeaf: true, inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} + m := &common.Meta{} + m.SetPgid(1) + n := &node{isLeaf: true, inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: m}}} n.put([]byte("susy"), []byte("susy"), []byte("que"), 0, 0) n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0, 0) n.put([]byte("john"), []byte("john"), []byte("johnson"), 0, 0) // Write it to a page. var buf [4096]byte - p := (*page)(unsafe.Pointer(&buf[0])) + p := (*common.Page)(unsafe.Pointer(&buf[0])) n.write(p) // Read the page back in. @@ -102,7 +109,9 @@ func TestNode_write_LeafPage(t *testing.T) { // Ensure that a node can split into appropriate subgroups. func TestNode_split(t *testing.T) { // Create a node. 
- n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} + m := &common.Meta{} + m.SetPgid(1) + n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: m}}} n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) @@ -127,7 +136,9 @@ func TestNode_split(t *testing.T) { // Ensure that a page with the minimum number of inodes just returns a single node. func TestNode_split_MinKeys(t *testing.T) { // Create a node. - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} + m := &common.Meta{} + m.SetPgid(1) + n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: m}}} n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) @@ -141,7 +152,9 @@ func TestNode_split_MinKeys(t *testing.T) { // Ensure that a node that has keys that all fit on a page just returns one leaf. func TestNode_split_SinglePage(t *testing.T) { // Create a node. 
- n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}} + m := &common.Meta{} + m.SetPgid(1) + n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: m}}} n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) diff --git a/page.go b/page.go deleted file mode 100644 index 379645c97..000000000 --- a/page.go +++ /dev/null @@ -1,214 +0,0 @@ -package bbolt - -import ( - "fmt" - "os" - "sort" - "unsafe" -) - -const pageHeaderSize = unsafe.Sizeof(page{}) - -const minKeysPerPage = 2 - -const branchPageElementSize = unsafe.Sizeof(branchPageElement{}) -const leafPageElementSize = unsafe.Sizeof(leafPageElement{}) - -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -const ( - bucketLeafFlag = 0x01 -) - -type pgid uint64 - -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 -} - -// typ returns a human readable page type string used for debugging. -func (p *page) typ() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// meta returns a pointer to the metadata section of the page. -func (p *page) meta() *meta { - return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) -} - -func (p *page) fastCheck(id pgid) { - _assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id) - // Only one flag of page-type can be set. 
- _assert(p.flags == branchPageFlag || - p.flags == leafPageFlag || - p.flags == metaPageFlag || - p.flags == freelistPageFlag, - "page %v: has unexpected type/flags: %x", p.id, p.flags) -} - -// leafPageElement retrieves the leaf node by index -func (p *page) leafPageElement(index uint16) *leafPageElement { - return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), - leafPageElementSize, int(index))) -} - -// leafPageElements retrieves a list of leaf nodes. -func (p *page) leafPageElements() []leafPageElement { - if p.count == 0 { - return nil - } - var elems []leafPageElement - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) - return elems -} - -// branchPageElement retrieves the branch node by index -func (p *page) branchPageElement(index uint16) *branchPageElement { - return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), - unsafe.Sizeof(branchPageElement{}), int(index))) -} - -// branchPageElements retrieves a list of branch nodes. -func (p *page) branchPageElements() []branchPageElement { - if p.count == 0 { - return nil - } - var elems []branchPageElement - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - unsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) - return elems -} - -// dump writes n bytes of the page to STDERR as hex output. -func (p *page) hexdump(n int) { - buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n) - fmt.Fprintf(os.Stderr, "%x\n", buf) -} - -type pages []*page - -func (s pages) Len() int { return len(s) } -func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } - -// branchPageElement represents a node on a branch page. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// key returns a byte slice of the node key. 
-func (n *branchPageElement) key() []byte { - return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) -} - -// leafPageElement represents a node on a leaf page. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// key returns a byte slice of the node key. -func (n *leafPageElement) key() []byte { - i := int(n.pos) - j := i + int(n.ksize) - return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) -} - -// value returns a byte slice of the node value. -func (n *leafPageElement) value() []byte { - i := int(n.pos) + int(n.ksize) - j := i + int(n.vsize) - return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) -} - -// PageInfo represents human readable information about a page. -type PageInfo struct { - ID int - Type string - Count int - OverflowCount int -} - -type pgids []pgid - -func (s pgids) Len() int { return len(s) } -func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pgids) Less(i, j int) bool { return s[i] < s[j] } - -// merge returns the sorted union of a and b. -func (a pgids) merge(b pgids) pgids { - // Return the opposite slice if one is nil. - if len(a) == 0 { - return b - } - if len(b) == 0 { - return a - } - merged := make(pgids, len(a)+len(b)) - mergepgids(merged, a, b) - return merged -} - -// mergepgids copies the sorted union of a and b into dst. -// If dst is too small, it panics. -func mergepgids(dst, a, b pgids) { - if len(dst) < len(a)+len(b) { - panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) - } - // Copy in the opposite slice if one is nil. - if len(a) == 0 { - copy(dst, b) - return - } - if len(b) == 0 { - copy(dst, a) - return - } - - // Merged will hold all elements from both lists. - merged := dst[:0] - - // Assign lead to the slice with a lower starting value, follow to the higher value. - lead, follow := a, b - if b[0] < a[0] { - lead, follow = b, a - } - - // Continue while there are elements in the lead. 
- for len(lead) > 0 { - // Merge largest prefix of lead that is ahead of follow[0]. - n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) - merged = append(merged, lead[:n]...) - if n >= len(lead) { - break - } - - // Swap lead and follow. - lead, follow = follow, lead[n:] - } - - // Append what's left in follow. - _ = append(merged, follow...) -} diff --git a/page_test.go b/page_test.go deleted file mode 100644 index 9f5b7c0d1..000000000 --- a/page_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package bbolt - -import ( - "reflect" - "sort" - "testing" - "testing/quick" -) - -// Ensure that the page type can be returned in human readable format. -func TestPage_typ(t *testing.T) { - if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" { - t.Fatalf("exp=branch; got=%v", typ) - } - if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" { - t.Fatalf("exp=leaf; got=%v", typ) - } - if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" { - t.Fatalf("exp=meta; got=%v", typ) - } - if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" { - t.Fatalf("exp=freelist; got=%v", typ) - } - if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" { - t.Fatalf("exp=unknown<4e20>; got=%v", typ) - } -} - -// Ensure that the hexdump debugging function doesn't blow up. -func TestPage_dump(t *testing.T) { - (&page{id: 256}).hexdump(16) -} - -func TestPgids_merge(t *testing.T) { - a := pgids{4, 5, 6, 10, 11, 12, 13, 27} - b := pgids{1, 3, 8, 9, 25, 30} - c := a.merge(b) - if !reflect.DeepEqual(c, pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}) { - t.Errorf("mismatch: %v", c) - } - - a = pgids{4, 5, 6, 10, 11, 12, 13, 27, 35, 36} - b = pgids{8, 9, 25, 30} - c = a.merge(b) - if !reflect.DeepEqual(c, pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}) { - t.Errorf("mismatch: %v", c) - } -} - -func TestPgids_merge_quick(t *testing.T) { - if err := quick.Check(func(a, b pgids) bool { - // Sort incoming lists. 
- sort.Sort(a) - sort.Sort(b) - - // Merge the two lists together. - got := a.merge(b) - - // The expected value should be the two lists combined and sorted. - exp := append(a, b...) - sort.Sort(exp) - - if !reflect.DeepEqual(exp, got) { - t.Errorf("\nexp=%+v\ngot=%+v\n", exp, got) - return false - } - - return true - }, nil); err != nil { - t.Fatal(err) - } -} diff --git a/tx.go b/tx.go index 2fac8c0a7..343644235 100644 --- a/tx.go +++ b/tx.go @@ -9,10 +9,9 @@ import ( "sync/atomic" "time" "unsafe" -) -// txid represents the internal transaction identifier. -type txid uint64 + "go.etcd.io/bbolt/internal/common" +) // Tx represents a read-only or read/write transaction on the database. // Read-only transactions can be used for retrieving values for keys and creating cursors. @@ -26,9 +25,9 @@ type Tx struct { writable bool managed bool db *DB - meta *meta + meta *common.Meta root Bucket - pages map[pgid]*page + pages map[common.Pgid]*common.Page stats TxStats commitHandlers []func() @@ -47,24 +46,24 @@ func (tx *Tx) init(db *DB) { tx.pages = nil // Copy the meta page since it can be changed by the writer. - tx.meta = &meta{} - db.meta().copy(tx.meta) + tx.meta = &common.Meta{} + db.meta().Copy(tx.meta) // Copy over the root bucket. tx.root = newBucket(tx) - tx.root.bucket = &bucket{} - *tx.root.bucket = tx.meta.root + tx.root.InBucket = &common.InBucket{} + *tx.root.InBucket = *(tx.meta.RootBucket()) // Increment the transaction id and add a page cache for writable transactions. if tx.writable { - tx.pages = make(map[pgid]*page) - tx.meta.txid += txid(1) + tx.pages = make(map[common.Pgid]*common.Page) + tx.meta.IncTxid() } } // ID returns the transaction id. func (tx *Tx) ID() int { - return int(tx.meta.txid) + return int(tx.meta.Txid()) } // DB returns a reference to the database that created the transaction. @@ -74,7 +73,7 @@ func (tx *Tx) DB() *DB { // Size returns current database size in bytes as seen by this transaction. 
func (tx *Tx) Size() int64 { - return int64(tx.meta.pgid) * int64(tx.db.pageSize) + return int64(tx.meta.Pgid()) * int64(tx.db.pageSize) } // Writable returns whether the transaction can perform write operations. @@ -140,11 +139,11 @@ func (tx *Tx) OnCommit(fn func()) { // Returns an error if a disk write error occurs, or if Commit is // called on a read-only transaction. func (tx *Tx) Commit() error { - _assert(!tx.managed, "managed tx commit not allowed") + common.Assert(!tx.managed, "managed tx commit not allowed") if tx.db == nil { - return ErrTxClosed + return common.ErrTxClosed } else if !tx.writable { - return ErrTxNotWritable + return common.ErrTxNotWritable } // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. @@ -156,7 +155,7 @@ func (tx *Tx) Commit() error { tx.stats.IncRebalanceTime(time.Since(startTime)) } - opgid := tx.meta.pgid + opgid := tx.meta.Pgid() // spill data onto dirty pages. startTime = time.Now() @@ -167,11 +166,11 @@ func (tx *Tx) Commit() error { tx.stats.IncSpillTime(time.Since(startTime)) // Free the old root bucket. - tx.meta.root.root = tx.root.root + tx.meta.RootBucket().SetRootPage(tx.root.RootPage()) // Free the old freelist because commit writes out a fresh freelist. - if tx.meta.freelist != pgidNoFreelist { - tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + if tx.meta.Freelist() != common.PgidNoFreelist { + tx.db.freelist.free(tx.meta.Txid(), tx.db.page(tx.meta.Freelist())) } if !tx.db.NoFreelistSync { @@ -180,12 +179,12 @@ func (tx *Tx) Commit() error { return err } } else { - tx.meta.freelist = pgidNoFreelist + tx.meta.SetFreelist(common.PgidNoFreelist) } // If the high water mark has moved up then attempt to grow the database. 
- if tx.meta.pgid > opgid { - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + if tx.meta.Pgid() > opgid { + if err := tx.db.grow(int(tx.meta.Pgid()+1) * tx.db.pageSize); err != nil { tx.rollback() return err } @@ -244,7 +243,7 @@ func (tx *Tx) commitFreelist() error { tx.rollback() return err } - tx.meta.freelist = p.id + tx.meta.SetFreelist(p.Id()) return nil } @@ -252,9 +251,9 @@ func (tx *Tx) commitFreelist() error { // Rollback closes the transaction and ignores all previous updates. Read-only // transactions must be rolled back and not committed. func (tx *Tx) Rollback() error { - _assert(!tx.managed, "managed tx rollback not allowed") + common.Assert(!tx.managed, "managed tx rollback not allowed") if tx.db == nil { - return ErrTxClosed + return common.ErrTxClosed } tx.nonPhysicalRollback() return nil @@ -266,7 +265,7 @@ func (tx *Tx) nonPhysicalRollback() { return } if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.rollback(tx.meta.Txid()) } tx.close() } @@ -277,7 +276,7 @@ func (tx *Tx) rollback() { return } if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.rollback(tx.meta.Txid()) // When mmap fails, the `data`, `dataref` and `datasz` may be reset to // zero values, and there is no way to reload free page IDs in this case. if tx.db.data != nil { @@ -287,7 +286,7 @@ func (tx *Tx) rollback() { tx.db.freelist.noSyncReload(tx.db.freepages()) } else { // Read free page list from freelist page. - tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + tx.db.freelist.reload(tx.db.page(tx.db.meta().Freelist())) } } } @@ -352,13 +351,13 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { // Generate a meta page. We use the same page data for both meta pages. 
buf := make([]byte, tx.db.pageSize) - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = metaPageFlag - *page.meta() = *tx.meta + page := (*common.Page)(unsafe.Pointer(&buf[0])) + page.SetFlags(common.MetaPageFlag) + *page.Meta() = *tx.meta // Write meta 0. - page.id = 0 - page.meta().checksum = page.meta().sum64() + page.SetId(0) + page.Meta().SetChecksum(page.Meta().Sum64()) nn, err := w.Write(buf) n += int64(nn) if err != nil { @@ -366,9 +365,9 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { } // Write meta 1 with a lower transaction id. - page.id = 1 - page.meta().txid -= 1 - page.meta().checksum = page.meta().sum64() + page.SetId(1) + page.Meta().DecTxid() + page.Meta().SetChecksum(page.Meta().Sum64()) nn, err = w.Write(buf) n += int64(nn) if err != nil { @@ -408,14 +407,14 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error { } // allocate returns a contiguous block of memory starting at a given page. -func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(tx.meta.txid, count) +func (tx *Tx) allocate(count int) (*common.Page, error) { + p, err := tx.db.allocate(tx.meta.Txid(), count) if err != nil { return nil, err } // Save to our page cache. - tx.pages[p.id] = p + tx.pages[p.Id()] = p // Update statistics. tx.stats.IncPageCount(int64(count)) @@ -427,18 +426,18 @@ func (tx *Tx) allocate(count int) (*page, error) { // write writes any dirty pages to disk. func (tx *Tx) write() error { // Sort pages by id. - pages := make(pages, 0, len(tx.pages)) + pages := make(common.Pages, 0, len(tx.pages)) for _, p := range tx.pages { pages = append(pages, p) } // Clear out page cache early. - tx.pages = make(map[pgid]*page) + tx.pages = make(map[common.Pgid]*common.Page) sort.Sort(pages) // Write pages to disk in order. 
for _, p := range pages { - rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize) - offset := int64(p.id) * int64(tx.db.pageSize) + rem := (uint64(p.Overflow()) + 1) * uint64(tx.db.pageSize) + offset := int64(p.Id()) * int64(tx.db.pageSize) var written uintptr // Write out page in "max allocation" sized chunks. @@ -447,7 +446,7 @@ func (tx *Tx) write() error { if sz > maxAllocSize-1 { sz = maxAllocSize - 1 } - buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) + buf := common.UnsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) if _, err := tx.db.ops.writeAt(buf, offset); err != nil { return err @@ -469,7 +468,7 @@ func (tx *Tx) write() error { } // Ignore file sync if flag is set on DB. - if !tx.db.NoSync || IgnoreNoSync { + if !tx.db.NoSync || common.IgnoreNoSync { if err := fdatasync(tx.db); err != nil { return err } @@ -479,11 +478,11 @@ func (tx *Tx) write() error { for _, p := range pages { // Ignore page sizes over 1 page. // These are allocated using make() instead of the page pool. - if int(p.overflow) != 0 { + if int(p.Overflow()) != 0 { continue } - buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize) + buf := common.UnsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize) // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 for i := range buf { @@ -500,13 +499,13 @@ func (tx *Tx) writeMeta() error { // Create a temporary buffer for the meta page. buf := make([]byte, tx.db.pageSize) p := tx.db.pageInBuffer(buf, 0) - tx.meta.write(p) + tx.meta.Write(p) // Write the meta page to file. 
- if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + if _, err := tx.db.ops.writeAt(buf, int64(p.Id())*int64(tx.db.pageSize)); err != nil { return err } - if !tx.db.NoSync || IgnoreNoSync { + if !tx.db.NoSync || common.IgnoreNoSync { if err := fdatasync(tx.db); err != nil { return err } @@ -520,69 +519,69 @@ func (tx *Tx) writeMeta() error { // page returns a reference to the page with a given id. // If page has been written to then a temporary buffered page is returned. -func (tx *Tx) page(id pgid) *page { +func (tx *Tx) page(id common.Pgid) *common.Page { // Check the dirty pages first. if tx.pages != nil { if p, ok := tx.pages[id]; ok { - p.fastCheck(id) + p.FastCheck(id) return p } } // Otherwise return directly from the mmap. p := tx.db.page(id) - p.fastCheck(id) + p.FastCheck(id) return p } // forEachPage iterates over every page within a given page and executes a function. -func (tx *Tx) forEachPage(pgidnum pgid, fn func(*page, int, []pgid)) { - stack := make([]pgid, 10) +func (tx *Tx) forEachPage(pgidnum common.Pgid, fn func(*common.Page, int, []common.Pgid)) { + stack := make([]common.Pgid, 10) stack[0] = pgidnum tx.forEachPageInternal(stack[:1], fn) } -func (tx *Tx) forEachPageInternal(pgidstack []pgid, fn func(*page, int, []pgid)) { +func (tx *Tx) forEachPageInternal(pgidstack []common.Pgid, fn func(*common.Page, int, []common.Pgid)) { p := tx.page(pgidstack[len(pgidstack)-1]) // Execute function. fn(p, len(pgidstack)-1, pgidstack) // Recursively loop over children. - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - tx.forEachPageInternal(append(pgidstack, elem.pgid), fn) + if (p.Flags() & common.BranchPageFlag) != 0 { + for i := 0; i < int(p.Count()); i++ { + elem := p.BranchPageElement(uint16(i)) + tx.forEachPageInternal(append(pgidstack, elem.Pgid()), fn) } } } // Page returns page information for a given page number. 
// This is only safe for concurrent use when used by a writable transaction. -func (tx *Tx) Page(id int) (*PageInfo, error) { +func (tx *Tx) Page(id int) (*common.PageInfo, error) { if tx.db == nil { - return nil, ErrTxClosed - } else if pgid(id) >= tx.meta.pgid { + return nil, common.ErrTxClosed + } else if common.Pgid(id) >= tx.meta.Pgid() { return nil, nil } if tx.db.freelist == nil { - return nil, ErrFreePagesNotLoaded + return nil, common.ErrFreePagesNotLoaded } // Build the page info. - p := tx.db.page(pgid(id)) - info := &PageInfo{ + p := tx.db.page(common.Pgid(id)) + info := &common.PageInfo{ ID: id, - Count: int(p.count), - OverflowCount: int(p.overflow), + Count: int(p.Count()), + OverflowCount: int(p.Overflow()), } // Determine the type (or if it's free). - if tx.db.freelist.freed(pgid(id)) { + if tx.db.freelist.freed(common.Pgid(id)) { info.Type = "free" } else { - info.Type = p.typ() + info.Type = p.Typ() } return info, nil diff --git a/tx_check.go b/tx_check.go index 75c7c0843..2ee03ec62 100644 --- a/tx_check.go +++ b/tx_check.go @@ -3,6 +3,8 @@ package bbolt import ( "encoding/hex" "fmt" + + "go.etcd.io/bbolt/internal/common" ) // Check performs several consistency checks on the database for this transaction. @@ -37,8 +39,8 @@ func (tx *Tx) check(kvStringer KVStringer, ch chan error) { tx.db.loadFreelist() // Check if any pages are double freed. - freed := make(map[pgid]bool) - all := make([]pgid, tx.db.freelist.count()) + freed := make(map[common.Pgid]bool) + all := make([]common.Pgid, tx.db.freelist.count()) tx.db.freelist.copyall(all) for _, id := range all { if freed[id] { @@ -48,12 +50,12 @@ func (tx *Tx) check(kvStringer KVStringer, ch chan error) { } // Track every reachable page. 
- reachable := make(map[pgid]*page) + reachable := make(map[common.Pgid]*common.Page) reachable[0] = tx.page(0) // meta0 reachable[1] = tx.page(1) // meta1 - if tx.meta.freelist != pgidNoFreelist { - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + if tx.meta.Freelist() != common.PgidNoFreelist { + for i := uint32(0); i <= tx.page(tx.meta.Freelist()).Overflow(); i++ { + reachable[tx.meta.Freelist()+common.Pgid(i)] = tx.page(tx.meta.Freelist()) } } @@ -61,7 +63,7 @@ func (tx *Tx) check(kvStringer KVStringer, ch chan error) { tx.checkBucket(&tx.root, reachable, freed, kvStringer, ch) // Ensure all pages below high water mark are either reachable or freed. - for i := pgid(0); i < tx.meta.pgid; i++ { + for i := common.Pgid(0); i < tx.meta.Pgid(); i++ { _, isReachable := reachable[i] if !isReachable && !freed[i] { ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) @@ -72,22 +74,22 @@ func (tx *Tx) check(kvStringer KVStringer, ch chan error) { close(ch) } -func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, +func (tx *Tx) checkBucket(b *Bucket, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, kvStringer KVStringer, ch chan error) { // Ignore inline buckets. - if b.root == 0 { + if b.RootPage() == 0 { return } // Check every page used by this bucket. - b.tx.forEachPage(b.root, func(p *page, _ int, stack []pgid) { - if p.id > tx.meta.pgid { - ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.id), int(b.tx.meta.pgid), stack) + b.tx.forEachPage(b.RootPage(), func(p *common.Page, _ int, stack []common.Pgid) { + if p.Id() > tx.meta.Pgid() { + ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.Id()), int(b.tx.meta.Pgid()), stack) } // Ensure each page is only referenced once. 
- for i := pgid(0); i <= pgid(p.overflow); i++ { - var id = p.id + i + for i := common.Pgid(0); i <= common.Pgid(p.Overflow()); i++ { + var id = p.Id() + i if _, ok := reachable[id]; ok { ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack) } @@ -95,14 +97,14 @@ func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bo } // We should only encounter un-freed leaf and branch pages. - if freed[p.id] { - ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) - } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { - ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.id), p.typ(), stack) + if freed[p.Id()] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.Id())) + } else if (p.Flags()&common.BranchPageFlag) == 0 && (p.Flags()&common.LeafPageFlag) == 0 { + ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.Id()), p.Typ(), stack) } }) - tx.recursivelyCheckPages(b.root, kvStringer.KeyToString, ch) + tx.recursivelyCheckPages(b.RootPage(), kvStringer.KeyToString, ch) // Check each bucket within this bucket. _ = b.ForEachBucket(func(k []byte) error { @@ -117,7 +119,7 @@ func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bo // key order constraints: // - keys on pages must be sorted // - keys on children pages are between 2 consecutive keys on the parent's branch page). -func (tx *Tx) recursivelyCheckPages(pgId pgid, keyToString func([]byte) string, ch chan error) { +func (tx *Tx) recursivelyCheckPages(pgId common.Pgid, keyToString func([]byte) string, ch chan error) { tx.recursivelyCheckPagesInternal(pgId, nil, nil, nil, keyToString, ch) } @@ -127,36 +129,36 @@ func (tx *Tx) recursivelyCheckPages(pgId pgid, keyToString func([]byte) string, // - Are in right ordering relationship to their parents. // `pagesStack` is expected to contain IDs of pages from the tree root to `pgid` for the clean debugging message. 
func (tx *Tx) recursivelyCheckPagesInternal( - pgId pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []pgid, + pgId common.Pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []common.Pgid, keyToString func([]byte) string, ch chan error) (maxKeyInSubtree []byte) { p := tx.page(pgId) pagesStack = append(pagesStack, pgId) switch { - case p.flags&branchPageFlag != 0: + case p.Flags()&common.BranchPageFlag != 0: // For branch page we navigate ranges of all subpages. runningMin := minKeyClosed - for i := range p.branchPageElements() { - elem := p.branchPageElement(uint16(i)) - verifyKeyOrder(elem.pgid, "branch", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) + for i := range p.BranchPageElements() { + elem := p.BranchPageElement(uint16(i)) + verifyKeyOrder(elem.Pgid(), "branch", i, elem.Key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) maxKey := maxKeyOpen - if i < len(p.branchPageElements())-1 { - maxKey = p.branchPageElement(uint16(i + 1)).key() + if i < len(p.BranchPageElements())-1 { + maxKey = p.BranchPageElement(uint16(i + 1)).Key() } - maxKeyInSubtree = tx.recursivelyCheckPagesInternal(elem.pgid, elem.key(), maxKey, pagesStack, keyToString, ch) + maxKeyInSubtree = tx.recursivelyCheckPagesInternal(elem.Pgid(), elem.Key(), maxKey, pagesStack, keyToString, ch) runningMin = maxKeyInSubtree } return maxKeyInSubtree - case p.flags&leafPageFlag != 0: + case p.Flags()&common.LeafPageFlag != 0: runningMin := minKeyClosed - for i := range p.leafPageElements() { - elem := p.leafPageElement(uint16(i)) - verifyKeyOrder(pgId, "leaf", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) - runningMin = elem.key() + for i := range p.LeafPageElements() { + elem := p.LeafPageElement(uint16(i)) + verifyKeyOrder(pgId, "leaf", i, elem.Key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) + runningMin = elem.Key() } - if p.count > 0 { - return p.leafPageElement(p.count - 1).key() + if p.Count() > 0 { + return 
p.LeafPageElement(p.Count() - 1).Key() } default: ch <- fmt.Errorf("unexpected page type for pgId:%d", pgId) @@ -168,7 +170,7 @@ func (tx *Tx) recursivelyCheckPagesInternal( * verifyKeyOrder checks whether an entry with given #index on pgId (pageType: "branch|leaf") that has given "key", * is within range determined by (previousKey..maxKeyOpen) and reports found violations to the channel (ch). */ -func verifyKeyOrder(pgId pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []pgid) { +func verifyKeyOrder(pgId common.Pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []common.Pgid) { if index == 0 && previousKey != nil && compareKeys(previousKey, key) > 0 { ch <- fmt.Errorf("the first key[%d]=(hex)%s on %s page(%d) needs to be >= the key in the ancestor (%s). Stack: %v", index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack) diff --git a/tx_test.go b/tx_test.go index fa8302d58..44cbbf13c 100644 --- a/tx_test.go +++ b/tx_test.go @@ -15,6 +15,7 @@ import ( bolt "go.etcd.io/bbolt" "go.etcd.io/bbolt/internal/btesting" + "go.etcd.io/bbolt/internal/common" ) // TestTx_Check_ReadOnly tests consistency checking on a ReadOnly database. 
@@ -84,7 +85,7 @@ func TestTx_Commit_ErrTxClosed(t *testing.T) { t.Fatal(err) } - if err := tx.Commit(); err != bolt.ErrTxClosed { + if err := tx.Commit(); err != common.ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } @@ -101,7 +102,7 @@ func TestTx_Rollback_ErrTxClosed(t *testing.T) { if err := tx.Rollback(); err != nil { t.Fatal(err) } - if err := tx.Rollback(); err != bolt.ErrTxClosed { + if err := tx.Rollback(); err != common.ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } @@ -113,7 +114,7 @@ func TestTx_Commit_ErrTxNotWritable(t *testing.T) { if err != nil { t.Fatal(err) } - if err := tx.Commit(); err != bolt.ErrTxNotWritable { + if err := tx.Commit(); err != common.ErrTxNotWritable { t.Fatal(err) } // Close the view transaction @@ -165,7 +166,7 @@ func TestTx_CreateBucket_ErrTxNotWritable(t *testing.T) { db := btesting.MustCreateDB(t) if err := db.View(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("foo")) - if err != bolt.ErrTxNotWritable { + if err != common.ErrTxNotWritable { t.Fatalf("unexpected error: %s", err) } return nil @@ -185,7 +186,7 @@ func TestTx_CreateBucket_ErrTxClosed(t *testing.T) { t.Fatal(err) } - if _, err := tx.CreateBucket([]byte("foo")); err != bolt.ErrTxClosed { + if _, err := tx.CreateBucket([]byte("foo")); err != common.ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } @@ -293,11 +294,11 @@ func TestTx_CreateBucketIfNotExists(t *testing.T) { func TestTx_CreateBucketIfNotExists_ErrBucketNameRequired(t *testing.T) { db := btesting.MustCreateDB(t) if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucketIfNotExists([]byte{}); err != bolt.ErrBucketNameRequired { + if _, err := tx.CreateBucketIfNotExists([]byte{}); err != common.ErrBucketNameRequired { t.Fatalf("unexpected error: %s", err) } - if _, err := tx.CreateBucketIfNotExists(nil); err != bolt.ErrBucketNameRequired { + if _, err := tx.CreateBucketIfNotExists(nil); err != common.ErrBucketNameRequired { t.Fatalf("unexpected error: 
%s", err) } @@ -323,7 +324,7 @@ func TestTx_CreateBucket_ErrBucketExists(t *testing.T) { // Create the same bucket again. if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != bolt.ErrBucketExists { + if _, err := tx.CreateBucket([]byte("widgets")); err != common.ErrBucketExists { t.Fatalf("unexpected error: %s", err) } return nil @@ -336,7 +337,7 @@ func TestTx_CreateBucket_ErrBucketExists(t *testing.T) { func TestTx_CreateBucket_ErrBucketNameRequired(t *testing.T) { db := btesting.MustCreateDB(t) if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket(nil); err != bolt.ErrBucketNameRequired { + if _, err := tx.CreateBucket(nil); err != common.ErrBucketNameRequired { t.Fatalf("unexpected error: %s", err) } return nil @@ -401,7 +402,7 @@ func TestTx_DeleteBucket_ErrTxClosed(t *testing.T) { if err := tx.Commit(); err != nil { t.Fatal(err) } - if err := tx.DeleteBucket([]byte("foo")); err != bolt.ErrTxClosed { + if err := tx.DeleteBucket([]byte("foo")); err != common.ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } @@ -410,7 +411,7 @@ func TestTx_DeleteBucket_ErrTxClosed(t *testing.T) { func TestTx_DeleteBucket_ReadOnly(t *testing.T) { db := btesting.MustCreateDB(t) if err := db.View(func(tx *bolt.Tx) error { - if err := tx.DeleteBucket([]byte("foo")); err != bolt.ErrTxNotWritable { + if err := tx.DeleteBucket([]byte("foo")); err != common.ErrTxNotWritable { t.Fatalf("unexpected error: %s", err) } return nil @@ -423,7 +424,7 @@ func TestTx_DeleteBucket_ReadOnly(t *testing.T) { func TestTx_DeleteBucket_NotFound(t *testing.T) { db := btesting.MustCreateDB(t) if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.DeleteBucket([]byte("widgets")); err != bolt.ErrBucketNotFound { + if err := tx.DeleteBucket([]byte("widgets")); err != common.ErrBucketNotFound { t.Fatalf("unexpected error: %s", err) } return nil diff --git a/unsafe.go b/unsafe.go deleted file mode 100644 index 
c0e503750..000000000 --- a/unsafe.go +++ /dev/null @@ -1,39 +0,0 @@ -package bbolt - -import ( - "reflect" - "unsafe" -) - -func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer { - return unsafe.Pointer(uintptr(base) + offset) -} - -func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer { - return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz) -} - -func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { - // See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices - // - // This memory is not allocated from C, but it is unmanaged by Go's - // garbage collector and should behave similarly, and the compiler - // should produce similar code. Note that this conversion allows a - // subslice to begin after the base address, with an optional offset, - // while the URL above does not cover this case and only slices from - // index 0. However, the wiki never says that the address must be to - // the beginning of a C allocation (or even that malloc was used at - // all), so this is believed to be correct. - return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j] -} - -// unsafeSlice modifies the data, len, and cap of a slice variable pointed to by -// the slice parameter. This helper should be used over other direct -// manipulation of reflect.SliceHeader to prevent misuse, namely, converting -// from reflect.SliceHeader to a Go slice type. 
-func unsafeSlice(slice, data unsafe.Pointer, len int) { - s := (*reflect.SliceHeader)(slice) - s.Data = uintptr(data) - s.Cap = len - s.Len = len -} From 852b1df32f5865e5bbee9c0fb5fa96ac50529917 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 15 Feb 2023 10:24:47 +0800 Subject: [PATCH 011/439] refactor: move inode to internal/common package Signed-off-by: Benjamin Wang --- bucket.go | 6 +-- cursor.go | 12 ++--- internal/common/inode.go | 45 ++++++++++++++++ node.go | 108 +++++++++++++++++---------------------- node_test.go | 30 +++++------ 5 files changed, 117 insertions(+), 84 deletions(-) create mode 100644 internal/common/inode.go diff --git a/bucket.go b/bucket.go index 0950f77ea..b5a796deb 100644 --- a/bucket.go +++ b/bucket.go @@ -523,7 +523,7 @@ func (b *Bucket) _forEachPageNode(pgId common.Pgid, depth int, fn func(*common.P } else { if !n.isLeaf { for _, inode := range n.inodes { - b._forEachPageNode(inode.pgid, depth+1, fn) + b._forEachPageNode(inode.Pgid(), depth+1, fn) } } } @@ -602,9 +602,9 @@ func (b *Bucket) inlineable() bool { // our threshold for inline bucket size. var size = common.PageHeaderSize for _, inode := range n.inodes { - size += common.LeafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value)) + size += common.LeafPageElementSize + uintptr(len(inode.Key())) + uintptr(len(inode.Value())) - if inode.flags&common.BucketLeafFlag != 0 { + if inode.Flags()&common.BucketLeafFlag != 0 { return false } else if size > b.maxInlineBucketSize() { return false diff --git a/cursor.go b/cursor.go index f08da545b..209c960dc 100644 --- a/cursor.go +++ b/cursor.go @@ -176,7 +176,7 @@ func (c *Cursor) goToFirstElementOnTheStack() { // Keep adding pages pointing to the first element to the stack. 
var pgId common.Pgid if ref.node != nil { - pgId = ref.node.inodes[ref.index].pgid + pgId = ref.node.inodes[ref.index].Pgid() } else { pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid() } @@ -197,7 +197,7 @@ func (c *Cursor) last() { // Keep adding pages pointing to the last element in the stack. var pgId common.Pgid if ref.node != nil { - pgId = ref.node.inodes[ref.index].pgid + pgId = ref.node.inodes[ref.index].Pgid() } else { pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid() } @@ -296,7 +296,7 @@ func (c *Cursor) searchNode(key []byte, n *node) { index := sort.Search(len(n.inodes), func(i int) bool { // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(n.inodes[i].key, key) + ret := bytes.Compare(n.inodes[i].Key(), key) if ret == 0 { exact = true } @@ -308,7 +308,7 @@ func (c *Cursor) searchNode(key []byte, n *node) { c.stack[len(c.stack)-1].index = index // Recursively search to the next page. - c.search(key, n.inodes[index].pgid) + c.search(key, n.inodes[index].Pgid()) } func (c *Cursor) searchPage(key []byte, p *common.Page) { @@ -342,7 +342,7 @@ func (c *Cursor) nsearch(key []byte) { // If we have a node then search its inodes. if n != nil { index := sort.Search(len(n.inodes), func(i int) bool { - return bytes.Compare(n.inodes[i].key, key) != -1 + return bytes.Compare(n.inodes[i].Key(), key) != -1 }) e.index = index return @@ -368,7 +368,7 @@ func (c *Cursor) keyValue() ([]byte, []byte, uint32) { // Retrieve value from node. if ref.node != nil { inode := &ref.node.inodes[ref.index] - return inode.key, inode.value, inode.flags + return inode.Key(), inode.Value(), inode.Flags() } // Or retrieve value from page. 
diff --git a/internal/common/inode.go b/internal/common/inode.go new file mode 100644 index 000000000..f8711cf15 --- /dev/null +++ b/internal/common/inode.go @@ -0,0 +1,45 @@ +package common + +// Inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. +type Inode struct { + flags uint32 + pgid Pgid + key []byte + value []byte +} + +type Inodes []Inode + +func (in *Inode) Flags() uint32 { + return in.flags +} + +func (in *Inode) SetFlags(flags uint32) { + in.flags = flags +} + +func (in *Inode) Pgid() Pgid { + return in.pgid +} + +func (in *Inode) SetPgid(id Pgid) { + in.pgid = id +} + +func (in *Inode) Key() []byte { + return in.key +} + +func (in *Inode) SetKey(key []byte) { + in.key = key +} + +func (in *Inode) Value() []byte { + return in.value +} + +func (in *Inode) SetValue(value []byte) { + in.value = value +} diff --git a/node.go b/node.go index 976934504..b97028d53 100644 --- a/node.go +++ b/node.go @@ -19,7 +19,7 @@ type node struct { pgid common.Pgid parent *node children nodes - inodes inodes + inodes common.Inodes } // root returns the top-level node this node is attached to. 
@@ -43,7 +43,7 @@ func (n *node) size() int { sz, elsz := common.PageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] - sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) + sz += elsz + uintptr(len(item.Key())) + uintptr(len(item.Value())) } return int(sz) } @@ -55,7 +55,7 @@ func (n *node) sizeLessThan(v uintptr) bool { sz, elsz := common.PageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] - sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) + sz += elsz + uintptr(len(item.Key())) + uintptr(len(item.Value())) if sz >= v { return false } @@ -76,12 +76,12 @@ func (n *node) childAt(index int) *node { if n.isLeaf { panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) } - return n.bucket.node(n.inodes[index].pgid, n) + return n.bucket.node(n.inodes[index].Pgid(), n) } // childIndex returns the index of a given child node. func (n *node) childIndex(child *node) int { - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), child.key) != -1 }) return index } @@ -125,30 +125,30 @@ func (n *node) put(oldKey, newKey, value []byte, pgId common.Pgid, flags uint32) } // Find insertion index. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), oldKey) != -1 }) // Add capacity and shift nodes if we don't have an exact match and need to insert. 
- exact := len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey) + exact := len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].Key(), oldKey) if !exact { - n.inodes = append(n.inodes, inode{}) + n.inodes = append(n.inodes, common.Inode{}) copy(n.inodes[index+1:], n.inodes[index:]) } inode := &n.inodes[index] - inode.flags = flags - inode.key = newKey - inode.value = value - inode.pgid = pgId - common.Assert(len(inode.key) > 0, "put: zero-length inode key") + inode.SetFlags(flags) + inode.SetKey(newKey) + inode.SetValue(value) + inode.SetPgid(pgId) + common.Assert(len(inode.Key()) > 0, "put: zero-length inode key") } // del removes a key from the node. func (n *node) del(key []byte) { // Find index of key. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), key) != -1 }) // Exit if the key isn't found. 
- if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].Key(), key) { return } @@ -163,26 +163,26 @@ func (n *node) del(key []byte) { func (n *node) read(p *common.Page) { n.pgid = p.Id() n.isLeaf = (p.Flags() & common.LeafPageFlag) != 0 - n.inodes = make(inodes, int(p.Count())) + n.inodes = make(common.Inodes, int(p.Count())) for i := 0; i < int(p.Count()); i++ { inode := &n.inodes[i] if n.isLeaf { elem := p.LeafPageElement(uint16(i)) - inode.flags = elem.Flags() - inode.key = elem.Key() - inode.value = elem.Value() + inode.SetFlags(elem.Flags()) + inode.SetKey(elem.Key()) + inode.SetValue(elem.Value()) } else { elem := p.BranchPageElement(uint16(i)) - inode.pgid = elem.Pgid() - inode.key = elem.Key() + inode.SetPgid(elem.Pgid()) + inode.SetKey(elem.Key()) } - common.Assert(len(inode.key) > 0, "read: zero-length inode key") + common.Assert(len(inode.Key()) > 0, "read: zero-length inode key") } // Save first key, so we can find the node in the parent when we spill. if len(n.inodes) > 0 { - n.key = n.inodes[0].key + n.key = n.inodes[0].Key() common.Assert(len(n.key) > 0, "read: zero-length node key") } else { n.key = nil @@ -216,11 +216,11 @@ func (n *node) write(p *common.Page) { // off tracks the offset into p of the start of the next data. off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes)) for i, item := range n.inodes { - common.Assert(len(item.key) > 0, "write: zero-length inode key") + common.Assert(len(item.Key()) > 0, "write: zero-length inode key") // Create a slice to write into of needed size and advance // byte pointer for next iteration. 
- sz := len(item.key) + len(item.value) + sz := len(item.Key()) + len(item.Value()) b := common.UnsafeByteSlice(unsafe.Pointer(p), off, 0, sz) off += uintptr(sz) @@ -228,20 +228,20 @@ func (n *node) write(p *common.Page) { if n.isLeaf { elem := p.LeafPageElement(uint16(i)) elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))) - elem.SetFlags(item.flags) - elem.SetKsize(uint32(len(item.key))) - elem.SetVsize(uint32(len(item.value))) + elem.SetFlags(item.Flags()) + elem.SetKsize(uint32(len(item.Key()))) + elem.SetVsize(uint32(len(item.Value()))) } else { elem := p.BranchPageElement(uint16(i)) elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))) - elem.SetKsize(uint32(len(item.key))) - elem.SetPgid(item.pgid) + elem.SetKsize(uint32(len(item.Key()))) + elem.SetPgid(item.Pgid()) common.Assert(elem.Pgid() != p.Id(), "write: circular dependency occurred") } // Write data for the element to the end of the page. - l := copy(b, item.key) - copy(b[l:], item.value) + l := copy(b, item.Key()) + copy(b[l:], item.Value()) } // DEBUG ONLY: n.dump() @@ -321,7 +321,7 @@ func (n *node) splitIndex(threshold int) (index, sz uintptr) { for i := 0; i < len(n.inodes)-common.MinKeysPerPage; i++ { index = uintptr(i) inode := n.inodes[i] - elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value)) + elsize := n.pageElementSize() + uintptr(len(inode.Key())) + uintptr(len(inode.Value())) // If we have at least the minimum number of keys and adding another // node would put us over the threshold then exit and return. 
@@ -384,11 +384,11 @@ func (n *node) spill() error { if node.parent != nil { var key = node.key if key == nil { - key = node.inodes[0].key + key = node.inodes[0].Key() } - node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) - node.key = node.inodes[0].key + node.parent.put(key, node.inodes[0].Key(), nil, node.pgid, 0) + node.key = node.inodes[0].Key() common.Assert(len(node.key) > 0, "spill: zero-length node key") } @@ -428,14 +428,14 @@ func (n *node) rebalance() { // If root node is a branch and only has one node then collapse it. if !n.isLeaf && len(n.inodes) == 1 { // Move root's child up. - child := n.bucket.node(n.inodes[0].pgid, n) + child := n.bucket.node(n.inodes[0].Pgid(), n) n.isLeaf = child.isLeaf n.inodes = child.inodes[:] n.children = child.children // Reparent all child nodes being moved. for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { + if child, ok := n.bucket.nodes[inode.Pgid()]; ok { child.parent = n } } @@ -474,7 +474,7 @@ func (n *node) rebalance() { if useNextSibling { // Reparent all child nodes being moved. for _, inode := range target.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { + if child, ok := n.bucket.nodes[inode.Pgid()]; ok { child.parent.removeChild(child) child.parent = n child.parent.children = append(child.parent.children, child) @@ -490,7 +490,7 @@ func (n *node) rebalance() { } else { // Reparent all child nodes being moved. 
for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { + if child, ok := n.bucket.nodes[inode.Pgid()]; ok { child.parent.removeChild(child) child.parent = target child.parent.children = append(child.parent.children, child) @@ -533,14 +533,14 @@ func (n *node) dereference() { for i := range n.inodes { inode := &n.inodes[i] - key := make([]byte, len(inode.key)) - copy(key, inode.key) - inode.key = key - common.Assert(len(inode.key) > 0, "dereference: zero-length inode key") + key := make([]byte, len(inode.Key())) + copy(key, inode.Key()) + inode.SetKey(key) + common.Assert(len(inode.Key()) > 0, "dereference: zero-length inode key") - value := make([]byte, len(inode.value)) - copy(value, inode.value) - inode.value = value + value := make([]byte, len(inode.Value())) + copy(value, inode.Value()) + inode.SetValue(value) } // Recursively dereference children. @@ -596,17 +596,5 @@ type nodes []*node func (s nodes) Len() int { return len(s) } func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s nodes) Less(i, j int) bool { - return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 + return bytes.Compare(s[i].inodes[0].Key(), s[j].inodes[0].Key()) == -1 } - -// inode represents an internal node inside of a node. -// It can be used to point to elements in a page or point -// to an element which hasn't been added to a page yet. 
-type inode struct { - flags uint32 - pgid common.Pgid - key []byte - value []byte -} - -type inodes []inode diff --git a/node_test.go b/node_test.go index 6d286e91b..ce36bf069 100644 --- a/node_test.go +++ b/node_test.go @@ -11,7 +11,7 @@ import ( func TestNode_put(t *testing.T) { m := &common.Meta{} m.SetPgid(1) - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{meta: m}}} + n := &node{inodes: make(common.Inodes, 0), bucket: &Bucket{tx: &Tx{meta: m}}} n.put([]byte("baz"), []byte("baz"), []byte("2"), 0, 0) n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0) n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0) @@ -20,17 +20,17 @@ func TestNode_put(t *testing.T) { if len(n.inodes) != 3 { t.Fatalf("exp=3; got=%d", len(n.inodes)) } - if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" { + if k, v := n.inodes[0].Key(), n.inodes[0].Value(); string(k) != "bar" || string(v) != "1" { t.Fatalf("exp=; got=<%s,%s>", k, v) } - if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" { + if k, v := n.inodes[1].Key(), n.inodes[1].Value(); string(k) != "baz" || string(v) != "2" { t.Fatalf("exp=; got=<%s,%s>", k, v) } - if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" { + if k, v := n.inodes[2].Key(), n.inodes[2].Value(); string(k) != "foo" || string(v) != "3" { t.Fatalf("exp=; got=<%s,%s>", k, v) } - if n.inodes[2].flags != uint32(common.LeafPageFlag) { - t.Fatalf("not a leaf: %d", n.inodes[2].flags) + if n.inodes[2].Flags() != uint32(common.LeafPageFlag) { + t.Fatalf("not a leaf: %d", n.inodes[2].Flags()) } } @@ -64,10 +64,10 @@ func TestNode_read_LeafPage(t *testing.T) { if len(n.inodes) != 2 { t.Fatalf("exp=2; got=%d", len(n.inodes)) } - if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" { + if k, v := n.inodes[0].Key(), n.inodes[0].Value(); string(k) != "bar" || string(v) != "fooz" { t.Fatalf("exp=; got=<%s,%s>", k, v) } - 
if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" { + if k, v := n.inodes[1].Key(), n.inodes[1].Value(); string(k) != "helloworld" || string(v) != "bye" { t.Fatalf("exp=; got=<%s,%s>", k, v) } } @@ -77,7 +77,7 @@ func TestNode_write_LeafPage(t *testing.T) { // Create a node. m := &common.Meta{} m.SetPgid(1) - n := &node{isLeaf: true, inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: m}}} + n := &node{isLeaf: true, inodes: make(common.Inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: m}}} n.put([]byte("susy"), []byte("susy"), []byte("que"), 0, 0) n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0, 0) n.put([]byte("john"), []byte("john"), []byte("johnson"), 0, 0) @@ -95,13 +95,13 @@ func TestNode_write_LeafPage(t *testing.T) { if len(n2.inodes) != 3 { t.Fatalf("exp=3; got=%d", len(n2.inodes)) } - if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" { + if k, v := n2.inodes[0].Key(), n2.inodes[0].Value(); string(k) != "john" || string(v) != "johnson" { t.Fatalf("exp=; got=<%s,%s>", k, v) } - if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" { + if k, v := n2.inodes[1].Key(), n2.inodes[1].Value(); string(k) != "ricki" || string(v) != "lake" { t.Fatalf("exp=; got=<%s,%s>", k, v) } - if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" { + if k, v := n2.inodes[2].Key(), n2.inodes[2].Value(); string(k) != "susy" || string(v) != "que" { t.Fatalf("exp=; got=<%s,%s>", k, v) } } @@ -111,7 +111,7 @@ func TestNode_split(t *testing.T) { // Create a node. 
m := &common.Meta{} m.SetPgid(1) - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: m}}} + n := &node{inodes: make(common.Inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: m}}} n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) @@ -138,7 +138,7 @@ func TestNode_split_MinKeys(t *testing.T) { // Create a node. m := &common.Meta{} m.SetPgid(1) - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: m}}} + n := &node{inodes: make(common.Inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: m}}} n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) @@ -154,7 +154,7 @@ func TestNode_split_SinglePage(t *testing.T) { // Create a node. m := &common.Meta{} m.SetPgid(1) - n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: m}}} + n := &node{inodes: make(common.Inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: m}}} n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0) n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0) n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0) From f68adfee81834c5008e828b0bd418942400cc651 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sat, 4 Mar 2023 04:58:53 +0800 Subject: [PATCH 012/439] CMD: update leafPageElement to return both key and value directly Signed-off-by: Benjamin Wang --- cmd/bbolt/main.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index a06dd0812..d78eea5af 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -502,16 +502,17 @@ func (cmd *pageItemCommand) Run(args ...string) error { return nil 
} -func (cmd *pageItemCommand) validateLeafPage(pageBytes []byte, index uint16) (*common.Page, error) { +func (cmd *pageItemCommand) leafPageElement(pageBytes []byte, index uint16) ([]byte, []byte, error) { p := common.LoadPage(pageBytes) if index >= p.Count() { - return nil, fmt.Errorf("leafPageElement: expected item index less than %d, but got %d", p.Count(), index) + return nil, nil, fmt.Errorf("leafPageElement: expected item index less than %d, but got %d", p.Count(), index) } if p.Typ() != "leaf" { - return nil, fmt.Errorf("leafPageElement: expected page type of 'leaf', but got '%s'", p.Typ()) + return nil, nil, fmt.Errorf("leafPageElement: expected page type of 'leaf', but got '%s'", p.Typ()) } - return p, nil + e := p.LeafPageElement(index) + return e.Key(), e.Value(), nil } const FORMAT_MODES = "auto|ascii-encoded|hex|bytes|redacted" @@ -561,22 +562,21 @@ func writelnBytes(w io.Writer, b []byte, format string) error { // PrintLeafItemKey writes the bytes of a leaf element's key. func (cmd *pageItemCommand) PrintLeafItemKey(w io.Writer, pageBytes []byte, index uint16, format string) error { - p, err := cmd.validateLeafPage(pageBytes, index) + k, _, err := cmd.leafPageElement(pageBytes, index) if err != nil { return err } - e := p.LeafPageElement(index) - return writelnBytes(w, e.Key(), format) + + return writelnBytes(w, k, format) } // PrintLeafItemValue writes the bytes of a leaf element's value. func (cmd *pageItemCommand) PrintLeafItemValue(w io.Writer, pageBytes []byte, index uint16, format string) error { - p, err := cmd.validateLeafPage(pageBytes, index) + _, v, err := cmd.leafPageElement(pageBytes, index) if err != nil { return err } - e := p.LeafPageElement(index) - return writelnBytes(w, e.Value(), format) + return writelnBytes(w, v, format) } // Usage returns the help message. 
From 3be1fef0d313c882401dccf68b2a8d0ca776fc7c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Mar 2023 15:11:52 +0000 Subject: [PATCH 013/439] Bump golang.org/x/sys from 0.5.0 to 0.6.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.5.0 to 0.6.0. - [Release notes](https://github.com/golang/sys/releases) - [Commits](https://github.com/golang/sys/compare/v0.5.0...v0.6.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 80ce15aaf..a69495e97 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.17 require ( github.com/stretchr/testify v1.8.2 go.etcd.io/gofail v0.1.0 - golang.org/x/sys v0.5.0 + golang.org/x/sys v0.6.0 ) require ( diff --git a/go.sum b/go.sum index f8c867bf7..ed1ee3861 100644 --- a/go.sum +++ b/go.sum @@ -13,8 +13,8 @@ github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From a3a9877de629e1e63f60b26f0d8f094355b3e122 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 8 Mar 2023 10:46:18 +0800 Subject: [PATCH 014/439] encapsulate the logic of checking the page type Signed-off-by: Benjamin Wang --- bucket.go | 6 +++--- cursor.go | 4 ++-- freelist.go | 4 ++-- internal/common/page.go | 24 ++++++++++++++++++++---- node.go | 2 +- tx.go | 2 +- tx_check.go | 6 +++--- 7 files changed, 32 insertions(+), 16 deletions(-) diff --git a/bucket.go b/bucket.go index b5a796deb..52f9790ec 100644 --- a/bucket.go +++ b/bucket.go @@ -410,7 +410,7 @@ func (b *Bucket) Stats() BucketStats { s.InlineBucketN += 1 } b.forEachPage(func(p *common.Page, depth int, pgstack []common.Pgid) { - if (p.Flags() & common.LeafPageFlag) != 0 { + if p.IsLeafPage() { s.KeyN += int(p.Count()) // used totals the used bytes for the page @@ -450,7 +450,7 @@ func (b *Bucket) Stats() BucketStats { } } } - } else if (p.Flags() & common.BranchPageFlag) != 0 { + } else if p.IsBranchPage() { s.BranchPageN++ lastElement := p.BranchPageElement(p.Count() - 1) @@ -514,7 +514,7 @@ func (b *Bucket) _forEachPageNode(pgId common.Pgid, depth int, fn func(*common.P // Recursively loop over children. if p != nil { - if (p.Flags() & common.BranchPageFlag) != 0 { + if p.IsBranchPage() { for i := 0; i < int(p.Count()); i++ { elem := p.BranchPageElement(uint16(i)) b._forEachPageNode(elem.Pgid(), depth+1, fn) diff --git a/cursor.go b/cursor.go index 209c960dc..14556e508 100644 --- a/cursor.go +++ b/cursor.go @@ -272,7 +272,7 @@ func (c *Cursor) prev() (key []byte, value []byte, flags uint32) { // search recursively performs a binary search against a given page/node until it finds a given key. 
func (c *Cursor) search(key []byte, pgId common.Pgid) { p, n := c.bucket.pageNode(pgId) - if p != nil && (p.Flags()&(common.BranchPageFlag|common.LeafPageFlag)) == 0 { + if p != nil && !p.IsBranchPage() && !p.IsLeafPage() { panic(fmt.Sprintf("invalid page type: %d: %x", p.Id(), p.Flags())) } e := elemRef{page: p, node: n} @@ -410,7 +410,7 @@ func (r *elemRef) isLeaf() bool { if r.node != nil { return r.node.isLeaf } - return (r.page.Flags() & common.LeafPageFlag) != 0 + return r.page.IsLeafPage() } // count returns the number of inodes or page elements. diff --git a/freelist.go b/freelist.go index dfccc503b..24dfc3e48 100644 --- a/freelist.go +++ b/freelist.go @@ -166,7 +166,7 @@ func (f *freelist) free(txid common.Txid, p *common.Page) { allocTxid, ok := f.allocs[p.Id()] if ok { delete(f.allocs, p.Id()) - } else if (p.Flags() & common.FreelistPageFlag) != 0 { + } else if p.IsFreelistPage() { // Freelist is always allocated by prior tx. allocTxid = txid - 1 } @@ -265,7 +265,7 @@ func (f *freelist) freed(pgId common.Pgid) bool { // read initializes the freelist from a freelist page. func (f *freelist) read(p *common.Page) { - if (p.Flags() & common.FreelistPageFlag) == 0 { + if !p.IsFreelistPage() { panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.Id(), p.Typ())) } diff --git a/internal/common/page.go b/internal/common/page.go index 0975f738d..cd8abf831 100644 --- a/internal/common/page.go +++ b/internal/common/page.go @@ -45,18 +45,34 @@ func NewPage(id Pgid, flags, count uint16, overflow uint32) *Page { // Typ returns a human-readable page type string used for debugging. 
func (p *Page) Typ() string { - if (p.flags & BranchPageFlag) != 0 { + if p.IsBranchPage() { return "branch" - } else if (p.flags & LeafPageFlag) != 0 { + } else if p.IsLeafPage() { return "leaf" - } else if (p.flags & MetaPageFlag) != 0 { + } else if p.IsMetaPage() { return "meta" - } else if (p.flags & FreelistPageFlag) != 0 { + } else if p.IsFreelistPage() { return "freelist" } return fmt.Sprintf("unknown<%02x>", p.flags) } +func (p *Page) IsBranchPage() bool { + return p.flags&BranchPageFlag != 0 +} + +func (p *Page) IsLeafPage() bool { + return p.flags&LeafPageFlag != 0 +} + +func (p *Page) IsMetaPage() bool { + return p.flags&MetaPageFlag != 0 +} + +func (p *Page) IsFreelistPage() bool { + return p.flags&FreelistPageFlag != 0 +} + // Meta returns a pointer to the metadata section of the page. func (p *Page) Meta() *Meta { return (*Meta)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) diff --git a/node.go b/node.go index b97028d53..5f3518faf 100644 --- a/node.go +++ b/node.go @@ -162,7 +162,7 @@ func (n *node) del(key []byte) { // read initializes the node from a page. func (n *node) read(p *common.Page) { n.pgid = p.Id() - n.isLeaf = (p.Flags() & common.LeafPageFlag) != 0 + n.isLeaf = p.IsLeafPage() n.inodes = make(common.Inodes, int(p.Count())) for i := 0; i < int(p.Count()); i++ { diff --git a/tx.go b/tx.go index 343644235..67362b66d 100644 --- a/tx.go +++ b/tx.go @@ -548,7 +548,7 @@ func (tx *Tx) forEachPageInternal(pgidstack []common.Pgid, fn func(*common.Page, fn(p, len(pgidstack)-1, pgidstack) // Recursively loop over children. 
- if (p.Flags() & common.BranchPageFlag) != 0 { + if p.IsBranchPage() { for i := 0; i < int(p.Count()); i++ { elem := p.BranchPageElement(uint16(i)) tx.forEachPageInternal(append(pgidstack, elem.Pgid()), fn) diff --git a/tx_check.go b/tx_check.go index 2ee03ec62..ee72cda22 100644 --- a/tx_check.go +++ b/tx_check.go @@ -99,7 +99,7 @@ func (tx *Tx) checkBucket(b *Bucket, reachable map[common.Pgid]*common.Page, fre // We should only encounter un-freed leaf and branch pages. if freed[p.Id()] { ch <- fmt.Errorf("page %d: reachable freed", int(p.Id())) - } else if (p.Flags()&common.BranchPageFlag) == 0 && (p.Flags()&common.LeafPageFlag) == 0 { + } else if !p.IsBranchPage() && !p.IsLeafPage() { ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.Id()), p.Typ(), stack) } }) @@ -135,7 +135,7 @@ func (tx *Tx) recursivelyCheckPagesInternal( p := tx.page(pgId) pagesStack = append(pagesStack, pgId) switch { - case p.Flags()&common.BranchPageFlag != 0: + case p.IsBranchPage(): // For branch page we navigate ranges of all subpages. runningMin := minKeyClosed for i := range p.BranchPageElements() { @@ -150,7 +150,7 @@ func (tx *Tx) recursivelyCheckPagesInternal( runningMin = maxKeyInSubtree } return maxKeyInSubtree - case p.Flags()&common.LeafPageFlag != 0: + case p.IsLeafPage(): runningMin := minKeyClosed for i := range p.LeafPageElements() { elem := p.LeafPageElement(uint16(i)) From 4e41eab093af93da429a365c9e39e70da0a3a943 Mon Sep 17 00:00:00 2001 From: James Blair Date: Thu, 9 Mar 2023 11:28:24 +1300 Subject: [PATCH 015/439] Updated go to 1.19.7 Go 1.17 is out of support, this brings bbolt in line with other etcd maintained projects and addresses CVEs with older versions of golang. 
Signed-off-by: James Blair --- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/tests.yaml | 4 ++-- go.mod | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index 37f36819b..d8d556575 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -11,7 +11,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: "1.17.13" + go-version: "1.19.7" - run: | make gofail-enable make test-failpoint diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 30eed1c74..4326d40f7 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -15,7 +15,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: "1.17.13" + go-version: "1.19.7" - run: make fmt - env: TARGET: ${{ matrix.target }} @@ -66,7 +66,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: "1.17.13" + go-version: "1.19.7" - run: make fmt - env: TARGET: ${{ matrix.target }} diff --git a/go.mod b/go.mod index a69495e97..3602e0697 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module go.etcd.io/bbolt -go 1.17 +go 1.19 require ( github.com/stretchr/testify v1.8.2 From ab889e1dfa532249ae82b8462de6e3d4a240912a Mon Sep 17 00:00:00 2001 From: zhangsong Date: Fri, 2 Jul 2021 20:20:04 +0800 Subject: [PATCH 016/439] display progress during benchmark Signed-off-by: Cenk Alti --- cmd/bbolt/main.go | 123 ++++++++++++++++++++++++---------------------- 1 file changed, 63 insertions(+), 60 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index d78eea5af..ead1d0d77 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -1077,7 +1077,7 @@ func (cmd *benchCommand) Run(args ...string) error { // Remove path if "-work" is not set. Otherwise keep path. 
if options.Work { - fmt.Fprintf(cmd.Stdout, "work: %s\n", options.Path) + fmt.Fprintf(cmd.Stderr, "work: %s\n", options.Path) } else { defer os.Remove(options.Path) } @@ -1091,19 +1091,22 @@ func (cmd *benchCommand) Run(args ...string) error { defer db.Close() // Write to the database. - var results BenchResults - if err := cmd.runWrites(db, options, &results); err != nil { + writeResults := BenchResults{int64(0), 0} + fmt.Fprintf(cmd.Stderr, "starting write benchmark.\n") + if err := cmd.runWrites(db, options, &writeResults); err != nil { return fmt.Errorf("write: %v", err) } + readResults := BenchResults{int64(0), 0} + fmt.Fprintf(cmd.Stderr, "starting read benchmark.\n") // Read from the database. - if err := cmd.runReads(db, options, &results); err != nil { + if err := cmd.runReads(db, options, &readResults); err != nil { return fmt.Errorf("bench: read: %s", err) } // Print results. - fmt.Fprintf(os.Stderr, "# Write\t%v\t(%v/op)\t(%v op/sec)\n", results.WriteDuration, results.WriteOpDuration(), results.WriteOpsPerSecond()) - fmt.Fprintf(os.Stderr, "# Read\t%v\t(%v/op)\t(%v op/sec)\n", results.ReadDuration, results.ReadOpDuration(), results.ReadOpsPerSecond()) + fmt.Fprintf(os.Stderr, "# Write\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", writeResults.CompletedOps, writeResults.Duration, writeResults.OpDuration(), writeResults.OpsPerSecond()) + fmt.Fprintf(os.Stderr, "# Read\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", readResults.CompletedOps, readResults.Duration, readResults.OpDuration(), readResults.OpsPerSecond()) fmt.Fprintln(os.Stderr, "") return nil } @@ -1117,8 +1120,8 @@ func (cmd *benchCommand) ParseFlags(args []string) (*BenchOptions, error) { fs.StringVar(&options.ProfileMode, "profile-mode", "rw", "") fs.StringVar(&options.WriteMode, "write-mode", "seq", "") fs.StringVar(&options.ReadMode, "read-mode", "seq", "") - fs.IntVar(&options.Iterations, "count", 1000, "") - fs.IntVar(&options.BatchSize, "batch-size", 0, "") + fs.Int64Var(&options.Iterations, "count", 
1000, "") + fs.Int64Var(&options.BatchSize, "batch-size", 0, "") fs.IntVar(&options.KeySize, "key-size", 8, "") fs.IntVar(&options.ValueSize, "value-size", 32, "") fs.StringVar(&options.CPUProfile, "cpuprofile", "", "") @@ -1162,6 +1165,10 @@ func (cmd *benchCommand) runWrites(db *bolt.DB, options *BenchOptions, results * cmd.startProfiling(options) } + finishChan := make(chan interface{}) + go checkProgress(results, finishChan, cmd.Stderr) + defer close(finishChan) + t := time.Now() var err error @@ -1179,7 +1186,7 @@ func (cmd *benchCommand) runWrites(db *bolt.DB, options *BenchOptions, results * } // Save time to write. - results.WriteDuration = time.Since(t) + results.Duration = time.Since(t) // Stop profiling for writes only. if options.ProfileMode == "w" { @@ -1210,14 +1217,12 @@ func (cmd *benchCommand) runWritesRandomNested(db *bolt.DB, options *BenchOption } func (cmd *benchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { - results.WriteOps = options.Iterations - - for i := 0; i < options.Iterations; i += options.BatchSize { + for i := int64(0); i < options.Iterations; i += options.BatchSize { if err := db.Update(func(tx *bolt.Tx) error { b, _ := tx.CreateBucketIfNotExists(benchBucketName) b.FillPercent = options.FillPercent - for j := 0; j < options.BatchSize; j++ { + for j := int64(0); j < options.BatchSize; j++ { key := make([]byte, options.KeySize) value := make([]byte, options.ValueSize) @@ -1233,15 +1238,15 @@ func (cmd *benchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, return nil }); err != nil { return err + } else { + results.CompletedOps += options.BatchSize } } return nil } func (cmd *benchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { - results.WriteOps = options.Iterations - - for i := 0; i < options.Iterations; i += options.BatchSize { + for i := int64(0); i < 
options.Iterations; i += options.BatchSize { if err := db.Update(func(tx *bolt.Tx) error { top, err := tx.CreateBucketIfNotExists(benchBucketName) if err != nil { @@ -1260,7 +1265,7 @@ func (cmd *benchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOp } b.FillPercent = options.FillPercent - for j := 0; j < options.BatchSize; j++ { + for j := int64(0); j < options.BatchSize; j++ { var key = make([]byte, options.KeySize) var value = make([]byte, options.ValueSize) @@ -1276,6 +1281,8 @@ func (cmd *benchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOp return nil }); err != nil { return err + } else { + results.CompletedOps += options.BatchSize } } return nil @@ -1288,6 +1295,10 @@ func (cmd *benchCommand) runReads(db *bolt.DB, options *BenchOptions, results *B cmd.startProfiling(options) } + finishChan := make(chan interface{}) + go checkProgress(results, finishChan, cmd.Stderr) + defer close(finishChan) + t := time.Now() var err error @@ -1304,7 +1315,7 @@ func (cmd *benchCommand) runReads(db *bolt.DB, options *BenchOptions, results *B } // Save read time. - results.ReadDuration = time.Since(t) + results.Duration = time.Since(t) // Stop profiling for reads. 
if options.ProfileMode == "rw" || options.ProfileMode == "r" { @@ -1319,22 +1330,19 @@ func (cmd *benchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, t := time.Now() for { - var count int c := tx.Bucket(benchBucketName).Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { if v == nil { return errors.New("invalid value") } - count++ + results.CompletedOps++ } - if options.WriteMode == "seq" && count != options.Iterations { - return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, count) + if options.WriteMode == "seq" && results.CompletedOps != options.Iterations { + return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, results.CompletedOps) } - results.ReadOps += count - // Make sure we do this for at least a second. if time.Since(t) >= time.Second { break @@ -1350,7 +1358,6 @@ func (cmd *benchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOpt t := time.Now() for { - var count int var top = tx.Bucket(benchBucketName) if err := top.ForEach(func(name, _ []byte) error { if b := top.Bucket(name); b != nil { @@ -1359,7 +1366,7 @@ func (cmd *benchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOpt if v == nil { return ErrInvalidValue } - count++ + results.CompletedOps++ } } return nil @@ -1367,12 +1374,10 @@ func (cmd *benchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOpt return err } - if options.WriteMode == "seq-nest" && count != options.Iterations { - return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, count) + if options.WriteMode == "seq-nest" && results.CompletedOps != options.Iterations { + return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, results.CompletedOps) } - results.ReadOps += count - // Make sure we do this for at least a second. 
if time.Since(t) >= time.Second { break @@ -1383,6 +1388,23 @@ func (cmd *benchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOpt }) } +func checkProgress(results *BenchResults, finishChan chan interface{}, stderr io.Writer) { + ticker := time.Tick(time.Second) + lastCompleted, lastTime := int64(0), time.Now() + for { + select { + case <-finishChan: + return + case t := <-ticker: + completed, taken := results.CompletedOps, t.Sub(lastTime) + fmt.Fprintf(stderr, "Completed %d requests, %d/s \n", + completed, ((completed-lastCompleted)*int64(time.Second))/int64(taken), + ) + lastCompleted, lastTime = completed, t + } + } +} + // File handlers for the various profiles. var cpuprofile, memprofile, blockprofile *os.File @@ -1458,8 +1480,8 @@ type BenchOptions struct { ProfileMode string WriteMode string ReadMode string - Iterations int - BatchSize int + Iterations int64 + BatchSize int64 KeySize int ValueSize int CPUProfile string @@ -1474,40 +1496,21 @@ type BenchOptions struct { // BenchResults represents the performance results of the benchmark. type BenchResults struct { - WriteOps int - WriteDuration time.Duration - ReadOps int - ReadDuration time.Duration -} - -// Returns the duration for a single write operation. -func (r *BenchResults) WriteOpDuration() time.Duration { - if r.WriteOps == 0 { - return 0 - } - return r.WriteDuration / time.Duration(r.WriteOps) -} - -// Returns average number of write operations that can be performed per second. -func (r *BenchResults) WriteOpsPerSecond() int { - var op = r.WriteOpDuration() - if op == 0 { - return 0 - } - return int(time.Second) / int(op) + CompletedOps int64 + Duration time.Duration } -// Returns the duration for a single read operation. -func (r *BenchResults) ReadOpDuration() time.Duration { - if r.ReadOps == 0 { +// Returns the duration for a single read/write operation. 
+func (r *BenchResults) OpDuration() time.Duration { + if r.CompletedOps == 0 { return 0 } - return r.ReadDuration / time.Duration(r.ReadOps) + return r.Duration / time.Duration(r.CompletedOps) } -// Returns average number of read operations that can be performed per second. -func (r *BenchResults) ReadOpsPerSecond() int { - var op = r.ReadOpDuration() +// Returns average number of read/write operations that can be performed per second. +func (r *BenchResults) OpsPerSecond() int { + var op = r.OpDuration() if op == 0 { return 0 } From af055376f67f4a68fa39880774d58c7dc1a7a37d Mon Sep 17 00:00:00 2001 From: Sarthak Date: Fri, 19 Aug 2022 14:18:55 +0000 Subject: [PATCH 017/439] Add example for Bucket.Delete() in README.md The example is similar to the examples for `Bucket.Put()` and `Bucket.Get()` functions, so it does not breaks the layout. Signed-off-by: Sarthak2143 . Co-authored-by: Benjamin Wang Signed-off-by: Cenk Alti --- README.md | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 2be669a60..3ff830499 100644 --- a/README.md +++ b/README.md @@ -336,7 +336,17 @@ exists then it will return its byte slice value. If it doesn't exist then it will return `nil`. It's important to note that you can have a zero-length value set to a key which is different than the key not existing. -Use the `Bucket.Delete()` function to delete a key from the bucket. +Use the `Bucket.Delete()` function to delete a key from the bucket: + +```go +db.Update(func (tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + err := b.Delete([]byte("answer")) + return err +}) +``` + +This will delete the key `answers` from the bucket `MyBucket`. Please note that values returned from `Get()` are only valid while the transaction is open. 
If you need to use a value outside of the transaction From 44cddc6aaf2d7c20d266c01eddd2e6065c82ac51 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 14 Mar 2023 15:38:08 +0800 Subject: [PATCH 018/439] test: run test cases under ./internal/... Signed-off-by: Benjamin Wang --- Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Makefile b/Makefile index 18154c638..c9fab8eeb 100644 --- a/Makefile +++ b/Makefile @@ -25,10 +25,12 @@ lint: test: @echo "hashmap freelist test" TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m + TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./internal/... TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt @echo "array freelist test" TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m + TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./internal/... TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt .PHONY: coverage From 15d561e54fd43b4985e30adbef5b018f34c64576 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 15 Mar 2023 08:44:19 +0800 Subject: [PATCH 019/439] move inode operations(read/write) into package internal/common Signed-off-by: Benjamin Wang --- internal/common/inode.go | 60 ++++++++++++++++++++++++++++++++++++++++ internal/common/page.go | 7 +++++ node.go | 50 ++------------------------------- 3 files changed, 69 insertions(+), 48 deletions(-) diff --git a/internal/common/inode.go b/internal/common/inode.go index f8711cf15..9f99937e7 100644 --- a/internal/common/inode.go +++ b/internal/common/inode.go @@ -1,5 +1,7 @@ package common +import "unsafe" + // Inode represents an internal node inside of a node. // It can be used to point to elements in a page or point // to an element which hasn't been added to a page yet. 
@@ -43,3 +45,61 @@ func (in *Inode) Value() []byte { func (in *Inode) SetValue(value []byte) { in.value = value } + +func ReadInodeFromPage(p *Page) Inodes { + inodes := make(Inodes, int(p.Count())) + isLeaf := p.IsLeafPage() + for i := 0; i < int(p.Count()); i++ { + inode := &inodes[i] + if isLeaf { + elem := p.LeafPageElement(uint16(i)) + inode.SetFlags(elem.Flags()) + inode.SetKey(elem.Key()) + inode.SetValue(elem.Value()) + } else { + elem := p.BranchPageElement(uint16(i)) + inode.SetPgid(elem.Pgid()) + inode.SetKey(elem.Key()) + } + Assert(len(inode.Key()) > 0, "read: zero-length inode key") + } + + return inodes +} + +func WriteInodeToPage(inodes Inodes, p *Page) uint32 { + // Loop over each item and write it to the page. + // off tracks the offset into p of the start of the next data. + off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes)) + isLeaf := p.IsLeafPage() + for i, item := range inodes { + Assert(len(item.Key()) > 0, "write: zero-length inode key") + + // Create a slice to write into of needed size and advance + // byte pointer for next iteration. + sz := len(item.Key()) + len(item.Value()) + b := UnsafeByteSlice(unsafe.Pointer(p), off, 0, sz) + off += uintptr(sz) + + // Write the page element. + if isLeaf { + elem := p.LeafPageElement(uint16(i)) + elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))) + elem.SetFlags(item.Flags()) + elem.SetKsize(uint32(len(item.Key()))) + elem.SetVsize(uint32(len(item.Value()))) + } else { + elem := p.BranchPageElement(uint16(i)) + elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))) + elem.SetKsize(uint32(len(item.Key()))) + elem.SetPgid(item.Pgid()) + Assert(elem.Pgid() != p.Id(), "write: circular dependency occurred") + } + + // Write data for the element to the end of the page. 
+ l := copy(b, item.Key()) + copy(b[l:], item.Value()) + } + + return uint32(off) +} diff --git a/internal/common/page.go b/internal/common/page.go index cd8abf831..504feb8f3 100644 --- a/internal/common/page.go +++ b/internal/common/page.go @@ -162,6 +162,13 @@ func (p *Page) hexdump(n int) { fmt.Fprintf(os.Stderr, "%x\n", buf) } +func (p *Page) PageElementSize() uintptr { + if p.IsLeafPage() { + return LeafPageElementSize + } + return BranchPageElementSize +} + func (p *Page) Id() Pgid { return p.id } diff --git a/node.go b/node.go index 5f3518faf..05504c0c6 100644 --- a/node.go +++ b/node.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "sort" - "unsafe" "go.etcd.io/bbolt/internal/common" ) @@ -163,22 +162,7 @@ func (n *node) del(key []byte) { func (n *node) read(p *common.Page) { n.pgid = p.Id() n.isLeaf = p.IsLeafPage() - n.inodes = make(common.Inodes, int(p.Count())) - - for i := 0; i < int(p.Count()); i++ { - inode := &n.inodes[i] - if n.isLeaf { - elem := p.LeafPageElement(uint16(i)) - inode.SetFlags(elem.Flags()) - inode.SetKey(elem.Key()) - inode.SetValue(elem.Value()) - } else { - elem := p.BranchPageElement(uint16(i)) - inode.SetPgid(elem.Pgid()) - inode.SetKey(elem.Key()) - } - common.Assert(len(inode.Key()) > 0, "read: zero-length inode key") - } + n.inodes = common.ReadInodeFromPage(p) // Save first key, so we can find the node in the parent when we spill. if len(n.inodes) > 0 { @@ -212,37 +196,7 @@ func (n *node) write(p *common.Page) { return } - // Loop over each item and write it to the page. - // off tracks the offset into p of the start of the next data. - off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes)) - for i, item := range n.inodes { - common.Assert(len(item.Key()) > 0, "write: zero-length inode key") - - // Create a slice to write into of needed size and advance - // byte pointer for next iteration. 
- sz := len(item.Key()) + len(item.Value()) - b := common.UnsafeByteSlice(unsafe.Pointer(p), off, 0, sz) - off += uintptr(sz) - - // Write the page element. - if n.isLeaf { - elem := p.LeafPageElement(uint16(i)) - elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))) - elem.SetFlags(item.Flags()) - elem.SetKsize(uint32(len(item.Key()))) - elem.SetVsize(uint32(len(item.Value()))) - } else { - elem := p.BranchPageElement(uint16(i)) - elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))) - elem.SetKsize(uint32(len(item.Key()))) - elem.SetPgid(item.Pgid()) - common.Assert(elem.Pgid() != p.Id(), "write: circular dependency occurred") - } - - // Write data for the element to the end of the page. - l := copy(b, item.Key()) - copy(b[l:], item.Value()) - } + common.WriteInodeToPage(n.inodes, p) // DEBUG ONLY: n.dump() } From 62f5a3f2911c533d6df25c343baa33b44db09b6a Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 15 Mar 2023 08:34:03 +0800 Subject: [PATCH 020/439] cmd: move function copyFile into utils.go Signed-off-by: Benjamin Wang --- cmd/bbolt/surgery_commands.go | 46 ------------------------------- cmd/bbolt/utils.go | 51 +++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 46 deletions(-) create mode 100644 cmd/bbolt/utils.go diff --git a/cmd/bbolt/surgery_commands.go b/cmd/bbolt/surgery_commands.go index ace121fd6..652f21484 100644 --- a/cmd/bbolt/surgery_commands.go +++ b/cmd/bbolt/surgery_commands.go @@ -4,8 +4,6 @@ import ( "errors" "flag" "fmt" - "io" - "os" "strconv" "strings" @@ -64,22 +62,6 @@ func (cmd *surgeryCommand) parsePathsAndCopyFile(fs *flag.FlagSet) error { return errors.New("output file required") } - // Ensure source file exists. - _, err := os.Stat(cmd.srcPath) - if os.IsNotExist(err) { - return ErrFileNotFound - } else if err != nil { - return err - } - - // Ensure output file not exist. 
- _, err = os.Stat(cmd.dstPath) - if err == nil { - return fmt.Errorf("output file %q already exists", cmd.dstPath) - } else if !os.IsNotExist(err) { - return err - } - // Copy database from SrcPath to DstPath if err := copyFile(cmd.srcPath, cmd.dstPath); err != nil { return fmt.Errorf("failed to copy file: %w", err) @@ -88,34 +70,6 @@ func (cmd *surgeryCommand) parsePathsAndCopyFile(fs *flag.FlagSet) error { return nil } -func copyFile(srcPath, dstPath string) error { - srcDB, err := os.Open(srcPath) - if err != nil { - return fmt.Errorf("failed to open source file %q: %w", srcPath, err) - } - defer srcDB.Close() - dstDB, err := os.Create(dstPath) - if err != nil { - return fmt.Errorf("failed to create output file %q: %w", dstPath, err) - } - defer dstDB.Close() - written, err := io.Copy(dstDB, srcDB) - if err != nil { - return fmt.Errorf("failed to copy database file from %q to %q: %w", srcPath, dstPath, err) - } - - srcFi, err := srcDB.Stat() - if err != nil { - return fmt.Errorf("failed to get source file info %q: %w", srcPath, err) - } - initialSize := srcFi.Size() - if initialSize != written { - return fmt.Errorf("the byte copied (%q: %d) isn't equal to the initial db size (%q: %d)", dstPath, written, srcPath, initialSize) - } - - return nil -} - // Usage returns the help message. func (cmd *surgeryCommand) Usage() string { return strings.TrimLeft(` diff --git a/cmd/bbolt/utils.go b/cmd/bbolt/utils.go new file mode 100644 index 000000000..c757e0ce7 --- /dev/null +++ b/cmd/bbolt/utils.go @@ -0,0 +1,51 @@ +package main + +import ( + "fmt" + "io" + "os" +) + +func copyFile(srcPath, dstPath string) error { + // Ensure source file exists. + _, err := os.Stat(srcPath) + if os.IsNotExist(err) { + return ErrFileNotFound + } else if err != nil { + return err + } + + // Ensure output file not exist. 
+ _, err = os.Stat(dstPath) + if err == nil { + return fmt.Errorf("output file %q already exists", dstPath) + } else if !os.IsNotExist(err) { + return err + } + + srcDB, err := os.Open(srcPath) + if err != nil { + return fmt.Errorf("failed to open source file %q: %w", srcPath, err) + } + defer srcDB.Close() + dstDB, err := os.Create(dstPath) + if err != nil { + return fmt.Errorf("failed to create output file %q: %w", dstPath, err) + } + defer dstDB.Close() + written, err := io.Copy(dstDB, srcDB) + if err != nil { + return fmt.Errorf("failed to copy database file from %q to %q: %w", srcPath, dstPath, err) + } + + srcFi, err := srcDB.Stat() + if err != nil { + return fmt.Errorf("failed to get source file info %q: %w", srcPath, err) + } + initialSize := srcFi.Size() + if initialSize != written { + return fmt.Errorf("the byte copied (%q: %d) isn't equal to the initial db size (%q: %d)", dstPath, written, srcPath, initialSize) + } + + return nil +} From c9c264c9d90ee2a371b71615173471ab371d2664 Mon Sep 17 00:00:00 2001 From: mingtingzhang Date: Wed, 15 Mar 2023 00:00:26 -0400 Subject: [PATCH 021/439] modified freelist_hmap/hashmapGetFreePageIDs with better performance Signed-off-by: Cenk Alti --- freelist_hmap.go | 16 ++++++++++++---- freelist_test.go | 49 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+), 4 deletions(-) diff --git a/freelist_hmap.go b/freelist_hmap.go index 6e01bc116..57e1e950b 100644 --- a/freelist_hmap.go +++ b/freelist_hmap.go @@ -80,12 +80,20 @@ func (f *freelist) hashmapGetFreePageIDs() []common.Pgid { } m := make([]common.Pgid, 0, count) - for start, size := range f.forwardMap { - for i := 0; i < int(size); i++ { - m = append(m, start+common.Pgid(i)) + + startPageIds := make([]common.Pgid, 0, len(f.forwardMap)) + for k := range f.forwardMap { + startPageIds = append(startPageIds, k) + } + sort.Sort(common.Pgids(startPageIds)) + + for _, start := range startPageIds { + if size, ok := f.forwardMap[start]; ok { + for 
i := 0; i < int(size); i++ { + m = append(m, start+common.Pgid(i)) + } } } - sort.Sort(common.Pgids(m)) return m } diff --git a/freelist_test.go b/freelist_test.go index 1fffff2ff..ab848dd1d 100644 --- a/freelist_test.go +++ b/freelist_test.go @@ -434,3 +434,52 @@ func newTestFreelist() *freelist { return newFreelist(freelistType) } + +func Test_freelist_hashmapGetFreePageIDs(t *testing.T) { + f := newTestFreelist() + if f.freelistType == common.FreelistArrayType { + t.Skip() + } + + N := int32(100000) + fm := make(map[common.Pgid]uint64) + i := int32(0) + val := int32(0) + for i = 0; i < N; { + val = rand.Int31n(1000) + fm[common.Pgid(i)] = uint64(val) + i += val + } + + f.forwardMap = fm + res := f.hashmapGetFreePageIDs() + + if !sort.SliceIsSorted(res, func(i, j int) bool { return res[i] < res[j] }) { + t.Fatalf("pgids not sorted") + } +} + +func Benchmark_freelist_hashmapGetFreePageIDs(b *testing.B) { + f := newTestFreelist() + if f.freelistType == common.FreelistArrayType { + b.Skip() + } + + N := int32(100000) + fm := make(map[common.Pgid]uint64) + i := int32(0) + val := int32(0) + for i = 0; i < N; { + val = rand.Int31n(1000) + fm[common.Pgid(i)] = uint64(val) + i += val + } + + f.forwardMap = fm + + b.ReportAllocs() + b.ResetTimer() + for n := 0; n < b.N; n++ { + f.hashmapGetFreePageIDs() + } +} From ef2d488d996e0214b9950be1aa781e6224087fd1 Mon Sep 17 00:00:00 2001 From: Bin Guo Date: Wed, 15 Mar 2023 00:22:56 -0400 Subject: [PATCH 022/439] Merge redundant codes in node.rebalance Signed-off-by: Cenk Alti --- node.go | 58 +++++++++++++++++++++------------------------------------ 1 file changed, 21 insertions(+), 37 deletions(-) diff --git a/node.go b/node.go index 5f3518faf..a80fce440 100644 --- a/node.go +++ b/node.go @@ -461,51 +461,35 @@ func (n *node) rebalance() { common.Assert(n.parent.numChildren() > 1, "parent must have at least 2 children") - // Destination node is right sibling if idx == 0, otherwise left sibling. 
- var target *node + // Merge with right sibling if idx == 0, otherwise left sibling. + var leftNode, rightNode *node var useNextSibling = n.parent.childIndex(n) == 0 if useNextSibling { - target = n.nextSibling() + leftNode = n + rightNode = n.nextSibling() } else { - target = n.prevSibling() + leftNode = n.prevSibling() + rightNode = n } - // If both this node and the target node are too small then merge them. - if useNextSibling { - // Reparent all child nodes being moved. - for _, inode := range target.inodes { - if child, ok := n.bucket.nodes[inode.Pgid()]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes from target and remove target. - n.inodes = append(n.inodes, target.inodes...) - n.parent.del(target.key) - n.parent.removeChild(target) - delete(n.bucket.nodes, target.pgid) - target.free() - } else { - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.Pgid()]; ok { - child.parent.removeChild(child) - child.parent = target - child.parent.children = append(child.parent.children, child) - } + // If both nodes are too small then merge them. + // Reparent all child nodes being moved. + for _, inode := range rightNode.inodes { + if child, ok := n.bucket.nodes[inode.Pgid()]; ok { + child.parent.removeChild(child) + child.parent = leftNode + child.parent.children = append(child.parent.children, child) } - - // Copy over inodes to target and remove node. - target.inodes = append(target.inodes, n.inodes...) - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() } - // Either this node or the target node was deleted from the parent so rebalance it. + // Copy over inodes from right node to left node and remove right node. + leftNode.inodes = append(leftNode.inodes, rightNode.inodes...) 
+ n.parent.del(rightNode.key) + n.parent.removeChild(rightNode) + delete(n.bucket.nodes, rightNode.pgid) + rightNode.free() + + // Either this node or the sibling node was deleted from the parent so rebalance it. n.parent.rebalance() } From f7d0ed5185290b60973dae3d6f9cac123bb26547 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 15 Mar 2023 15:19:24 +0800 Subject: [PATCH 023/439] bbolt: remove `CheckWithOptions` and add variadic parameter options to method Check Signed-off-by: Benjamin Wang --- cmd/bbolt/main.go | 2 +- tx_check.go | 9 +++------ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index ead1d0d77..e6b478015 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -206,7 +206,7 @@ func (cmd *checkCommand) Run(args ...string) error { // Perform consistency check. return db.View(func(tx *bolt.Tx) error { var count int - for err := range tx.CheckWithOptions(bolt.WithKVStringer(CmdKvStringer())) { + for err := range tx.Check(bolt.WithKVStringer(CmdKvStringer())) { fmt.Fprintln(cmd.Stdout, err) count++ } diff --git a/tx_check.go b/tx_check.go index ee72cda22..cc08013e8 100644 --- a/tx_check.go +++ b/tx_check.go @@ -15,13 +15,10 @@ import ( // because of caching. This overhead can be removed if running on a read-only // transaction, however, it is not safe to execute other writer transactions at // the same time. -func (tx *Tx) Check() <-chan error { - return tx.CheckWithOptions() -} - -// CheckWithOptions allows users to provide a customized `KVStringer` implementation, +// +// It also allows users to provide a customized `KVStringer` implementation, // so that bolt can generate human-readable diagnostic messages. 
-func (tx *Tx) CheckWithOptions(options ...CheckOption) <-chan error { +func (tx *Tx) Check(options ...CheckOption) <-chan error { chkConfig := checkConfig{ kvStringer: HexKVStringer(), } From 44d4ec54162fd8f1dab054cdf6b54c36267a3f1d Mon Sep 17 00:00:00 2001 From: Thomas Jungblut Date: Fri, 17 Mar 2023 12:15:02 +0100 Subject: [PATCH 024/439] fixing small bench errors BenchResult locking was missing and when the read test was faster than a second it would fail on a different count than expected. Signed-off-by: Thomas Jungblut --- cmd/bbolt/main.go | 77 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 56 insertions(+), 21 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index e6b478015..9db26289f 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -15,6 +15,7 @@ import ( "runtime/pprof" "strconv" "strings" + "sync" "time" "unicode" "unicode/utf8" @@ -1091,13 +1092,13 @@ func (cmd *benchCommand) Run(args ...string) error { defer db.Close() // Write to the database. - writeResults := BenchResults{int64(0), 0} + var writeResults BenchResults fmt.Fprintf(cmd.Stderr, "starting write benchmark.\n") if err := cmd.runWrites(db, options, &writeResults); err != nil { return fmt.Errorf("write: %v", err) } - readResults := BenchResults{int64(0), 0} + var readResults BenchResults fmt.Fprintf(cmd.Stderr, "starting read benchmark.\n") // Read from the database. if err := cmd.runReads(db, options, &readResults); err != nil { @@ -1105,8 +1106,8 @@ func (cmd *benchCommand) Run(args ...string) error { } // Print results. 
- fmt.Fprintf(os.Stderr, "# Write\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", writeResults.CompletedOps, writeResults.Duration, writeResults.OpDuration(), writeResults.OpsPerSecond()) - fmt.Fprintf(os.Stderr, "# Read\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", readResults.CompletedOps, readResults.Duration, readResults.OpDuration(), readResults.OpsPerSecond()) + fmt.Fprintf(os.Stderr, "# Write\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", writeResults.CompletedOps(), writeResults.Duration(), writeResults.OpDuration(), writeResults.OpsPerSecond()) + fmt.Fprintf(os.Stderr, "# Read\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", readResults.CompletedOps(), readResults.Duration(), readResults.OpDuration(), readResults.OpsPerSecond()) fmt.Fprintln(os.Stderr, "") return nil } @@ -1186,7 +1187,7 @@ func (cmd *benchCommand) runWrites(db *bolt.DB, options *BenchOptions, results * } // Save time to write. - results.Duration = time.Since(t) + results.SetDuration(time.Since(t)) // Stop profiling for writes only. if options.ProfileMode == "w" { @@ -1239,7 +1240,7 @@ func (cmd *benchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, }); err != nil { return err } else { - results.CompletedOps += options.BatchSize + results.AddCompletedOps(options.BatchSize) } } return nil @@ -1282,7 +1283,7 @@ func (cmd *benchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOp }); err != nil { return err } else { - results.CompletedOps += options.BatchSize + results.AddCompletedOps(options.BatchSize) } } return nil @@ -1315,7 +1316,7 @@ func (cmd *benchCommand) runReads(db *bolt.DB, options *BenchOptions, results *B } // Save read time. - results.Duration = time.Since(t) + results.SetDuration(time.Since(t)) // Stop profiling for reads. 
if options.ProfileMode == "rw" || options.ProfileMode == "r" { @@ -1330,19 +1331,21 @@ func (cmd *benchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, t := time.Now() for { - + numReads := int64(0) c := tx.Bucket(benchBucketName).Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { + numReads++ if v == nil { return errors.New("invalid value") } - results.CompletedOps++ } - if options.WriteMode == "seq" && results.CompletedOps != options.Iterations { - return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, results.CompletedOps) + if options.WriteMode == "seq" && numReads != options.Iterations { + return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, numReads) } + results.AddCompletedOps(numReads) + // Make sure we do this for at least a second. if time.Since(t) >= time.Second { break @@ -1358,15 +1361,16 @@ func (cmd *benchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOpt t := time.Now() for { + numReads := int64(0) var top = tx.Bucket(benchBucketName) if err := top.ForEach(func(name, _ []byte) error { if b := top.Bucket(name); b != nil { c := b.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { + numReads++ if v == nil { return ErrInvalidValue } - results.CompletedOps++ } } return nil @@ -1374,10 +1378,12 @@ func (cmd *benchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOpt return err } - if options.WriteMode == "seq-nest" && results.CompletedOps != options.Iterations { - return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, results.CompletedOps) + if options.WriteMode == "seq-nest" && numReads != options.Iterations { + return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, numReads) } + results.AddCompletedOps(numReads) + // Make sure we do this for at least a second. 
if time.Since(t) >= time.Second { break @@ -1396,7 +1402,7 @@ func checkProgress(results *BenchResults, finishChan chan interface{}, stderr io case <-finishChan: return case t := <-ticker: - completed, taken := results.CompletedOps, t.Sub(lastTime) + completed, taken := results.CompletedOps(), t.Sub(lastTime) fmt.Fprintf(stderr, "Completed %d requests, %d/s \n", completed, ((completed-lastCompleted)*int64(time.Second))/int64(taken), ) @@ -1494,18 +1500,47 @@ type BenchOptions struct { Path string } -// BenchResults represents the performance results of the benchmark. +// BenchResults represents the performance results of the benchmark and is thread-safe. type BenchResults struct { - CompletedOps int64 - Duration time.Duration + m sync.Mutex + completedOps int64 + duration time.Duration +} + +func (r *BenchResults) AddCompletedOps(amount int64) { + r.m.Lock() + defer r.m.Unlock() + + r.completedOps += amount +} + +func (r *BenchResults) CompletedOps() int64 { + r.m.Lock() + defer r.m.Unlock() + + return r.completedOps +} + +func (r *BenchResults) SetDuration(dur time.Duration) { + r.m.Lock() + defer r.m.Unlock() + + r.duration = dur +} + +func (r *BenchResults) Duration() time.Duration { + r.m.Lock() + defer r.m.Unlock() + + return r.duration } // Returns the duration for a single read/write operation. func (r *BenchResults) OpDuration() time.Duration { - if r.CompletedOps == 0 { + if r.CompletedOps() == 0 { return 0 } - return r.Duration / time.Duration(r.CompletedOps) + return r.Duration() / time.Duration(r.CompletedOps()) } // Returns average number of read/write operations that can be performed per second. From 3914515d4f1989390208bd3315c396588cb166c8 Mon Sep 17 00:00:00 2001 From: Thomas Jungblut Date: Fri, 17 Mar 2023 16:36:13 +0100 Subject: [PATCH 025/439] Add test for the bench command Also making sure the stdio buffers are thread safe. 
Signed-off-by: Thomas Jungblut --- cmd/bbolt/main.go | 6 ++-- cmd/bbolt/main_test.go | 68 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 68 insertions(+), 6 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 9db26289f..336b5b458 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -1106,9 +1106,9 @@ func (cmd *benchCommand) Run(args ...string) error { } // Print results. - fmt.Fprintf(os.Stderr, "# Write\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", writeResults.CompletedOps(), writeResults.Duration(), writeResults.OpDuration(), writeResults.OpsPerSecond()) - fmt.Fprintf(os.Stderr, "# Read\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", readResults.CompletedOps(), readResults.Duration(), readResults.OpDuration(), readResults.OpsPerSecond()) - fmt.Fprintln(os.Stderr, "") + fmt.Fprintf(cmd.Stderr, "# Write\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", writeResults.CompletedOps(), writeResults.Duration(), writeResults.OpDuration(), writeResults.OpsPerSecond()) + fmt.Fprintf(cmd.Stderr, "# Read\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", readResults.CompletedOps(), readResults.Duration(), readResults.OpDuration(), readResults.OpsPerSecond()) + fmt.Fprintln(cmd.Stderr, "") return nil } diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index 7d0cfd249..c6ac96f27 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -9,6 +9,8 @@ import ( "math/rand" "os" "strconv" + "strings" + "sync" "testing" "go.etcd.io/bbolt/internal/btesting" @@ -286,12 +288,72 @@ func TestPagesCommand_Run(t *testing.T) { require.NoError(t, err) } +// Ensure the "bench" command runs and exits without errors +func TestBenchCommand_Run(t *testing.T) { + tests := map[string]struct { + args []string + }{ + "no-args": {}, + "100k count": {[]string{"-count", "100000"}}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + // Run the command. + m := NewMain() + args := append([]string{"bench"}, test.args...) 
+ if err := m.Run(args...); err != nil { + t.Fatal(err) + } + + stderr := m.Stderr.String() + if !strings.Contains(stderr, "starting write benchmark.") || !strings.Contains(stderr, "starting read benchmark.") { + t.Fatal(fmt.Errorf("benchmark result does not contain read/write start output:\n%s", stderr)) + } + + if strings.Contains(stderr, "iter mismatch") { + t.Fatal(fmt.Errorf("found iter mismatch in stdout:\n%s", stderr)) + } + + if !strings.Contains(stderr, "# Write") || !strings.Contains(stderr, "# Read") { + t.Fatal(fmt.Errorf("benchmark result does not contain read/write output:\n%s", stderr)) + } + }) + } +} + +type ConcurrentBuffer struct { + m sync.Mutex + buf bytes.Buffer +} + +func (b *ConcurrentBuffer) Read(p []byte) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + + return b.buf.Read(p) +} + +func (b *ConcurrentBuffer) Write(p []byte) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + + return b.buf.Write(p) +} + +func (b *ConcurrentBuffer) String() string { + b.m.Lock() + defer b.m.Unlock() + + return b.buf.String() +} + // Main represents a test wrapper for main.Main that records output. type Main struct { *main.Main - Stdin bytes.Buffer - Stdout bytes.Buffer - Stderr bytes.Buffer + Stdin ConcurrentBuffer + Stdout ConcurrentBuffer + Stderr ConcurrentBuffer } // NewMain returns a new instance of Main. From b91d901b74231a6ad0ba170fd001eb5eabfe1349 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Mon, 20 Mar 2023 11:00:38 +0800 Subject: [PATCH 026/439] test: perform check in view(readonly) mode Signed-off-by: Benjamin Wang --- internal/btesting/btesting.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/btesting/btesting.go b/internal/btesting/btesting.go index b5b814526..e9ef64b3f 100644 --- a/internal/btesting/btesting.go +++ b/internal/btesting/btesting.go @@ -117,7 +117,7 @@ func (db *DB) MustReopen() { // MustCheck runs a consistency check on the database and panics if any errors are found. 
func (db *DB) MustCheck() { - err := db.Update(func(tx *bolt.Tx) error { + err := db.View(func(tx *bolt.Tx) error { // Collect all the errors. var errors []error for err := range tx.Check() { From c00862aa7c8299b6248c8ad176c75a4bd78fcc9a Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Mon, 20 Mar 2023 15:34:31 +0800 Subject: [PATCH 027/439] test: add failpoint for mlock operation Signed-off-by: Benjamin Wang --- db.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/db.go b/db.go index 5e125d64d..d89cd8ca9 100644 --- a/db.go +++ b/db.go @@ -517,6 +517,8 @@ func (db *DB) mmapSize(size int) (int, error) { } func (db *DB) munlock(fileSize int) error { + // gofail: var munlockError string + // return errors.New(munlockError) if err := munlock(db, fileSize); err != nil { return fmt.Errorf("munlock error: " + err.Error()) } @@ -524,6 +526,8 @@ func (db *DB) munlock(fileSize int) error { } func (db *DB) mlock(fileSize int) error { + // gofail: var mlockError string + // return errors.New(mlockError) if err := mlock(db, fileSize); err != nil { return fmt.Errorf("mlock error: " + err.Error()) } From 360067ce9ee3f7c79d0213e7c39570f7e5b081db Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Mar 2023 15:06:32 +0000 Subject: [PATCH 028/439] Bump actions/setup-go from 3 to 4 Bumps [actions/setup-go](https://github.com/actions/setup-go) from 3 to 4. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/tests.yaml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index d8d556575..c972f3804 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -9,7 +9,7 @@ jobs: runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: go-version: "1.19.7" - run: | diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 4326d40f7..d399ff3fb 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: go-version: "1.19.7" - run: make fmt @@ -64,7 +64,7 @@ jobs: runs-on: windows-latest steps: - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: go-version: "1.19.7" - run: make fmt @@ -92,7 +92,7 @@ jobs: runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v3 - - uses: actions/setup-go@v3 + - uses: actions/setup-go@v4 with: go-version: "1.17.13" - run: make coverage From d8ac759d4f07c97aaf33927ea582461bc9972d22 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 21 Mar 2023 16:05:59 +0800 Subject: [PATCH 029/439] test: add test cases to similate mlock failure Signed-off-by: Benjamin Wang --- tests/failpoint/db_failpoint_test.go | 48 ++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/tests/failpoint/db_failpoint_test.go b/tests/failpoint/db_failpoint_test.go index ae900b229..5df3dc06e 100644 --- a/tests/failpoint/db_failpoint_test.go +++ b/tests/failpoint/db_failpoint_test.go @@ -1,6 +1,8 @@ package failpoint import ( + "fmt" + "go.etcd.io/bbolt/internal/btesting" "path/filepath" "testing" "time" @@ -46,3 +48,49 @@ func 
TestFailpoint_UnmapFail_DbClose(t *testing.T) { err = db.Close() require.NoError(t, err) } + +func TestFailpoint_mLockFail(t *testing.T) { + err := gofail.Enable("mlockError", `return("mlock somehow failed")`) + require.NoError(t, err) + + f := filepath.Join(t.TempDir(), "db") + _, err = bolt.Open(f, 0666, &bolt.Options{Mlock: true}) + require.Error(t, err) + require.ErrorContains(t, err, "mlock somehow failed") + + // It should work after disabling the failpoint. + err = gofail.Disable("mlockError") + require.NoError(t, err) + + _, err = bolt.Open(f, 0666, &bolt.Options{Mlock: true}) + require.NoError(t, err) +} + +func TestFailpoint_mLockFail_When_remap(t *testing.T) { + db := btesting.MustCreateDB(t) + db.Mlock = true + + err := gofail.Enable("mlockError", `return("mlock somehow failed in allocate")`) + require.NoError(t, err) + + err = db.Fill([]byte("data"), 1, 10000, + func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) }, + func(tx int, k int) []byte { return make([]byte, 100) }, + ) + + require.Error(t, err) + require.ErrorContains(t, err, "mlock somehow failed in allocate") + + // It should work after disabling the failpoint. 
+ err = gofail.Disable("mlockError") + require.NoError(t, err) + db.MustClose() + db.MustReopen() + + err = db.Fill([]byte("data"), 1, 10000, + func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) }, + func(tx int, k int) []byte { return make([]byte, 100) }, + ) + + require.NoError(t, err) +} From b2f3dd5dba77717524c5316b228880d7605ab64d Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 21 Mar 2023 16:35:22 +0800 Subject: [PATCH 030/439] Perform unmap when failing to mlock or both meta pages corrupted Signed-off-by: Benjamin Wang --- db.go | 16 +++++++++++++--- tests/failpoint/db_failpoint_test.go | 2 +- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/db.go b/db.go index d89cd8ca9..b21fa3b33 100644 --- a/db.go +++ b/db.go @@ -388,7 +388,7 @@ func (db *DB) hasSyncedFreelist() bool { // mmap opens the underlying memory-mapped file and initializes the meta references. // minsz is the minimum size that the new mmap can be. -func (db *DB) mmap(minsz int) error { +func (db *DB) mmap(minsz int) (err error) { db.mmaplock.Lock() defer db.mmaplock.Unlock() @@ -423,17 +423,27 @@ func (db *DB) mmap(minsz int) error { } // Unmap existing data before continuing. - if err := db.munmap(); err != nil { + if err = db.munmap(); err != nil { return err } // Memory-map the data file as a byte slice. // gofail: var mapError string // return errors.New(mapError) - if err := mmap(db, size); err != nil { + if err = mmap(db, size); err != nil { return err } + // Perform unmmap on any error to reset all data fields: + // dataref, data, datasz, meta0 and meta1. 
+ defer func() { + if err != nil { + if unmapErr := db.munmap(); unmapErr != nil { + err = fmt.Errorf("%w; unmap failed: %v", err, unmapErr) + } + } + }() + if db.Mlock { // Don't allow swapping of data file if err := db.mlock(fileSize); err != nil { diff --git a/tests/failpoint/db_failpoint_test.go b/tests/failpoint/db_failpoint_test.go index 5df3dc06e..ef7d7ca63 100644 --- a/tests/failpoint/db_failpoint_test.go +++ b/tests/failpoint/db_failpoint_test.go @@ -2,7 +2,6 @@ package failpoint import ( "fmt" - "go.etcd.io/bbolt/internal/btesting" "path/filepath" "testing" "time" @@ -10,6 +9,7 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" + "go.etcd.io/bbolt/internal/btesting" gofail "go.etcd.io/gofail/runtime" ) From e2c42548f296602809b1ffda300d7941186197dd Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 23 Mar 2023 19:51:08 +0800 Subject: [PATCH 031/439] update the error message when rollback unmap also fails Signed-off-by: Benjamin Wang --- db.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/db.go b/db.go index b21fa3b33..0e364e198 100644 --- a/db.go +++ b/db.go @@ -439,7 +439,7 @@ func (db *DB) mmap(minsz int) (err error) { defer func() { if err != nil { if unmapErr := db.munmap(); unmapErr != nil { - err = fmt.Errorf("%w; unmap failed: %v", err, unmapErr) + err = fmt.Errorf("%w; rollback unmap also failed: %v", err, unmapErr) } } }() From b5a219fd29f99e6d1de7a3013f40babb7c683460 Mon Sep 17 00:00:00 2001 From: Cenk Alti Date: Thu, 23 Mar 2023 21:37:19 -0400 Subject: [PATCH 032/439] Fix progress reporting in bench cmd Signed-off-by: Cenk Alti --- cmd/bbolt/main.go | 43 ++++++++++++++++--------------------------- 1 file changed, 16 insertions(+), 27 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 336b5b458..7afb059f6 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -15,7 +15,7 @@ import ( "runtime/pprof" "strconv" "strings" - "sync" + "sync/atomic" "time" "unicode" "unicode/utf8" @@ 
-1223,6 +1223,7 @@ func (cmd *benchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, b, _ := tx.CreateBucketIfNotExists(benchBucketName) b.FillPercent = options.FillPercent + fmt.Fprintf(cmd.Stderr, "Starting write iteration %d\n", i) for j := int64(0); j < options.BatchSize; j++ { key := make([]byte, options.KeySize) value := make([]byte, options.ValueSize) @@ -1234,13 +1235,14 @@ func (cmd *benchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, if err := b.Put(key, value); err != nil { return err } + + results.AddCompletedOps(1) } + fmt.Fprintf(cmd.Stderr, "Finished write iteration %d\n", i) return nil }); err != nil { return err - } else { - results.AddCompletedOps(options.BatchSize) } } return nil @@ -1266,6 +1268,7 @@ func (cmd *benchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOp } b.FillPercent = options.FillPercent + fmt.Fprintf(cmd.Stderr, "Starting write iteration %d\n", i) for j := int64(0); j < options.BatchSize; j++ { var key = make([]byte, options.KeySize) var value = make([]byte, options.ValueSize) @@ -1277,13 +1280,14 @@ func (cmd *benchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOp if err := b.Put(key, value); err != nil { return err } + + results.AddCompletedOps(1) } + fmt.Fprintf(cmd.Stderr, "Finished write iteration %d\n", i) return nil }); err != nil { return err - } else { - results.AddCompletedOps(options.BatchSize) } } return nil @@ -1335,6 +1339,7 @@ func (cmd *benchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, c := tx.Bucket(benchBucketName).Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { numReads++ + results.AddCompletedOps(1) if v == nil { return errors.New("invalid value") } @@ -1344,8 +1349,6 @@ func (cmd *benchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, numReads) } - results.AddCompletedOps(numReads) - // Make sure we do this for at 
least a second. if time.Since(t) >= time.Second { break @@ -1368,6 +1371,7 @@ func (cmd *benchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOpt c := b.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { numReads++ + results.AddCompletedOps(1) if v == nil { return ErrInvalidValue } @@ -1382,8 +1386,6 @@ func (cmd *benchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOpt return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, numReads) } - results.AddCompletedOps(numReads) - // Make sure we do this for at least a second. if time.Since(t) >= time.Second { break @@ -1502,37 +1504,24 @@ type BenchOptions struct { // BenchResults represents the performance results of the benchmark and is thread-safe. type BenchResults struct { - m sync.Mutex completedOps int64 - duration time.Duration + duration int64 } func (r *BenchResults) AddCompletedOps(amount int64) { - r.m.Lock() - defer r.m.Unlock() - - r.completedOps += amount + atomic.AddInt64(&r.completedOps, amount) } func (r *BenchResults) CompletedOps() int64 { - r.m.Lock() - defer r.m.Unlock() - - return r.completedOps + return atomic.LoadInt64(&r.completedOps) } func (r *BenchResults) SetDuration(dur time.Duration) { - r.m.Lock() - defer r.m.Unlock() - - r.duration = dur + atomic.StoreInt64(&r.duration, int64(dur)) } func (r *BenchResults) Duration() time.Duration { - r.m.Lock() - defer r.m.Unlock() - - return r.duration + return time.Duration(atomic.LoadInt64(&r.duration)) } // Returns the duration for a single read/write operation. 
From 9832aff38a922c491f2e9934fe1d47b75f68a86e Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 15 Mar 2023 16:41:12 +0800 Subject: [PATCH 033/439] internal: add function ClearElements in surgeon package Signed-off-by: Benjamin Wang --- internal/guts_cli/guts_cli.go | 4 ++ internal/surgeon/surgeon.go | 93 +++++++++++++++++++++++++++++++++-- 2 files changed, 94 insertions(+), 3 deletions(-) diff --git a/internal/guts_cli/guts_cli.go b/internal/guts_cli/guts_cli.go index 891ddb7b4..20b74b081 100644 --- a/internal/guts_cli/guts_cli.go +++ b/internal/guts_cli/guts_cli.go @@ -50,6 +50,10 @@ func ReadPage(path string, pageID uint64) (*common.Page, []byte, error) { return nil, nil, fmt.Errorf("error: %w, Page claims to have %d overflow pages (>=hwm=%d). Interrupting to avoid risky OOM", ErrCorrupt, overflowN, hwm) } + if overflowN == 0 { + return p, buf, nil + } + // Re-read entire Page (with overflow) into buffer. buf = make([]byte, (uint64(overflowN)+1)*pageSize) if n, err := f.ReadAt(buf, int64(pageID*pageSize)); err != nil { diff --git a/internal/surgeon/surgeon.go b/internal/surgeon/surgeon.go index d2220a276..d2c7be876 100644 --- a/internal/surgeon/surgeon.go +++ b/internal/surgeon/surgeon.go @@ -2,6 +2,7 @@ package surgeon import ( "fmt" + "go.etcd.io/bbolt/internal/common" "go.etcd.io/bbolt/internal/guts_cli" ) @@ -16,19 +17,105 @@ func CopyPage(path string, srcPage common.Pgid, target common.Pgid) error { } func ClearPage(path string, pgId common.Pgid) error { + return ClearPageElements(path, pgId, 0, -1) +} + +func ClearPageElements(path string, pgId common.Pgid, start, end int) error { // Read the page p, buf, err := guts_cli.ReadPage(path, uint64(pgId)) if err != nil { return fmt.Errorf("ReadPage failed: %w", err) } - // Update and rewrite the page - p.SetCount(0) - p.SetOverflow(0) + if !p.IsLeafPage() && !p.IsBranchPage() { + return fmt.Errorf("can't clear elements in %q page", p.Typ()) + } + + elementCnt := int(p.Count()) + + if elementCnt == 0 { + 
return nil + } + + if start < 0 || start >= elementCnt { + return fmt.Errorf("the start index (%d) is out of range [0, %d)", start, elementCnt) + } + + if (end < 0 || end > elementCnt) && end != -1 { + return fmt.Errorf("the end index (%d) is out of range [0, %d]", end, elementCnt) + } + + if start > end && end != -1 { + return fmt.Errorf("the start index (%d) is bigger than the end index (%d)", start, end) + } + + if start == end { + return fmt.Errorf("invalid: the start index (%d) is equal to the end index (%d)", start, end) + } + + preOverflow := p.Overflow() + + if end == int(p.Count()) || end == -1 { + p.SetCount(uint16(start)) + p.SetOverflow(0) + if preOverflow != 0 || p.IsBranchPage() { + if err := clearFreelist(path); err != nil { + return err + } + } + } else { + inodes := common.ReadInodeFromPage(p) + inodes = append(inodes[:start], inodes[end:]...) + + p.SetCount(uint16(len(inodes))) + dataWritten := common.WriteInodeToPage(inodes, p) + + pageSize, _, err := guts_cli.ReadPageAndHWMSize(path) + if err != nil { + return fmt.Errorf("ReadPageAndHWMSize failed: %w", err) + } + if dataWritten%uint32(pageSize) == 0 { + p.SetOverflow(dataWritten/uint32(pageSize) - 1) + } else { + p.SetOverflow(dataWritten / uint32(pageSize)) + } + } + if err := guts_cli.WritePage(path, buf); err != nil { return fmt.Errorf("WritePage failed: %w", err) } + if preOverflow != p.Overflow() || p.IsBranchPage() { + return clearFreelist(path) + } + + return nil +} + +func clearFreelist(path string) error { + if err := clearFreelistAt(path, 0); err != nil { + return fmt.Errorf("clearFreelist on meta page 0 failed: %w", err) + } + if err := clearFreelistAt(path, 1); err != nil { + return fmt.Errorf("clearFreelist on meta page 1 failed: %w", err) + } + return nil +} + +func clearFreelistAt(path string, pageId uint64) error { + _, buf, err := guts_cli.ReadPage(path, pageId) + if err != nil { + return fmt.Errorf("ReadPage %d failed: %w", pageId, err) + } + + meta := common.LoadPageMeta(buf) 
+ meta.SetFreelist(common.PgidNoFreelist) + meta.SetChecksum(meta.Sum64()) + + if err := guts_cli.WritePage(path, buf); err != nil { + return fmt.Errorf("WritePage %d failed: %w", pageId, err) + } + return nil } From 3c3da590b120ac548d8543162160c543522905b1 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sun, 19 Mar 2023 09:18:53 +0800 Subject: [PATCH 034/439] CMD: add cobra style 'surgery clear-page-elements' command Signed-off-by: Benjamin Wang --- cmd/bbolt/command_root.go | 24 ++++++++++ cmd/bbolt/command_surgery_cobra.go | 74 ++++++++++++++++++++++++++++++ cmd/bbolt/main.go | 14 ++++++ go.mod | 3 ++ go.sum | 9 +++- 5 files changed, 123 insertions(+), 1 deletion(-) create mode 100644 cmd/bbolt/command_root.go create mode 100644 cmd/bbolt/command_surgery_cobra.go diff --git a/cmd/bbolt/command_root.go b/cmd/bbolt/command_root.go new file mode 100644 index 000000000..b960df898 --- /dev/null +++ b/cmd/bbolt/command_root.go @@ -0,0 +1,24 @@ +package main + +import ( + "github.com/spf13/cobra" +) + +const ( + cliName = "bbolt" + cliDescription = "A simple command line tool for inspecting bbolt databases" +) + +func NewRootCommand() *cobra.Command { + rootCmd := &cobra.Command{ + Use: cliName, + Short: cliDescription, + Version: "dev", + } + + rootCmd.AddCommand( + newSurgeryCobraCommand(), + ) + + return rootCmd +} diff --git a/cmd/bbolt/command_surgery_cobra.go b/cmd/bbolt/command_surgery_cobra.go new file mode 100644 index 000000000..b72cc6f14 --- /dev/null +++ b/cmd/bbolt/command_surgery_cobra.go @@ -0,0 +1,74 @@ +package main + +import ( + "errors" + "fmt" + "os" + + "github.com/spf13/cobra" + + "go.etcd.io/bbolt/internal/common" + "go.etcd.io/bbolt/internal/surgeon" +) + +var ( + surgeryTargetDBFilePath string + surgeryPageId uint64 + surgeryStartElementIdx int + surgeryEndElementIdx int +) + +func newSurgeryCobraCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "surgery ", + Short: "surgery related commands", + } + + 
cmd.AddCommand(newSurgeryClearPageElementsCommand()) + + return cmd +} + +func newSurgeryClearPageElementsCommand() *cobra.Command { + clearElementCmd := &cobra.Command{ + Use: "clear-page-elements [options]", + Short: "Clears elements from the given page", + Args: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("db file path not provided") + } + if len(args) > 1 { + return errors.New("too many arguments") + } + return nil + }, + + RunE: surgeryClearPageElementFunc, + } + + clearElementCmd.Flags().StringVar(&surgeryTargetDBFilePath, "output", "", "path to the target db file") + clearElementCmd.Flags().Uint64VarP(&surgeryPageId, "pageId", "", 0, "page id") + clearElementCmd.Flags().IntVarP(&surgeryStartElementIdx, "from", "", 0, "start element index (included) to clear, starting from 0") + clearElementCmd.Flags().IntVarP(&surgeryEndElementIdx, "to", "", 0, "end element index (excluded) to clear, starting from 0, -1 means to the end of page") + + return clearElementCmd +} + +func surgeryClearPageElementFunc(cmd *cobra.Command, args []string) error { + srcDBPath := args[0] + + if err := copyFile(srcDBPath, surgeryTargetDBFilePath); err != nil { + return fmt.Errorf("[clear-page-element] copy file failed: %w", err) + } + + if surgeryPageId < 2 { + return fmt.Errorf("the pageId must be at least 2, but got %d", surgeryPageId) + } + + if err := surgeon.ClearPageElements(surgeryTargetDBFilePath, common.Pgid(surgeryPageId), surgeryStartElementIdx, surgeryEndElementIdx); err != nil { + return fmt.Errorf("clear-page-element command failed: %w", err) + } + + fmt.Fprintf(os.Stdout, "All elements in [%d, %d) in page %d were cleared\n", surgeryStartElementIdx, surgeryEndElementIdx, surgeryPageId) + return nil +} diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 7afb059f6..e84b31bf7 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -60,12 +60,26 @@ func main() { m := NewMain() if err := m.Run(os.Args[1:]...); err == ErrUsage 
{ os.Exit(2) + } else if err == ErrUnknownCommand { + execute() } else if err != nil { fmt.Println(err.Error()) os.Exit(1) } } +func execute() { + rootCmd := NewRootCommand() + if err := rootCmd.Execute(); err != nil { + if rootCmd.SilenceErrors { + fmt.Fprintln(os.Stderr, "Error:", err) + os.Exit(1) + } else { + os.Exit(1) + } + } +} + type baseCommand struct { Stdin io.Reader Stdout io.Writer diff --git a/go.mod b/go.mod index 3602e0697..4506b5af8 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module go.etcd.io/bbolt go 1.19 require ( + github.com/spf13/cobra v1.6.1 github.com/stretchr/testify v1.8.2 go.etcd.io/gofail v0.1.0 golang.org/x/sys v0.6.0 @@ -10,6 +11,8 @@ require ( require ( github.com/davecgh/go-spew v1.1.1 // indirect + github.com/inconshreveable/mousetrap v1.0.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index ed1ee3861..529183910 100644 --- a/go.sum +++ b/go.sum @@ -1,14 +1,21 @@ +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= +github.com/spf13/cobra v1.6.1/go.mod 
h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= From 95576a4f10b51e2e0383fe07b528804844e1f79c Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sun, 19 Mar 2023 09:20:13 +0800 Subject: [PATCH 035/439] CMD: add test cases for the 'surgery clear-page-elements' command Signed-off-by: Benjamin Wang --- cmd/bbolt/command_surgery_cobra_test.go | 184 ++++++++++++++++++++++++ 1 file changed, 184 insertions(+) create mode 100644 cmd/bbolt/command_surgery_cobra_test.go diff --git a/cmd/bbolt/command_surgery_cobra_test.go b/cmd/bbolt/command_surgery_cobra_test.go new file mode 100644 index 000000000..1b054333c --- /dev/null +++ b/cmd/bbolt/command_surgery_cobra_test.go @@ -0,0 +1,184 @@ +package main_test + +import ( + "fmt" + "math/rand" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + bolt "go.etcd.io/bbolt" + main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/internal/btesting" + "go.etcd.io/bbolt/internal/guts_cli" +) + +func TestSurgery_ClearPageElement(t *testing.T) { + 
testCases := []struct { + name string + from int + to int + setEndIdxAsCount bool + removeOnlyOneElement bool // only valid when setEndIdxAsCount == true, and startIdx = endIdx -1 in this case. + expectError bool + }{ + // normal range + { + name: "normal range: [4, 8)", + from: 4, + to: 8, + }, + { + name: "normal range: [5, -1)", + from: 4, + to: -1, + }, + { + name: "normal range: all", + from: 0, + to: -1, + }, + { + name: "normal range: [0, 7)", + from: 0, + to: 7, + }, + { + name: "normal range: [3, count)", + from: 4, + setEndIdxAsCount: true, + }, + // remove only one element + { + name: "one element: the first one", + from: 0, + to: 1, + }, + { + name: "one element: [6, 7)", + from: 6, + to: 7, + }, + { + name: "one element: the last one", + setEndIdxAsCount: true, + removeOnlyOneElement: true, + }, + // abnormal range + { + name: "abnormal range: [-1, 4)", + from: -1, + to: 4, + expectError: true, + }, + { + name: "abnormal range: [-2, 5)", + from: -1, + to: 5, + expectError: true, + }, + { + name: "abnormal range: [3, 3)", + from: 3, + to: 3, + expectError: true, + }, + { + name: "abnormal range: [5, 3)", + from: 5, + to: 3, + expectError: true, + }, + { + name: "abnormal range: [3, -2)", + from: 3, + to: -2, + expectError: true, + }, + { + name: "abnormal range: [3, 1000000)", + from: -1, + to: 4, + expectError: true, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + testSurgeryClearPageElement(t, tc.from, tc.to, tc.setEndIdxAsCount, tc.removeOnlyOneElement, tc.expectError) + }) + } +} + +func testSurgeryClearPageElement(t *testing.T, startIdx, endIdx int, setEndIdxAsCount, removeOnlyOne, expectError bool) { + pageSize := 4096 + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize}) + srcPath := db.Path() + + // Generate sample db + t.Log("Generate some sample data") + err := db.Fill([]byte("data"), 10, 200, + func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", tx*10000+k)) }, + 
func(tx int, k int) []byte { return make([]byte, 10) }, + ) + require.NoError(t, err) + + defer requireDBNoChange(t, dbData(t, srcPath), srcPath) + + // find a page with at least 10 elements + var ( + pageId uint64 = 2 + elementCount uint16 = 0 + ) + for { + p, _, err := guts_cli.ReadPage(srcPath, pageId) + require.NoError(t, err) + + if p.IsLeafPage() && p.Count() > 10 { + elementCount = p.Count() + break + } + pageId++ + } + t.Logf("The original element count: %d", elementCount) + + if setEndIdxAsCount { + t.Logf("Set the endIdx as the element count: %d", elementCount) + endIdx = int(elementCount) + if removeOnlyOne { + startIdx = endIdx - 1 + t.Logf("Set the startIdx as the endIdx-1: %d", startIdx) + } + } + + // clear elements [startIdx, endIdx) in the page + rootCmd := main.NewRootCommand() + output := filepath.Join(t.TempDir(), fmt.Sprintf("db_%d", rand.Intn(100))) + rootCmd.SetArgs([]string{ + "surgery", "clear-page-elements", srcPath, + "--output", output, + "--pageId", fmt.Sprintf("%d", pageId), + "--from", fmt.Sprintf("%d", startIdx), + "--to", fmt.Sprintf("%d", endIdx), + }) + err = rootCmd.Execute() + if expectError { + require.Error(t, err) + return + } + + require.NoError(t, err) + + // check the element count again + expectedCnt := 0 + if endIdx == -1 { + expectedCnt = startIdx + } else { + expectedCnt = int(elementCount) - (endIdx - startIdx) + } + p, _, err := guts_cli.ReadPage(output, pageId) + require.NoError(t, err) + assert.Equal(t, expectedCnt, int(p.Count())) +} From a9a9356ea4aa43a3ef5ce360001bae704a9a5d13 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sun, 19 Mar 2023 09:21:32 +0800 Subject: [PATCH 036/439] test: verify the left elements after clear-page-elements operation should have the same content Signed-off-by: Benjamin Wang --- cmd/bbolt/command_surgery_cobra_test.go | 26 +++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/cmd/bbolt/command_surgery_cobra_test.go b/cmd/bbolt/command_surgery_cobra_test.go index 
1b054333c..de4aa2b5e 100644 --- a/cmd/bbolt/command_surgery_cobra_test.go +++ b/cmd/bbolt/command_surgery_cobra_test.go @@ -181,4 +181,30 @@ func testSurgeryClearPageElement(t *testing.T, startIdx, endIdx int, setEndIdxAs p, _, err := guts_cli.ReadPage(output, pageId) require.NoError(t, err) assert.Equal(t, expectedCnt, int(p.Count())) + + compareDataAfterClearingElement(t, srcPath, output, pageId, startIdx, endIdx) +} + +func compareDataAfterClearingElement(t *testing.T, srcPath, dstPath string, pageId uint64, startIdx, endIdx int) { + srcPage, _, err := guts_cli.ReadPage(srcPath, pageId) + require.NoError(t, err) + + dstPage, _, err := guts_cli.ReadPage(dstPath, pageId) + require.NoError(t, err) + + var dstIdx uint16 + for i := uint16(0); i < srcPage.Count(); i++ { + // skip the cleared elements + if dstIdx >= uint16(startIdx) && (dstIdx < uint16(endIdx) || endIdx == -1) { + continue + } + srcElement := srcPage.LeafPageElement(i) + dstElement := dstPage.LeafPageElement(dstIdx) + + require.Equal(t, srcElement.Flags(), dstElement.Flags()) + require.Equal(t, srcElement.Key(), dstElement.Key()) + require.Equal(t, srcElement.Value(), dstElement.Value()) + + dstIdx++ + } } From 8902ef92e971732e9ede717c9119b88f3e184c2d Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 22 Mar 2023 11:29:27 +0800 Subject: [PATCH 037/439] resolve some minor review comments Signed-off-by: Benjamin Wang --- cmd/bbolt/command_surgery_cobra.go | 4 ++-- cmd/bbolt/command_surgery_cobra_test.go | 3 +-- cmd/bbolt/main.go | 4 ++-- internal/surgeon/surgeon.go | 12 +++++++++--- 4 files changed, 14 insertions(+), 9 deletions(-) diff --git a/cmd/bbolt/command_surgery_cobra.go b/cmd/bbolt/command_surgery_cobra.go index b72cc6f14..e903da0e7 100644 --- a/cmd/bbolt/command_surgery_cobra.go +++ b/cmd/bbolt/command_surgery_cobra.go @@ -31,8 +31,8 @@ func newSurgeryCobraCommand() *cobra.Command { func newSurgeryClearPageElementsCommand() *cobra.Command { clearElementCmd := &cobra.Command{ - Use: 
"clear-page-elements [options]", - Short: "Clears elements from the given page", + Use: "clear-page-elements [options]", + Short: "Clears elements from the given page, which can be a branch or leaf page", Args: func(cmd *cobra.Command, args []string) error { if len(args) == 0 { return errors.New("db file path not provided") diff --git a/cmd/bbolt/command_surgery_cobra_test.go b/cmd/bbolt/command_surgery_cobra_test.go index de4aa2b5e..4231228f4 100644 --- a/cmd/bbolt/command_surgery_cobra_test.go +++ b/cmd/bbolt/command_surgery_cobra_test.go @@ -2,7 +2,6 @@ package main_test import ( "fmt" - "math/rand" "path/filepath" "testing" @@ -155,7 +154,7 @@ func testSurgeryClearPageElement(t *testing.T, startIdx, endIdx int, setEndIdxAs // clear elements [startIdx, endIdx) in the page rootCmd := main.NewRootCommand() - output := filepath.Join(t.TempDir(), fmt.Sprintf("db_%d", rand.Intn(100))) + output := filepath.Join(t.TempDir(), "db") rootCmd.SetArgs([]string{ "surgery", "clear-page-elements", srcPath, "--output", output, diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index e84b31bf7..413a49a09 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -61,14 +61,14 @@ func main() { if err := m.Run(os.Args[1:]...); err == ErrUsage { os.Exit(2) } else if err == ErrUnknownCommand { - execute() + cobraExecute() } else if err != nil { fmt.Println(err.Error()) os.Exit(1) } } -func execute() { +func cobraExecute() { rootCmd := NewRootCommand() if err := rootCmd.Execute(); err != nil { if rootCmd.SilenceErrors { diff --git a/internal/surgeon/surgeon.go b/internal/surgeon/surgeon.go index d2c7be876..5b915d36a 100644 --- a/internal/surgeon/surgeon.go +++ b/internal/surgeon/surgeon.go @@ -20,6 +20,12 @@ func ClearPage(path string, pgId common.Pgid) error { return ClearPageElements(path, pgId, 0, -1) } +// ClearPageElements supports clearing elements in both branch and leaf +// pages. 
Note the freelist may be cleaned in the meta pages in the following +// two cases, and bbolt needs to scan the db to reconstruct free list. It may +// cause some delay on next startup, depending on the db size. +// 1. Any branch elements are cleared; +// 2. An object saved in overflow pages is cleared; func ClearPageElements(path string, pgId common.Pgid, start, end int) error { // Read the page p, buf, err := guts_cli.ReadPage(path, uint64(pgId)) @@ -93,16 +99,16 @@ func ClearPageElements(path string, pgId common.Pgid, start, end int) error { } func clearFreelist(path string) error { - if err := clearFreelistAt(path, 0); err != nil { + if err := clearFreelistInMetaPage(path, 0); err != nil { return fmt.Errorf("clearFreelist on meta page 0 failed: %w", err) } - if err := clearFreelistAt(path, 1); err != nil { + if err := clearFreelistInMetaPage(path, 1); err != nil { return fmt.Errorf("clearFreelist on meta page 1 failed: %w", err) } return nil } -func clearFreelistAt(path string, pageId uint64) error { +func clearFreelistInMetaPage(path string, pageId uint64) error { _, buf, err := guts_cli.ReadPage(path, pageId) if err != nil { return fmt.Errorf("ReadPage %d failed: %w", pageId, err) From a0e5e3a1dd8b6497d08014bc9d2b64a4cfea5a0b Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 22 Mar 2023 12:25:05 +0800 Subject: [PATCH 038/439] test: add more subcases to verify 'surgery-clear-elements' on branch page Signed-off-by: Benjamin Wang --- cmd/bbolt/command_surgery_cobra_test.go | 84 +++++++++++++++++++------ 1 file changed, 66 insertions(+), 18 deletions(-) diff --git a/cmd/bbolt/command_surgery_cobra_test.go b/cmd/bbolt/command_surgery_cobra_test.go index 4231228f4..c7e03642d 100644 --- a/cmd/bbolt/command_surgery_cobra_test.go +++ b/cmd/bbolt/command_surgery_cobra_test.go @@ -19,36 +19,68 @@ func TestSurgery_ClearPageElement(t *testing.T) { name string from int to int + isBranchPage bool setEndIdxAsCount bool removeOnlyOneElement bool // only valid when 
setEndIdxAsCount == true, and startIdx = endIdx -1 in this case. expectError bool }{ - // normal range + // normal range in leaf page { - name: "normal range: [4, 8)", + name: "normal range in leaf page: [4, 8)", from: 4, to: 8, }, { - name: "normal range: [5, -1)", + name: "normal range in leaf page: [5, -1)", from: 4, to: -1, }, { - name: "normal range: all", + name: "normal range in leaf page: all", from: 0, to: -1, }, { - name: "normal range: [0, 7)", + name: "normal range in leaf page: [0, 7)", from: 0, to: 7, }, { - name: "normal range: [3, count)", + name: "normal range in leaf page: [3, count)", from: 4, setEndIdxAsCount: true, }, + // normal range in branch page + { + name: "normal range in branch page: [4, 8)", + from: 4, + to: 8, + isBranchPage: true, + }, + { + name: "normal range in branch page: [5, -1)", + from: 4, + to: -1, + isBranchPage: true, + }, + { + name: "normal range in branch page: all", + from: 0, + to: -1, + isBranchPage: true, + }, + { + name: "normal range in branch page: [0, 7)", + from: 0, + to: 7, + isBranchPage: true, + }, + { + name: "normal range in branch page: [3, count)", + from: 4, + isBranchPage: true, + setEndIdxAsCount: true, + }, // remove only one element { name: "one element: the first one", @@ -106,12 +138,12 @@ func TestSurgery_ClearPageElement(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - testSurgeryClearPageElement(t, tc.from, tc.to, tc.setEndIdxAsCount, tc.removeOnlyOneElement, tc.expectError) + testSurgeryClearPageElement(t, tc.from, tc.to, tc.isBranchPage, tc.setEndIdxAsCount, tc.removeOnlyOneElement, tc.expectError) }) } } -func testSurgeryClearPageElement(t *testing.T, startIdx, endIdx int, setEndIdxAsCount, removeOnlyOne, expectError bool) { +func testSurgeryClearPageElement(t *testing.T, startIdx, endIdx int, isBranchPage, setEndIdxAsCount, removeOnlyOne, expectError bool) { pageSize := 4096 db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 
pageSize}) srcPath := db.Path() @@ -135,9 +167,16 @@ func testSurgeryClearPageElement(t *testing.T, startIdx, endIdx int, setEndIdxAs p, _, err := guts_cli.ReadPage(srcPath, pageId) require.NoError(t, err) - if p.IsLeafPage() && p.Count() > 10 { - elementCount = p.Count() - break + if isBranchPage { + if p.IsBranchPage() && p.Count() > 10 { + elementCount = p.Count() + break + } + } else { + if p.IsLeafPage() && p.Count() > 10 { + elementCount = p.Count() + break + } } pageId++ } @@ -181,10 +220,10 @@ func testSurgeryClearPageElement(t *testing.T, startIdx, endIdx int, setEndIdxAs require.NoError(t, err) assert.Equal(t, expectedCnt, int(p.Count())) - compareDataAfterClearingElement(t, srcPath, output, pageId, startIdx, endIdx) + compareDataAfterClearingElement(t, srcPath, output, pageId, isBranchPage, startIdx, endIdx) } -func compareDataAfterClearingElement(t *testing.T, srcPath, dstPath string, pageId uint64, startIdx, endIdx int) { +func compareDataAfterClearingElement(t *testing.T, srcPath, dstPath string, pageId uint64, isBranchPage bool, startIdx, endIdx int) { srcPage, _, err := guts_cli.ReadPage(srcPath, pageId) require.NoError(t, err) @@ -197,12 +236,21 @@ func compareDataAfterClearingElement(t *testing.T, srcPath, dstPath string, page if dstIdx >= uint16(startIdx) && (dstIdx < uint16(endIdx) || endIdx == -1) { continue } - srcElement := srcPage.LeafPageElement(i) - dstElement := dstPage.LeafPageElement(dstIdx) - require.Equal(t, srcElement.Flags(), dstElement.Flags()) - require.Equal(t, srcElement.Key(), dstElement.Key()) - require.Equal(t, srcElement.Value(), dstElement.Value()) + if isBranchPage { + srcElement := srcPage.BranchPageElement(i) + dstElement := dstPage.BranchPageElement(dstIdx) + + require.Equal(t, srcElement.Key(), dstElement.Key()) + require.Equal(t, srcElement.Pgid(), dstElement.Pgid()) + } else { + srcElement := srcPage.LeafPageElement(i) + dstElement := dstPage.LeafPageElement(dstIdx) + + require.Equal(t, srcElement.Flags(), 
dstElement.Flags()) + require.Equal(t, srcElement.Key(), dstElement.Key()) + require.Equal(t, srcElement.Value(), dstElement.Value()) + } dstIdx++ } From 87eed0ac93871accb709518440b9488a3acdc047 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 22 Mar 2023 15:59:29 +0800 Subject: [PATCH 039/439] add test case to verify 'surgery-clear-elements' on overflow page Also resolved a bug related to overflow page. Signed-off-by: Benjamin Wang --- cmd/bbolt/command_surgery_cobra_test.go | 179 +++++++++++++++++++++++- internal/common/inode.go | 10 ++ internal/surgeon/surgeon.go | 38 ++--- 3 files changed, 207 insertions(+), 20 deletions(-) diff --git a/cmd/bbolt/command_surgery_cobra_test.go b/cmd/bbolt/command_surgery_cobra_test.go index c7e03642d..653a67989 100644 --- a/cmd/bbolt/command_surgery_cobra_test.go +++ b/cmd/bbolt/command_surgery_cobra_test.go @@ -14,7 +14,7 @@ import ( "go.etcd.io/bbolt/internal/guts_cli" ) -func TestSurgery_ClearPageElement(t *testing.T) { +func TestSurgery_ClearPageElements_Without_Overflow(t *testing.T) { testCases := []struct { name string from int @@ -138,12 +138,12 @@ func TestSurgery_ClearPageElement(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - testSurgeryClearPageElement(t, tc.from, tc.to, tc.isBranchPage, tc.setEndIdxAsCount, tc.removeOnlyOneElement, tc.expectError) + testSurgeryClearPageElementsWithoutOverflow(t, tc.from, tc.to, tc.isBranchPage, tc.setEndIdxAsCount, tc.removeOnlyOneElement, tc.expectError) }) } } -func testSurgeryClearPageElement(t *testing.T, startIdx, endIdx int, isBranchPage, setEndIdxAsCount, removeOnlyOne, expectError bool) { +func testSurgeryClearPageElementsWithoutOverflow(t *testing.T, startIdx, endIdx int, isBranchPage, setEndIdxAsCount, removeOnlyOne, expectError bool) { pageSize := 4096 db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize}) srcPath := db.Path() @@ -255,3 +255,176 @@ func compareDataAfterClearingElement(t *testing.T, 
srcPath, dstPath string, page dstIdx++ } } + +func TestSurgery_ClearPageElements_With_Overflow(t *testing.T) { + testCases := []struct { + name string + from int + to int + valueSizes []int + expectedOverflow int + }{ + // big element + { + name: "remove a big element at the end", + valueSizes: []int{500, 500, 500, 2600}, + from: 3, + to: 4, + expectedOverflow: 0, + }, + { + name: "remove a big element at the begin", + valueSizes: []int{2600, 500, 500, 500}, + from: 0, + to: 1, + expectedOverflow: 0, + }, + { + name: "remove a big element in the middle", + valueSizes: []int{500, 2600, 500, 500}, + from: 1, + to: 2, + expectedOverflow: 0, + }, + // small element + { + name: "remove a small element at the end", + valueSizes: []int{500, 500, 3100, 100}, + from: 3, + to: 4, + expectedOverflow: 1, + }, + { + name: "remove a small element at the begin", + valueSizes: []int{100, 500, 3100, 500}, + from: 0, + to: 1, + expectedOverflow: 1, + }, + { + name: "remove a small element in the middle", + valueSizes: []int{500, 100, 3100, 500}, + from: 1, + to: 2, + expectedOverflow: 1, + }, + { + name: "remove a small element at the end of page with big overflow", + valueSizes: []int{500, 500, 4096 * 5, 100}, + from: 3, + to: 4, + expectedOverflow: 5, + }, + { + name: "remove a small element at the begin of page with big overflow", + valueSizes: []int{100, 500, 4096 * 6, 500}, + from: 0, + to: 1, + expectedOverflow: 6, + }, + { + name: "remove a small element in the middle of page with big overflow", + valueSizes: []int{500, 100, 4096 * 4, 500}, + from: 1, + to: 2, + expectedOverflow: 4, + }, + // huge element + { + name: "remove a huge element at the end", + valueSizes: []int{500, 500, 500, 4096 * 5}, + from: 3, + to: 4, + expectedOverflow: 0, + }, + { + name: "remove a huge element at the begin", + valueSizes: []int{4096 * 5, 500, 500, 500}, + from: 0, + to: 1, + expectedOverflow: 0, + }, + { + name: "remove a huge element in the middle", + valueSizes: []int{500, 4096 * 5, 500, 
500}, + from: 1, + to: 2, + expectedOverflow: 0, + }, + } + + for _, tc := range testCases { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + testSurgeryClearPageElementsWithOverflow(t, tc.from, tc.to, tc.valueSizes, tc.expectedOverflow) + }) + } +} + +func testSurgeryClearPageElementsWithOverflow(t *testing.T, startIdx, endIdx int, valueSizes []int, expectedOverflow int) { + pageSize := 4096 + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize}) + srcPath := db.Path() + + // Generate sample db + err := db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists([]byte("data")) + for i, valueSize := range valueSizes { + key := []byte(fmt.Sprintf("%04d", i)) + val := make([]byte, valueSize) + if putErr := b.Put(key, val); putErr != nil { + return putErr + } + } + return nil + }) + require.NoError(t, err) + + defer requireDBNoChange(t, dbData(t, srcPath), srcPath) + + // find a page with overflow pages + var ( + pageId uint64 = 2 + elementCount uint16 = 0 + ) + for { + p, _, err := guts_cli.ReadPage(srcPath, pageId) + require.NoError(t, err) + + if p.Overflow() > 0 { + elementCount = p.Count() + break + } + pageId++ + } + t.Logf("The original element count: %d", elementCount) + + // clear elements [startIdx, endIdx) in the page + rootCmd := main.NewRootCommand() + output := filepath.Join(t.TempDir(), "db") + rootCmd.SetArgs([]string{ + "surgery", "clear-page-elements", srcPath, + "--output", output, + "--pageId", fmt.Sprintf("%d", pageId), + "--from", fmt.Sprintf("%d", startIdx), + "--to", fmt.Sprintf("%d", endIdx), + }) + err = rootCmd.Execute() + require.NoError(t, err) + + // check the element count again + expectedCnt := 0 + if endIdx == -1 { + expectedCnt = startIdx + } else { + expectedCnt = int(elementCount) - (endIdx - startIdx) + } + p, _, err := guts_cli.ReadPage(output, pageId) + require.NoError(t, err) + assert.Equal(t, expectedCnt, int(p.Count())) + + assert.Equal(t, expectedOverflow, int(p.Overflow())) + + 
compareDataAfterClearingElement(t, srcPath, output, pageId, false, startIdx, endIdx) +} diff --git a/internal/common/inode.go b/internal/common/inode.go index 9f99937e7..080b9af78 100644 --- a/internal/common/inode.go +++ b/internal/common/inode.go @@ -103,3 +103,13 @@ func WriteInodeToPage(inodes Inodes, p *Page) uint32 { return uint32(off) } + +func UsedSpaceInPage(inodes Inodes, p *Page) uint32 { + off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes)) + for _, item := range inodes { + sz := len(item.Key()) + len(item.Value()) + off += uintptr(sz) + } + + return uint32(off) +} diff --git a/internal/surgeon/surgeon.go b/internal/surgeon/surgeon.go index 5b915d36a..3d556e3a2 100644 --- a/internal/surgeon/surgeon.go +++ b/internal/surgeon/surgeon.go @@ -61,33 +61,37 @@ func ClearPageElements(path string, pgId common.Pgid, start, end int) error { preOverflow := p.Overflow() + var ( + dataWritten uint32 + ) if end == int(p.Count()) || end == -1 { + inodes := common.ReadInodeFromPage(p) + inodes = inodes[:start] + p.SetCount(uint16(start)) - p.SetOverflow(0) - if preOverflow != 0 || p.IsBranchPage() { - if err := clearFreelist(path); err != nil { - return err - } - } + // no need to write inode & data again, we just need to get + // the data size which will be kept. + dataWritten = common.UsedSpaceInPage(inodes, p) } else { inodes := common.ReadInodeFromPage(p) inodes = append(inodes[:start], inodes[end:]...) 
p.SetCount(uint16(len(inodes))) - dataWritten := common.WriteInodeToPage(inodes, p) + dataWritten = common.WriteInodeToPage(inodes, p) + } - pageSize, _, err := guts_cli.ReadPageAndHWMSize(path) - if err != nil { - return fmt.Errorf("ReadPageAndHWMSize failed: %w", err) - } - if dataWritten%uint32(pageSize) == 0 { - p.SetOverflow(dataWritten/uint32(pageSize) - 1) - } else { - p.SetOverflow(dataWritten / uint32(pageSize)) - } + pageSize, _, err := guts_cli.ReadPageAndHWMSize(path) + if err != nil { + return fmt.Errorf("ReadPageAndHWMSize failed: %w", err) + } + if dataWritten%uint32(pageSize) == 0 { + p.SetOverflow(dataWritten/uint32(pageSize) - 1) + } else { + p.SetOverflow(dataWritten / uint32(pageSize)) } - if err := guts_cli.WritePage(path, buf); err != nil { + datasz := pageSize * (uint64(p.Overflow()) + 1) + if err := guts_cli.WritePage(path, buf[0:datasz]); err != nil { return fmt.Errorf("WritePage failed: %w", err) } From 74e26bee7782a7ddd79c2f9936ea2cccea0513c3 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sat, 25 Mar 2023 08:16:43 +0800 Subject: [PATCH 040/439] update command 'surgery clear-page-elements' not to automatically abandon freelist Signed-off-by: Benjamin Wang --- cmd/bbolt/command_surgery_cobra.go | 12 +++++-- cmd/bbolt/command_surgery_cobra_test.go | 8 ++--- cmd/bbolt/surgery_commands.go | 9 ++++- internal/surgeon/surgeon.go | 44 +++++++++++++++---------- 4 files changed, 48 insertions(+), 25 deletions(-) diff --git a/cmd/bbolt/command_surgery_cobra.go b/cmd/bbolt/command_surgery_cobra.go index e903da0e7..407d7eccd 100644 --- a/cmd/bbolt/command_surgery_cobra.go +++ b/cmd/bbolt/command_surgery_cobra.go @@ -48,8 +48,8 @@ func newSurgeryClearPageElementsCommand() *cobra.Command { clearElementCmd.Flags().StringVar(&surgeryTargetDBFilePath, "output", "", "path to the target db file") clearElementCmd.Flags().Uint64VarP(&surgeryPageId, "pageId", "", 0, "page id") - clearElementCmd.Flags().IntVarP(&surgeryStartElementIdx, "from", "", 0, "start 
element index (included) to clear, starting from 0") - clearElementCmd.Flags().IntVarP(&surgeryEndElementIdx, "to", "", 0, "end element index (excluded) to clear, starting from 0, -1 means to the end of page") + clearElementCmd.Flags().IntVarP(&surgeryStartElementIdx, "from-index", "", 0, "start element index (included) to clear, starting from 0") + clearElementCmd.Flags().IntVarP(&surgeryEndElementIdx, "to-index", "", 0, "end element index (excluded) to clear, starting from 0, -1 means to the end of page") return clearElementCmd } @@ -65,10 +65,16 @@ func surgeryClearPageElementFunc(cmd *cobra.Command, args []string) error { return fmt.Errorf("the pageId must be at least 2, but got %d", surgeryPageId) } - if err := surgeon.ClearPageElements(surgeryTargetDBFilePath, common.Pgid(surgeryPageId), surgeryStartElementIdx, surgeryEndElementIdx); err != nil { + needAbandonFreelist, err := surgeon.ClearPageElements(surgeryTargetDBFilePath, common.Pgid(surgeryPageId), surgeryStartElementIdx, surgeryEndElementIdx, false) + if err != nil { return fmt.Errorf("clear-page-element command failed: %w", err) } + if needAbandonFreelist { + fmt.Fprintf(os.Stdout, "WARNING: The clearing has abandoned some pages that are not yet referenced from free list.\n") + fmt.Fprintf(os.Stdout, "Please consider executing `./bbolt surgery abandon-freelist ...`\n") + } + fmt.Fprintf(os.Stdout, "All elements in [%d, %d) in page %d were cleared\n", surgeryStartElementIdx, surgeryEndElementIdx, surgeryPageId) return nil } diff --git a/cmd/bbolt/command_surgery_cobra_test.go b/cmd/bbolt/command_surgery_cobra_test.go index 653a67989..3016a963f 100644 --- a/cmd/bbolt/command_surgery_cobra_test.go +++ b/cmd/bbolt/command_surgery_cobra_test.go @@ -198,8 +198,8 @@ func testSurgeryClearPageElementsWithoutOverflow(t *testing.T, startIdx, endIdx "surgery", "clear-page-elements", srcPath, "--output", output, "--pageId", fmt.Sprintf("%d", pageId), - "--from", fmt.Sprintf("%d", startIdx), - "--to", 
fmt.Sprintf("%d", endIdx), + "--from-index", fmt.Sprintf("%d", startIdx), + "--to-index", fmt.Sprintf("%d", endIdx), }) err = rootCmd.Execute() if expectError { @@ -407,8 +407,8 @@ func testSurgeryClearPageElementsWithOverflow(t *testing.T, startIdx, endIdx int "surgery", "clear-page-elements", srcPath, "--output", output, "--pageId", fmt.Sprintf("%d", pageId), - "--from", fmt.Sprintf("%d", startIdx), - "--to", fmt.Sprintf("%d", endIdx), + "--from-index", fmt.Sprintf("%d", startIdx), + "--to-index", fmt.Sprintf("%d", endIdx), }) err = rootCmd.Execute() require.NoError(t, err) diff --git a/cmd/bbolt/surgery_commands.go b/cmd/bbolt/surgery_commands.go index 652f21484..07b128819 100644 --- a/cmd/bbolt/surgery_commands.go +++ b/cmd/bbolt/surgery_commands.go @@ -4,6 +4,7 @@ import ( "errors" "flag" "fmt" + "os" "strconv" "strings" @@ -233,10 +234,16 @@ func (cmd *clearPageCommand) Run(args ...string) error { return err } - if err := surgeon.ClearPage(cmd.dstPath, common.Pgid(pageId)); err != nil { + needAbandonFreelist, err := surgeon.ClearPage(cmd.dstPath, common.Pgid(pageId)) + if err != nil { return fmt.Errorf("clearPageCommand failed: %w", err) } + if needAbandonFreelist { + fmt.Fprintf(os.Stdout, "WARNING: The clearing has abandoned some pages that are not yet referenced from free list.\n") + fmt.Fprintf(os.Stdout, "Please consider executing `./bbolt surgery abandon-freelist ...`\n") + } + fmt.Fprintf(cmd.Stdout, "Page (%d) was cleared\n", pageId) return nil } diff --git a/internal/surgeon/surgeon.go b/internal/surgeon/surgeon.go index 3d556e3a2..e69eb7234 100644 --- a/internal/surgeon/surgeon.go +++ b/internal/surgeon/surgeon.go @@ -16,47 +16,54 @@ func CopyPage(path string, srcPage common.Pgid, target common.Pgid) error { return guts_cli.WritePage(path, d1) } -func ClearPage(path string, pgId common.Pgid) error { - return ClearPageElements(path, pgId, 0, -1) +func ClearPage(path string, pgId common.Pgid) (bool, error) { + return ClearPageElements(path, pgId, 0, 
-1, false)
 }
 
 // ClearPageElements supports clearing elements in both branch and leaf
-// pages. Note the freelist may be cleaned in the meta pages in the following
-// two cases, and bbolt needs to scan the db to reconstruct free list. It may
-// cause some delay on next startup, depending on the db size.
+// pages. Note if the ${abandonFreelist} is true, the freelist may be cleaned
+// in the meta pages in the following two cases, and bbolt needs to scan the
+// db to reconstruct free list. It may cause some delay on next startup,
+// depending on the db size.
 // 1. Any branch elements are cleared;
 // 2. An object saved in overflow pages is cleared;
-func ClearPageElements(path string, pgId common.Pgid, start, end int) error {
+//
+// Usually ${abandonFreelist} defaults to false, it means it will not clear the
+// freelist in meta pages automatically. Users will receive a warning message
+// to remind them to explicitly execute `bbolt surgery abandon-freelist`
+// afterwards; the first return parameter will be true in such case. But if
+// the freelist isn't synced at all, no warning message will be displayed. 
+func ClearPageElements(path string, pgId common.Pgid, start, end int, abandonFreelist bool) (bool, error) { // Read the page p, buf, err := guts_cli.ReadPage(path, uint64(pgId)) if err != nil { - return fmt.Errorf("ReadPage failed: %w", err) + return false, fmt.Errorf("ReadPage failed: %w", err) } if !p.IsLeafPage() && !p.IsBranchPage() { - return fmt.Errorf("can't clear elements in %q page", p.Typ()) + return false, fmt.Errorf("can't clear elements in %q page", p.Typ()) } elementCnt := int(p.Count()) if elementCnt == 0 { - return nil + return false, nil } if start < 0 || start >= elementCnt { - return fmt.Errorf("the start index (%d) is out of range [0, %d)", start, elementCnt) + return false, fmt.Errorf("the start index (%d) is out of range [0, %d)", start, elementCnt) } if (end < 0 || end > elementCnt) && end != -1 { - return fmt.Errorf("the end index (%d) is out of range [0, %d]", end, elementCnt) + return false, fmt.Errorf("the end index (%d) is out of range [0, %d]", end, elementCnt) } if start > end && end != -1 { - return fmt.Errorf("the start index (%d) is bigger than the end index (%d)", start, end) + return false, fmt.Errorf("the start index (%d) is bigger than the end index (%d)", start, end) } if start == end { - return fmt.Errorf("invalid: the start index (%d) is equal to the end index (%d)", start, end) + return false, fmt.Errorf("invalid: the start index (%d) is equal to the end index (%d)", start, end) } preOverflow := p.Overflow() @@ -82,7 +89,7 @@ func ClearPageElements(path string, pgId common.Pgid, start, end int) error { pageSize, _, err := guts_cli.ReadPageAndHWMSize(path) if err != nil { - return fmt.Errorf("ReadPageAndHWMSize failed: %w", err) + return false, fmt.Errorf("ReadPageAndHWMSize failed: %w", err) } if dataWritten%uint32(pageSize) == 0 { p.SetOverflow(dataWritten/uint32(pageSize) - 1) @@ -92,14 +99,17 @@ func ClearPageElements(path string, pgId common.Pgid, start, end int) error { datasz := pageSize * (uint64(p.Overflow()) + 1) 
if err := guts_cli.WritePage(path, buf[0:datasz]); err != nil { - return fmt.Errorf("WritePage failed: %w", err) + return false, fmt.Errorf("WritePage failed: %w", err) } if preOverflow != p.Overflow() || p.IsBranchPage() { - return clearFreelist(path) + if abandonFreelist { + return false, clearFreelist(path) + } + return true, nil } - return nil + return false, nil } func clearFreelist(path string) error { From 7054e452332dfeeb837f58c1538d643d1c1d154d Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sat, 25 Mar 2023 13:44:01 +0800 Subject: [PATCH 041/439] test: make the test timeout configurable Signed-off-by: Benjamin Wang --- Makefile | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index c9fab8eeb..e2a594e9d 100644 --- a/Makefile +++ b/Makefile @@ -13,6 +13,11 @@ ifdef CPU endif TESTFLAGS = $(TESTFLAGS_RACE) $(TESTFLAGS_CPU) $(EXTRA_TESTFLAGS) +TESTFLAGS_TIMEOUT=30m +ifdef TIMEOUT + TESTFLAGS_TIMEOUT=$(TIMEOUT) +endif + .PHONY: fmt fmt: !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') @@ -24,23 +29,23 @@ lint: .PHONY: test test: @echo "hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m + TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT} TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./internal/... TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt @echo "array freelist test" - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m + TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT} TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./internal/... 
TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt .PHONY: coverage coverage: @echo "hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v -timeout 30m \ + TEST_FREELIST_TYPE=hashmap go test -v -timeout ${TESTFLAGS_TIMEOUT} \ -coverprofile cover-freelist-hashmap.out -covermode atomic @echo "array freelist test" - TEST_FREELIST_TYPE=array go test -v -timeout 30m \ + TEST_FREELIST_TYPE=array go test -v -timeout ${TESTFLAGS_TIMEOUT} \ -coverprofile cover-freelist-array.out -covermode atomic .PHONY: gofail-enable From a12c0c4bd7c51cb9348da309e36695a61cda73b4 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sat, 25 Mar 2023 13:56:57 +0800 Subject: [PATCH 042/439] test: support enabling strict mode in testing Signed-off-by: Benjamin Wang --- Makefile | 10 +++++++++- internal/btesting/btesting.go | 20 ++++++++++++++++++-- internal/tests/tx_check_test.go | 4 ++++ 3 files changed, 31 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index e2a594e9d..e234574dc 100644 --- a/Makefile +++ b/Makefile @@ -18,6 +18,14 @@ ifdef TIMEOUT TESTFLAGS_TIMEOUT=$(TIMEOUT) endif +TESTFLAGS_ENABLE_STRICT_MODE=false +ifdef ENABLE_STRICT_MODE + TESTFLAGS_ENABLE_STRICT_MODE=$(ENABLE_STRICT_MODE) +endif + +.EXPORT_ALL_VARIABLES: +TEST_ENABLE_STRICT_MODE=${TESTFLAGS_ENABLE_STRICT_MODE} + .PHONY: fmt fmt: !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]') @@ -53,7 +61,7 @@ gofail-enable: install-gofail gofail enable . .PHONY: gofail-disable -gofail-disable: +gofail-disable: install-gofail gofail disable . 
.PHONY: install-gofail diff --git a/internal/btesting/btesting.go b/internal/btesting/btesting.go index e9ef64b3f..ffa0d8b3a 100644 --- a/internal/btesting/btesting.go +++ b/internal/btesting/btesting.go @@ -6,6 +6,7 @@ import ( "os" "path/filepath" "regexp" + "strings" "testing" "time" @@ -17,8 +18,12 @@ import ( var statsFlag = flag.Bool("stats", false, "show performance stats") -// TestFreelistType is used as a env variable for test to indicate the backend type -const TestFreelistType = "TEST_FREELIST_TYPE" +const ( + // TestFreelistType is used as an env variable for test to indicate the backend type. + TestFreelistType = "TEST_FREELIST_TYPE" + // TestEnableStrictMode is used to enable strict check by default after opening each DB. + TestEnableStrictMode = "TEST_ENABLE_STRICT_MODE" +) // DB is a test wrapper for bolt.DB. type DB struct { @@ -60,6 +65,7 @@ func MustOpenDBWithOption(t testing.TB, f string, o *bolt.Options) *DB { o: o, t: t, } + resDB.strictModeEnabledDefault() t.Cleanup(resDB.PostTestCleanup) return resDB } @@ -113,6 +119,7 @@ func (db *DB) MustReopen() { indb, err := bolt.Open(db.Path(), 0666, db.o) require.NoError(db.t, err) db.DB = indb + db.strictModeEnabledDefault() } // MustCheck runs a consistency check on the database and panics if any errors are found. 
@@ -204,3 +211,12 @@ func (db *DB) PrintStats() { func truncDuration(d time.Duration) string { return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1") } + +func (db *DB) strictModeEnabledDefault() { + strictModeEnabled := strings.ToLower(os.Getenv(TestEnableStrictMode)) + db.StrictMode = strictModeEnabled == "true" +} + +func (db *DB) ForceDisableStrictMode() { + db.StrictMode = false +} diff --git a/internal/tests/tx_check_test.go b/internal/tests/tx_check_test.go index 0476d010f..7a4ab866d 100644 --- a/internal/tests/tx_check_test.go +++ b/internal/tests/tx_check_test.go @@ -14,6 +14,7 @@ import ( func TestTx_RecursivelyCheckPages_MisplacedPage(t *testing.T) { db := btesting.MustCreateDB(t) + db.ForceDisableStrictMode() require.NoError(t, db.Fill([]byte("data"), 1, 10000, func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) }, @@ -36,6 +37,7 @@ func TestTx_RecursivelyCheckPages_MisplacedPage(t *testing.T) { require.NoError(t, surgeon.CopyPage(db.Path(), srcPage, targetPage)) db.MustReopen() + db.ForceDisableStrictMode() require.NoError(t, db.Update(func(tx *bolt.Tx) error { // Collect all the errors. var errors []error @@ -51,6 +53,7 @@ func TestTx_RecursivelyCheckPages_MisplacedPage(t *testing.T) { func TestTx_RecursivelyCheckPages_CorruptedLeaf(t *testing.T) { db := btesting.MustCreateDB(t) + db.ForceDisableStrictMode() require.NoError(t, db.Fill([]byte("data"), 1, 10000, func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) }, @@ -72,6 +75,7 @@ func TestTx_RecursivelyCheckPages_CorruptedLeaf(t *testing.T) { require.NoError(t, guts_cli.WritePage(db.Path(), pbuf)) db.MustReopen() + db.ForceDisableStrictMode() require.NoError(t, db.Update(func(tx *bolt.Tx) error { // Collect all the errors. 
var errors []error From 7b07e70b8c615ac4e5659db605462832a83932dc Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 30 Mar 2023 16:33:34 +0800 Subject: [PATCH 043/439] test: improve testDB_Close_PendingTx to reduce flaky Signed-off-by: Benjamin Wang --- db_test.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/db_test.go b/db_test.go index db7c61951..a92782cf6 100644 --- a/db_test.go +++ b/db_test.go @@ -747,11 +747,15 @@ func testDB_Close_PendingTx(t *testing.T, writable bool) { } // Open update in separate goroutine. + startCh := make(chan struct{}, 1) done := make(chan error, 1) go func() { + startCh <- struct{}{} err := db.Close() done <- err }() + // wait for the above goroutine to get scheduled. + <-startCh // Ensure database hasn't closed. time.Sleep(100 * time.Millisecond) @@ -775,14 +779,13 @@ func testDB_Close_PendingTx(t *testing.T, writable bool) { } // Ensure database closed now. - time.Sleep(100 * time.Millisecond) select { case err := <-done: if err != nil { t.Fatalf("error from inside goroutine: %v", err) } - default: - t.Fatal("database did not close") + case <-time.After(5 * time.Second): + t.Fatalf("database did not close") } } From dc50a729333e75209667510fd8959f9dca324ef3 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 30 Mar 2023 15:13:55 +0800 Subject: [PATCH 044/439] cmd: add 'surgery abandon-freelist' command Signed-off-by: Benjamin Wang --- cmd/bbolt/command_surgery_cobra.go | 58 +++++++++++++++++++++++-- cmd/bbolt/command_surgery_cobra_test.go | 29 +++++++++++++ internal/surgeon/surgeon.go | 4 +- 3 files changed, 85 insertions(+), 6 deletions(-) diff --git a/cmd/bbolt/command_surgery_cobra.go b/cmd/bbolt/command_surgery_cobra.go index 407d7eccd..cb7f89548 100644 --- a/cmd/bbolt/command_surgery_cobra.go +++ b/cmd/bbolt/command_surgery_cobra.go @@ -19,14 +19,15 @@ var ( ) func newSurgeryCobraCommand() *cobra.Command { - cmd := &cobra.Command{ + surgeryCmd := &cobra.Command{ Use: "surgery ", Short: 
"surgery related commands", } - cmd.AddCommand(newSurgeryClearPageElementsCommand()) + surgeryCmd.AddCommand(newSurgeryClearPageElementsCommand()) + surgeryCmd.AddCommand(newSurgeryFreelistCommand()) - return cmd + return surgeryCmd } func newSurgeryClearPageElementsCommand() *cobra.Command { @@ -42,7 +43,6 @@ func newSurgeryClearPageElementsCommand() *cobra.Command { } return nil }, - RunE: surgeryClearPageElementFunc, } @@ -78,3 +78,53 @@ func surgeryClearPageElementFunc(cmd *cobra.Command, args []string) error { fmt.Fprintf(os.Stdout, "All elements in [%d, %d) in page %d were cleared\n", surgeryStartElementIdx, surgeryEndElementIdx, surgeryPageId) return nil } + +// TODO(ahrtr): add `bbolt surgery freelist rebuild/check ...` commands, +// and move all `surgery freelist` commands into a separate file, +// e.g command_surgery_freelist.go. +func newSurgeryFreelistCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "freelist ", + Short: "freelist related surgery commands", + } + + cmd.AddCommand(newSurgeryFreelistAbandonCommand()) + + return cmd +} + +func newSurgeryFreelistAbandonCommand() *cobra.Command { + abandonFreelistCmd := &cobra.Command{ + Use: "abandon [options]", + Short: "Abandon the freelist from both meta pages", + Args: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("db file path not provided") + } + if len(args) > 1 { + return errors.New("too many arguments") + } + return nil + }, + RunE: surgeryFreelistAbandonFunc, + } + + abandonFreelistCmd.Flags().StringVar(&surgeryTargetDBFilePath, "output", "", "path to the target db file") + + return abandonFreelistCmd +} + +func surgeryFreelistAbandonFunc(cmd *cobra.Command, args []string) error { + srcDBPath := args[0] + + if err := copyFile(srcDBPath, surgeryTargetDBFilePath); err != nil { + return fmt.Errorf("[abandon-freelist] copy file failed: %w", err) + } + + if err := surgeon.ClearFreelist(surgeryTargetDBFilePath); err != nil { + return 
fmt.Errorf("abandom-freelist command failed: %w", err) + } + + fmt.Fprintf(os.Stdout, "The freelist was abandoned in both meta pages.\nIt may cause some delay on next startup because bbolt needs to scan the whole db to reconstruct the free list.\n") + return nil +} diff --git a/cmd/bbolt/command_surgery_cobra_test.go b/cmd/bbolt/command_surgery_cobra_test.go index 3016a963f..63103cd0d 100644 --- a/cmd/bbolt/command_surgery_cobra_test.go +++ b/cmd/bbolt/command_surgery_cobra_test.go @@ -11,6 +11,7 @@ import ( bolt "go.etcd.io/bbolt" main "go.etcd.io/bbolt/cmd/bbolt" "go.etcd.io/bbolt/internal/btesting" + "go.etcd.io/bbolt/internal/common" "go.etcd.io/bbolt/internal/guts_cli" ) @@ -428,3 +429,31 @@ func testSurgeryClearPageElementsWithOverflow(t *testing.T, startIdx, endIdx int compareDataAfterClearingElement(t, srcPath, output, pageId, false, startIdx, endIdx) } + +func TestSurgery_Freelist_Abandon(t *testing.T) { + pageSize := 4096 + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize}) + srcPath := db.Path() + + defer requireDBNoChange(t, dbData(t, srcPath), srcPath) + + rootCmd := main.NewRootCommand() + output := filepath.Join(t.TempDir(), "db") + rootCmd.SetArgs([]string{ + "surgery", "freelist", "abandon", srcPath, + "--output", output, + }) + err := rootCmd.Execute() + require.NoError(t, err) + + meta0 := loadMetaPage(t, output, 0) + assert.Equal(t, common.PgidNoFreelist, meta0.Freelist()) + meta1 := loadMetaPage(t, output, 1) + assert.Equal(t, common.PgidNoFreelist, meta1.Freelist()) +} + +func loadMetaPage(t *testing.T, dbPath string, pageID uint64) *common.Meta { + _, buf, err := guts_cli.ReadPage(dbPath, 0) + require.NoError(t, err) + return common.LoadPageMeta(buf) +} diff --git a/internal/surgeon/surgeon.go b/internal/surgeon/surgeon.go index e69eb7234..1f74e170e 100644 --- a/internal/surgeon/surgeon.go +++ b/internal/surgeon/surgeon.go @@ -104,7 +104,7 @@ func ClearPageElements(path string, pgId common.Pgid, start, end int, 
abandonFre if preOverflow != p.Overflow() || p.IsBranchPage() { if abandonFreelist { - return false, clearFreelist(path) + return false, ClearFreelist(path) } return true, nil } @@ -112,7 +112,7 @@ func ClearPageElements(path string, pgId common.Pgid, start, end int, abandonFre return false, nil } -func clearFreelist(path string) error { +func ClearFreelist(path string) error { if err := clearFreelistInMetaPage(path, 0); err != nil { return fmt.Errorf("clearFreelist on meta page 0 failed: %w", err) } From 17767664664b2a43d0673a3f7d7f26b454be516e Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 5 Apr 2023 14:05:38 +0800 Subject: [PATCH 045/439] move copyFile into internal/common/util.go Signed-off-by: Benjamin Wang --- cmd/bbolt/command_surgery_cobra.go | 4 +-- cmd/bbolt/surgery_commands.go | 2 +- cmd/bbolt/utils.go | 51 ------------------------------ internal/common/utils.go | 46 +++++++++++++++++++++++++++ 4 files changed, 49 insertions(+), 54 deletions(-) delete mode 100644 cmd/bbolt/utils.go diff --git a/cmd/bbolt/command_surgery_cobra.go b/cmd/bbolt/command_surgery_cobra.go index cb7f89548..31816be6d 100644 --- a/cmd/bbolt/command_surgery_cobra.go +++ b/cmd/bbolt/command_surgery_cobra.go @@ -57,7 +57,7 @@ func newSurgeryClearPageElementsCommand() *cobra.Command { func surgeryClearPageElementFunc(cmd *cobra.Command, args []string) error { srcDBPath := args[0] - if err := copyFile(srcDBPath, surgeryTargetDBFilePath); err != nil { + if err := common.CopyFile(srcDBPath, surgeryTargetDBFilePath); err != nil { return fmt.Errorf("[clear-page-element] copy file failed: %w", err) } @@ -117,7 +117,7 @@ func newSurgeryFreelistAbandonCommand() *cobra.Command { func surgeryFreelistAbandonFunc(cmd *cobra.Command, args []string) error { srcDBPath := args[0] - if err := copyFile(srcDBPath, surgeryTargetDBFilePath); err != nil { + if err := common.CopyFile(srcDBPath, surgeryTargetDBFilePath); err != nil { return fmt.Errorf("[abandon-freelist] copy file failed: %w", err) 
} diff --git a/cmd/bbolt/surgery_commands.go b/cmd/bbolt/surgery_commands.go index 07b128819..d0a1c8da7 100644 --- a/cmd/bbolt/surgery_commands.go +++ b/cmd/bbolt/surgery_commands.go @@ -64,7 +64,7 @@ func (cmd *surgeryCommand) parsePathsAndCopyFile(fs *flag.FlagSet) error { } // Copy database from SrcPath to DstPath - if err := copyFile(cmd.srcPath, cmd.dstPath); err != nil { + if err := common.CopyFile(cmd.srcPath, cmd.dstPath); err != nil { return fmt.Errorf("failed to copy file: %w", err) } diff --git a/cmd/bbolt/utils.go b/cmd/bbolt/utils.go deleted file mode 100644 index c757e0ce7..000000000 --- a/cmd/bbolt/utils.go +++ /dev/null @@ -1,51 +0,0 @@ -package main - -import ( - "fmt" - "io" - "os" -) - -func copyFile(srcPath, dstPath string) error { - // Ensure source file exists. - _, err := os.Stat(srcPath) - if os.IsNotExist(err) { - return ErrFileNotFound - } else if err != nil { - return err - } - - // Ensure output file not exist. - _, err = os.Stat(dstPath) - if err == nil { - return fmt.Errorf("output file %q already exists", dstPath) - } else if !os.IsNotExist(err) { - return err - } - - srcDB, err := os.Open(srcPath) - if err != nil { - return fmt.Errorf("failed to open source file %q: %w", srcPath, err) - } - defer srcDB.Close() - dstDB, err := os.Create(dstPath) - if err != nil { - return fmt.Errorf("failed to create output file %q: %w", dstPath, err) - } - defer dstDB.Close() - written, err := io.Copy(dstDB, srcDB) - if err != nil { - return fmt.Errorf("failed to copy database file from %q to %q: %w", srcPath, dstPath, err) - } - - srcFi, err := srcDB.Stat() - if err != nil { - return fmt.Errorf("failed to get source file info %q: %w", srcPath, err) - } - initialSize := srcFi.Size() - if initialSize != written { - return fmt.Errorf("the byte copied (%q: %d) isn't equal to the initial db size (%q: %d)", dstPath, written, srcPath, initialSize) - } - - return nil -} diff --git a/internal/common/utils.go b/internal/common/utils.go index 
8fca0a661..c94e5c6bf 100644 --- a/internal/common/utils.go +++ b/internal/common/utils.go @@ -2,6 +2,8 @@ package common import ( "fmt" + "io" + "os" "unsafe" ) @@ -23,3 +25,47 @@ func LoadPage(buf []byte) *Page { func LoadPageMeta(buf []byte) *Meta { return (*Meta)(unsafe.Pointer(&buf[PageHeaderSize])) } + +func CopyFile(srcPath, dstPath string) error { + // Ensure source file exists. + _, err := os.Stat(srcPath) + if os.IsNotExist(err) { + return fmt.Errorf("source file %q not found", srcPath) + } else if err != nil { + return err + } + + // Ensure output file not exist. + _, err = os.Stat(dstPath) + if err == nil { + return fmt.Errorf("output file %q already exists", dstPath) + } else if !os.IsNotExist(err) { + return err + } + + srcDB, err := os.Open(srcPath) + if err != nil { + return fmt.Errorf("failed to open source file %q: %w", srcPath, err) + } + defer srcDB.Close() + dstDB, err := os.Create(dstPath) + if err != nil { + return fmt.Errorf("failed to create output file %q: %w", dstPath, err) + } + defer dstDB.Close() + written, err := io.Copy(dstDB, srcDB) + if err != nil { + return fmt.Errorf("failed to copy database file from %q to %q: %w", srcPath, dstPath, err) + } + + srcFi, err := srcDB.Stat() + if err != nil { + return fmt.Errorf("failed to get source file info %q: %w", srcPath, err) + } + initialSize := srcFi.Size() + if initialSize != written { + return fmt.Errorf("the byte copied (%q: %d) isn't equal to the initial db size (%q: %d)", dstPath, written, srcPath, initialSize) + } + + return nil +} From a333d9323c2c2b81231835d8d070ddbbc07e45ff Mon Sep 17 00:00:00 2001 From: Kishen V Date: Wed, 5 Apr 2023 01:11:00 -0700 Subject: [PATCH 046/439] Add tests for 64KB block pages. Added data to support test coverage for environments which use 64KB pages. 
Signed-off-by: Kishen V --- bucket_test.go | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/bucket_test.go b/bucket_test.go index 33ff149b7..610f67566 100644 --- a/bucket_test.go +++ b/bucket_test.go @@ -1299,6 +1299,24 @@ func TestBucket_Stats(t *testing.T) { BucketN: 1, InlineBucketN: 0, InlineBucketInuse: 0}, + 65536: { + BranchPageN: 1, + BranchOverflowN: 0, + LeafPageN: 2, + LeafOverflowN: 10, + KeyN: 501, + Depth: 2, + BranchAlloc: 65536, + BranchInuse: 54, + LeafAlloc: 786432, + LeafInuse: 0 + + 2*16 + // leaf page header (x LeafPageN) + 501*16 + // leaf elements + 500*3 + len(bigKey) + // leaf keys + 1*10 + 2*90 + 3*400 + longKeyLength, // leaf values: 10 * 1digit, 90*2digits, ... + BucketN: 1, + InlineBucketN: 0, + InlineBucketInuse: 0}, } if err := db.View(func(tx *bolt.Tx) error { @@ -1664,6 +1682,20 @@ func TestBucket_Stats_Large(t *testing.T) { BucketN: 1, InlineBucketN: 0, InlineBucketInuse: 0}, + 65536: { + BranchPageN: 1, + BranchOverflowN: 0, + LeafPageN: 73, + LeafOverflowN: 0, + KeyN: 100000, + Depth: 2, + BranchAlloc: 65536, + BranchInuse: 1534, + LeafAlloc: 4784128, + LeafInuse: 2578948, + BucketN: 1, + InlineBucketN: 0, + InlineBucketInuse: 0}, } if err := db.View(func(tx *bolt.Tx) error { From 0c6c296522636fe7ae5e3e76e26b1b00152ef902 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 28 Mar 2023 19:16:27 +0800 Subject: [PATCH 047/439] test: add test case to verify concurrent reading and writing transactions always work correctly Signed-off-by: Benjamin Wang --- concurrent_test.go | 323 +++++++++++++++++++++++++++++++++++++++++++++ go.mod | 1 + go.sum | 2 + 3 files changed, 326 insertions(+) create mode 100644 concurrent_test.go diff --git a/concurrent_test.go b/concurrent_test.go new file mode 100644 index 000000000..c28ef47f2 --- /dev/null +++ b/concurrent_test.go @@ -0,0 +1,323 @@ +package bbolt_test + +import ( + crand "crypto/rand" + "encoding/hex" + "fmt" + mrand "math/rand" + "os" + 
"path/filepath" + "reflect" + "strings" + "testing" + "time" + "unicode/utf8" + + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" + + bolt "go.etcd.io/bbolt" + "go.etcd.io/bbolt/internal/btesting" + "go.etcd.io/bbolt/internal/common" +) + +func TestConcurrentReadAndWrite(t *testing.T) { + bucket := []byte("data") + keys := []string{"key0", "key1", "key2", "key3", "key4", "key5", "key6", "key7", "key8", "key9"} + + testCases := []struct { + name string + readerCount int + minReadInterval time.Duration + maxReadInterval time.Duration + minWriteInterval time.Duration + maxWriteInterval time.Duration + minWriteBytes int + maxWriteBytes int + testDuration time.Duration + }{ + { + name: "1 reader", + readerCount: 1, + minReadInterval: 50 * time.Millisecond, + maxReadInterval: 100 * time.Millisecond, + minWriteInterval: 10 * time.Millisecond, + maxWriteInterval: 20 * time.Millisecond, + minWriteBytes: 200, + maxWriteBytes: 8000, + testDuration: 30 * time.Second, + }, + { + name: "10 readers", + readerCount: 10, + minReadInterval: 50 * time.Millisecond, + maxReadInterval: 100 * time.Millisecond, + minWriteInterval: 10 * time.Millisecond, + maxWriteInterval: 20 * time.Millisecond, + minWriteBytes: 200, + maxWriteBytes: 8000, + testDuration: 30 * time.Second, + }, + { + name: "50 readers", + readerCount: 50, + minReadInterval: 50 * time.Millisecond, + maxReadInterval: 100 * time.Millisecond, + minWriteInterval: 10 * time.Millisecond, + maxWriteInterval: 20 * time.Millisecond, + minWriteBytes: 500, + maxWriteBytes: 8000, + testDuration: 30 * time.Second, + }, + { + name: "100 readers", + readerCount: 100, + minReadInterval: 50 * time.Millisecond, + maxReadInterval: 100 * time.Millisecond, + minWriteInterval: 10 * time.Millisecond, + maxWriteInterval: 20 * time.Millisecond, + minWriteBytes: 500, + maxWriteBytes: 8000, + testDuration: 30 * time.Second, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + 
concurrentReadAndWrite(t, + bucket, + keys, + tc.readerCount, + tc.minReadInterval, tc.maxReadInterval, + tc.minWriteInterval, tc.maxWriteInterval, + tc.minWriteBytes, tc.maxWriteBytes, + tc.testDuration) + }) + } +} + +func concurrentReadAndWrite(t *testing.T, + bucket []byte, + keys []string, + readerCount int, + minReadInterval, maxReadInterval time.Duration, + minWriteInterval, maxWriteInterval time.Duration, + minWriteBytes, maxWriteBytes int, + testDuration time.Duration) { + + // prepare the db + db := btesting.MustCreateDB(t) + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket(bucket) + return err + }) + require.NoError(t, err) + + stopCh := make(chan struct{}, 1) + errCh := make(chan error, readerCount+1) + + // start readonly transactions + g := new(errgroup.Group) + for i := 0; i < readerCount; i++ { + reader := &readWorker{ + db: db, + bucket: bucket, + keys: keys, + minReadInterval: minReadInterval, + maxReadInterval: maxReadInterval, + errCh: errCh, + stopCh: stopCh, + t: t, + } + g.Go(reader.run) + } + + // start write transaction + writer := writeWorker{ + db: db, + bucket: bucket, + keys: keys, + minWriteBytes: minWriteBytes, + maxWriteBytes: maxWriteBytes, + minWriteInterval: minWriteInterval, + maxWriteInterval: maxWriteInterval, + + errCh: errCh, + stopCh: stopCh, + t: t, + } + g.Go(writer.run) + + t.Logf("Keep reading and writing transactions running for about %s.", testDuration) + select { + case <-time.After(testDuration): + case <-errCh: + } + + close(stopCh) + t.Log("Wait for all transactions to finish.") + if err := g.Wait(); err != nil { + t.Errorf("Received error: %v", err) + } + + saveDataIfFailed(t, db) + + // TODO (ahrtr): + // 1. intentionally inject a random failpoint. + // 2. validate the linearizablity: each reading transaction + // should read the value written by previous writing transaction. 
+} + +type readWorker struct { + db *btesting.DB + + bucket []byte + keys []string + + minReadInterval time.Duration + maxReadInterval time.Duration + errCh chan error + stopCh chan struct{} + + t *testing.T +} + +func (reader *readWorker) run() error { + for { + select { + case <-reader.stopCh: + reader.t.Log("Reading transaction finished.") + return nil + default: + } + + err := reader.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(reader.bucket) + + selectedKey := reader.keys[mrand.Intn(len(reader.keys))] + initialVal := b.Get([]byte(selectedKey)) + time.Sleep(randomDurationInRange(reader.minReadInterval, reader.maxReadInterval)) + val := b.Get([]byte(selectedKey)) + + if !reflect.DeepEqual(initialVal, val) { + return fmt.Errorf("read different values for the same key (%q), value1: %q, value2: %q", + selectedKey, formatBytes(initialVal), formatBytes(val)) + } + + return nil + }) + + if err != nil { + readErr := fmt.Errorf("[reader error]: %w", err) + reader.t.Log(readErr) + reader.errCh <- readErr + return readErr + } + } +} + +type writeWorker struct { + db *btesting.DB + + bucket []byte + keys []string + + minWriteBytes int + maxWriteBytes int + minWriteInterval time.Duration + maxWriteInterval time.Duration + errCh chan error + stopCh chan struct{} + + t *testing.T +} + +func (writer *writeWorker) run() error { + for { + select { + case <-writer.stopCh: + writer.t.Log("Writing transaction finished.") + return nil + default: + } + + err := writer.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(writer.bucket) + + selectedKey := writer.keys[mrand.Intn(len(writer.keys))] + + valueBytes := randomIntInRange(writer.minWriteBytes, writer.maxWriteBytes) + v := make([]byte, valueBytes) + if _, cErr := crand.Read(v); cErr != nil { + return cErr + } + + return b.Put([]byte(selectedKey), v) + }) + + if err != nil { + writeErr := fmt.Errorf("[writer error]: %w", err) + writer.t.Log(writeErr) + writer.errCh <- writeErr + return writeErr + } + + 
time.Sleep(randomDurationInRange(writer.minWriteInterval, writer.maxWriteInterval)) + } +} + +func randomDurationInRange(min, max time.Duration) time.Duration { + d := int64(max) - int64(min) + d = int64(mrand.Intn(int(d))) + int64(min) + return time.Duration(d) +} + +func randomIntInRange(min, max int) int { + return mrand.Intn(max-min) + min +} + +func formatBytes(val []byte) string { + if utf8.ValidString(string(val)) { + return string(val) + } + + return hex.EncodeToString(val) +} + +func saveDataIfFailed(t *testing.T, db *btesting.DB) { + if t.Failed() { + if err := db.Close(); err != nil { + t.Errorf("Failed to close db: %v", err) + } + backupPath := testResultsDirectory(t) + targetFile := filepath.Join(backupPath, "db.bak") + + t.Logf("Saving the DB file to %s", targetFile) + err := common.CopyFile(db.Path(), targetFile) + require.NoError(t, err) + t.Logf("DB file saved to %s", targetFile) + } +} + +func testResultsDirectory(t *testing.T) string { + resultsDirectory, ok := os.LookupEnv("RESULTS_DIR") + var err error + if !ok { + resultsDirectory, err = os.MkdirTemp("", "*.db") + require.NoError(t, err) + } + resultsDirectory, err = filepath.Abs(resultsDirectory) + require.NoError(t, err) + + path, err := filepath.Abs(filepath.Join(resultsDirectory, strings.ReplaceAll(t.Name(), "/", "_"))) + require.NoError(t, err) + + err = os.RemoveAll(path) + require.NoError(t, err) + + err = os.MkdirAll(path, 0700) + require.NoError(t, err) + + return path +} diff --git a/go.mod b/go.mod index 4506b5af8..a847074c5 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,7 @@ require ( github.com/spf13/cobra v1.6.1 github.com/stretchr/testify v1.8.2 go.etcd.io/gofail v0.1.0 + golang.org/x/sync v0.1.0 golang.org/x/sys v0.6.0 ) diff --git a/go.sum b/go.sum index 529183910..beb6a4a39 100644 --- a/go.sum +++ b/go.sum @@ -20,6 +20,8 @@ github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ github.com/stretchr/testify v1.8.2/go.mod 
h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From 09b5b50101cdbdccb9beea465fa5d01b89292a09 Mon Sep 17 00:00:00 2001 From: James Blair Date: Thu, 6 Apr 2023 22:43:23 +1200 Subject: [PATCH 048/439] Add verification of go imports when running make fmt. Signed-off-by: James Blair --- Makefile | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index e234574dc..1ff13c133 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,7 @@ BRANCH=`git rev-parse --abbrev-ref HEAD` COMMIT=`git rev-parse --short HEAD` GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" +GOFILES = $(shell find . -name \*.go) TESTFLAGS_RACE=-race=false ifdef ENABLE_RACE @@ -28,7 +29,11 @@ TEST_ENABLE_STRICT_MODE=${TESTFLAGS_ENABLE_STRICT_MODE} .PHONY: fmt fmt: - !(gofmt -l -s -d $(shell find . 
-name \*.go) | grep '[a-z]') + @echo "Verifying gofmt, failures can be fixed with ./scripts/fix.sh" + @!(gofmt -l -s -d ${GOFILES} | grep '[a-z]') + + @echo "Verifying goimports, failures can be fixed with ./scripts/fix.sh" + @!(go run golang.org/x/tools/cmd/goimports@latest -l -d ${GOFILES} | grep '[a-z]') .PHONY: lint lint: From 7a957f94b271666d839ab867ebb98eaa95a5d047 Mon Sep 17 00:00:00 2001 From: Josh Rickmar Date: Thu, 6 Apr 2023 13:26:30 +0000 Subject: [PATCH 049/439] Introduce errors package and restore API compatibility This moves the error variables that had been moved to the internal/common package during recent refactoring to a non-internal errors package, once again allowing consumers to test for particular error conditions. To preserve API compatibility with bbolt v1.3, these error variables are also redefined in the bbolt package, with deprecation notice to migrate to bbolt/errors. Signed-off-by: Josh Rickmar --- bolt_unix.go | 4 +- bolt_windows.go | 4 +- bucket.go | 51 ++++++------ bucket_test.go | 32 ++++---- cmd/bbolt/main.go | 11 +-- cursor.go | 7 +- cursor_test.go | 4 +- db.go | 19 +++-- db_test.go | 16 ++-- db_whitebox_test.go | 4 +- errors.go | 114 ++++++++++++++++++++++++++ {internal/common => errors}/errors.go | 4 +- internal/common/meta.go | 8 +- tx.go | 11 +-- tx_test.go | 26 +++--- 15 files changed, 219 insertions(+), 96 deletions(-) create mode 100644 errors.go rename {internal/common => errors}/errors.go (96%) diff --git a/bolt_unix.go b/bolt_unix.go index e901e5643..deb7e7ca2 100644 --- a/bolt_unix.go +++ b/bolt_unix.go @@ -11,7 +11,7 @@ import ( "golang.org/x/sys/unix" - "go.etcd.io/bbolt/internal/common" + "go.etcd.io/bbolt/errors" ) // flock acquires an advisory lock on a file descriptor. @@ -38,7 +38,7 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error { // If we timed out then return an error. 
if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return common.ErrTimeout + return errors.ErrTimeout } // Wait for a bit and try again. diff --git a/bolt_windows.go b/bolt_windows.go index 1981c64a3..020f1a123 100644 --- a/bolt_windows.go +++ b/bolt_windows.go @@ -9,7 +9,7 @@ import ( "golang.org/x/sys/windows" - "go.etcd.io/bbolt/internal/common" + "go.etcd.io/bbolt/errors" ) // fdatasync flushes written data to a file descriptor. @@ -44,7 +44,7 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error { // If we timed oumercit then return an error. if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return common.ErrTimeout + return errors.ErrTimeout } // Wait for a bit and try again. diff --git a/bucket.go b/bucket.go index 52f9790ec..b1c1b8586 100644 --- a/bucket.go +++ b/bucket.go @@ -5,6 +5,7 @@ import ( "fmt" "unsafe" + "go.etcd.io/bbolt/errors" "go.etcd.io/bbolt/internal/common" ) @@ -146,11 +147,11 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // The bucket instance is only valid for the lifetime of the transaction. func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { if b.tx.db == nil { - return nil, common.ErrTxClosed + return nil, errors.ErrTxClosed } else if !b.tx.writable { - return nil, common.ErrTxNotWritable + return nil, errors.ErrTxNotWritable } else if len(key) == 0 { - return nil, common.ErrBucketNameRequired + return nil, errors.ErrBucketNameRequired } // Move cursor to correct position. @@ -160,9 +161,9 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { // Return an error if there is an existing key. if bytes.Equal(key, k) { if (flags & common.BucketLeafFlag) != 0 { - return nil, common.ErrBucketExists + return nil, errors.ErrBucketExists } - return nil, common.ErrIncompatibleValue + return nil, errors.ErrIncompatibleValue } // Create empty, inline bucket. 
@@ -190,7 +191,7 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { // The bucket instance is only valid for the lifetime of the transaction. func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { child, err := b.CreateBucket(key) - if err == common.ErrBucketExists { + if err == errors.ErrBucketExists { return b.Bucket(key), nil } else if err != nil { return nil, err @@ -202,9 +203,9 @@ func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { // Returns an error if the bucket does not exist, or if the key represents a non-bucket value. func (b *Bucket) DeleteBucket(key []byte) error { if b.tx.db == nil { - return common.ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return common.ErrTxNotWritable + return errors.ErrTxNotWritable } // Move cursor to correct position. @@ -213,9 +214,9 @@ func (b *Bucket) DeleteBucket(key []byte) error { // Return an error if bucket doesn't exist or is not a bucket. if !bytes.Equal(key, k) { - return common.ErrBucketNotFound + return errors.ErrBucketNotFound } else if (flags & common.BucketLeafFlag) == 0 { - return common.ErrIncompatibleValue + return errors.ErrIncompatibleValue } // Recursively delete all child buckets. @@ -268,15 +269,15 @@ func (b *Bucket) Get(key []byte) []byte { // Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. 
func (b *Bucket) Put(key []byte, value []byte) error { if b.tx.db == nil { - return common.ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return common.ErrTxNotWritable + return errors.ErrTxNotWritable } else if len(key) == 0 { - return common.ErrKeyRequired + return errors.ErrKeyRequired } else if len(key) > MaxKeySize { - return common.ErrKeyTooLarge + return errors.ErrKeyTooLarge } else if int64(len(value)) > MaxValueSize { - return common.ErrValueTooLarge + return errors.ErrValueTooLarge } // Move cursor to correct position. @@ -285,7 +286,7 @@ func (b *Bucket) Put(key []byte, value []byte) error { // Return an error if there is an existing key with a bucket value. if bytes.Equal(key, k) && (flags&common.BucketLeafFlag) != 0 { - return common.ErrIncompatibleValue + return errors.ErrIncompatibleValue } // Insert into node. @@ -300,9 +301,9 @@ func (b *Bucket) Put(key []byte, value []byte) error { // Returns an error if the bucket was created from a read-only transaction. func (b *Bucket) Delete(key []byte) error { if b.tx.db == nil { - return common.ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return common.ErrTxNotWritable + return errors.ErrTxNotWritable } // Move cursor to correct position. @@ -316,7 +317,7 @@ func (b *Bucket) Delete(key []byte) error { // Return an error if there is already existing bucket value. if (flags & common.BucketLeafFlag) != 0 { - return common.ErrIncompatibleValue + return errors.ErrIncompatibleValue } // Delete the node if we have a matching key. @@ -333,9 +334,9 @@ func (b *Bucket) Sequence() uint64 { // SetSequence updates the sequence number for the bucket. 
func (b *Bucket) SetSequence(v uint64) error { if b.tx.db == nil { - return common.ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return common.ErrTxNotWritable + return errors.ErrTxNotWritable } // Materialize the root node if it hasn't been already so that the @@ -352,9 +353,9 @@ func (b *Bucket) SetSequence(v uint64) error { // NextSequence returns an autoincrementing integer for the bucket. func (b *Bucket) NextSequence() (uint64, error) { if b.tx.db == nil { - return 0, common.ErrTxClosed + return 0, errors.ErrTxClosed } else if !b.Writable() { - return 0, common.ErrTxNotWritable + return 0, errors.ErrTxNotWritable } // Materialize the root node if it hasn't been already so that the @@ -375,7 +376,7 @@ func (b *Bucket) NextSequence() (uint64, error) { // the bucket; this will result in undefined behavior. func (b *Bucket) ForEach(fn func(k, v []byte) error) error { if b.tx.db == nil { - return common.ErrTxClosed + return errors.ErrTxClosed } c := b.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { @@ -388,7 +389,7 @@ func (b *Bucket) ForEach(fn func(k, v []byte) error) error { func (b *Bucket) ForEachBucket(fn func(k []byte) error) error { if b.tx.db == nil { - return common.ErrTxClosed + return errors.ErrTxClosed } c := b.Cursor() for k, _, flags := c.first(); k != nil; k, _, flags = c.next() { diff --git a/bucket_test.go b/bucket_test.go index 33ff149b7..595631a68 100644 --- a/bucket_test.go +++ b/bucket_test.go @@ -17,8 +17,8 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" + berrors "go.etcd.io/bbolt/errors" "go.etcd.io/bbolt/internal/btesting" - "go.etcd.io/bbolt/internal/common" ) // Ensure that a bucket that gets a non-existent key returns nil. 
@@ -247,7 +247,7 @@ func TestBucket_Put_IncompatibleValue(t *testing.T) { if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil { t.Fatal(err) } - if err := b0.Put([]byte("foo"), []byte("bar")); err != common.ErrIncompatibleValue { + if err := b0.Put([]byte("foo"), []byte("bar")); err != berrors.ErrIncompatibleValue { t.Fatalf("unexpected error: %s", err) } return nil @@ -273,7 +273,7 @@ func TestBucket_Put_Closed(t *testing.T) { t.Fatal(err) } - if err := b.Put([]byte("foo"), []byte("bar")); err != common.ErrTxClosed { + if err := b.Put([]byte("foo"), []byte("bar")); err != berrors.ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } @@ -293,7 +293,7 @@ func TestBucket_Put_ReadOnly(t *testing.T) { if err := db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("widgets")) - if err := b.Put([]byte("foo"), []byte("bar")); err != common.ErrTxNotWritable { + if err := b.Put([]byte("foo"), []byte("bar")); err != berrors.ErrTxNotWritable { t.Fatalf("unexpected error: %s", err) } return nil @@ -561,7 +561,7 @@ func TestBucket_Delete_Bucket(t *testing.T) { if _, err := b.CreateBucket([]byte("foo")); err != nil { t.Fatal(err) } - if err := b.Delete([]byte("foo")); err != common.ErrIncompatibleValue { + if err := b.Delete([]byte("foo")); err != berrors.ErrIncompatibleValue { t.Fatalf("unexpected error: %s", err) } return nil @@ -584,7 +584,7 @@ func TestBucket_Delete_ReadOnly(t *testing.T) { } if err := db.View(func(tx *bolt.Tx) error { - if err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")); err != common.ErrTxNotWritable { + if err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")); err != berrors.ErrTxNotWritable { t.Fatalf("unexpected error: %s", err) } return nil @@ -610,7 +610,7 @@ func TestBucket_Delete_Closed(t *testing.T) { if err := tx.Rollback(); err != nil { t.Fatal(err) } - if err := b.Delete([]byte("foo")); err != common.ErrTxClosed { + if err := b.Delete([]byte("foo")); err != berrors.ErrTxClosed { 
t.Fatalf("unexpected error: %s", err) } } @@ -781,7 +781,7 @@ func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) { if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { t.Fatal(err) } - if _, err := widgets.CreateBucket([]byte("foo")); err != common.ErrIncompatibleValue { + if _, err := widgets.CreateBucket([]byte("foo")); err != berrors.ErrIncompatibleValue { t.Fatalf("unexpected error: %s", err) } return nil @@ -802,7 +802,7 @@ func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) { if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil { t.Fatal(err) } - if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != common.ErrIncompatibleValue { + if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != berrors.ErrIncompatibleValue { t.Fatalf("unexpected error: %s", err) } return nil @@ -944,7 +944,7 @@ func TestBucket_NextSequence_ReadOnly(t *testing.T) { if err := db.View(func(tx *bolt.Tx) error { _, err := tx.Bucket([]byte("widgets")).NextSequence() - if err != common.ErrTxNotWritable { + if err != berrors.ErrTxNotWritable { t.Fatalf("unexpected error: %s", err) } return nil @@ -967,7 +967,7 @@ func TestBucket_NextSequence_Closed(t *testing.T) { if err := tx.Rollback(); err != nil { t.Fatal(err) } - if _, err := b.NextSequence(); err != common.ErrTxClosed { + if _, err := b.NextSequence(); err != berrors.ErrTxClosed { t.Fatal(err) } } @@ -1159,7 +1159,7 @@ func TestBucket_ForEach_Closed(t *testing.T) { t.Fatal(err) } - if err := b.ForEach(func(k, v []byte) error { return nil }); err != common.ErrTxClosed { + if err := b.ForEach(func(k, v []byte) error { return nil }); err != berrors.ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } @@ -1173,10 +1173,10 @@ func TestBucket_Put_EmptyKey(t *testing.T) { if err != nil { t.Fatal(err) } - if err := b.Put([]byte(""), []byte("bar")); err != common.ErrKeyRequired { + if err := b.Put([]byte(""), []byte("bar")); err != berrors.ErrKeyRequired { 
t.Fatalf("unexpected error: %s", err) } - if err := b.Put(nil, []byte("bar")); err != common.ErrKeyRequired { + if err := b.Put(nil, []byte("bar")); err != berrors.ErrKeyRequired { t.Fatalf("unexpected error: %s", err) } return nil @@ -1193,7 +1193,7 @@ func TestBucket_Put_KeyTooLarge(t *testing.T) { if err != nil { t.Fatal(err) } - if err := b.Put(make([]byte, 32769), []byte("bar")); err != common.ErrKeyTooLarge { + if err := b.Put(make([]byte, 32769), []byte("bar")); err != berrors.ErrKeyTooLarge { t.Fatalf("unexpected error: %s", err) } return nil @@ -1216,7 +1216,7 @@ func TestBucket_Put_ValueTooLarge(t *testing.T) { if err != nil { t.Fatal(err) } - if err := b.Put([]byte("foo"), make([]byte, bolt.MaxValueSize+1)); err != common.ErrValueTooLarge { + if err := b.Put([]byte("foo"), make([]byte, bolt.MaxValueSize+1)); err != berrors.ErrValueTooLarge { t.Fatalf("unexpected error: %s", err) } return nil diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 413a49a09..31d0d62b0 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -21,6 +21,7 @@ import ( "unicode/utf8" bolt "go.etcd.io/bbolt" + berrors "go.etcd.io/bbolt/errors" "go.etcd.io/bbolt/internal/common" "go.etcd.io/bbolt/internal/guts_cli" ) @@ -941,12 +942,12 @@ func (cmd *keysCommand) Run(args ...string) error { // Find bucket. var lastbucket *bolt.Bucket = tx.Bucket([]byte(buckets[0])) if lastbucket == nil { - return common.ErrBucketNotFound + return berrors.ErrBucketNotFound } for _, bucket := range buckets[1:] { lastbucket = lastbucket.Bucket([]byte(bucket)) if lastbucket == nil { - return common.ErrBucketNotFound + return berrors.ErrBucketNotFound } } @@ -1017,7 +1018,7 @@ func (cmd *getCommand) Run(args ...string) error { } else if len(buckets) == 0 { return ErrBucketRequired } else if len(key) == 0 { - return common.ErrKeyRequired + return berrors.ErrKeyRequired } // Open database. @@ -1032,12 +1033,12 @@ func (cmd *getCommand) Run(args ...string) error { // Find bucket. 
var lastbucket *bolt.Bucket = tx.Bucket([]byte(buckets[0])) if lastbucket == nil { - return common.ErrBucketNotFound + return berrors.ErrBucketNotFound } for _, bucket := range buckets[1:] { lastbucket = lastbucket.Bucket([]byte(bucket)) if lastbucket == nil { - return common.ErrBucketNotFound + return berrors.ErrBucketNotFound } } diff --git a/cursor.go b/cursor.go index 14556e508..acd2216e2 100644 --- a/cursor.go +++ b/cursor.go @@ -5,6 +5,7 @@ import ( "fmt" "sort" + "go.etcd.io/bbolt/errors" "go.etcd.io/bbolt/internal/common" ) @@ -138,15 +139,15 @@ func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { // Delete fails if current key/value is a bucket or if the transaction is not writable. func (c *Cursor) Delete() error { if c.bucket.tx.db == nil { - return common.ErrTxClosed + return errors.ErrTxClosed } else if !c.bucket.Writable() { - return common.ErrTxNotWritable + return errors.ErrTxNotWritable } key, _, flags := c.keyValue() // Return an error if current value is a bucket. if (flags & common.BucketLeafFlag) != 0 { - return common.ErrIncompatibleValue + return errors.ErrIncompatibleValue } c.node().del(key) diff --git a/cursor_test.go b/cursor_test.go index 8fff82e95..20e661424 100644 --- a/cursor_test.go +++ b/cursor_test.go @@ -12,8 +12,8 @@ import ( "testing/quick" bolt "go.etcd.io/bbolt" + "go.etcd.io/bbolt/errors" "go.etcd.io/bbolt/internal/btesting" - "go.etcd.io/bbolt/internal/common" ) // Ensure that a cursor can return a reference to the bucket that created it. 
@@ -140,7 +140,7 @@ func TestCursor_Delete(t *testing.T) { } c.Seek([]byte("sub")) - if err := c.Delete(); err != common.ErrIncompatibleValue { + if err := c.Delete(); err != errors.ErrIncompatibleValue { t.Fatalf("unexpected error: %s", err) } diff --git a/db.go b/db.go index 0e364e198..d17322df4 100644 --- a/db.go +++ b/db.go @@ -11,6 +11,7 @@ import ( "time" "unsafe" + berrors "go.etcd.io/bbolt/errors" "go.etcd.io/bbolt/internal/common" ) @@ -233,7 +234,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { db.pageSize = pgSize } else { _ = db.close() - return nil, common.ErrInvalid + return nil, berrors.ErrInvalid } } @@ -311,7 +312,7 @@ func (db *DB) getPageSize() (int, error) { return db.pageSize, nil } - return 0, common.ErrInvalid + return 0, berrors.ErrInvalid } // getPageSizeFromFirstMeta reads the pageSize from the first meta page @@ -324,7 +325,7 @@ func (db *DB) getPageSizeFromFirstMeta() (int, bool, error) { return int(m.PageSize()), metaCanRead, nil } } - return 0, metaCanRead, common.ErrInvalid + return 0, metaCanRead, berrors.ErrInvalid } // getPageSizeFromSecondMeta reads the pageSize from the second meta page @@ -362,7 +363,7 @@ func (db *DB) getPageSizeFromSecondMeta() (int, bool, error) { } } - return 0, metaCanRead, common.ErrInvalid + return 0, metaCanRead, berrors.ErrInvalid } // loadFreelist reads the freelist if it is synced, or reconstructs it @@ -697,14 +698,14 @@ func (db *DB) beginTx() (*Tx, error) { if !db.opened { db.mmaplock.RUnlock() db.metalock.Unlock() - return nil, common.ErrDatabaseNotOpen + return nil, berrors.ErrDatabaseNotOpen } // Exit if the database is not correctly mapped. if db.data == nil { db.mmaplock.RUnlock() db.metalock.Unlock() - return nil, common.ErrInvalidMapping + return nil, berrors.ErrInvalidMapping } // Create a transaction associated with the database. 
@@ -730,7 +731,7 @@ func (db *DB) beginTx() (*Tx, error) { func (db *DB) beginRWTx() (*Tx, error) { // If the database was opened with Options.ReadOnly, return an error. if db.readOnly { - return nil, common.ErrDatabaseReadOnly + return nil, berrors.ErrDatabaseReadOnly } // Obtain writer lock. This is released by the transaction when it closes. @@ -745,13 +746,13 @@ func (db *DB) beginRWTx() (*Tx, error) { // Exit if the database is not open yet. if !db.opened { db.rwlock.Unlock() - return nil, common.ErrDatabaseNotOpen + return nil, berrors.ErrDatabaseNotOpen } // Exit if the database is not correctly mapped. if db.data == nil { db.rwlock.Unlock() - return nil, common.ErrInvalidMapping + return nil, berrors.ErrInvalidMapping } // Create a transaction associated with the database. diff --git a/db_test.go b/db_test.go index a92782cf6..153de475e 100644 --- a/db_test.go +++ b/db_test.go @@ -20,8 +20,8 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" + berrors "go.etcd.io/bbolt/errors" "go.etcd.io/bbolt/internal/btesting" - "go.etcd.io/bbolt/internal/common" ) // pageSize is the size of one page in the data file. @@ -137,7 +137,7 @@ func TestOpen_ErrInvalid(t *testing.T) { t.Fatal(err) } - if _, err := bolt.Open(path, 0666, nil); err != common.ErrInvalid { + if _, err := bolt.Open(path, 0666, nil); err != berrors.ErrInvalid { t.Fatalf("unexpected error: %s", err) } } @@ -173,7 +173,7 @@ func TestOpen_ErrVersionMismatch(t *testing.T) { } // Reopen data file. - if _, err := bolt.Open(path, 0666, nil); err != common.ErrVersionMismatch { + if _, err := bolt.Open(path, 0666, nil); err != berrors.ErrVersionMismatch { t.Fatalf("unexpected error: %s", err) } } @@ -209,7 +209,7 @@ func TestOpen_ErrChecksum(t *testing.T) { } // Reopen data file. 
- if _, err := bolt.Open(path, 0666, nil); err != common.ErrChecksum { + if _, err := bolt.Open(path, 0666, nil); err != berrors.ErrChecksum { t.Fatalf("unexpected error: %s", err) } } @@ -553,7 +553,7 @@ func TestDB_Open_ReadOnly(t *testing.T) { } // Can't launch read-write transaction. - if _, err := readOnlyDB.Begin(true); err != common.ErrDatabaseReadOnly { + if _, err := readOnlyDB.Begin(true); err != berrors.ErrDatabaseReadOnly { t.Fatalf("unexpected error: %s", err) } @@ -642,7 +642,7 @@ func TestOpen_RecoverFreeList(t *testing.T) { // Ensure that a database cannot open a transaction when it's not open. func TestDB_Begin_ErrDatabaseNotOpen(t *testing.T) { var db bolt.DB - if _, err := db.Begin(false); err != common.ErrDatabaseNotOpen { + if _, err := db.Begin(false); err != berrors.ErrDatabaseNotOpen { t.Fatalf("unexpected error: %s", err) } } @@ -728,7 +728,7 @@ func TestDB_Concurrent_WriteTo(t *testing.T) { // Ensure that opening a transaction while the DB is closed returns an error. 
func TestDB_BeginRW_Closed(t *testing.T) { var db bolt.DB - if _, err := db.Begin(true); err != common.ErrDatabaseNotOpen { + if _, err := db.Begin(true); err != berrors.ErrDatabaseNotOpen { t.Fatalf("unexpected error: %s", err) } } @@ -832,7 +832,7 @@ func TestDB_Update_Closed(t *testing.T) { t.Fatal(err) } return nil - }); err != common.ErrDatabaseNotOpen { + }); err != berrors.ErrDatabaseNotOpen { t.Fatalf("unexpected error: %s", err) } } diff --git a/db_whitebox_test.go b/db_whitebox_test.go index 8b195f723..130c3e349 100644 --- a/db_whitebox_test.go +++ b/db_whitebox_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.etcd.io/bbolt/internal/common" + "go.etcd.io/bbolt/errors" ) func TestOpenWithPreLoadFreelist(t *testing.T) { @@ -78,7 +78,7 @@ func TestMethodPage(t *testing.T) { name: "readonly mode without preloading free pages", readonly: true, preLoadFreePage: false, - expectedError: common.ErrFreePagesNotLoaded, + expectedError: errors.ErrFreePagesNotLoaded, }, } diff --git a/errors.go b/errors.go new file mode 100644 index 000000000..28ca48d84 --- /dev/null +++ b/errors.go @@ -0,0 +1,114 @@ +package bbolt + +import "go.etcd.io/bbolt/errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrDatabaseNotOpen = errors.ErrDatabaseNotOpen + + // ErrDatabaseOpen is returned when opening a database that is + // already open. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrDatabaseOpen = errors.ErrDatabaseOpen + + // ErrInvalid is returned when both meta pages on a database are invalid. + // This typically occurs when a file is not a bolt database. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. 
+ ErrInvalid = errors.ErrInvalid + + // ErrInvalidMapping is returned when the database file fails to get mapped. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrInvalidMapping = errors.ErrInvalidMapping + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrVersionMismatch = errors.ErrVersionMismatch + + // ErrChecksum is returned when either meta page checksum does not match. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrChecksum = errors.ErrChecksum + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrTimeout = errors.ErrTimeout +) + +// These errors can occur when beginning or committing a Tx. +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrTxNotWritable = errors.ErrTxNotWritable + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrTxClosed = errors.ErrTxClosed + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrDatabaseReadOnly = errors.ErrDatabaseReadOnly + + // ErrFreePagesNotLoaded is returned when a readonly transaction without + // preloading the free pages is trying to access the free pages. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. 
+ ErrFreePagesNotLoaded = errors.ErrFreePagesNotLoaded +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrBucketNotFound = errors.ErrBucketNotFound + + // ErrBucketExists is returned when creating a bucket that already exists. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrBucketExists = errors.ErrBucketExists + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrBucketNameRequired = errors.ErrBucketNameRequired + + // ErrKeyRequired is returned when inserting a zero-length key. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrKeyRequired = errors.ErrKeyRequired + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrKeyTooLarge = errors.ErrKeyTooLarge + + // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrValueTooLarge = errors.ErrValueTooLarge + + // ErrIncompatibleValue is returned when trying create or delete a bucket + // on an existing non-bucket key or when trying to create or delete a + // non-bucket key on an existing bucket key. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. 
+ ErrIncompatibleValue = errors.ErrIncompatibleValue +) diff --git a/internal/common/errors.go b/errors/errors.go similarity index 96% rename from internal/common/errors.go rename to errors/errors.go index fd1d3541c..88fdc31ac 100644 --- a/internal/common/errors.go +++ b/errors/errors.go @@ -1,4 +1,6 @@ -package common +// Package errors defines the error variables that may be returned +// during bbolt operations. +package errors import "errors" diff --git a/internal/common/meta.go b/internal/common/meta.go index 7769ccc61..79727c8cb 100644 --- a/internal/common/meta.go +++ b/internal/common/meta.go @@ -5,6 +5,8 @@ import ( "hash/fnv" "io" "unsafe" + + "go.etcd.io/bbolt/errors" ) type Meta struct { @@ -22,11 +24,11 @@ type Meta struct { // Validate checks the marker bytes and version of the meta page to ensure it matches this binary. func (m *Meta) Validate() error { if m.magic != Magic { - return ErrInvalid + return errors.ErrInvalid } else if m.version != Version { - return ErrVersionMismatch + return errors.ErrVersionMismatch } else if m.checksum != m.Sum64() { - return ErrChecksum + return errors.ErrChecksum } return nil } diff --git a/tx.go b/tx.go index 67362b66d..340dda808 100644 --- a/tx.go +++ b/tx.go @@ -10,6 +10,7 @@ import ( "time" "unsafe" + "go.etcd.io/bbolt/errors" "go.etcd.io/bbolt/internal/common" ) @@ -141,9 +142,9 @@ func (tx *Tx) OnCommit(fn func()) { func (tx *Tx) Commit() error { common.Assert(!tx.managed, "managed tx commit not allowed") if tx.db == nil { - return common.ErrTxClosed + return errors.ErrTxClosed } else if !tx.writable { - return common.ErrTxNotWritable + return errors.ErrTxNotWritable } // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. 
@@ -253,7 +254,7 @@ func (tx *Tx) commitFreelist() error { func (tx *Tx) Rollback() error { common.Assert(!tx.managed, "managed tx rollback not allowed") if tx.db == nil { - return common.ErrTxClosed + return errors.ErrTxClosed } tx.nonPhysicalRollback() return nil @@ -560,13 +561,13 @@ func (tx *Tx) forEachPageInternal(pgidstack []common.Pgid, fn func(*common.Page, // This is only safe for concurrent use when used by a writable transaction. func (tx *Tx) Page(id int) (*common.PageInfo, error) { if tx.db == nil { - return nil, common.ErrTxClosed + return nil, errors.ErrTxClosed } else if common.Pgid(id) >= tx.meta.Pgid() { return nil, nil } if tx.db.freelist == nil { - return nil, common.ErrFreePagesNotLoaded + return nil, errors.ErrFreePagesNotLoaded } // Build the page info. diff --git a/tx_test.go b/tx_test.go index 44cbbf13c..ceda3446a 100644 --- a/tx_test.go +++ b/tx_test.go @@ -14,8 +14,8 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" + berrors "go.etcd.io/bbolt/errors" "go.etcd.io/bbolt/internal/btesting" - "go.etcd.io/bbolt/internal/common" ) // TestTx_Check_ReadOnly tests consistency checking on a ReadOnly database. 
@@ -85,7 +85,7 @@ func TestTx_Commit_ErrTxClosed(t *testing.T) { t.Fatal(err) } - if err := tx.Commit(); err != common.ErrTxClosed { + if err := tx.Commit(); err != berrors.ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } @@ -102,7 +102,7 @@ func TestTx_Rollback_ErrTxClosed(t *testing.T) { if err := tx.Rollback(); err != nil { t.Fatal(err) } - if err := tx.Rollback(); err != common.ErrTxClosed { + if err := tx.Rollback(); err != berrors.ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } @@ -114,7 +114,7 @@ func TestTx_Commit_ErrTxNotWritable(t *testing.T) { if err != nil { t.Fatal(err) } - if err := tx.Commit(); err != common.ErrTxNotWritable { + if err := tx.Commit(); err != berrors.ErrTxNotWritable { t.Fatal(err) } // Close the view transaction @@ -166,7 +166,7 @@ func TestTx_CreateBucket_ErrTxNotWritable(t *testing.T) { db := btesting.MustCreateDB(t) if err := db.View(func(tx *bolt.Tx) error { _, err := tx.CreateBucket([]byte("foo")) - if err != common.ErrTxNotWritable { + if err != berrors.ErrTxNotWritable { t.Fatalf("unexpected error: %s", err) } return nil @@ -186,7 +186,7 @@ func TestTx_CreateBucket_ErrTxClosed(t *testing.T) { t.Fatal(err) } - if _, err := tx.CreateBucket([]byte("foo")); err != common.ErrTxClosed { + if _, err := tx.CreateBucket([]byte("foo")); err != berrors.ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } @@ -294,11 +294,11 @@ func TestTx_CreateBucketIfNotExists(t *testing.T) { func TestTx_CreateBucketIfNotExists_ErrBucketNameRequired(t *testing.T) { db := btesting.MustCreateDB(t) if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucketIfNotExists([]byte{}); err != common.ErrBucketNameRequired { + if _, err := tx.CreateBucketIfNotExists([]byte{}); err != berrors.ErrBucketNameRequired { t.Fatalf("unexpected error: %s", err) } - if _, err := tx.CreateBucketIfNotExists(nil); err != common.ErrBucketNameRequired { + if _, err := tx.CreateBucketIfNotExists(nil); err != berrors.ErrBucketNameRequired { 
t.Fatalf("unexpected error: %s", err) } @@ -324,7 +324,7 @@ func TestTx_CreateBucket_ErrBucketExists(t *testing.T) { // Create the same bucket again. if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket([]byte("widgets")); err != common.ErrBucketExists { + if _, err := tx.CreateBucket([]byte("widgets")); err != berrors.ErrBucketExists { t.Fatalf("unexpected error: %s", err) } return nil @@ -337,7 +337,7 @@ func TestTx_CreateBucket_ErrBucketExists(t *testing.T) { func TestTx_CreateBucket_ErrBucketNameRequired(t *testing.T) { db := btesting.MustCreateDB(t) if err := db.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucket(nil); err != common.ErrBucketNameRequired { + if _, err := tx.CreateBucket(nil); err != berrors.ErrBucketNameRequired { t.Fatalf("unexpected error: %s", err) } return nil @@ -402,7 +402,7 @@ func TestTx_DeleteBucket_ErrTxClosed(t *testing.T) { if err := tx.Commit(); err != nil { t.Fatal(err) } - if err := tx.DeleteBucket([]byte("foo")); err != common.ErrTxClosed { + if err := tx.DeleteBucket([]byte("foo")); err != berrors.ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } @@ -411,7 +411,7 @@ func TestTx_DeleteBucket_ErrTxClosed(t *testing.T) { func TestTx_DeleteBucket_ReadOnly(t *testing.T) { db := btesting.MustCreateDB(t) if err := db.View(func(tx *bolt.Tx) error { - if err := tx.DeleteBucket([]byte("foo")); err != common.ErrTxNotWritable { + if err := tx.DeleteBucket([]byte("foo")); err != berrors.ErrTxNotWritable { t.Fatalf("unexpected error: %s", err) } return nil @@ -424,7 +424,7 @@ func TestTx_DeleteBucket_ReadOnly(t *testing.T) { func TestTx_DeleteBucket_NotFound(t *testing.T) { db := btesting.MustCreateDB(t) if err := db.Update(func(tx *bolt.Tx) error { - if err := tx.DeleteBucket([]byte("widgets")); err != common.ErrBucketNotFound { + if err := tx.DeleteBucket([]byte("widgets")); err != berrors.ErrBucketNotFound { t.Fatalf("unexpected error: %s", err) } return nil From 
ceb6c304a64da3bbb6bda2a9bc776516147bf252 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Apr 2023 15:03:30 +0000 Subject: [PATCH 050/439] Bump github.com/spf13/cobra from 1.6.1 to 1.7.0 Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.6.1 to 1.7.0. - [Release notes](https://github.com/spf13/cobra/releases) - [Commits](https://github.com/spf13/cobra/compare/v1.6.1...v1.7.0) --- updated-dependencies: - dependency-name: github.com/spf13/cobra dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index a847074c5..1c4444575 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module go.etcd.io/bbolt go 1.19 require ( - github.com/spf13/cobra v1.6.1 + github.com/spf13/cobra v1.7.0 github.com/stretchr/testify v1.8.2 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.1.0 @@ -12,7 +12,7 @@ require ( require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/spf13/pflag v1.0.5 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index beb6a4a39..5ead48299 100644 --- a/go.sum +++ b/go.sum @@ -2,13 +2,13 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46t github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= 
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= From f4bb2bb3557f79b3e624fd597a0cdd4905f220d4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Apr 2023 15:03:31 +0000 Subject: [PATCH 051/439] Bump golang.org/x/sys from 0.6.0 to 0.7.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.6.0 to 0.7.0. - [Release notes](https://github.com/golang/sys/releases) - [Commits](https://github.com/golang/sys/compare/v0.6.0...v0.7.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1c4444575..db1a8d2f1 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/stretchr/testify v1.8.2 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.1.0 - golang.org/x/sys v0.6.0 + golang.org/x/sys v0.7.0 ) require ( diff --git a/go.sum b/go.sum index 5ead48299..889ac46c7 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From 171b7aa6ea8b8e38acc75fcebb2dfdf1d4fbaea2 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 6 Apr 2023 14:42:01 +0800 Subject: [PATCH 052/439] add a MAINTAINERS to list all members of all roles Signed-off-by: Benjamin Wang --- MAINTAINERS | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 MAINTAINERS diff --git a/MAINTAINERS b/MAINTAINERS new file mode 100644 index 000000000..56342b5b7 --- /dev/null +++ b/MAINTAINERS @@ -0,0 +1,19 @@ +# The official list of maintainers and reviewers for the project maintenance. 
+# +# Refer to the GOVERNANCE.md in etcd repository for description of the roles. +# +# Names should be added to this file like so: +# Individual's name (@GITHUB_HANDLE) pkg:* +# Individual's name (@GITHUB_HANDLE) pkg:* +# +# Please keep the list sorted. + +# MAINTAINERS +Benjamin Wang (ahrtr@) #owner/#domain-expert +Hitoshi Mitake (@mitake) +Marek Siarkowicz (@serathius) +Piotr Tabor (@ptabor) #owner/#domain-expert +Sahdev Zala (@spzala) + +# REVIEWERS + From feccc712f178d16134c14c955ff04d9e2931a68c Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 11 Apr 2023 18:58:53 +0800 Subject: [PATCH 053/439] Export both FreelistArrayType and FreelistMapType so as to keep API compatibility Signed-off-by: Benjamin Wang --- db.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/db.go b/db.go index d17322df4..80be5f585 100644 --- a/db.go +++ b/db.go @@ -18,6 +18,15 @@ import ( // The time elapsed between consecutive file locking attempts. const flockRetryTimeout = 50 * time.Millisecond +// Export both FreelistArrayType and FreelistMapType so as to keep API compatibility. +// TODO(ahrtr): eventually we should (step by step) +// 1. default to FreelistMapType; +// 2. remove the FreelistArrayType and do not export FreelistMapType; +const ( + FreelistArrayType = common.FreelistArrayType + FreelistMapType = common.FreelistMapType +) + // DB represents a collection of buckets persisted to a file on disk. // All data access is performed through transactions which can be obtained through the DB. // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. 
From 6adc0c47a67c8e5a35e281ccd3c9b29ef6757484 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 12 Apr 2023 07:32:45 +0800 Subject: [PATCH 054/439] move FreelistType from internal/common to top level package bbolt Signed-off-by: Benjamin Wang --- db.go | 21 +++++++++++++-------- freelist.go | 6 +++--- freelist_test.go | 16 ++++++++-------- internal/btesting/btesting.go | 7 +++---- internal/common/types.go | 10 ---------- 5 files changed, 27 insertions(+), 33 deletions(-) diff --git a/db.go b/db.go index 80be5f585..514e7e903 100644 --- a/db.go +++ b/db.go @@ -18,13 +18,18 @@ import ( // The time elapsed between consecutive file locking attempts. const flockRetryTimeout = 50 * time.Millisecond -// Export both FreelistArrayType and FreelistMapType so as to keep API compatibility. +// FreelistType is the type of the freelist backend +type FreelistType string + // TODO(ahrtr): eventually we should (step by step) -// 1. default to FreelistMapType; -// 2. remove the FreelistArrayType and do not export FreelistMapType; +// 1. default to `FreelistMapType`; +// 2. remove the `FreelistArrayType`, do not export `FreelistMapType` +// and remove field `FreelistType' from both `DB` and `Options`; const ( - FreelistArrayType = common.FreelistArrayType - FreelistMapType = common.FreelistMapType + // FreelistArrayType indicates backend freelist type is array + FreelistArrayType = FreelistType("array") + // FreelistMapType indicates backend freelist type is hashmap + FreelistMapType = FreelistType("hashmap") ) // DB represents a collection of buckets persisted to a file on disk. @@ -59,7 +64,7 @@ type DB struct { // The alternative one is using hashmap, it is faster in almost all circumstances // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. // The default type is array - FreelistType common.FreelistType + FreelistType FreelistType // When true, skips the truncate call when growing the database. 
// Setting this to true is only safe on non-ext3/ext4 systems. @@ -1211,7 +1216,7 @@ type Options struct { // The alternative one is using hashmap, it is faster in almost all circumstances // but it doesn't guarantee that it offers the smallest page id available. In normal case it is safe. // The default type is array - FreelistType common.FreelistType + FreelistType FreelistType // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to // grab a shared lock (UNIX). @@ -1253,7 +1258,7 @@ type Options struct { var DefaultOptions = &Options{ Timeout: 0, NoGrowSync: false, - FreelistType: common.FreelistArrayType, + FreelistType: FreelistArrayType, } // Stats represents statistics about the database. diff --git a/freelist.go b/freelist.go index 24dfc3e48..81cb1fd01 100644 --- a/freelist.go +++ b/freelist.go @@ -22,7 +22,7 @@ type pidSet map[common.Pgid]struct{} // freelist represents a list of all pages that are available for allocation. // It also tracks pages that have been freed but are still in use by open transactions. type freelist struct { - freelistType common.FreelistType // freelist type + freelistType FreelistType // freelist type ids []common.Pgid // all free and available free page ids. allocs map[common.Pgid]common.Txid // mapping of Txid that allocated a pgid. pending map[common.Txid]*txPending // mapping of soon-to-be free page ids by tx. @@ -38,7 +38,7 @@ type freelist struct { } // newFreelist returns an empty, initialized freelist. 
-func newFreelist(freelistType common.FreelistType) *freelist { +func newFreelist(freelistType FreelistType) *freelist { f := &freelist{ freelistType: freelistType, allocs: make(map[common.Pgid]common.Txid), @@ -49,7 +49,7 @@ func newFreelist(freelistType common.FreelistType) *freelist { backwardMap: make(map[common.Pgid]uint64), } - if freelistType == common.FreelistMapType { + if freelistType == FreelistMapType { f.allocate = f.hashmapAllocate f.free_count = f.hashmapFreeCount f.mergeSpans = f.hashmapMergeSpans diff --git a/freelist_test.go b/freelist_test.go index ab848dd1d..0989dadcd 100644 --- a/freelist_test.go +++ b/freelist_test.go @@ -181,7 +181,7 @@ func TestFreelist_releaseRange(t *testing.T) { func TestFreelistHashmap_allocate(t *testing.T) { f := newTestFreelist() - if f.freelistType != common.FreelistMapType { + if f.freelistType != FreelistMapType { t.Skip() } @@ -211,7 +211,7 @@ func TestFreelistHashmap_allocate(t *testing.T) { // Ensure that a freelist can find contiguous blocks of pages. 
func TestFreelistArray_allocate(t *testing.T) { f := newTestFreelist() - if f.freelistType != common.FreelistArrayType { + if f.freelistType != FreelistArrayType { t.Skip() } ids := []common.Pgid{3, 4, 5, 6, 7, 9, 12, 13, 18} @@ -403,7 +403,7 @@ func Test_freelist_mergeWithExist(t *testing.T) { } for _, tt := range tests { f := newTestFreelist() - if f.freelistType == common.FreelistArrayType { + if f.freelistType == FreelistArrayType { t.Skip() } f.readIDs(tt.ids) @@ -427,9 +427,9 @@ func Test_freelist_mergeWithExist(t *testing.T) { // newTestFreelist get the freelist type from env and initial the freelist func newTestFreelist() *freelist { - freelistType := common.FreelistArrayType - if env := os.Getenv(TestFreelistType); env == string(common.FreelistMapType) { - freelistType = common.FreelistMapType + freelistType := FreelistArrayType + if env := os.Getenv(TestFreelistType); env == string(FreelistMapType) { + freelistType = FreelistMapType } return newFreelist(freelistType) @@ -437,7 +437,7 @@ func newTestFreelist() *freelist { func Test_freelist_hashmapGetFreePageIDs(t *testing.T) { f := newTestFreelist() - if f.freelistType == common.FreelistArrayType { + if f.freelistType == FreelistArrayType { t.Skip() } @@ -461,7 +461,7 @@ func Test_freelist_hashmapGetFreePageIDs(t *testing.T) { func Benchmark_freelist_hashmapGetFreePageIDs(b *testing.B) { f := newTestFreelist() - if f.freelistType == common.FreelistArrayType { + if f.freelistType == FreelistArrayType { b.Skip() } diff --git a/internal/btesting/btesting.go b/internal/btesting/btesting.go index ffa0d8b3a..4477f5f8a 100644 --- a/internal/btesting/btesting.go +++ b/internal/btesting/btesting.go @@ -13,7 +13,6 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" - "go.etcd.io/bbolt/internal/common" ) var statsFlag = flag.Bool("stats", false, "show performance stats") @@ -50,9 +49,9 @@ func MustOpenDBWithOption(t testing.TB, f string, o *bolt.Options) *DB { o = bolt.DefaultOptions } - 
freelistType := common.FreelistArrayType - if env := os.Getenv(TestFreelistType); env == string(common.FreelistMapType) { - freelistType = common.FreelistMapType + freelistType := bolt.FreelistArrayType + if env := os.Getenv(TestFreelistType); env == string(bolt.FreelistMapType) { + freelistType = bolt.FreelistMapType } o.FreelistType = freelistType diff --git a/internal/common/types.go b/internal/common/types.go index e970e86a2..04b920302 100644 --- a/internal/common/types.go +++ b/internal/common/types.go @@ -36,15 +36,5 @@ const ( // DefaultPageSize is the default page size for db which is set to the OS page size. var DefaultPageSize = os.Getpagesize() -// FreelistType is the type of the freelist backend -type FreelistType string - -const ( - // FreelistArrayType indicates backend freelist type is array - FreelistArrayType = FreelistType("array") - // FreelistMapType indicates backend freelist type is hashmap - FreelistMapType = FreelistType("hashmap") -) - // Txid represents the internal transaction identifier. type Txid uint64 From d142709a732cdb254300a92b1f057c5b95b6781e Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 7 Apr 2023 16:07:55 +0800 Subject: [PATCH 055/439] test: improve TestConcurrentReadAndWrite to verify read/write linerizablity Signed-off-by: Benjamin Wang --- concurrent_test.go | 264 ++++++++++++++++++++++++++++++++++++--------- 1 file changed, 214 insertions(+), 50 deletions(-) diff --git a/concurrent_test.go b/concurrent_test.go index c28ef47f2..da233fa05 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -3,12 +3,15 @@ package bbolt_test import ( crand "crypto/rand" "encoding/hex" + "encoding/json" "fmt" mrand "math/rand" "os" "path/filepath" "reflect" + "sort" "strings" + "sync" "testing" "time" "unicode/utf8" @@ -21,6 +24,13 @@ import ( "go.etcd.io/bbolt/internal/common" ) +/* +TestConcurrentReadAndWrite verifies: + 1. Repeatable read: a read transaction should always see the same data + view during its lifecycle; + 2. 
Any data written by a writing transaction should be visible to any + following reading transactions (with txid >= previous writing txid). +*/ func TestConcurrentReadAndWrite(t *testing.T) { bucket := []byte("data") keys := []string{"key0", "key1", "key2", "key3", "key4", "key5", "key6", "key7", "key8", "key9"} @@ -116,24 +126,19 @@ func concurrentReadAndWrite(t *testing.T, stopCh := make(chan struct{}, 1) errCh := make(chan error, readerCount+1) + recordingCh := make(chan historyRecord, readerCount+1) - // start readonly transactions - g := new(errgroup.Group) - for i := 0; i < readerCount; i++ { - reader := &readWorker{ - db: db, - bucket: bucket, - keys: keys, - minReadInterval: minReadInterval, - maxReadInterval: maxReadInterval, - errCh: errCh, - stopCh: stopCh, - t: t, - } - g.Go(reader.run) - } + // collect history records + var records historyRecords + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + records = collectHistoryRecords(recordingCh) + }() // start write transaction + g := new(errgroup.Group) writer := writeWorker{ db: db, bucket: bucket, @@ -143,12 +148,30 @@ func concurrentReadAndWrite(t *testing.T, minWriteInterval: minWriteInterval, maxWriteInterval: maxWriteInterval, - errCh: errCh, - stopCh: stopCh, - t: t, + recordingCh: recordingCh, + errCh: errCh, + stopCh: stopCh, + t: t, } g.Go(writer.run) + // start readonly transactions + for i := 0; i < readerCount; i++ { + reader := &readWorker{ + db: db, + bucket: bucket, + keys: keys, + minReadInterval: minReadInterval, + maxReadInterval: maxReadInterval, + + recordingCh: recordingCh, + errCh: errCh, + stopCh: stopCh, + t: t, + } + g.Go(reader.run) + } + t.Logf("Keep reading and writing transactions running for about %s.", testDuration) select { case <-time.After(testDuration): @@ -161,12 +184,19 @@ func concurrentReadAndWrite(t *testing.T, t.Errorf("Received error: %v", err) } - saveDataIfFailed(t, db) + t.Log("Waiting for the history collector to finish.") + close(recordingCh) 
+ wg.Wait() + t.Log("Analyzing the history records.") + if err := analyzeHistoryRecords(records); err != nil { + t.Errorf("The history records are not linearizable:\n %v", err) + } + + saveDataIfFailed(t, db, records) // TODO (ahrtr): // 1. intentionally inject a random failpoint. - // 2. validate the linearizablity: each reading transaction - // should read the value written by previous writing transaction. + // 2. check db consistency at the end. } type readWorker struct { @@ -177,27 +207,29 @@ type readWorker struct { minReadInterval time.Duration maxReadInterval time.Duration - errCh chan error - stopCh chan struct{} + + recordingCh chan historyRecord + errCh chan error + stopCh chan struct{} t *testing.T } -func (reader *readWorker) run() error { +func (r *readWorker) run() error { for { select { - case <-reader.stopCh: - reader.t.Log("Reading transaction finished.") + case <-r.stopCh: + r.t.Log("Reading transaction finished.") return nil default: } - err := reader.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket(reader.bucket) + err := r.db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(r.bucket) - selectedKey := reader.keys[mrand.Intn(len(reader.keys))] + selectedKey := r.keys[mrand.Intn(len(r.keys))] initialVal := b.Get([]byte(selectedKey)) - time.Sleep(randomDurationInRange(reader.minReadInterval, reader.maxReadInterval)) + time.Sleep(randomDurationInRange(r.minReadInterval, r.maxReadInterval)) val := b.Get([]byte(selectedKey)) if !reflect.DeepEqual(initialVal, val) { @@ -205,13 +237,23 @@ func (reader *readWorker) run() error { selectedKey, formatBytes(initialVal), formatBytes(val)) } + clonedVal := make([]byte, len(val)) + copy(clonedVal, val) + + r.recordingCh <- historyRecord{ + OperationType: Read, + Key: selectedKey, + Value: clonedVal, + Txid: tx.ID(), + } + return nil }) if err != nil { readErr := fmt.Errorf("[reader error]: %w", err) - reader.t.Log(readErr) - reader.errCh <- readErr + r.t.Log(readErr) + r.errCh <- readErr return readErr } } @@ 
-227,43 +269,55 @@ type writeWorker struct { maxWriteBytes int minWriteInterval time.Duration maxWriteInterval time.Duration - errCh chan error - stopCh chan struct{} + + recordingCh chan historyRecord + errCh chan error + stopCh chan struct{} t *testing.T } -func (writer *writeWorker) run() error { +func (w *writeWorker) run() error { for { select { - case <-writer.stopCh: - writer.t.Log("Writing transaction finished.") + case <-w.stopCh: + w.t.Log("Writing transaction finished.") return nil default: } - err := writer.db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket(writer.bucket) + err := w.db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(w.bucket) - selectedKey := writer.keys[mrand.Intn(len(writer.keys))] + selectedKey := w.keys[mrand.Intn(len(w.keys))] - valueBytes := randomIntInRange(writer.minWriteBytes, writer.maxWriteBytes) + valueBytes := randomIntInRange(w.minWriteBytes, w.maxWriteBytes) v := make([]byte, valueBytes) if _, cErr := crand.Read(v); cErr != nil { return cErr } - return b.Put([]byte(selectedKey), v) + putErr := b.Put([]byte(selectedKey), v) + if putErr == nil { + w.recordingCh <- historyRecord{ + OperationType: Write, + Key: selectedKey, + Value: v, + Txid: tx.ID(), + } + } + + return putErr }) if err != nil { writeErr := fmt.Errorf("[writer error]: %w", err) - writer.t.Log(writeErr) - writer.errCh <- writeErr + w.t.Log(writeErr) + w.errCh <- writeErr return writeErr } - time.Sleep(randomDurationInRange(writer.minWriteInterval, writer.maxWriteInterval)) + time.Sleep(randomDurationInRange(w.minWriteInterval, w.maxWriteInterval)) } } @@ -285,18 +339,35 @@ func formatBytes(val []byte) string { return hex.EncodeToString(val) } -func saveDataIfFailed(t *testing.T, db *btesting.DB) { +func saveDataIfFailed(t *testing.T, db *btesting.DB, rs historyRecords) { if t.Failed() { if err := db.Close(); err != nil { t.Errorf("Failed to close db: %v", err) } backupPath := testResultsDirectory(t) - targetFile := filepath.Join(backupPath, "db.bak") + 
backupDB(t, db, backupPath) + persistHistoryRecords(t, rs, backupPath) + } +} + +func backupDB(t *testing.T, db *btesting.DB, path string) { + targetFile := filepath.Join(path, "db.bak") + t.Logf("Saving the DB file to %s", targetFile) + err := common.CopyFile(db.Path(), targetFile) + require.NoError(t, err) + t.Logf("DB file saved to %s", targetFile) +} - t.Logf("Saving the DB file to %s", targetFile) - err := common.CopyFile(db.Path(), targetFile) +func persistHistoryRecords(t *testing.T, rs historyRecords, path string) { + recordFilePath := filepath.Join(path, "history_records.json") + t.Logf("Saving history records to %s", recordFilePath) + recordFile, err := os.OpenFile(recordFilePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755) + require.NoError(t, err) + defer recordFile.Close() + encoder := json.NewEncoder(recordFile) + for _, rec := range rs { + err := encoder.Encode(rec) require.NoError(t, err) - t.Logf("DB file saved to %s", targetFile) } } @@ -321,3 +392,96 @@ func testResultsDirectory(t *testing.T) string { return path } + +/* +********************************************************* +Data structure and functions for analyzing history records +********************************************************* +*/ +type OperationType string + +const ( + Read OperationType = "read" + Write OperationType = "write" +) + +type historyRecord struct { + OperationType OperationType `json:"operationType,omitempty"` + Txid int `json:"txid,omitempty"` + Key string `json:"key,omitempty"` + Value []byte `json:"value,omitempty"` +} + +type historyRecords []historyRecord + +func (rs historyRecords) Len() int { + return len(rs) +} + +func (rs historyRecords) Less(i, j int) bool { + // Sorted by key firstly: all records with the same key are grouped together. 
+ keyCmp := strings.Compare(rs[i].Key, rs[j].Key) + if keyCmp != 0 { + return keyCmp < 0 + } + + // Sorted by txid + if rs[i].Txid != rs[j].Txid { + return rs[i].Txid < rs[j].Txid + } + + // Sorted by workerType: put writer before reader if they have the same txid. + if rs[i].OperationType == Write { + return true + } + + return false +} + +func (rs historyRecords) Swap(i, j int) { + rs[i], rs[j] = rs[j], rs[i] +} + +func collectHistoryRecords(recordingCh chan historyRecord) historyRecords { + var rs historyRecords + for record := range recordingCh { + rs = append(rs, record) + } + sort.Sort(rs) + return rs +} + +func analyzeHistoryRecords(rs historyRecords) error { + lastWriteKeyValueMap := make(map[string]*historyRecord) + + for _, rec := range rs { + if v, ok := lastWriteKeyValueMap[rec.Key]; ok { + if rec.OperationType == Write { + v.Value = rec.Value + v.Txid = rec.Txid + } else { + if !reflect.DeepEqual(v.Value, rec.Value) { + return fmt.Errorf("reader[txid: %d, key: %s] read %x, \nbut writer[txid: %d, key: %s] wrote %x", + rec.Txid, rec.Key, rec.Value, + v.Txid, v.Key, v.Value) + } + } + } else { + if rec.OperationType == Write { + lastWriteKeyValueMap[rec.Key] = &historyRecord{ + OperationType: Write, + Key: rec.Key, + Value: rec.Value, + Txid: rec.Txid, + } + } else { + if len(rec.Value) != 0 { + return fmt.Errorf("expected the first reader[txid: %d, key: %s] read nil, \nbut got %x", + rec.Txid, rec.Key, rec.Value) + } + } + } + } + + return nil +} From 842da43d53bb9e9621a180b958f94512fbb5c6bc Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 14 Apr 2023 13:22:16 +0800 Subject: [PATCH 056/439] test: move starting worker into separate function, and collect operation history locally firstly Signed-off-by: Benjamin Wang --- concurrent_test.go | 143 ++++++++++++++++++++++++++------------------- 1 file changed, 83 insertions(+), 60 deletions(-) diff --git a/concurrent_test.go b/concurrent_test.go index da233fa05..af2030f45 100644 --- a/concurrent_test.go 
+++ b/concurrent_test.go @@ -116,7 +116,7 @@ func concurrentReadAndWrite(t *testing.T, minWriteBytes, maxWriteBytes int, testDuration time.Duration) { - // prepare the db + t.Log("Preparing db.") db := btesting.MustCreateDB(t) err := db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket(bucket) @@ -124,18 +124,45 @@ func concurrentReadAndWrite(t *testing.T, }) require.NoError(t, err) + t.Log("Starting workers.") + records := runWorkers(t, + db, bucket, keys, + readerCount, minReadInterval, maxReadInterval, + minWriteInterval, maxWriteInterval, minWriteBytes, maxWriteBytes, + testDuration) + + t.Log("Analyzing the history records.") + if err := analyzeHistoryRecords(records); err != nil { + t.Errorf("The history records are not linearizable:\n %v", err) + } + + saveDataIfFailed(t, db, records) + + // TODO (ahrtr): + // 1. intentionally inject a random failpoint. + // 2. check db consistency at the end. +} + +/* +********************************************************* +Data structures and functions/methods for running +concurrent workers, including reading and writing workers +********************************************************* +*/ +func runWorkers(t *testing.T, + db *btesting.DB, + bucket []byte, + keys []string, + readerCount int, + minReadInterval, maxReadInterval time.Duration, + minWriteInterval, maxWriteInterval time.Duration, + minWriteBytes, maxWriteBytes int, + testDuration time.Duration) historyRecords { stopCh := make(chan struct{}, 1) errCh := make(chan error, readerCount+1) - recordingCh := make(chan historyRecord, readerCount+1) - // collect history records - var records historyRecords - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - records = collectHistoryRecords(recordingCh) - }() + var mu sync.Mutex + var rs historyRecords // start write transaction g := new(errgroup.Group) @@ -148,12 +175,17 @@ func concurrentReadAndWrite(t *testing.T, minWriteInterval: minWriteInterval, maxWriteInterval: maxWriteInterval, - 
recordingCh: recordingCh, - errCh: errCh, - stopCh: stopCh, - t: t, + errCh: errCh, + stopCh: stopCh, + t: t, } - g.Go(writer.run) + g.Go(func() error { + wrs, err := writer.run() + mu.Lock() + rs = append(rs, wrs...) + mu.Unlock() + return err + }) // start readonly transactions for i := 0; i < readerCount; i++ { @@ -164,12 +196,17 @@ func concurrentReadAndWrite(t *testing.T, minReadInterval: minReadInterval, maxReadInterval: maxReadInterval, - recordingCh: recordingCh, - errCh: errCh, - stopCh: stopCh, - t: t, + errCh: errCh, + stopCh: stopCh, + t: t, } - g.Go(reader.run) + g.Go(func() error { + rrs, err := reader.run() + mu.Lock() + rs = append(rs, rrs...) + mu.Unlock() + return err + }) } t.Logf("Keep reading and writing transactions running for about %s.", testDuration) @@ -179,24 +216,13 @@ func concurrentReadAndWrite(t *testing.T, } close(stopCh) - t.Log("Wait for all transactions to finish.") + t.Log("Waiting for all transactions to finish.") if err := g.Wait(); err != nil { t.Errorf("Received error: %v", err) } - t.Log("Waiting for the history collector to finish.") - close(recordingCh) - wg.Wait() - t.Log("Analyzing the history records.") - if err := analyzeHistoryRecords(records); err != nil { - t.Errorf("The history records are not linearizable:\n %v", err) - } - - saveDataIfFailed(t, db, records) - - // TODO (ahrtr): - // 1. intentionally inject a random failpoint. - // 2. check db consistency at the end. 
+ sort.Sort(rs) + return rs } type readWorker struct { @@ -208,19 +234,19 @@ type readWorker struct { minReadInterval time.Duration maxReadInterval time.Duration - recordingCh chan historyRecord - errCh chan error - stopCh chan struct{} + errCh chan error + stopCh chan struct{} t *testing.T } -func (r *readWorker) run() error { +func (r *readWorker) run() (historyRecords, error) { + var rs historyRecords for { select { case <-r.stopCh: r.t.Log("Reading transaction finished.") - return nil + return rs, nil default: } @@ -240,12 +266,12 @@ func (r *readWorker) run() error { clonedVal := make([]byte, len(val)) copy(clonedVal, val) - r.recordingCh <- historyRecord{ + rs = append(rs, historyRecord{ OperationType: Read, Key: selectedKey, Value: clonedVal, Txid: tx.ID(), - } + }) return nil }) @@ -254,7 +280,7 @@ func (r *readWorker) run() error { readErr := fmt.Errorf("[reader error]: %w", err) r.t.Log(readErr) r.errCh <- readErr - return readErr + return rs, readErr } } } @@ -270,19 +296,19 @@ type writeWorker struct { minWriteInterval time.Duration maxWriteInterval time.Duration - recordingCh chan historyRecord - errCh chan error - stopCh chan struct{} + errCh chan error + stopCh chan struct{} t *testing.T } -func (w *writeWorker) run() error { +func (w *writeWorker) run() (historyRecords, error) { + var rs historyRecords for { select { case <-w.stopCh: w.t.Log("Writing transaction finished.") - return nil + return rs, nil default: } @@ -299,12 +325,12 @@ func (w *writeWorker) run() error { putErr := b.Put([]byte(selectedKey), v) if putErr == nil { - w.recordingCh <- historyRecord{ + rs = append(rs, historyRecord{ OperationType: Write, Key: selectedKey, Value: v, Txid: tx.ID(), - } + }) } return putErr @@ -314,7 +340,7 @@ func (w *writeWorker) run() error { writeErr := fmt.Errorf("[writer error]: %w", err) w.t.Log(writeErr) w.errCh <- writeErr - return writeErr + return rs, writeErr } time.Sleep(randomDurationInRange(w.minWriteInterval, w.maxWriteInterval)) @@ -339,6 
+365,12 @@ func formatBytes(val []byte) string { return hex.EncodeToString(val) } +/* +********************************************************* +Functions for persisting test data, including db file +and operation history +********************************************************* +*/ func saveDataIfFailed(t *testing.T, db *btesting.DB, rs historyRecords) { if t.Failed() { if err := db.Close(); err != nil { @@ -395,7 +427,7 @@ func testResultsDirectory(t *testing.T) string { /* ********************************************************* -Data structure and functions for analyzing history records +Data structures and functions for analyzing history records ********************************************************* */ type OperationType string @@ -442,15 +474,6 @@ func (rs historyRecords) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] } -func collectHistoryRecords(recordingCh chan historyRecord) historyRecords { - var rs historyRecords - for record := range recordingCh { - rs = append(rs, record) - } - sort.Sort(rs) - return rs -} - func analyzeHistoryRecords(rs historyRecords) error { lastWriteKeyValueMap := make(map[string]*historyRecord) From fd10e601058b1082bd3d9dd6531aa27f7ed4d5f2 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 14 Apr 2023 17:05:03 +0800 Subject: [PATCH 057/439] test: explictly mention serializable verification Signed-off-by: Benjamin Wang --- concurrent_test.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/concurrent_test.go b/concurrent_test.go index af2030f45..e1a5b5883 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -132,8 +132,8 @@ func concurrentReadAndWrite(t *testing.T, testDuration) t.Log("Analyzing the history records.") - if err := analyzeHistoryRecords(records); err != nil { - t.Errorf("The history records are not linearizable:\n %v", err) + if err := validateSerializable(records); err != nil { + t.Errorf("The history records are not serializable:\n %v", err) } saveDataIfFailed(t, db, 
records) @@ -221,7 +221,6 @@ func runWorkers(t *testing.T, t.Errorf("Received error: %v", err) } - sort.Sort(rs) return rs } @@ -474,7 +473,9 @@ func (rs historyRecords) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] } -func analyzeHistoryRecords(rs historyRecords) error { +func validateSerializable(rs historyRecords) error { + sort.Sort(rs) + lastWriteKeyValueMap := make(map[string]*historyRecord) for _, rec := range rs { From 8ca298f17f2280856d039f7fb445474cc284951c Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 14 Apr 2023 19:02:28 +0800 Subject: [PATCH 058/439] test: get all concurrent test parameters wrapped in a struct Signed-off-by: Benjamin Wang --- concurrent_test.go | 189 +++++++++++++++++++++++++++------------------ 1 file changed, 112 insertions(+), 77 deletions(-) diff --git a/concurrent_test.go b/concurrent_test.go index e1a5b5883..765ce6647 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -24,6 +24,22 @@ import ( "go.etcd.io/bbolt/internal/common" ) +type duration struct { + min time.Duration + max time.Duration +} + +type bytesRange struct { + min int + max int +} + +type concurrentConfig struct { + readTime duration + writeTime duration + writeBytes bytesRange +} + /* TestConcurrentReadAndWrite verifies: 1. 
Repeatable read: a read transaction should always see the same data @@ -36,59 +52,88 @@ func TestConcurrentReadAndWrite(t *testing.T) { keys := []string{"key0", "key1", "key2", "key3", "key4", "key5", "key6", "key7", "key8", "key9"} testCases := []struct { - name string - readerCount int - minReadInterval time.Duration - maxReadInterval time.Duration - minWriteInterval time.Duration - maxWriteInterval time.Duration - minWriteBytes int - maxWriteBytes int - testDuration time.Duration + name string + readerCount int + conf concurrentConfig + testDuration time.Duration }{ { - name: "1 reader", - readerCount: 1, - minReadInterval: 50 * time.Millisecond, - maxReadInterval: 100 * time.Millisecond, - minWriteInterval: 10 * time.Millisecond, - maxWriteInterval: 20 * time.Millisecond, - minWriteBytes: 200, - maxWriteBytes: 8000, - testDuration: 30 * time.Second, + name: "1 reader", + readerCount: 1, + conf: concurrentConfig{ + readTime: duration{ + min: 50 * time.Millisecond, + max: 100 * time.Millisecond, + }, + writeTime: duration{ + min: 10 * time.Millisecond, + max: 20 * time.Millisecond, + }, + writeBytes: bytesRange{ + min: 200, + max: 8000, + }, + }, + testDuration: 30 * time.Second, }, { - name: "10 readers", - readerCount: 10, - minReadInterval: 50 * time.Millisecond, - maxReadInterval: 100 * time.Millisecond, - minWriteInterval: 10 * time.Millisecond, - maxWriteInterval: 20 * time.Millisecond, - minWriteBytes: 200, - maxWriteBytes: 8000, - testDuration: 30 * time.Second, + name: "10 readers", + readerCount: 10, + conf: concurrentConfig{ + readTime: duration{ + min: 50 * time.Millisecond, + max: 100 * time.Millisecond, + }, + writeTime: duration{ + min: 10 * time.Millisecond, + max: 20 * time.Millisecond, + }, + writeBytes: bytesRange{ + min: 200, + max: 8000, + }, + }, + testDuration: 30 * time.Second, }, { - name: "50 readers", - readerCount: 50, - minReadInterval: 50 * time.Millisecond, - maxReadInterval: 100 * time.Millisecond, - minWriteInterval: 10 * 
time.Millisecond, - maxWriteInterval: 20 * time.Millisecond, - minWriteBytes: 500, - maxWriteBytes: 8000, - testDuration: 30 * time.Second, + name: "50 readers", + readerCount: 50, + conf: concurrentConfig{ + readTime: duration{ + min: 50 * time.Millisecond, + max: 100 * time.Millisecond, + }, + writeTime: duration{ + min: 10 * time.Millisecond, + max: 20 * time.Millisecond, + }, + writeBytes: bytesRange{ + min: 500, + max: 8000, + }, + }, + + testDuration: 30 * time.Second, }, { - name: "100 readers", - readerCount: 100, - minReadInterval: 50 * time.Millisecond, - maxReadInterval: 100 * time.Millisecond, - minWriteInterval: 10 * time.Millisecond, - maxWriteInterval: 20 * time.Millisecond, - minWriteBytes: 500, - maxWriteBytes: 8000, - testDuration: 30 * time.Second, + name: "100 readers", + readerCount: 100, + conf: concurrentConfig{ + readTime: duration{ + min: 50 * time.Millisecond, + max: 100 * time.Millisecond, + }, + writeTime: duration{ + min: 10 * time.Millisecond, + max: 20 * time.Millisecond, + }, + writeBytes: bytesRange{ + min: 500, + max: 8000, + }, + }, + + testDuration: 30 * time.Second, }, } @@ -99,9 +144,7 @@ func TestConcurrentReadAndWrite(t *testing.T) { bucket, keys, tc.readerCount, - tc.minReadInterval, tc.maxReadInterval, - tc.minWriteInterval, tc.maxWriteInterval, - tc.minWriteBytes, tc.maxWriteBytes, + tc.conf, tc.testDuration) }) } @@ -111,9 +154,7 @@ func concurrentReadAndWrite(t *testing.T, bucket []byte, keys []string, readerCount int, - minReadInterval, maxReadInterval time.Duration, - minWriteInterval, maxWriteInterval time.Duration, - minWriteBytes, maxWriteBytes int, + conf concurrentConfig, testDuration time.Duration) { t.Log("Preparing db.") @@ -127,8 +168,8 @@ func concurrentReadAndWrite(t *testing.T, t.Log("Starting workers.") records := runWorkers(t, db, bucket, keys, - readerCount, minReadInterval, maxReadInterval, - minWriteInterval, maxWriteInterval, minWriteBytes, maxWriteBytes, + readerCount, + conf, testDuration) 
t.Log("Analyzing the history records.") @@ -154,9 +195,7 @@ func runWorkers(t *testing.T, bucket []byte, keys []string, readerCount int, - minReadInterval, maxReadInterval time.Duration, - minWriteInterval, maxWriteInterval time.Duration, - minWriteBytes, maxWriteBytes int, + conf concurrentConfig, testDuration time.Duration) historyRecords { stopCh := make(chan struct{}, 1) errCh := make(chan error, readerCount+1) @@ -167,13 +206,12 @@ func runWorkers(t *testing.T, // start write transaction g := new(errgroup.Group) writer := writeWorker{ - db: db, - bucket: bucket, - keys: keys, - minWriteBytes: minWriteBytes, - maxWriteBytes: maxWriteBytes, - minWriteInterval: minWriteInterval, - maxWriteInterval: maxWriteInterval, + db: db, + bucket: bucket, + keys: keys, + + writeBytes: conf.writeBytes, + writeTime: conf.writeTime, errCh: errCh, stopCh: stopCh, @@ -190,11 +228,11 @@ func runWorkers(t *testing.T, // start readonly transactions for i := 0; i < readerCount; i++ { reader := &readWorker{ - db: db, - bucket: bucket, - keys: keys, - minReadInterval: minReadInterval, - maxReadInterval: maxReadInterval, + db: db, + bucket: bucket, + keys: keys, + + readTime: conf.readTime, errCh: errCh, stopCh: stopCh, @@ -230,8 +268,7 @@ type readWorker struct { bucket []byte keys []string - minReadInterval time.Duration - maxReadInterval time.Duration + readTime duration errCh chan error stopCh chan struct{} @@ -254,7 +291,7 @@ func (r *readWorker) run() (historyRecords, error) { selectedKey := r.keys[mrand.Intn(len(r.keys))] initialVal := b.Get([]byte(selectedKey)) - time.Sleep(randomDurationInRange(r.minReadInterval, r.maxReadInterval)) + time.Sleep(randomDurationInRange(r.readTime.min, r.readTime.max)) val := b.Get([]byte(selectedKey)) if !reflect.DeepEqual(initialVal, val) { @@ -290,10 +327,8 @@ type writeWorker struct { bucket []byte keys []string - minWriteBytes int - maxWriteBytes int - minWriteInterval time.Duration - maxWriteInterval time.Duration + writeBytes bytesRange + 
writeTime duration errCh chan error stopCh chan struct{} @@ -316,7 +351,7 @@ func (w *writeWorker) run() (historyRecords, error) { selectedKey := w.keys[mrand.Intn(len(w.keys))] - valueBytes := randomIntInRange(w.minWriteBytes, w.maxWriteBytes) + valueBytes := randomIntInRange(w.writeBytes.min, w.writeBytes.max) v := make([]byte, valueBytes) if _, cErr := crand.Read(v); cErr != nil { return cErr @@ -342,7 +377,7 @@ func (w *writeWorker) run() (historyRecords, error) { return rs, writeErr } - time.Sleep(randomDurationInRange(w.minWriteInterval, w.maxWriteInterval)) + time.Sleep(randomDurationInRange(w.writeTime.min, w.writeTime.max)) } } From ba2625514116d3ea080f2048ab75e0c4827d273c Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Mon, 10 Apr 2023 16:09:42 +0800 Subject: [PATCH 059/439] cmd: add 'surgery freelist rebuild' command Signed-off-by: Benjamin Wang --- cmd/bbolt/command_surgery_cobra.go | 84 ++++++++++++++++++++++++- cmd/bbolt/command_surgery_cobra_test.go | 75 ++++++++++++++++++++++ 2 files changed, 158 insertions(+), 1 deletion(-) diff --git a/cmd/bbolt/command_surgery_cobra.go b/cmd/bbolt/command_surgery_cobra.go index 31816be6d..fc709c98c 100644 --- a/cmd/bbolt/command_surgery_cobra.go +++ b/cmd/bbolt/command_surgery_cobra.go @@ -7,10 +7,16 @@ import ( "github.com/spf13/cobra" + bolt "go.etcd.io/bbolt" "go.etcd.io/bbolt/internal/common" + "go.etcd.io/bbolt/internal/guts_cli" "go.etcd.io/bbolt/internal/surgeon" ) +var ( + ErrSurgeryFreelistAlreadyExist = errors.New("the file already has freelist, please consider to abandon the freelist to forcibly rebuild it") +) + var ( surgeryTargetDBFilePath string surgeryPageId uint64 @@ -89,6 +95,7 @@ func newSurgeryFreelistCommand() *cobra.Command { } cmd.AddCommand(newSurgeryFreelistAbandonCommand()) + cmd.AddCommand(newSurgeryFreelistRebuildCommand()) return cmd } @@ -118,7 +125,7 @@ func surgeryFreelistAbandonFunc(cmd *cobra.Command, args []string) error { srcDBPath := args[0] if err := 
common.CopyFile(srcDBPath, surgeryTargetDBFilePath); err != nil { - return fmt.Errorf("[abandon-freelist] copy file failed: %w", err) + return fmt.Errorf("[freelist abandon] copy file failed: %w", err) } if err := surgeon.ClearFreelist(surgeryTargetDBFilePath); err != nil { @@ -128,3 +135,78 @@ func surgeryFreelistAbandonFunc(cmd *cobra.Command, args []string) error { fmt.Fprintf(os.Stdout, "The freelist was abandoned in both meta pages.\nIt may cause some delay on next startup because bbolt needs to scan the whole db to reconstruct the free list.\n") return nil } + +func newSurgeryFreelistRebuildCommand() *cobra.Command { + rebuildFreelistCmd := &cobra.Command{ + Use: "rebuild [options]", + Short: "Rebuild the freelist", + Args: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("db file path not provided") + } + if len(args) > 1 { + return errors.New("too many arguments") + } + return nil + }, + RunE: surgeryFreelistRebuildFunc, + } + + rebuildFreelistCmd.Flags().StringVar(&surgeryTargetDBFilePath, "output", "", "path to the target db file") + + return rebuildFreelistCmd +} + +func surgeryFreelistRebuildFunc(cmd *cobra.Command, args []string) error { + srcDBPath := args[0] + + // Ensure source file exists. + fi, err := os.Stat(srcDBPath) + if os.IsNotExist(err) { + return fmt.Errorf("source database file %q doesn't exist", srcDBPath) + } else if err != nil { + return fmt.Errorf("failed to open source database file %q: %v", srcDBPath, err) + } + + if surgeryTargetDBFilePath == "" { + return fmt.Errorf("output database path wasn't given, specify output database file path with --output option") + } + + // make sure the freelist isn't present in the file. 
+ meta, err := readMetaPage(srcDBPath) + if err != nil { + return err + } + if meta.Freelist() != common.PgidNoFreelist { + return ErrSurgeryFreelistAlreadyExist + } + + if err := common.CopyFile(srcDBPath, surgeryTargetDBFilePath); err != nil { + return fmt.Errorf("[freelist rebuild] copy file failed: %w", err) + } + + // bboltDB automatically reconstruct & sync freelist in write mode. + db, err := bolt.Open(surgeryTargetDBFilePath, fi.Mode(), &bolt.Options{NoFreelistSync: false}) + if err != nil { + return fmt.Errorf("[freelist rebuild] open db file failed: %w", err) + } + err = db.Close() + if err != nil { + return fmt.Errorf("[freelist rebuild] close db file failed: %w", err) + } + + fmt.Fprintf(os.Stdout, "The freelist was successfully rebuilt.\n") + return nil +} + +func readMetaPage(path string) (*common.Meta, error) { + _, activeMetaPageId, err := guts_cli.GetRootPage(path) + if err != nil { + return nil, fmt.Errorf("read root page failed: %w", err) + } + _, buf, err := guts_cli.ReadPage(path, uint64(activeMetaPageId)) + if err != nil { + return nil, fmt.Errorf("read active mage page failed: %w", err) + } + return common.LoadPageMeta(buf), nil +} diff --git a/cmd/bbolt/command_surgery_cobra_test.go b/cmd/bbolt/command_surgery_cobra_test.go index 63103cd0d..0e35c05e9 100644 --- a/cmd/bbolt/command_surgery_cobra_test.go +++ b/cmd/bbolt/command_surgery_cobra_test.go @@ -457,3 +457,78 @@ func loadMetaPage(t *testing.T, dbPath string, pageID uint64) *common.Meta { require.NoError(t, err) return common.LoadPageMeta(buf) } + +func TestSurgery_Freelist_Rebuild(t *testing.T) { + testCases := []struct { + name string + hasFreelist bool + expectedError error + }{ + { + name: "normal operation", + hasFreelist: false, + expectedError: nil, + }, + { + name: "already has freelist", + hasFreelist: true, + expectedError: main.ErrSurgeryFreelistAlreadyExist, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + pageSize := 4096 + db 
:= btesting.MustCreateDBWithOption(t, &bolt.Options{ + PageSize: pageSize, + NoFreelistSync: !tc.hasFreelist, + }) + srcPath := db.Path() + + err := db.Update(func(tx *bolt.Tx) error { + // do nothing + return nil + }) + require.NoError(t, err) + + defer requireDBNoChange(t, dbData(t, srcPath), srcPath) + + // Verify the freelist isn't synced in the beginning + meta := readMetaPage(t, srcPath) + if tc.hasFreelist { + if meta.Freelist() <= 1 || meta.Freelist() >= meta.Pgid() { + t.Fatalf("freelist (%d) isn't in the valid range (1, %d)", meta.Freelist(), meta.Pgid()) + } + } else { + require.Equal(t, common.PgidNoFreelist, meta.Freelist()) + } + + // Execute `surgery freelist rebuild` command + rootCmd := main.NewRootCommand() + output := filepath.Join(t.TempDir(), "db") + rootCmd.SetArgs([]string{ + "surgery", "freelist", "rebuild", srcPath, + "--output", output, + }) + err = rootCmd.Execute() + require.Equal(t, tc.expectedError, err) + + if tc.expectedError == nil { + // Verify the freelist has already been rebuilt. 
+ meta = readMetaPage(t, output) + if meta.Freelist() <= 1 || meta.Freelist() >= meta.Pgid() { + t.Fatalf("freelist (%d) isn't in the valid range (1, %d)", meta.Freelist(), meta.Pgid()) + } + } + }) + } +} + +func readMetaPage(t *testing.T, path string) *common.Meta { + _, activeMetaPageId, err := guts_cli.GetRootPage(path) + require.NoError(t, err) + _, buf, err := guts_cli.ReadPage(path, uint64(activeMetaPageId)) + require.NoError(t, err) + return common.LoadPageMeta(buf) +} From 651c74ab07aa2c1ead0ce5262dc16b571c42c9c4 Mon Sep 17 00:00:00 2001 From: Cenk Alti Date: Sat, 15 Apr 2023 23:48:08 -0400 Subject: [PATCH 060/439] skip concurrent read and write tests in short mode Signed-off-by: Cenk Alti --- concurrent_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/concurrent_test.go b/concurrent_test.go index 765ce6647..09f6be109 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -48,6 +48,9 @@ TestConcurrentReadAndWrite verifies: following reading transactions (with txid >= previous writing txid). 
*/ func TestConcurrentReadAndWrite(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } bucket := []byte("data") keys := []string{"key0", "key1", "key2", "key3", "key4", "key5", "key6", "key7", "key8", "key9"} From e431258c0dd3aeae97b8ea0f9a87de9c9908eef4 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sun, 16 Apr 2023 13:28:13 +0800 Subject: [PATCH 061/439] test: verify that txids are incremental Signed-off-by: Benjamin Wang --- concurrent_test.go | 53 +++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 48 insertions(+), 5 deletions(-) diff --git a/concurrent_test.go b/concurrent_test.go index 09f6be109..c6ffc8780 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -208,7 +208,8 @@ func runWorkers(t *testing.T, // start write transaction g := new(errgroup.Group) - writer := writeWorker{ + writer := &writeWorker{ + id: 0, db: db, bucket: bucket, keys: keys, @@ -221,7 +222,7 @@ func runWorkers(t *testing.T, t: t, } g.Go(func() error { - wrs, err := writer.run() + wrs, err := runWorker(t, writer, errCh) mu.Lock() rs = append(rs, wrs...) mu.Unlock() @@ -231,6 +232,7 @@ func runWorkers(t *testing.T, // start readonly transactions for i := 0; i < readerCount; i++ { reader := &readWorker{ + id: i, db: db, bucket: bucket, keys: keys, @@ -242,7 +244,7 @@ func runWorkers(t *testing.T, t: t, } g.Go(func() error { - rrs, err := reader.run() + rrs, err := runWorker(t, reader, errCh) mu.Lock() rs = append(rs, rrs...) 
mu.Unlock() @@ -265,7 +267,26 @@ func runWorkers(t *testing.T, return rs } +func runWorker(t *testing.T, w worker, errCh chan error) (historyRecords, error) { + rs, err := w.run() + if len(rs) > 0 && err == nil { + if terr := validateIncrementalTxid(rs); terr != nil { + txidErr := fmt.Errorf("[%s]: %w", w.name(), terr) + t.Error(txidErr) + errCh <- txidErr + return rs, txidErr + } + } + return rs, err +} + +type worker interface { + name() string + run() (historyRecords, error) +} + type readWorker struct { + id int db *btesting.DB bucket []byte @@ -279,6 +300,10 @@ type readWorker struct { t *testing.T } +func (w *readWorker) name() string { + return fmt.Sprintf("readWorker-%d", w.id) +} + func (r *readWorker) run() (historyRecords, error) { var rs historyRecords for { @@ -317,7 +342,7 @@ func (r *readWorker) run() (historyRecords, error) { if err != nil { readErr := fmt.Errorf("[reader error]: %w", err) - r.t.Log(readErr) + r.t.Error(readErr) r.errCh <- readErr return rs, readErr } @@ -325,6 +350,7 @@ func (r *readWorker) run() (historyRecords, error) { } type writeWorker struct { + id int db *btesting.DB bucket []byte @@ -339,6 +365,10 @@ type writeWorker struct { t *testing.T } +func (w *writeWorker) name() string { + return fmt.Sprintf("writeWorker-%d", w.id) +} + func (w *writeWorker) run() (historyRecords, error) { var rs historyRecords for { @@ -375,7 +405,7 @@ func (w *writeWorker) run() (historyRecords, error) { if err != nil { writeErr := fmt.Errorf("[writer error]: %w", err) - w.t.Log(writeErr) + w.t.Error(writeErr) w.errCh <- writeErr return rs, writeErr } @@ -511,6 +541,19 @@ func (rs historyRecords) Swap(i, j int) { rs[i], rs[j] = rs[j], rs[i] } +func validateIncrementalTxid(rs historyRecords) error { + lastTxid := rs[0].Txid + + for i := 1; i < len(rs); i++ { + if (rs[i].OperationType == Write && rs[i].Txid <= lastTxid) || (rs[i].OperationType == Read && rs[i].Txid < lastTxid) { + return fmt.Errorf("detected non-incremental txid(%d, %d) in %s mode", 
lastTxid, rs[i].Txid, rs[i].OperationType) + } + lastTxid = rs[i].Txid + } + + return nil +} + func validateSerializable(rs historyRecords) error { sort.Sort(rs) From 0d799dc620f2d27a185ebb32af794d11a08a106f Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 20 Apr 2023 14:40:54 +0800 Subject: [PATCH 062/439] test: support multiple writing transactions in the concurrent test Signed-off-by: Benjamin Wang --- concurrent_test.go | 64 +++++++++++++++++++++++++++------------------- 1 file changed, 38 insertions(+), 26 deletions(-) diff --git a/concurrent_test.go b/concurrent_test.go index c6ffc8780..3615d4fdc 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -43,9 +43,10 @@ type concurrentConfig struct { /* TestConcurrentReadAndWrite verifies: 1. Repeatable read: a read transaction should always see the same data - view during its lifecycle; + view during its lifecycle. 2. Any data written by a writing transaction should be visible to any following reading transactions (with txid >= previous writing txid). + 3. The txid should never decrease. 
*/ func TestConcurrentReadAndWrite(t *testing.T) { if testing.Short() { @@ -57,12 +58,14 @@ func TestConcurrentReadAndWrite(t *testing.T) { testCases := []struct { name string readerCount int + writerCount int conf concurrentConfig testDuration time.Duration }{ { name: "1 reader", readerCount: 1, + writerCount: 1, conf: concurrentConfig{ readTime: duration{ min: 50 * time.Millisecond, @@ -82,6 +85,7 @@ func TestConcurrentReadAndWrite(t *testing.T) { { name: "10 readers", readerCount: 10, + writerCount: 2, conf: concurrentConfig{ readTime: duration{ min: 50 * time.Millisecond, @@ -101,6 +105,7 @@ func TestConcurrentReadAndWrite(t *testing.T) { { name: "50 readers", readerCount: 50, + writerCount: 10, conf: concurrentConfig{ readTime: duration{ min: 50 * time.Millisecond, @@ -121,6 +126,7 @@ func TestConcurrentReadAndWrite(t *testing.T) { { name: "100 readers", readerCount: 100, + writerCount: 20, conf: concurrentConfig{ readTime: duration{ min: 50 * time.Millisecond, @@ -147,6 +153,7 @@ func TestConcurrentReadAndWrite(t *testing.T) { bucket, keys, tc.readerCount, + tc.writerCount, tc.conf, tc.testDuration) }) @@ -157,6 +164,7 @@ func concurrentReadAndWrite(t *testing.T, bucket []byte, keys []string, readerCount int, + writerCount int, conf concurrentConfig, testDuration time.Duration) { @@ -172,6 +180,7 @@ func concurrentReadAndWrite(t *testing.T, records := runWorkers(t, db, bucket, keys, readerCount, + writerCount, conf, testDuration) @@ -198,6 +207,7 @@ func runWorkers(t *testing.T, bucket []byte, keys []string, readerCount int, + writerCount int, conf concurrentConfig, testDuration time.Duration) historyRecords { stopCh := make(chan struct{}, 1) @@ -206,28 +216,34 @@ func runWorkers(t *testing.T, var mu sync.Mutex var rs historyRecords - // start write transaction - g := new(errgroup.Group) - writer := &writeWorker{ - id: 0, - db: db, - bucket: bucket, - keys: keys, - - writeBytes: conf.writeBytes, - writeTime: conf.writeTime, - - errCh: errCh, - stopCh: stopCh, 
- t: t, - } - g.Go(func() error { - wrs, err := runWorker(t, writer, errCh) + runFunc := func(w worker) error { + wrs, err := runWorker(t, w, errCh) mu.Lock() rs = append(rs, wrs...) mu.Unlock() return err - }) + } + + // start write transactions + g := new(errgroup.Group) + for i := 0; i < writerCount; i++ { + writer := &writeWorker{ + id: i, + db: db, + bucket: bucket, + keys: keys, + + writeBytes: conf.writeBytes, + writeTime: conf.writeTime, + + errCh: errCh, + stopCh: stopCh, + t: t, + } + g.Go(func() error { + return runFunc(writer) + }) + } // start readonly transactions for i := 0; i < readerCount; i++ { @@ -244,11 +260,7 @@ func runWorkers(t *testing.T, t: t, } g.Go(func() error { - rrs, err := runWorker(t, reader, errCh) - mu.Lock() - rs = append(rs, rrs...) - mu.Unlock() - return err + return runFunc(reader) }) } @@ -300,8 +312,8 @@ type readWorker struct { t *testing.T } -func (w *readWorker) name() string { - return fmt.Sprintf("readWorker-%d", w.id) +func (r *readWorker) name() string { + return fmt.Sprintf("readWorker-%d", r.id) } func (r *readWorker) run() (historyRecords, error) { From f63484f54de3aca863735cb5a31754517c09c4e6 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sun, 23 Apr 2023 11:04:52 +0800 Subject: [PATCH 063/439] remove codecov and build badges Signed-off-by: Benjamin Wang --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index 3ff830499..8c7c70658 100644 --- a/README.md +++ b/README.md @@ -2,8 +2,6 @@ bbolt ===== [![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt) -[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt) -[![Build Status Travis](https://img.shields.io/travis/etcd-io/bboltlabs.svg?style=flat-square&&branch=master)](https://travis-ci.com/etcd-io/bbolt) 
[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt) [![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases) [![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE) From 0ddf0fb8ff343adc9481af1ebd1c78cfd267e820 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sun, 23 Apr 2023 10:17:27 +0800 Subject: [PATCH 064/439] test: refactor worker/operation and support Delete operation Signed-off-by: Benjamin Wang --- concurrent_test.go | 407 +++++++++++++++++++++------------------------ 1 file changed, 188 insertions(+), 219 deletions(-) diff --git a/concurrent_test.go b/concurrent_test.go index 3615d4fdc..edb0b0c8c 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -34,10 +34,16 @@ type bytesRange struct { max int } +type operationChance struct { + operation OperationType + chance int +} + type concurrentConfig struct { - readTime duration - writeTime duration - writeBytes bytesRange + workInterval duration + operationRatio []operationChance + readInterval duration // only used by readOpeartion + writeBytes bytesRange // only used by writeOperation } /* @@ -54,94 +60,60 @@ func TestConcurrentReadAndWrite(t *testing.T) { } bucket := []byte("data") keys := []string{"key0", "key1", "key2", "key3", "key4", "key5", "key6", "key7", "key8", "key9"} + conf := concurrentConfig{ + workInterval: duration{ + min: 5 * time.Millisecond, + max: 100 * time.Millisecond, + }, + operationRatio: []operationChance{ + {operation: Read, chance: 60}, + {operation: Write, chance: 20}, + {operation: Delete, chance: 20}, + }, + readInterval: duration{ + min: 50 * time.Millisecond, + max: 100 * time.Millisecond, + }, + writeBytes: bytesRange{ + min: 200, + max: 16000, + }, + } testCases := []struct { name string - readerCount int - writerCount int + 
workerCount int conf concurrentConfig testDuration time.Duration }{ { - name: "1 reader", - readerCount: 1, - writerCount: 1, - conf: concurrentConfig{ - readTime: duration{ - min: 50 * time.Millisecond, - max: 100 * time.Millisecond, - }, - writeTime: duration{ - min: 10 * time.Millisecond, - max: 20 * time.Millisecond, - }, - writeBytes: bytesRange{ - min: 200, - max: 8000, - }, - }, + name: "1 worker", + workerCount: 1, + conf: conf, testDuration: 30 * time.Second, }, { - name: "10 readers", - readerCount: 10, - writerCount: 2, - conf: concurrentConfig{ - readTime: duration{ - min: 50 * time.Millisecond, - max: 100 * time.Millisecond, - }, - writeTime: duration{ - min: 10 * time.Millisecond, - max: 20 * time.Millisecond, - }, - writeBytes: bytesRange{ - min: 200, - max: 8000, - }, - }, + name: "10 workers", + workerCount: 10, + conf: conf, testDuration: 30 * time.Second, }, { - name: "50 readers", - readerCount: 50, - writerCount: 10, - conf: concurrentConfig{ - readTime: duration{ - min: 50 * time.Millisecond, - max: 100 * time.Millisecond, - }, - writeTime: duration{ - min: 10 * time.Millisecond, - max: 20 * time.Millisecond, - }, - writeBytes: bytesRange{ - min: 500, - max: 8000, - }, - }, - + name: "50 workers", + workerCount: 50, + conf: conf, testDuration: 30 * time.Second, }, { - name: "100 readers", - readerCount: 100, - writerCount: 20, - conf: concurrentConfig{ - readTime: duration{ - min: 50 * time.Millisecond, - max: 100 * time.Millisecond, - }, - writeTime: duration{ - min: 10 * time.Millisecond, - max: 20 * time.Millisecond, - }, - writeBytes: bytesRange{ - min: 500, - max: 8000, - }, - }, - + name: "100 workers", + workerCount: 100, + conf: conf, + testDuration: 30 * time.Second, + }, + { + name: "200 workers", + workerCount: 200, + conf: conf, testDuration: 30 * time.Second, }, } @@ -152,8 +124,7 @@ func TestConcurrentReadAndWrite(t *testing.T) { concurrentReadAndWrite(t, bucket, keys, - tc.readerCount, - tc.writerCount, + tc.workerCount, 
tc.conf, tc.testDuration) }) @@ -163,8 +134,7 @@ func TestConcurrentReadAndWrite(t *testing.T) { func concurrentReadAndWrite(t *testing.T, bucket []byte, keys []string, - readerCount int, - writerCount int, + workerCount int, conf concurrentConfig, testDuration time.Duration) { @@ -179,8 +149,7 @@ func concurrentReadAndWrite(t *testing.T, t.Log("Starting workers.") records := runWorkers(t, db, bucket, keys, - readerCount, - writerCount, + workerCount, conf, testDuration) @@ -198,80 +167,55 @@ func concurrentReadAndWrite(t *testing.T, /* ********************************************************* -Data structures and functions/methods for running -concurrent workers, including reading and writing workers +Data structures and functions/methods for running concurrent +workers, which execute different operations, including `Read`, +`Write` and `Delete`. ********************************************************* */ func runWorkers(t *testing.T, db *btesting.DB, bucket []byte, keys []string, - readerCount int, - writerCount int, + workerCount int, conf concurrentConfig, testDuration time.Duration) historyRecords { stopCh := make(chan struct{}, 1) - errCh := make(chan error, readerCount+1) + errCh := make(chan error, workerCount) var mu sync.Mutex var rs historyRecords - runFunc := func(w worker) error { - wrs, err := runWorker(t, w, errCh) - mu.Lock() - rs = append(rs, wrs...) 
- mu.Unlock() - return err - } - - // start write transactions g := new(errgroup.Group) - for i := 0; i < writerCount; i++ { - writer := &writeWorker{ - id: i, - db: db, - bucket: bucket, - keys: keys, - - writeBytes: conf.writeBytes, - writeTime: conf.writeTime, - - errCh: errCh, - stopCh: stopCh, - t: t, - } - g.Go(func() error { - return runFunc(writer) - }) - } - - // start readonly transactions - for i := 0; i < readerCount; i++ { - reader := &readWorker{ + for i := 0; i < workerCount; i++ { + w := &worker{ id: i, db: db, bucket: bucket, keys: keys, - readTime: conf.readTime, + conf: conf, errCh: errCh, stopCh: stopCh, t: t, } g.Go(func() error { - return runFunc(reader) + wrs, err := runWorker(t, w, errCh) + mu.Lock() + rs = append(rs, wrs...) + mu.Unlock() + return err }) } - t.Logf("Keep reading and writing transactions running for about %s.", testDuration) + t.Logf("Keep all workers running for about %s.", testDuration) select { case <-time.After(testDuration): case <-errCh: } close(stopCh) - t.Log("Waiting for all transactions to finish.") + t.Log("Waiting for all workers to finish.") if err := g.Wait(); err != nil { t.Errorf("Received error: %v", err) } @@ -279,7 +223,7 @@ func runWorkers(t *testing.T, return rs } -func runWorker(t *testing.T, w worker, errCh chan error) (historyRecords, error) { +func runWorker(t *testing.T, w *worker, errCh chan error) (historyRecords, error) { rs, err := w.run() if len(rs) > 0 && err == nil { if terr := validateIncrementalTxid(rs); terr != nil { @@ -292,19 +236,14 @@ func runWorker(t *testing.T, w worker, errCh chan error) (historyRecords, error) return rs, err } -type worker interface { - name() string - run() (historyRecords, error) -} - -type readWorker struct { +type worker struct { id int db *btesting.DB bucket []byte keys []string - readTime duration + conf concurrentConfig errCh chan error stopCh chan struct{} @@ -312,118 +251,144 @@ type readWorker struct { t *testing.T } -func (r *readWorker) name() string { 
- return fmt.Sprintf("readWorker-%d", r.id) +func (w *worker) name() string { + return fmt.Sprintf("worker-%d", w.id) } -func (r *readWorker) run() (historyRecords, error) { +func (w *worker) run() (historyRecords, error) { var rs historyRecords for { select { - case <-r.stopCh: - r.t.Log("Reading transaction finished.") + case <-w.stopCh: + w.t.Logf("%q finished.", w.name()) return rs, nil default: } - err := r.db.View(func(tx *bolt.Tx) error { - b := tx.Bucket(r.bucket) + op := w.pickOperation() + rec, err := runOperation(op, w.db, w.bucket, w.keys, w.conf) + if err != nil { + readErr := fmt.Errorf("[%s: %s]: %w", w.name(), op, err) + w.t.Error(readErr) + w.errCh <- readErr + return rs, readErr + } - selectedKey := r.keys[mrand.Intn(len(r.keys))] - initialVal := b.Get([]byte(selectedKey)) - time.Sleep(randomDurationInRange(r.readTime.min, r.readTime.max)) - val := b.Get([]byte(selectedKey)) + rs = append(rs, rec) + time.Sleep(randomDurationInRange(w.conf.workInterval.min, w.conf.workInterval.max)) + } +} - if !reflect.DeepEqual(initialVal, val) { - return fmt.Errorf("read different values for the same key (%q), value1: %q, value2: %q", - selectedKey, formatBytes(initialVal), formatBytes(val)) - } +func (w *worker) pickOperation() OperationType { + sum := 0 + for _, op := range w.conf.operationRatio { + sum += op.chance + } + roll := mrand.Int() % sum + for _, op := range w.conf.operationRatio { + if roll < op.chance { + return op.operation + } + roll -= op.chance + } + panic("unexpected") +} - clonedVal := make([]byte, len(val)) - copy(clonedVal, val) +func runOperation(op OperationType, db *btesting.DB, bucket []byte, keys []string, conf concurrentConfig) (historyRecord, error) { + switch op { + case Read: + return executeRead(db, bucket, keys, conf.readInterval) + case Write: + return executeWrite(db, bucket, keys, conf.writeBytes) + case Delete: + return executeDelete(db, bucket, keys) + default: + panic(fmt.Sprintf("unexpected operation type: %s", op)) + } +} 
- rs = append(rs, historyRecord{ - OperationType: Read, - Key: selectedKey, - Value: clonedVal, - Txid: tx.ID(), - }) +func executeRead(db *btesting.DB, bucket []byte, keys []string, readInterval duration) (historyRecord, error) { + var rec historyRecord + err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(bucket) - return nil - }) + selectedKey := keys[mrand.Intn(len(keys))] + initialVal := b.Get([]byte(selectedKey)) + time.Sleep(randomDurationInRange(readInterval.min, readInterval.max)) + val := b.Get([]byte(selectedKey)) - if err != nil { - readErr := fmt.Errorf("[reader error]: %w", err) - r.t.Error(readErr) - r.errCh <- readErr - return rs, readErr + if !reflect.DeepEqual(initialVal, val) { + return fmt.Errorf("read different values for the same key (%q), value1: %q, value2: %q", + selectedKey, formatBytes(initialVal), formatBytes(val)) } - } -} - -type writeWorker struct { - id int - db *btesting.DB - bucket []byte - keys []string + clonedVal := make([]byte, len(val)) + copy(clonedVal, val) - writeBytes bytesRange - writeTime duration + rec = historyRecord{ + OperationType: Read, + Key: selectedKey, + Value: clonedVal, + Txid: tx.ID(), + } - errCh chan error - stopCh chan struct{} + return nil + }) - t *testing.T + return rec, err } -func (w *writeWorker) name() string { - return fmt.Sprintf("writeWorker-%d", w.id) -} +func executeWrite(db *btesting.DB, bucket []byte, keys []string, writeBytes bytesRange) (historyRecord, error) { + var rec historyRecord -func (w *writeWorker) run() (historyRecords, error) { - var rs historyRecords - for { - select { - case <-w.stopCh: - w.t.Log("Writing transaction finished.") - return rs, nil - default: - } + err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(bucket) - err := w.db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket(w.bucket) + selectedKey := keys[mrand.Intn(len(keys))] - selectedKey := w.keys[mrand.Intn(len(w.keys))] + valueBytes := randomIntInRange(writeBytes.min, writeBytes.max) + v := 
make([]byte, valueBytes) + if _, cErr := crand.Read(v); cErr != nil { + return cErr + } - valueBytes := randomIntInRange(w.writeBytes.min, w.writeBytes.max) - v := make([]byte, valueBytes) - if _, cErr := crand.Read(v); cErr != nil { - return cErr + putErr := b.Put([]byte(selectedKey), v) + if putErr == nil { + rec = historyRecord{ + OperationType: Write, + Key: selectedKey, + Value: v, + Txid: tx.ID(), } + } - putErr := b.Put([]byte(selectedKey), v) - if putErr == nil { - rs = append(rs, historyRecord{ - OperationType: Write, - Key: selectedKey, - Value: v, - Txid: tx.ID(), - }) - } + return putErr + }) - return putErr - }) + return rec, err +} - if err != nil { - writeErr := fmt.Errorf("[writer error]: %w", err) - w.t.Error(writeErr) - w.errCh <- writeErr - return rs, writeErr +func executeDelete(db *btesting.DB, bucket []byte, keys []string) (historyRecord, error) { + var rec historyRecord + + err := db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(bucket) + + selectedKey := keys[mrand.Intn(len(keys))] + + deleteErr := b.Delete([]byte(selectedKey)) + if deleteErr == nil { + rec = historyRecord{ + OperationType: Delete, + Key: selectedKey, + Txid: tx.ID(), + } } - time.Sleep(randomDurationInRange(w.writeTime.min, w.writeTime.max)) - } + return deleteErr + }) + + return rec, err } func randomDurationInRange(min, max time.Duration) time.Duration { @@ -512,8 +477,9 @@ Data structures and functions for analyzing history records type OperationType string const ( - Read OperationType = "read" - Write OperationType = "write" + Read OperationType = "read" + Write OperationType = "write" + Delete OperationType = "delete" ) type historyRecord struct { @@ -541,12 +507,13 @@ func (rs historyRecords) Less(i, j int) bool { return rs[i].Txid < rs[j].Txid } - // Sorted by workerType: put writer before reader if they have the same txid. 
- if rs[i].OperationType == Write { - return true + // Sorted by operation type: put `Read` after other operation types + // if they operate on the same key and have the same txid. + if rs[i].OperationType == Read { + return false } - return false + return true } func (rs historyRecords) Swap(i, j int) { @@ -557,7 +524,7 @@ func validateIncrementalTxid(rs historyRecords) error { lastTxid := rs[0].Txid for i := 1; i < len(rs); i++ { - if (rs[i].OperationType == Write && rs[i].Txid <= lastTxid) || (rs[i].OperationType == Read && rs[i].Txid < lastTxid) { + if (rs[i].OperationType == Read && rs[i].Txid < lastTxid) || (rs[i].OperationType != Read && rs[i].Txid <= lastTxid) { return fmt.Errorf("detected non-incremental txid(%d, %d) in %s mode", lastTxid, rs[i].Txid, rs[i].OperationType) } lastTxid = rs[i].Txid @@ -576,9 +543,11 @@ func validateSerializable(rs historyRecords) error { if rec.OperationType == Write { v.Value = rec.Value v.Txid = rec.Txid + } else if rec.OperationType == Delete { + delete(lastWriteKeyValueMap, rec.Key) } else { if !reflect.DeepEqual(v.Value, rec.Value) { - return fmt.Errorf("reader[txid: %d, key: %s] read %x, \nbut writer[txid: %d, key: %s] wrote %x", + return fmt.Errorf("readOperation[txid: %d, key: %s] read %x, \nbut writer[txid: %d, key: %s] wrote %x", rec.Txid, rec.Key, rec.Value, v.Txid, v.Key, v.Value) } @@ -591,9 +560,9 @@ func validateSerializable(rs historyRecords) error { Value: rec.Value, Txid: rec.Txid, } - } else { + } else if rec.OperationType == Read { if len(rec.Value) != 0 { - return fmt.Errorf("expected the first reader[txid: %d, key: %s] read nil, \nbut got %x", + return fmt.Errorf("expected the first readOperation[txid: %d, key: %s] read nil, \nbut got %x", rec.Txid, rec.Key, rec.Value) } } From ec3ff470f61b9c3e1269d19d05eb0402914a8389 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Mon, 24 Apr 2023 18:55:18 +0800 Subject: [PATCH 065/439] test: rename runOperation to executeOperation Signed-off-by: Benjamin Wang --- 
concurrent_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/concurrent_test.go b/concurrent_test.go index edb0b0c8c..581e07935 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -266,7 +266,7 @@ func (w *worker) run() (historyRecords, error) { } op := w.pickOperation() - rec, err := runOperation(op, w.db, w.bucket, w.keys, w.conf) + rec, err := executeOperation(op, w.db, w.bucket, w.keys, w.conf) if err != nil { readErr := fmt.Errorf("[%s: %s]: %w", w.name(), op, err) w.t.Error(readErr) @@ -294,7 +294,7 @@ func (w *worker) pickOperation() OperationType { panic("unexpected") } -func runOperation(op OperationType, db *btesting.DB, bucket []byte, keys []string, conf concurrentConfig) (historyRecord, error) { +func executeOperation(op OperationType, db *btesting.DB, bucket []byte, keys []string, conf concurrentConfig) (historyRecord, error) { switch op { case Read: return executeRead(db, bucket, keys, conf.readInterval) From 690c9c9a51f2b85d692a6d8f533e6a325feb8771 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 26 Apr 2023 19:55:39 +0800 Subject: [PATCH 066/439] test: add test cases to cover some old commands Signed-off-by: Benjamin Wang --- cmd/bbolt/main_test.go | 91 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index c6ac96f27..ffed33cfd 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -19,6 +19,7 @@ import ( bolt "go.etcd.io/bbolt" main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/internal/guts_cli" ) // Ensure the "info" command can print information about a database. 
@@ -76,6 +77,96 @@ func TestStatsCommand_Run_EmptyDatabase(t *testing.T) { } } +func TestCheckCommand_Run(t *testing.T) { + db := btesting.MustCreateDB(t) + db.Close() + + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + m := NewMain() + err := m.Run("check", db.Path()) + require.NoError(t, err) + if m.Stdout.String() != "OK\n" { + t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) + } +} + +func TestDumpCommand_Run(t *testing.T) { + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 4096}) + db.Close() + + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + exp := `0000010 edda 0ced 0200 0000 0010 0000 0000 0000` + + m := NewMain() + err := m.Run("dump", db.Path(), "0") + require.NoError(t, err) + if !strings.Contains(m.Stdout.String(), exp) { + t.Fatalf("unexpected stdout:\n%s\n", m.Stdout.String()) + } +} + +func TestPageCommand_Run(t *testing.T) { + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 4096}) + db.Close() + + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + exp := "Page ID: 0\n" + + "Page Type: meta\n" + + "Total Size: 4096 bytes\n" + + "Overflow pages: 0\n" + + "Version: 2\n" + + "Page Size: 4096 bytes\n" + + "Flags: 00000000\n" + + "Root: \n" + + "Freelist: \n" + + "HWM: \n" + + "Txn ID: 0\n" + + "Checksum: 07516e114689fdee\n\n" + + m := NewMain() + err := m.Run("page", db.Path(), "0") + require.NoError(t, err) + if m.Stdout.String() != exp { + t.Fatalf("unexpected stdout:\n%s\n%s", m.Stdout.String(), exp) + } +} + +func TestPageItemCommand_Run(t *testing.T) { + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 4096}) + srcPath := db.Path() + + // Insert some sample data + t.Log("Insert some sample data") + err := db.Fill([]byte("data"), 1, 100, + func(tx int, k int) []byte { return []byte(fmt.Sprintf("key_%d", k)) }, + func(tx int, k int) []byte { return []byte(fmt.Sprintf("value_%d", k)) }, + ) + require.NoError(t, err) + + defer requireDBNoChange(t, 
dbData(t, srcPath), srcPath) + + meta := readMetaPage(t, srcPath) + leafPageId := 0 + for i := 2; i < int(meta.Pgid()); i++ { + p, _, err := guts_cli.ReadPage(srcPath, uint64(i)) + require.NoError(t, err) + if p.IsLeafPage() && p.Count() > 1 { + leafPageId = int(p.Id()) + } + } + require.NotEqual(t, 0, leafPageId) + + m := NewMain() + err = m.Run("page-item", db.Path(), fmt.Sprintf("%d", leafPageId), "0") + require.NoError(t, err) + if !strings.Contains(m.Stdout.String(), "key_0") || !strings.Contains(m.Stdout.String(), "value_0") { + t.Fatalf("Unexpected output:\n%s\n", m.Stdout.String()) + } +} + // Ensure the "stats" command can execute correctly. func TestStatsCommand_Run(t *testing.T) { // Ignore From 8164464f8e84bf4ba7562b7cf59ddefb85eecc4b Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 26 Apr 2023 11:26:28 +0800 Subject: [PATCH 067/439] migrate 'surgery revert-meta-page' to cobra style command Signed-off-by: Benjamin Wang --- cmd/bbolt/command_surgery_cobra.go | 57 +++++++++++++++++++++++++ cmd/bbolt/command_surgery_cobra_test.go | 46 ++++++++++++++++++++ cmd/bbolt/surgery_commands.go | 52 ---------------------- cmd/bbolt/surgery_commands_test.go | 41 ------------------ 4 files changed, 103 insertions(+), 93 deletions(-) diff --git a/cmd/bbolt/command_surgery_cobra.go b/cmd/bbolt/command_surgery_cobra.go index fc709c98c..a62bec40a 100644 --- a/cmd/bbolt/command_surgery_cobra.go +++ b/cmd/bbolt/command_surgery_cobra.go @@ -30,12 +30,54 @@ func newSurgeryCobraCommand() *cobra.Command { Short: "surgery related commands", } + surgeryCmd.AddCommand(newSurgeryRevertMetaPageCommand()) surgeryCmd.AddCommand(newSurgeryClearPageElementsCommand()) surgeryCmd.AddCommand(newSurgeryFreelistCommand()) return surgeryCmd } +func newSurgeryRevertMetaPageCommand() *cobra.Command { + revertMetaPageCmd := &cobra.Command{ + Use: "revert-meta-page [options]", + Short: "Revert the meta page to revert the changes performed by the latest transaction", + Args: func(cmd 
*cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("db file path not provided") + } + if len(args) > 1 { + return errors.New("too many arguments") + } + return nil + }, + RunE: surgeryRevertMetaPageFunc, + } + + revertMetaPageCmd.Flags().StringVar(&surgeryTargetDBFilePath, "output", "", "path to the target db file") + + return revertMetaPageCmd +} + +func surgeryRevertMetaPageFunc(cmd *cobra.Command, args []string) error { + srcDBPath := args[0] + + if err := checkDBPaths(srcDBPath, surgeryTargetDBFilePath); err != nil { + return err + } + + if err := common.CopyFile(srcDBPath, surgeryTargetDBFilePath); err != nil { + return fmt.Errorf("[revert-meta-page] copy file failed: %w", err) + } + + if err := surgeon.RevertMetaPage(surgeryTargetDBFilePath); err != nil { + return fmt.Errorf("revert-meta-page command failed: %w", err) + } + + fmt.Fprintln(os.Stdout, "The meta page is reverted.") + + return nil +} + func newSurgeryClearPageElementsCommand() *cobra.Command { clearElementCmd := &cobra.Command{ Use: "clear-page-elements [options]", @@ -210,3 +252,18 @@ func readMetaPage(path string) (*common.Meta, error) { } return common.LoadPageMeta(buf), nil } + +func checkDBPaths(srcPath, dstPath string) error { + _, err := os.Stat(srcPath) + if os.IsNotExist(err) { + return fmt.Errorf("source database file %q doesn't exist", srcPath) + } else if err != nil { + return fmt.Errorf("failed to open source database file %q: %v", srcPath, err) + } + + if dstPath == "" { + return fmt.Errorf("output database path wasn't given, specify output database file path with --output option") + } + + return nil +} diff --git a/cmd/bbolt/command_surgery_cobra_test.go b/cmd/bbolt/command_surgery_cobra_test.go index 0e35c05e9..fc0f6376e 100644 --- a/cmd/bbolt/command_surgery_cobra_test.go +++ b/cmd/bbolt/command_surgery_cobra_test.go @@ -2,6 +2,7 @@ package main_test import ( "fmt" + "os" "path/filepath" "testing" @@ -15,6 +16,51 @@ import ( 
"go.etcd.io/bbolt/internal/guts_cli" ) +func TestSurgery_RevertMetaPage(t *testing.T) { + pageSize := 4096 + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize}) + srcPath := db.Path() + + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + srcFile, err := os.Open(srcPath) + require.NoError(t, err) + defer srcFile.Close() + + // Read both meta0 and meta1 from srcFile + srcBuf0 := readPage(t, srcPath, 0, pageSize) + srcBuf1 := readPage(t, srcPath, 1, pageSize) + meta0Page := common.LoadPageMeta(srcBuf0) + meta1Page := common.LoadPageMeta(srcBuf1) + + // Get the non-active meta page + nonActiveSrcBuf := srcBuf0 + nonActiveMetaPageId := 0 + if meta0Page.Txid() > meta1Page.Txid() { + nonActiveSrcBuf = srcBuf1 + nonActiveMetaPageId = 1 + } + t.Logf("non active meta page id: %d", nonActiveMetaPageId) + + // revert the meta page + rootCmd := main.NewRootCommand() + output := filepath.Join(t.TempDir(), "db") + rootCmd.SetArgs([]string{ + "surgery", "revert-meta-page", srcPath, + "--output", output, + }) + err = rootCmd.Execute() + require.NoError(t, err) + + // read both meta0 and meta1 from dst file + dstBuf0 := readPage(t, output, 0, pageSize) + dstBuf1 := readPage(t, output, 1, pageSize) + + // check result. Note we should skip the page ID + assert.Equal(t, pageDataWithoutPageId(nonActiveSrcBuf), pageDataWithoutPageId(dstBuf0)) + assert.Equal(t, pageDataWithoutPageId(nonActiveSrcBuf), pageDataWithoutPageId(dstBuf1)) +} + func TestSurgery_ClearPageElements_Without_Overflow(t *testing.T) { testCases := []struct { name string diff --git a/cmd/bbolt/surgery_commands.go b/cmd/bbolt/surgery_commands.go index d0a1c8da7..385903134 100644 --- a/cmd/bbolt/surgery_commands.go +++ b/cmd/bbolt/surgery_commands.go @@ -40,8 +40,6 @@ func (cmd *surgeryCommand) Run(args ...string) error { case "help": fmt.Fprintln(cmd.Stderr, cmd.Usage()) return ErrUsage - case "revert-meta-page": - return newRevertMetaPageCommand(cmd).Run(args[1:]...) 
case "copy-page": return newCopyPageCommand(cmd).Run(args[1:]...) case "clear-page": @@ -90,56 +88,6 @@ Use "bbolt surgery [command] -h" for more information about a command. `, "\n") } -// revertMetaPageCommand represents the "surgery revert-meta-page" command execution. -type revertMetaPageCommand struct { - *surgeryCommand -} - -// newRevertMetaPageCommand returns a revertMetaPageCommand. -func newRevertMetaPageCommand(m *surgeryCommand) *revertMetaPageCommand { - c := &revertMetaPageCommand{} - c.surgeryCommand = m - return c -} - -// Run executes the command. -func (cmd *revertMetaPageCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - if err := cmd.parsePathsAndCopyFile(fs); err != nil { - return fmt.Errorf("revertMetaPageCommand failed to parse paths and copy file: %w", err) - } - - // revert the meta page - if err := surgeon.RevertMetaPage(cmd.dstPath); err != nil { - return fmt.Errorf("revertMetaPageCommand failed: %w", err) - } - - fmt.Fprintln(cmd.Stdout, "The meta page is reverted.") - return nil -} - -// Usage returns the help message. -func (cmd *revertMetaPageCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt surgery revert-meta-page SRC DST - -RevertMetaPage copies the database file at SRC to a newly created database -file at DST. Afterwards, it reverts the meta page on the newly created -database at DST. - -The original database is left untouched. -`, "\n") -} - // copyPageCommand represents the "surgery copy-page" command execution. 
type copyPageCommand struct { *surgeryCommand diff --git a/cmd/bbolt/surgery_commands_test.go b/cmd/bbolt/surgery_commands_test.go index 8d96eb326..567c0c493 100644 --- a/cmd/bbolt/surgery_commands_test.go +++ b/cmd/bbolt/surgery_commands_test.go @@ -14,47 +14,6 @@ import ( "go.etcd.io/bbolt/internal/common" ) -func TestSurgery_RevertMetaPage(t *testing.T) { - pageSize := 4096 - db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize}) - srcPath := db.Path() - - defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) - - srcFile, err := os.Open(srcPath) - require.NoError(t, err) - defer srcFile.Close() - - // Read both meta0 and meta1 from srcFile - srcBuf0 := readPage(t, srcPath, 0, pageSize) - srcBuf1 := readPage(t, srcPath, 1, pageSize) - meta0Page := common.LoadPageMeta(srcBuf0) - meta1Page := common.LoadPageMeta(srcBuf1) - - // Get the non-active meta page - nonActiveSrcBuf := srcBuf0 - nonActiveMetaPageId := 0 - if meta0Page.Txid() > meta1Page.Txid() { - nonActiveSrcBuf = srcBuf1 - nonActiveMetaPageId = 1 - } - t.Logf("non active meta page id: %d", nonActiveMetaPageId) - - // revert the meta page - dstPath := filepath.Join(t.TempDir(), "dstdb") - m := NewMain() - err = m.Run("surgery", "revert-meta-page", srcPath, dstPath) - require.NoError(t, err) - - // read both meta0 and meta1 from dst file - dstBuf0 := readPage(t, dstPath, 0, pageSize) - dstBuf1 := readPage(t, dstPath, 1, pageSize) - - // check result. 
Note we should skip the page ID - assert.Equal(t, pageDataWithoutPageId(nonActiveSrcBuf), pageDataWithoutPageId(dstBuf0)) - assert.Equal(t, pageDataWithoutPageId(nonActiveSrcBuf), pageDataWithoutPageId(dstBuf1)) -} - func TestSurgery_CopyPage(t *testing.T) { pageSize := 4096 db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize}) From e0f875d11b14c2d01ffef28acbfd31cacf1b63cf Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 27 Apr 2023 14:27:35 +0800 Subject: [PATCH 068/439] test: save data in deferred function Signed-off-by: Benjamin Wang --- concurrent_test.go | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/concurrent_test.go b/concurrent_test.go index 581e07935..779649f8d 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -146,8 +146,17 @@ func concurrentReadAndWrite(t *testing.T, }) require.NoError(t, err) + var records historyRecords + // t.Failed() returns false during panicking. We need to forcibly + // save data on panicking. + // Refer to: https://github.com/golang/go/issues/49929 + panicked := true + defer func() { + saveDataIfFailed(t, db, records, panicked) + }() + t.Log("Starting workers.") - records := runWorkers(t, + records = runWorkers(t, db, bucket, keys, workerCount, conf, @@ -158,8 +167,7 @@ func concurrentReadAndWrite(t *testing.T, t.Errorf("The history records are not serializable:\n %v", err) } - saveDataIfFailed(t, db, records) - + panicked = false // TODO (ahrtr): // 1. intentionally inject a random failpoint. // 2. check db consistency at the end. 
@@ -415,8 +423,8 @@ Functions for persisting test data, including db file and operation history ********************************************************* */ -func saveDataIfFailed(t *testing.T, db *btesting.DB, rs historyRecords) { - if t.Failed() { +func saveDataIfFailed(t *testing.T, db *btesting.DB, rs historyRecords, force bool) { + if t.Failed() || force { if err := db.Close(); err != nil { t.Errorf("Failed to close db: %v", err) } From 39964028c5dd7058b55d47f8fc24b0db129f2084 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Mon, 1 May 2023 12:57:34 +0800 Subject: [PATCH 069/439] test: check database consistency at the end of concurrent test case Signed-off-by: Benjamin Wang --- concurrent_test.go | 28 +++++++++++++++++++++++----- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/concurrent_test.go b/concurrent_test.go index 779649f8d..97d0be514 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -63,7 +63,7 @@ func TestConcurrentReadAndWrite(t *testing.T) { conf := concurrentConfig{ workInterval: duration{ min: 5 * time.Millisecond, - max: 100 * time.Millisecond, + max: 10 * time.Millisecond, }, operationRatio: []operationChance{ {operation: Read, chance: 60}, @@ -163,14 +163,32 @@ func concurrentReadAndWrite(t *testing.T, testDuration) t.Log("Analyzing the history records.") - if err := validateSerializable(records); err != nil { - t.Errorf("The history records are not serializable:\n %v", err) + if err := validateSequential(records); err != nil { + t.Errorf("The history records are not sequential:\n %v", err) + } + + t.Log("Checking database consistency.") + if err := checkConsistency(t, db); err != nil { + t.Errorf("The data isn't consistency: %v", err) } panicked = false // TODO (ahrtr): // 1. intentionally inject a random failpoint. - // 2. check db consistency at the end. 
+} + +func checkConsistency(t *testing.T, db *btesting.DB) error { + return db.View(func(tx *bolt.Tx) error { + cnt := 0 + for err := range tx.Check() { + t.Errorf("Consistency error: %v", err) + cnt++ + } + if cnt > 0 { + return fmt.Errorf("%d consistency errors found", cnt) + } + return nil + }) } /* @@ -541,7 +559,7 @@ func validateIncrementalTxid(rs historyRecords) error { return nil } -func validateSerializable(rs historyRecords) error { +func validateSequential(rs historyRecords) error { sort.Sort(rs) lastWriteKeyValueMap := make(map[string]*historyRecord) From aaada7b154310830bc66f6315e43bdab56b4fa9b Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 2 May 2023 15:14:55 +0800 Subject: [PATCH 070/439] test: update concurrent test case to remove the dependency on internal package Signed-off-by: Benjamin Wang --- concurrent_test.go | 95 +++++++++++++++++++++++++++++++++++++++------- 1 file changed, 82 insertions(+), 13 deletions(-) diff --git a/concurrent_test.go b/concurrent_test.go index 97d0be514..446b1ede7 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -5,6 +5,7 @@ import ( "encoding/hex" "encoding/json" "fmt" + "io" mrand "math/rand" "os" "path/filepath" @@ -20,8 +21,6 @@ import ( "golang.org/x/sync/errgroup" bolt "go.etcd.io/bbolt" - "go.etcd.io/bbolt/internal/btesting" - "go.etcd.io/bbolt/internal/common" ) type duration struct { @@ -139,7 +138,8 @@ func concurrentReadAndWrite(t *testing.T, testDuration time.Duration) { t.Log("Preparing db.") - db := btesting.MustCreateDB(t) + db := mustCreateDB(t, nil) + defer db.Close() err := db.Update(func(tx *bolt.Tx) error { _, err := tx.CreateBucket(bucket) return err @@ -177,7 +177,32 @@ func concurrentReadAndWrite(t *testing.T, // 1. intentionally inject a random failpoint. } -func checkConsistency(t *testing.T, db *btesting.DB) error { +// mustCreateDB is created in place of `btesting.MustCreateDB`, and it's +// only supposed to be used by the concurrent test case. 
The purpose is +// to ensure the test case can be executed on old branches or versions, +// e.g. `release-1.3` or `1.3.[5-7]`. +func mustCreateDB(t *testing.T, o *bolt.Options) *bolt.DB { + f := filepath.Join(t.TempDir(), "db") + + t.Logf("Opening bbolt DB at: %s", f) + if o == nil { + o = bolt.DefaultOptions + } + + freelistType := bolt.FreelistArrayType + if env := os.Getenv("TEST_FREELIST_TYPE"); env == string(bolt.FreelistMapType) { + freelistType = bolt.FreelistMapType + } + + o.FreelistType = freelistType + + db, err := bolt.Open(f, 0666, o) + require.NoError(t, err) + + return db +} + +func checkConsistency(t *testing.T, db *bolt.DB) error { return db.View(func(tx *bolt.Tx) error { cnt := 0 for err := range tx.Check() { @@ -199,7 +224,7 @@ workers, which execute different operations, including `Read`, ********************************************************* */ func runWorkers(t *testing.T, - db *btesting.DB, + db *bolt.DB, bucket []byte, keys []string, workerCount int, @@ -264,7 +289,7 @@ func runWorker(t *testing.T, w *worker, errCh chan error) (historyRecords, error type worker struct { id int - db *btesting.DB + db *bolt.DB bucket []byte keys []string @@ -320,7 +345,7 @@ func (w *worker) pickOperation() OperationType { panic("unexpected") } -func executeOperation(op OperationType, db *btesting.DB, bucket []byte, keys []string, conf concurrentConfig) (historyRecord, error) { +func executeOperation(op OperationType, db *bolt.DB, bucket []byte, keys []string, conf concurrentConfig) (historyRecord, error) { switch op { case Read: return executeRead(db, bucket, keys, conf.readInterval) @@ -333,7 +358,7 @@ func executeOperation(op OperationType, db *btesting.DB, bucket []byte, keys []s } } -func executeRead(db *btesting.DB, bucket []byte, keys []string, readInterval duration) (historyRecord, error) { +func executeRead(db *bolt.DB, bucket []byte, keys []string, readInterval duration) (historyRecord, error) { var rec historyRecord err := db.View(func(tx 
*bolt.Tx) error { b := tx.Bucket(bucket) @@ -364,7 +389,7 @@ func executeRead(db *btesting.DB, bucket []byte, keys []string, readInterval dur return rec, err } -func executeWrite(db *btesting.DB, bucket []byte, keys []string, writeBytes bytesRange) (historyRecord, error) { +func executeWrite(db *bolt.DB, bucket []byte, keys []string, writeBytes bytesRange) (historyRecord, error) { var rec historyRecord err := db.Update(func(tx *bolt.Tx) error { @@ -394,7 +419,7 @@ func executeWrite(db *btesting.DB, bucket []byte, keys []string, writeBytes byte return rec, err } -func executeDelete(db *btesting.DB, bucket []byte, keys []string) (historyRecord, error) { +func executeDelete(db *bolt.DB, bucket []byte, keys []string) (historyRecord, error) { var rec historyRecord err := db.Update(func(tx *bolt.Tx) error { @@ -441,7 +466,7 @@ Functions for persisting test data, including db file and operation history ********************************************************* */ -func saveDataIfFailed(t *testing.T, db *btesting.DB, rs historyRecords, force bool) { +func saveDataIfFailed(t *testing.T, db *bolt.DB, rs historyRecords, force bool) { if t.Failed() || force { if err := db.Close(); err != nil { t.Errorf("Failed to close db: %v", err) @@ -452,14 +477,58 @@ func saveDataIfFailed(t *testing.T, db *btesting.DB, rs historyRecords, force bo } } -func backupDB(t *testing.T, db *btesting.DB, path string) { +func backupDB(t *testing.T, db *bolt.DB, path string) { targetFile := filepath.Join(path, "db.bak") t.Logf("Saving the DB file to %s", targetFile) - err := common.CopyFile(db.Path(), targetFile) + err := copyFile(db.Path(), targetFile) require.NoError(t, err) t.Logf("DB file saved to %s", targetFile) } +func copyFile(srcPath, dstPath string) error { + // Ensure source file exists. + _, err := os.Stat(srcPath) + if os.IsNotExist(err) { + return fmt.Errorf("source file %q not found", srcPath) + } else if err != nil { + return err + } + + // Ensure output file not exist. 
+ _, err = os.Stat(dstPath) + if err == nil { + return fmt.Errorf("output file %q already exists", dstPath) + } else if !os.IsNotExist(err) { + return err + } + + srcDB, err := os.Open(srcPath) + if err != nil { + return fmt.Errorf("failed to open source file %q: %w", srcPath, err) + } + defer srcDB.Close() + dstDB, err := os.Create(dstPath) + if err != nil { + return fmt.Errorf("failed to create output file %q: %w", dstPath, err) + } + defer dstDB.Close() + written, err := io.Copy(dstDB, srcDB) + if err != nil { + return fmt.Errorf("failed to copy database file from %q to %q: %w", srcPath, dstPath, err) + } + + srcFi, err := srcDB.Stat() + if err != nil { + return fmt.Errorf("failed to get source file info %q: %w", srcPath, err) + } + initialSize := srcFi.Size() + if initialSize != written { + return fmt.Errorf("the byte copied (%q: %d) isn't equal to the initial db size (%q: %d)", dstPath, written, srcPath, initialSize) + } + + return nil +} + func persistHistoryRecords(t *testing.T, rs historyRecords, path string) { recordFilePath := filepath.Join(path, "history_records.json") t.Logf("Saving history records to %s", recordFilePath) From 578d3a857e6cf0bcb3cb19e568002a2865093723 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 3 May 2023 08:33:08 +0800 Subject: [PATCH 071/439] test: support no-op write transaction A no-op write transaction has two consequences: 1. The txid increases by 1; 2. Two meta pages point to the same root page. Please also read https://github.com/etcd-io/etcd/issues/15498#issuecomment-1528971788. 
Signed-off-by: Benjamin Wang --- concurrent_test.go | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/concurrent_test.go b/concurrent_test.go index 446b1ede7..237f5da69 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -23,6 +23,8 @@ import ( bolt "go.etcd.io/bbolt" ) +const noopTxKey string = "%magic-no-op-key%" + type duration struct { min time.Duration max time.Duration @@ -42,6 +44,7 @@ type concurrentConfig struct { workInterval duration operationRatio []operationChance readInterval duration // only used by readOpeartion + noopWriteRatio int // only used by writeOperation writeBytes bytesRange // only used by writeOperation } @@ -73,6 +76,7 @@ func TestConcurrentReadAndWrite(t *testing.T) { min: 50 * time.Millisecond, max: 100 * time.Millisecond, }, + noopWriteRatio: 20, writeBytes: bytesRange{ min: 200, max: 16000, @@ -350,7 +354,7 @@ func executeOperation(op OperationType, db *bolt.DB, bucket []byte, keys []strin case Read: return executeRead(db, bucket, keys, conf.readInterval) case Write: - return executeWrite(db, bucket, keys, conf.writeBytes) + return executeWrite(db, bucket, keys, conf.writeBytes, conf.noopWriteRatio) case Delete: return executeDelete(db, bucket, keys) default: @@ -389,10 +393,23 @@ func executeRead(db *bolt.DB, bucket []byte, keys []string, readInterval duratio return rec, err } -func executeWrite(db *bolt.DB, bucket []byte, keys []string, writeBytes bytesRange) (historyRecord, error) { +func executeWrite(db *bolt.DB, bucket []byte, keys []string, writeBytes bytesRange, noopWriteRatio int) (historyRecord, error) { var rec historyRecord err := db.Update(func(tx *bolt.Tx) error { + if mrand.Intn(100) < noopWriteRatio { + // A no-op write transaction has two consequences: + // 1. The txid increases by 1; + // 2. Two meta pages point to the same root page. 
+ rec = historyRecord{ + OperationType: Write, + Key: noopTxKey, + Value: nil, + Txid: tx.ID(), + } + return nil + } + b := tx.Bucket(bucket) selectedKey := keys[mrand.Intn(len(keys))] @@ -636,8 +653,10 @@ func validateSequential(rs historyRecords) error { for _, rec := range rs { if v, ok := lastWriteKeyValueMap[rec.Key]; ok { if rec.OperationType == Write { - v.Value = rec.Value v.Txid = rec.Txid + if rec.Key != noopTxKey { + v.Value = rec.Value + } } else if rec.OperationType == Delete { delete(lastWriteKeyValueMap, rec.Key) } else { @@ -648,7 +667,7 @@ func validateSequential(rs historyRecords) error { } } } else { - if rec.OperationType == Write { + if rec.OperationType == Write && rec.Key != noopTxKey { lastWriteKeyValueMap[rec.Key] = &historyRecord{ OperationType: Write, Key: rec.Key, From 465077b9e2f3d0bfe593beda5a4b04ea1cd0915a Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 5 May 2023 15:47:04 +0800 Subject: [PATCH 072/439] add failpoint 'resizeFileError' to simulate file.Truncate error Signed-off-by: Benjamin Wang --- db.go | 2 ++ tests/failpoint/db_failpoint_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/db.go b/db.go index 514e7e903..1d7cd137f 100644 --- a/db.go +++ b/db.go @@ -1135,6 +1135,8 @@ func (db *DB) grow(sz int) error { // https://github.com/boltdb/bolt/issues/284 if !db.NoGrowSync && !db.readOnly { if runtime.GOOS != "windows" { + // gofail: var resizeFileError string + // return errors.New(resizeFileError) if err := db.file.Truncate(int64(sz)); err != nil { return fmt.Errorf("file resize error: %s", err) } diff --git a/tests/failpoint/db_failpoint_test.go b/tests/failpoint/db_failpoint_test.go index ef7d7ca63..b38b16ac1 100644 --- a/tests/failpoint/db_failpoint_test.go +++ b/tests/failpoint/db_failpoint_test.go @@ -94,3 +94,31 @@ func TestFailpoint_mLockFail_When_remap(t *testing.T) { require.NoError(t, err) } + +func TestFailpoint_ResizeFileFail(t *testing.T) { + db := 
btesting.MustCreateDB(t) + + err := gofail.Enable("resizeFileError", `return("resizeFile somehow failed")`) + require.NoError(t, err) + + err = db.Fill([]byte("data"), 1, 10000, + func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) }, + func(tx int, k int) []byte { return make([]byte, 100) }, + ) + + require.Error(t, err) + require.ErrorContains(t, err, "resizeFile somehow failed") + + // It should work after disabling the failpoint. + err = gofail.Disable("resizeFileError") + require.NoError(t, err) + db.MustClose() + db.MustReopen() + + err = db.Fill([]byte("data"), 1, 10000, + func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) }, + func(tx int, k int) []byte { return make([]byte, 100) }, + ) + + require.NoError(t, err) +} From e618196323d77e4eab6f7e54d2c5aa233a8d5226 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 4 May 2023 15:53:45 +0800 Subject: [PATCH 073/439] cmd: migrate 'surgery copy-page' command to cobra style command Signed-off-by: Benjamin Wang --- cmd/bbolt/command_surgery_cobra.go | 56 ++++++++++++++++++++-- cmd/bbolt/command_surgery_cobra_test.go | 59 +++++++++++++++++++++++ cmd/bbolt/surgery_commands.go | 64 ------------------------- cmd/bbolt/surgery_commands_test.go | 54 --------------------- 4 files changed, 111 insertions(+), 122 deletions(-) diff --git a/cmd/bbolt/command_surgery_cobra.go b/cmd/bbolt/command_surgery_cobra.go index a62bec40a..7c9757c5a 100644 --- a/cmd/bbolt/command_surgery_cobra.go +++ b/cmd/bbolt/command_surgery_cobra.go @@ -18,10 +18,12 @@ var ( ) var ( - surgeryTargetDBFilePath string - surgeryPageId uint64 - surgeryStartElementIdx int - surgeryEndElementIdx int + surgeryTargetDBFilePath string + surgeryPageId uint64 + surgeryStartElementIdx int + surgeryEndElementIdx int + surgerySourcePageId uint64 + surgeryDestinationPageId uint64 ) func newSurgeryCobraCommand() *cobra.Command { @@ -31,6 +33,7 @@ func newSurgeryCobraCommand() *cobra.Command { }
surgeryCmd.AddCommand(newSurgeryRevertMetaPageCommand()) + surgeryCmd.AddCommand(newSurgeryCopyPageCommand()) surgeryCmd.AddCommand(newSurgeryClearPageElementsCommand()) surgeryCmd.AddCommand(newSurgeryFreelistCommand()) @@ -78,6 +81,51 @@ func surgeryRevertMetaPageFunc(cmd *cobra.Command, args []string) error { return nil } +func newSurgeryCopyPageCommand() *cobra.Command { + copyPageCmd := &cobra.Command{ + Use: "copy-page [options]", + Short: "Copy page from the source page Id to the destination page Id", + Args: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("db file path not provided") + } + if len(args) > 1 { + return errors.New("too many arguments") + } + return nil + }, + RunE: surgeryCopyPageFunc, + } + + copyPageCmd.Flags().StringVar(&surgeryTargetDBFilePath, "output", "", "path to the target db file") + copyPageCmd.Flags().Uint64VarP(&surgerySourcePageId, "from-page", "", 0, "source page Id") + copyPageCmd.Flags().Uint64VarP(&surgeryDestinationPageId, "to-page", "", 0, "destination page Id") + + return copyPageCmd +} + +func surgeryCopyPageFunc(cmd *cobra.Command, args []string) error { + srcDBPath := args[0] + + if surgerySourcePageId == surgeryDestinationPageId { + return fmt.Errorf("'--from-page' and '--to-page' have the same value: %d", surgerySourcePageId) + } + + if err := common.CopyFile(srcDBPath, surgeryTargetDBFilePath); err != nil { + return fmt.Errorf("[copy-page] copy file failed: %w", err) + } + + if err := surgeon.CopyPage(surgeryTargetDBFilePath, common.Pgid(surgerySourcePageId), common.Pgid(surgeryDestinationPageId)); err != nil { + return fmt.Errorf("copy-page command failed: %w", err) + } + + fmt.Fprintf(os.Stdout, "WARNING: the free list might have changed.\n") + fmt.Fprintf(os.Stdout, "Please consider executing `./bbolt surgery abandon-freelist ...`\n") + + fmt.Fprintf(os.Stdout, "The page %d was successfully copied to page %d\n", surgerySourcePageId, surgeryDestinationPageId) + return nil 
+} + func newSurgeryClearPageElementsCommand() *cobra.Command { clearElementCmd := &cobra.Command{ Use: "clear-page-elements [options]", diff --git a/cmd/bbolt/command_surgery_cobra_test.go b/cmd/bbolt/command_surgery_cobra_test.go index fc0f6376e..5c506a91a 100644 --- a/cmd/bbolt/command_surgery_cobra_test.go +++ b/cmd/bbolt/command_surgery_cobra_test.go @@ -61,6 +61,44 @@ func TestSurgery_RevertMetaPage(t *testing.T) { assert.Equal(t, pageDataWithoutPageId(nonActiveSrcBuf), pageDataWithoutPageId(dstBuf1)) } +func TestSurgery_CopyPage(t *testing.T) { + pageSize := 4096 + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize}) + srcPath := db.Path() + + // Insert some sample data + t.Log("Insert some sample data") + err := db.Fill([]byte("data"), 1, 20, + func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) }, + func(tx int, k int) []byte { return make([]byte, 10) }, + ) + require.NoError(t, err) + + defer requireDBNoChange(t, dbData(t, srcPath), srcPath) + + // copy page 3 to page 2 + t.Log("copy page 3 to page 2") + rootCmd := main.NewRootCommand() + output := filepath.Join(t.TempDir(), "dstdb") + rootCmd.SetArgs([]string{ + "surgery", "copy-page", srcPath, + "--output", output, + "--from-page", "3", + "--to-page", "2", + }) + err = rootCmd.Execute() + require.NoError(t, err) + + // The page 2 should have exactly the same data as page 3. 
+ t.Log("Verify result") + srcPageId3Data := readPage(t, srcPath, 3, pageSize) + dstPageId3Data := readPage(t, output, 3, pageSize) + dstPageId2Data := readPage(t, output, 2, pageSize) + + assert.Equal(t, srcPageId3Data, dstPageId3Data) + assert.Equal(t, pageDataWithoutPageId(srcPageId3Data), pageDataWithoutPageId(dstPageId2Data)) +} + func TestSurgery_ClearPageElements_Without_Overflow(t *testing.T) { testCases := []struct { name string @@ -578,3 +616,24 @@ func readMetaPage(t *testing.T, path string) *common.Meta { require.NoError(t, err) return common.LoadPageMeta(buf) } + +func readPage(t *testing.T, path string, pageId int, pageSize int) []byte { + dbFile, err := os.Open(path) + require.NoError(t, err) + defer dbFile.Close() + + fi, err := dbFile.Stat() + require.NoError(t, err) + require.GreaterOrEqual(t, fi.Size(), int64((pageId+1)*pageSize)) + + buf := make([]byte, pageSize) + byteRead, err := dbFile.ReadAt(buf, int64(pageId*pageSize)) + require.NoError(t, err) + require.Equal(t, pageSize, byteRead) + + return buf +} + +func pageDataWithoutPageId(buf []byte) []byte { + return buf[8:] +} diff --git a/cmd/bbolt/surgery_commands.go b/cmd/bbolt/surgery_commands.go index 385903134..64b970ae3 100644 --- a/cmd/bbolt/surgery_commands.go +++ b/cmd/bbolt/surgery_commands.go @@ -40,8 +40,6 @@ func (cmd *surgeryCommand) Run(args ...string) error { case "help": fmt.Fprintln(cmd.Stderr, cmd.Usage()) return ErrUsage - case "copy-page": - return newCopyPageCommand(cmd).Run(args[1:]...) case "clear-page": return newClearPageCommand(cmd).Run(args[1:]...) default: @@ -81,73 +79,11 @@ Usage: The commands are: help print this screen clear-page clear all elements at the given pageId - copy-page copy page from source pageId to target pageId - revert-meta-page revert the meta page change made by the last transaction Use "bbolt surgery [command] -h" for more information about a command. `, "\n") } -// copyPageCommand represents the "surgery copy-page" command execution. 
-type copyPageCommand struct { - *surgeryCommand -} - -// newCopyPageCommand returns a copyPageCommand. -func newCopyPageCommand(m *surgeryCommand) *copyPageCommand { - c := ©PageCommand{} - c.surgeryCommand = m - return c -} - -// Run executes the command. -func (cmd *copyPageCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - if err := cmd.parsePathsAndCopyFile(fs); err != nil { - return fmt.Errorf("copyPageCommand failed to parse paths and copy file: %w", err) - } - - // Read page id. - srcPageId, err := strconv.ParseUint(fs.Arg(2), 10, 64) - if err != nil { - return err - } - dstPageId, err := strconv.ParseUint(fs.Arg(3), 10, 64) - if err != nil { - return err - } - - // copy the page - if err := surgeon.CopyPage(cmd.dstPath, common.Pgid(srcPageId), common.Pgid(dstPageId)); err != nil { - return fmt.Errorf("copyPageCommand failed: %w", err) - } - - fmt.Fprintf(cmd.Stdout, "The page %d was copied to page %d\n", srcPageId, dstPageId) - return nil -} - -// Usage returns the help message. -func (cmd *copyPageCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt surgery copy-page SRC DST srcPageId dstPageid - -CopyPage copies the database file at SRC to a newly created database -file at DST. Afterwards, it copies the page at srcPageId to the page -at dstPageId in DST. - -The original database is left untouched. -`, "\n") -} - // clearPageCommand represents the "surgery clear-page" command execution. 
type clearPageCommand struct { *surgeryCommand diff --git a/cmd/bbolt/surgery_commands_test.go b/cmd/bbolt/surgery_commands_test.go index 567c0c493..af3b1393e 100644 --- a/cmd/bbolt/surgery_commands_test.go +++ b/cmd/bbolt/surgery_commands_test.go @@ -2,7 +2,6 @@ package main_test import ( "fmt" - "os" "path/filepath" "testing" @@ -14,38 +13,6 @@ import ( "go.etcd.io/bbolt/internal/common" ) -func TestSurgery_CopyPage(t *testing.T) { - pageSize := 4096 - db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize}) - srcPath := db.Path() - - // Insert some sample data - t.Log("Insert some sample data") - err := db.Fill([]byte("data"), 1, 20, - func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) }, - func(tx int, k int) []byte { return make([]byte, 10) }, - ) - require.NoError(t, err) - - defer requireDBNoChange(t, dbData(t, srcPath), srcPath) - - // copy page 3 to page 2 - t.Log("copy page 3 to page 2") - dstPath := filepath.Join(t.TempDir(), "dstdb") - m := NewMain() - err = m.Run("surgery", "copy-page", srcPath, dstPath, "3", "2") - require.NoError(t, err) - - // The page 2 should have exactly the same data as page 3. - t.Log("Verify result") - srcPageId3Data := readPage(t, srcPath, 3, pageSize) - dstPageId3Data := readPage(t, dstPath, 3, pageSize) - dstPageId2Data := readPage(t, dstPath, 2, pageSize) - - assert.Equal(t, srcPageId3Data, dstPageId3Data) - assert.Equal(t, pageDataWithoutPageId(srcPageId3Data), pageDataWithoutPageId(dstPageId2Data)) -} - // TODO(ahrtr): add test case below for `surgery clear-page` command: // 1. The page is a branch page. All its children should become free pages. 
func TestSurgery_ClearPage(t *testing.T) { @@ -78,24 +45,3 @@ func TestSurgery_ClearPage(t *testing.T) { assert.Equal(t, uint16(0), p.Count()) assert.Equal(t, uint32(0), p.Overflow()) } - -func readPage(t *testing.T, path string, pageId int, pageSize int) []byte { - dbFile, err := os.Open(path) - require.NoError(t, err) - defer dbFile.Close() - - fi, err := dbFile.Stat() - require.NoError(t, err) - require.GreaterOrEqual(t, fi.Size(), int64((pageId+1)*pageSize)) - - buf := make([]byte, pageSize) - byteRead, err := dbFile.ReadAt(buf, int64(pageId*pageSize)) - require.NoError(t, err) - require.Equal(t, pageSize, byteRead) - - return buf -} - -func pageDataWithoutPageId(buf []byte) []byte { - return buf[8:] -} From b027e485cef21d2bd614f48823394cc857351a1e Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sat, 6 May 2023 06:25:23 +0800 Subject: [PATCH 074/439] cmd: print a warning to abandon the freelist if present in 'surgery copy-page' command Signed-off-by: Benjamin Wang --- cmd/bbolt/command_surgery_cobra.go | 12 +++++++++--- internal/common/meta.go | 4 ++++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/cmd/bbolt/command_surgery_cobra.go b/cmd/bbolt/command_surgery_cobra.go index 7c9757c5a..936775dac 100644 --- a/cmd/bbolt/command_surgery_cobra.go +++ b/cmd/bbolt/command_surgery_cobra.go @@ -119,8 +119,14 @@ func surgeryCopyPageFunc(cmd *cobra.Command, args []string) error { return fmt.Errorf("copy-page command failed: %w", err) } - fmt.Fprintf(os.Stdout, "WARNING: the free list might have changed.\n") - fmt.Fprintf(os.Stdout, "Please consider executing `./bbolt surgery abandon-freelist ...`\n") + meta, err := readMetaPage(srcDBPath) + if err != nil { + return err + } + if meta.IsFreelistPersisted() { + fmt.Fprintf(os.Stdout, "WARNING: the free list might have changed.\n") + fmt.Fprintf(os.Stdout, "Please consider executing `./bbolt surgery abandon-freelist ...`\n") + } fmt.Fprintf(os.Stdout, "The page %d was successfully copied to page %d\n", 
surgerySourcePageId, surgeryDestinationPageId) return nil @@ -267,7 +273,7 @@ func surgeryFreelistRebuildFunc(cmd *cobra.Command, args []string) error { if err != nil { return err } - if meta.Freelist() != common.PgidNoFreelist { + if meta.IsFreelistPersisted() { return ErrSurgeryFreelistAlreadyExist } diff --git a/internal/common/meta.go b/internal/common/meta.go index 79727c8cb..b97949a57 100644 --- a/internal/common/meta.go +++ b/internal/common/meta.go @@ -108,6 +108,10 @@ func (m *Meta) SetFreelist(v Pgid) { m.freelist = v } +func (m *Meta) IsFreelistPersisted() bool { + return m.freelist != PgidNoFreelist +} + func (m *Meta) Pgid() Pgid { return m.pgid } From 81eb691a8ec6a9bafd1eea355aecbf2896fb613b Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sat, 6 May 2023 06:35:11 +0800 Subject: [PATCH 075/439] cmd: get all surgery options included in a struct surgeryOptions Signed-off-by: Benjamin Wang --- cmd/bbolt/command_surgery_cobra.go | 109 ++++++++++++++++------------- 1 file changed, 59 insertions(+), 50 deletions(-) diff --git a/cmd/bbolt/command_surgery_cobra.go b/cmd/bbolt/command_surgery_cobra.go index 936775dac..eb7073d91 100644 --- a/cmd/bbolt/command_surgery_cobra.go +++ b/cmd/bbolt/command_surgery_cobra.go @@ -17,14 +17,18 @@ var ( ErrSurgeryFreelistAlreadyExist = errors.New("the file already has freelist, please consider to abandon the freelist to forcibly rebuild it") ) -var ( +type surgeryOptions struct { surgeryTargetDBFilePath string surgeryPageId uint64 surgeryStartElementIdx int surgeryEndElementIdx int surgerySourcePageId uint64 surgeryDestinationPageId uint64 -) +} + +func defaultSurgeryOptions() surgeryOptions { + return surgeryOptions{} +} func newSurgeryCobraCommand() *cobra.Command { surgeryCmd := &cobra.Command{ @@ -41,6 +45,7 @@ func newSurgeryCobraCommand() *cobra.Command { } func newSurgeryRevertMetaPageCommand() *cobra.Command { + cfg := defaultSurgeryOptions() revertMetaPageCmd := &cobra.Command{ Use: "revert-meta-page [options]", 
Short: "Revert the meta page to revert the changes performed by the latest transaction", @@ -53,26 +58,26 @@ func newSurgeryRevertMetaPageCommand() *cobra.Command { } return nil }, - RunE: surgeryRevertMetaPageFunc, + RunE: func(cmd *cobra.Command, args []string) error { + return surgeryRevertMetaPageFunc(args[0], cfg) + }, } - revertMetaPageCmd.Flags().StringVar(&surgeryTargetDBFilePath, "output", "", "path to the target db file") + revertMetaPageCmd.Flags().StringVar(&cfg.surgeryTargetDBFilePath, "output", "", "path to the target db file") return revertMetaPageCmd } -func surgeryRevertMetaPageFunc(cmd *cobra.Command, args []string) error { - srcDBPath := args[0] - - if err := checkDBPaths(srcDBPath, surgeryTargetDBFilePath); err != nil { +func surgeryRevertMetaPageFunc(srcDBPath string, cfg surgeryOptions) error { + if err := checkDBPaths(srcDBPath, cfg.surgeryTargetDBFilePath); err != nil { return err } - if err := common.CopyFile(srcDBPath, surgeryTargetDBFilePath); err != nil { + if err := common.CopyFile(srcDBPath, cfg.surgeryTargetDBFilePath); err != nil { return fmt.Errorf("[revert-meta-page] copy file failed: %w", err) } - if err := surgeon.RevertMetaPage(surgeryTargetDBFilePath); err != nil { + if err := surgeon.RevertMetaPage(cfg.surgeryTargetDBFilePath); err != nil { return fmt.Errorf("revert-meta-page command failed: %w", err) } @@ -82,6 +87,7 @@ func surgeryRevertMetaPageFunc(cmd *cobra.Command, args []string) error { } func newSurgeryCopyPageCommand() *cobra.Command { + cfg := defaultSurgeryOptions() copyPageCmd := &cobra.Command{ Use: "copy-page [options]", Short: "Copy page from the source page Id to the destination page Id", @@ -94,28 +100,28 @@ func newSurgeryCopyPageCommand() *cobra.Command { } return nil }, - RunE: surgeryCopyPageFunc, + RunE: func(cmd *cobra.Command, args []string) error { + return surgeryCopyPageFunc(args[0], cfg) + }, } - copyPageCmd.Flags().StringVar(&surgeryTargetDBFilePath, "output", "", "path to the target db file") - 
copyPageCmd.Flags().Uint64VarP(&surgerySourcePageId, "from-page", "", 0, "source page Id") - copyPageCmd.Flags().Uint64VarP(&surgeryDestinationPageId, "to-page", "", 0, "destination page Id") + copyPageCmd.Flags().StringVar(&cfg.surgeryTargetDBFilePath, "output", "", "path to the target db file") + copyPageCmd.Flags().Uint64VarP(&cfg.surgerySourcePageId, "from-page", "", 0, "source page Id") + copyPageCmd.Flags().Uint64VarP(&cfg.surgeryDestinationPageId, "to-page", "", 0, "destination page Id") return copyPageCmd } -func surgeryCopyPageFunc(cmd *cobra.Command, args []string) error { - srcDBPath := args[0] - - if surgerySourcePageId == surgeryDestinationPageId { - return fmt.Errorf("'--from-page' and '--to-page' have the same value: %d", surgerySourcePageId) +func surgeryCopyPageFunc(srcDBPath string, cfg surgeryOptions) error { + if cfg.surgerySourcePageId == cfg.surgeryDestinationPageId { + return fmt.Errorf("'--from-page' and '--to-page' have the same value: %d", cfg.surgerySourcePageId) } - if err := common.CopyFile(srcDBPath, surgeryTargetDBFilePath); err != nil { + if err := common.CopyFile(srcDBPath, cfg.surgeryTargetDBFilePath); err != nil { return fmt.Errorf("[copy-page] copy file failed: %w", err) } - if err := surgeon.CopyPage(surgeryTargetDBFilePath, common.Pgid(surgerySourcePageId), common.Pgid(surgeryDestinationPageId)); err != nil { + if err := surgeon.CopyPage(cfg.surgeryTargetDBFilePath, common.Pgid(cfg.surgerySourcePageId), common.Pgid(cfg.surgeryDestinationPageId)); err != nil { return fmt.Errorf("copy-page command failed: %w", err) } @@ -128,11 +134,12 @@ func surgeryCopyPageFunc(cmd *cobra.Command, args []string) error { fmt.Fprintf(os.Stdout, "Please consider executing `./bbolt surgery abandon-freelist ...`\n") } - fmt.Fprintf(os.Stdout, "The page %d was successfully copied to page %d\n", surgerySourcePageId, surgeryDestinationPageId) + fmt.Fprintf(os.Stdout, "The page %d was successfully copied to page %d\n", cfg.surgerySourcePageId, 
cfg.surgeryDestinationPageId) return nil } func newSurgeryClearPageElementsCommand() *cobra.Command { + cfg := defaultSurgeryOptions() clearElementCmd := &cobra.Command{ Use: "clear-page-elements [options]", Short: "Clears elements from the given page, which can be a branch or leaf page", @@ -145,29 +152,29 @@ func newSurgeryClearPageElementsCommand() *cobra.Command { } return nil }, - RunE: surgeryClearPageElementFunc, + RunE: func(cmd *cobra.Command, args []string) error { + return surgeryClearPageElementFunc(args[0], cfg) + }, } - clearElementCmd.Flags().StringVar(&surgeryTargetDBFilePath, "output", "", "path to the target db file") - clearElementCmd.Flags().Uint64VarP(&surgeryPageId, "pageId", "", 0, "page id") - clearElementCmd.Flags().IntVarP(&surgeryStartElementIdx, "from-index", "", 0, "start element index (included) to clear, starting from 0") - clearElementCmd.Flags().IntVarP(&surgeryEndElementIdx, "to-index", "", 0, "end element index (excluded) to clear, starting from 0, -1 means to the end of page") + clearElementCmd.Flags().StringVar(&cfg.surgeryTargetDBFilePath, "output", "", "path to the target db file") + clearElementCmd.Flags().Uint64VarP(&cfg.surgeryPageId, "pageId", "", 0, "page id") + clearElementCmd.Flags().IntVarP(&cfg.surgeryStartElementIdx, "from-index", "", 0, "start element index (included) to clear, starting from 0") + clearElementCmd.Flags().IntVarP(&cfg.surgeryEndElementIdx, "to-index", "", 0, "end element index (excluded) to clear, starting from 0, -1 means to the end of page") return clearElementCmd } -func surgeryClearPageElementFunc(cmd *cobra.Command, args []string) error { - srcDBPath := args[0] - - if err := common.CopyFile(srcDBPath, surgeryTargetDBFilePath); err != nil { +func surgeryClearPageElementFunc(srcDBPath string, cfg surgeryOptions) error { + if err := common.CopyFile(srcDBPath, cfg.surgeryTargetDBFilePath); err != nil { return fmt.Errorf("[clear-page-element] copy file failed: %w", err) } - if surgeryPageId < 2 { - 
return fmt.Errorf("the pageId must be at least 2, but got %d", surgeryPageId) + if cfg.surgeryPageId < 2 { + return fmt.Errorf("the pageId must be at least 2, but got %d", cfg.surgeryPageId) } - needAbandonFreelist, err := surgeon.ClearPageElements(surgeryTargetDBFilePath, common.Pgid(surgeryPageId), surgeryStartElementIdx, surgeryEndElementIdx, false) + needAbandonFreelist, err := surgeon.ClearPageElements(cfg.surgeryTargetDBFilePath, common.Pgid(cfg.surgeryPageId), cfg.surgeryStartElementIdx, cfg.surgeryEndElementIdx, false) if err != nil { return fmt.Errorf("clear-page-element command failed: %w", err) } @@ -177,7 +184,7 @@ func surgeryClearPageElementFunc(cmd *cobra.Command, args []string) error { fmt.Fprintf(os.Stdout, "Please consider executing `./bbolt surgery abandon-freelist ...`\n") } - fmt.Fprintf(os.Stdout, "All elements in [%d, %d) in page %d were cleared\n", surgeryStartElementIdx, surgeryEndElementIdx, surgeryPageId) + fmt.Fprintf(os.Stdout, "All elements in [%d, %d) in page %d were cleared\n", cfg.surgeryStartElementIdx, cfg.surgeryEndElementIdx, cfg.surgeryPageId) return nil } @@ -197,6 +204,7 @@ func newSurgeryFreelistCommand() *cobra.Command { } func newSurgeryFreelistAbandonCommand() *cobra.Command { + cfg := defaultSurgeryOptions() abandonFreelistCmd := &cobra.Command{ Use: "abandon [options]", Short: "Abandon the freelist from both meta pages", @@ -209,22 +217,22 @@ func newSurgeryFreelistAbandonCommand() *cobra.Command { } return nil }, - RunE: surgeryFreelistAbandonFunc, + RunE: func(cmd *cobra.Command, args []string) error { + return surgeryFreelistAbandonFunc(args[0], cfg) + }, } - abandonFreelistCmd.Flags().StringVar(&surgeryTargetDBFilePath, "output", "", "path to the target db file") + abandonFreelistCmd.Flags().StringVar(&cfg.surgeryTargetDBFilePath, "output", "", "path to the target db file") return abandonFreelistCmd } -func surgeryFreelistAbandonFunc(cmd *cobra.Command, args []string) error { - srcDBPath := args[0] - - if err := 
common.CopyFile(srcDBPath, surgeryTargetDBFilePath); err != nil { +func surgeryFreelistAbandonFunc(srcDBPath string, cfg surgeryOptions) error { + if err := common.CopyFile(srcDBPath, cfg.surgeryTargetDBFilePath); err != nil { return fmt.Errorf("[freelist abandon] copy file failed: %w", err) } - if err := surgeon.ClearFreelist(surgeryTargetDBFilePath); err != nil { + if err := surgeon.ClearFreelist(cfg.surgeryTargetDBFilePath); err != nil { return fmt.Errorf("abandom-freelist command failed: %w", err) } @@ -233,6 +241,7 @@ func surgeryFreelistAbandonFunc(cmd *cobra.Command, args []string) error { } func newSurgeryFreelistRebuildCommand() *cobra.Command { + cfg := defaultSurgeryOptions() rebuildFreelistCmd := &cobra.Command{ Use: "rebuild [options]", Short: "Rebuild the freelist", @@ -245,17 +254,17 @@ func newSurgeryFreelistRebuildCommand() *cobra.Command { } return nil }, - RunE: surgeryFreelistRebuildFunc, + RunE: func(cmd *cobra.Command, args []string) error { + return surgeryFreelistRebuildFunc(args[0], cfg) + }, } - rebuildFreelistCmd.Flags().StringVar(&surgeryTargetDBFilePath, "output", "", "path to the target db file") + rebuildFreelistCmd.Flags().StringVar(&cfg.surgeryTargetDBFilePath, "output", "", "path to the target db file") return rebuildFreelistCmd } -func surgeryFreelistRebuildFunc(cmd *cobra.Command, args []string) error { - srcDBPath := args[0] - +func surgeryFreelistRebuildFunc(srcDBPath string, cfg surgeryOptions) error { // Ensure source file exists. 
fi, err := os.Stat(srcDBPath) if os.IsNotExist(err) { @@ -264,7 +273,7 @@ func surgeryFreelistRebuildFunc(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to open source database file %q: %v", srcDBPath, err) } - if surgeryTargetDBFilePath == "" { + if cfg.surgeryTargetDBFilePath == "" { return fmt.Errorf("output database path wasn't given, specify output database file path with --output option") } @@ -277,12 +286,12 @@ func surgeryFreelistRebuildFunc(cmd *cobra.Command, args []string) error { return ErrSurgeryFreelistAlreadyExist } - if err := common.CopyFile(srcDBPath, surgeryTargetDBFilePath); err != nil { + if err := common.CopyFile(srcDBPath, cfg.surgeryTargetDBFilePath); err != nil { return fmt.Errorf("[freelist rebuild] copy file failed: %w", err) } // bboltDB automatically reconstruct & sync freelist in write mode. - db, err := bolt.Open(surgeryTargetDBFilePath, fi.Mode(), &bolt.Options{NoFreelistSync: false}) + db, err := bolt.Open(cfg.surgeryTargetDBFilePath, fi.Mode(), &bolt.Options{NoFreelistSync: false}) if err != nil { return fmt.Errorf("[freelist rebuild] open db file failed: %w", err) } From 7f578facbeaaa7b98ba48aebea01bb543779f97b Mon Sep 17 00:00:00 2001 From: James Blair Date: Mon, 8 May 2023 13:03:08 +1200 Subject: [PATCH 076/439] Updated go to latest patch release 1.19.9. 
Signed-off-by: James Blair --- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/tests.yaml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index c972f3804..5c954fa8e 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -11,7 +11,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.7" + go-version: "1.19.9" - run: | make gofail-enable make test-failpoint diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index d399ff3fb..90eed6e77 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -15,7 +15,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.7" + go-version: "1.19.9" - run: make fmt - env: TARGET: ${{ matrix.target }} @@ -66,7 +66,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.7" + go-version: "1.19.9" - run: make fmt - env: TARGET: ${{ matrix.target }} @@ -94,6 +94,6 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.17.13" + go-version: "1.19.9" - run: make coverage From 3ebf0b741bbd1a16f9b8ed5ad29d46ad1ba89f1a Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Mon, 8 May 2023 06:29:16 +0800 Subject: [PATCH 077/439] test: enhance the concurrent test to support customize bucket and key Also support customize the test duration by environment variable. 
Signed-off-by: Benjamin Wang --- concurrent_test.go | 185 +++++++++++++++++++++++++++++---------------- 1 file changed, 119 insertions(+), 66 deletions(-) diff --git a/concurrent_test.go b/concurrent_test.go index 237f5da69..cfedcd7c6 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -23,7 +23,16 @@ import ( bolt "go.etcd.io/bbolt" ) -const noopTxKey string = "%magic-no-op-key%" +const ( + bucketPrefix = "bucket" + keyPrefix = "key" + noopTxKey = "%magic-no-op-key%" + + // TestConcurrentCaseDuration is used as a env variable to specify the + // concurrent test duration. + testConcurrentCaseDuration = "TEST_CONCURRENT_CASE_DURATION" + defaultConcurrentTestDuration = 30 * time.Second +) type duration struct { min time.Duration @@ -41,9 +50,11 @@ type operationChance struct { } type concurrentConfig struct { + bucketCount int + keyCount int workInterval duration operationRatio []operationChance - readInterval duration // only used by readOpeartion + readInterval duration // only used by readOperation noopWriteRatio int // only used by writeOperation writeBytes bytesRange // only used by writeOperation } @@ -60,13 +71,12 @@ func TestConcurrentReadAndWrite(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } - bucket := []byte("data") - keys := []string{"key0", "key1", "key2", "key3", "key4", "key5", "key6", "key7", "key8", "key9"} + + testDuration := concurrentTestDuration(t) conf := concurrentConfig{ - workInterval: duration{ - min: 5 * time.Millisecond, - max: 10 * time.Millisecond, - }, + bucketCount: 5, + keyCount: 10000, + workInterval: duration{}, operationRatio: []operationChance{ {operation: Read, chance: 60}, {operation: Write, chance: 20}, @@ -93,31 +103,31 @@ func TestConcurrentReadAndWrite(t *testing.T) { name: "1 worker", workerCount: 1, conf: conf, - testDuration: 30 * time.Second, + testDuration: testDuration, }, { name: "10 workers", workerCount: 10, conf: conf, - testDuration: 30 * time.Second, + testDuration: 
testDuration, }, { name: "50 workers", workerCount: 50, conf: conf, - testDuration: 30 * time.Second, + testDuration: testDuration, }, { name: "100 workers", workerCount: 100, conf: conf, - testDuration: 30 * time.Second, + testDuration: testDuration, }, { name: "200 workers", workerCount: 200, conf: conf, - testDuration: 30 * time.Second, + testDuration: testDuration, }, } @@ -125,8 +135,6 @@ func TestConcurrentReadAndWrite(t *testing.T) { tc := tc t.Run(tc.name, func(t *testing.T) { concurrentReadAndWrite(t, - bucket, - keys, tc.workerCount, tc.conf, tc.testDuration) @@ -134,9 +142,24 @@ func TestConcurrentReadAndWrite(t *testing.T) { } } +func concurrentTestDuration(t *testing.T) time.Duration { + durationInEnv := strings.ToLower(os.Getenv(testConcurrentCaseDuration)) + if durationInEnv == "" { + t.Logf("%q not set, defaults to %s", testConcurrentCaseDuration, defaultConcurrentTestDuration) + return defaultConcurrentTestDuration + } + + d, err := time.ParseDuration(durationInEnv) + if err != nil { + t.Logf("Failed to parse %s=%s, error: %v, defaults to %s", testConcurrentCaseDuration, durationInEnv, err, defaultConcurrentTestDuration) + return defaultConcurrentTestDuration + } + + t.Logf("Concurrent test duration set by %s=%s", testConcurrentCaseDuration, d) + return d +} + func concurrentReadAndWrite(t *testing.T, - bucket []byte, - keys []string, workerCount int, conf concurrentConfig, testDuration time.Duration) { @@ -145,8 +168,12 @@ func concurrentReadAndWrite(t *testing.T, db := mustCreateDB(t, nil) defer db.Close() err := db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucket(bucket) - return err + for i := 0; i < conf.bucketCount; i++ { + if _, err := tx.CreateBucketIfNotExists(bucketName(i)); err != nil { + return err + } + } + return nil }) require.NoError(t, err) @@ -156,12 +183,13 @@ func concurrentReadAndWrite(t *testing.T, // Refer to: https://github.com/golang/go/issues/49929 panicked := true defer func() { + t.Log("Save data if failed.") 
saveDataIfFailed(t, db, records, panicked) }() t.Log("Starting workers.") records = runWorkers(t, - db, bucket, keys, + db, workerCount, conf, testDuration) @@ -229,8 +257,6 @@ workers, which execute different operations, including `Read`, */ func runWorkers(t *testing.T, db *bolt.DB, - bucket []byte, - keys []string, workerCount int, conf concurrentConfig, testDuration time.Duration) historyRecords { @@ -243,10 +269,8 @@ func runWorkers(t *testing.T, g := new(errgroup.Group) for i := 0; i < workerCount; i++ { w := &worker{ - id: i, - db: db, - bucket: bucket, - keys: keys, + id: i, + db: db, conf: conf, @@ -295,9 +319,6 @@ type worker struct { id int db *bolt.DB - bucket []byte - keys []string - conf concurrentConfig errCh chan error @@ -321,7 +342,8 @@ func (w *worker) run() (historyRecords, error) { } op := w.pickOperation() - rec, err := executeOperation(op, w.db, w.bucket, w.keys, w.conf) + bucket, key := w.pickBucket(), w.pickKey() + rec, err := executeOperation(op, w.db, bucket, key, w.conf) if err != nil { readErr := fmt.Errorf("[%s: %s]: %w", w.name(), op, err) w.t.Error(readErr) @@ -330,10 +352,26 @@ func (w *worker) run() (historyRecords, error) { } rs = append(rs, rec) - time.Sleep(randomDurationInRange(w.conf.workInterval.min, w.conf.workInterval.max)) + if w.conf.workInterval != (duration{}) { + time.Sleep(randomDurationInRange(w.conf.workInterval.min, w.conf.workInterval.max)) + } } } +func (w *worker) pickBucket() []byte { + return bucketName(mrand.Intn(w.conf.bucketCount)) +} + +func bucketName(index int) []byte { + bucket := fmt.Sprintf("%s_%d", bucketPrefix, index) + return []byte(bucket) +} + +func (w *worker) pickKey() []byte { + key := fmt.Sprintf("%s_%d", keyPrefix, mrand.Intn(w.conf.keyCount)) + return []byte(key) +} + func (w *worker) pickOperation() OperationType { sum := 0 for _, op := range w.conf.operationRatio { @@ -349,32 +387,31 @@ func (w *worker) pickOperation() OperationType { panic("unexpected") } -func executeOperation(op 
OperationType, db *bolt.DB, bucket []byte, keys []string, conf concurrentConfig) (historyRecord, error) { +func executeOperation(op OperationType, db *bolt.DB, bucket []byte, key []byte, conf concurrentConfig) (historyRecord, error) { switch op { case Read: - return executeRead(db, bucket, keys, conf.readInterval) + return executeRead(db, bucket, key, conf.readInterval) case Write: - return executeWrite(db, bucket, keys, conf.writeBytes, conf.noopWriteRatio) + return executeWrite(db, bucket, key, conf.writeBytes, conf.noopWriteRatio) case Delete: - return executeDelete(db, bucket, keys) + return executeDelete(db, bucket, key) default: panic(fmt.Sprintf("unexpected operation type: %s", op)) } } -func executeRead(db *bolt.DB, bucket []byte, keys []string, readInterval duration) (historyRecord, error) { +func executeRead(db *bolt.DB, bucket []byte, key []byte, readInterval duration) (historyRecord, error) { var rec historyRecord err := db.View(func(tx *bolt.Tx) error { b := tx.Bucket(bucket) - selectedKey := keys[mrand.Intn(len(keys))] - initialVal := b.Get([]byte(selectedKey)) + initialVal := b.Get(key) time.Sleep(randomDurationInRange(readInterval.min, readInterval.max)) - val := b.Get([]byte(selectedKey)) + val := b.Get(key) if !reflect.DeepEqual(initialVal, val) { return fmt.Errorf("read different values for the same key (%q), value1: %q, value2: %q", - selectedKey, formatBytes(initialVal), formatBytes(val)) + string(key), formatBytes(initialVal), formatBytes(val)) } clonedVal := make([]byte, len(val)) @@ -382,7 +419,8 @@ func executeRead(db *bolt.DB, bucket []byte, keys []string, readInterval duratio rec = historyRecord{ OperationType: Read, - Key: selectedKey, + Bucket: string(bucket), + Key: string(key), Value: clonedVal, Txid: tx.ID(), } @@ -393,7 +431,7 @@ func executeRead(db *bolt.DB, bucket []byte, keys []string, readInterval duratio return rec, err } -func executeWrite(db *bolt.DB, bucket []byte, keys []string, writeBytes bytesRange, noopWriteRatio int) 
(historyRecord, error) { +func executeWrite(db *bolt.DB, bucket []byte, key []byte, writeBytes bytesRange, noopWriteRatio int) (historyRecord, error) { var rec historyRecord err := db.Update(func(tx *bolt.Tx) error { @@ -403,6 +441,7 @@ func executeWrite(db *bolt.DB, bucket []byte, keys []string, writeBytes bytesRan // 2. Two meta pages point to the same root page. rec = historyRecord{ OperationType: Write, + Bucket: string(bucket), Key: noopTxKey, Value: nil, Txid: tx.ID(), @@ -412,19 +451,18 @@ func executeWrite(db *bolt.DB, bucket []byte, keys []string, writeBytes bytesRan b := tx.Bucket(bucket) - selectedKey := keys[mrand.Intn(len(keys))] - valueBytes := randomIntInRange(writeBytes.min, writeBytes.max) v := make([]byte, valueBytes) if _, cErr := crand.Read(v); cErr != nil { return cErr } - putErr := b.Put([]byte(selectedKey), v) + putErr := b.Put(key, v) if putErr == nil { rec = historyRecord{ OperationType: Write, - Key: selectedKey, + Bucket: string(bucket), + Key: string(key), Value: v, Txid: tx.ID(), } @@ -436,19 +474,18 @@ func executeWrite(db *bolt.DB, bucket []byte, keys []string, writeBytes bytesRan return rec, err } -func executeDelete(db *bolt.DB, bucket []byte, keys []string) (historyRecord, error) { +func executeDelete(db *bolt.DB, bucket []byte, key []byte) (historyRecord, error) { var rec historyRecord err := db.Update(func(tx *bolt.Tx) error { b := tx.Bucket(bucket) - selectedKey := keys[mrand.Intn(len(keys))] - - deleteErr := b.Delete([]byte(selectedKey)) + deleteErr := b.Delete(key) if deleteErr == nil { rec = historyRecord{ OperationType: Delete, - Key: selectedKey, + Bucket: string(bucket), + Key: string(key), Txid: tx.ID(), } } @@ -485,19 +522,21 @@ and operation history */ func saveDataIfFailed(t *testing.T, db *bolt.DB, rs historyRecords, force bool) { if t.Failed() || force { + t.Log("Saving data...") + dbPath := db.Path() if err := db.Close(); err != nil { t.Errorf("Failed to close db: %v", err) } backupPath := testResultsDirectory(t) - 
backupDB(t, db, backupPath) + backupDB(t, dbPath, backupPath) persistHistoryRecords(t, rs, backupPath) } } -func backupDB(t *testing.T, db *bolt.DB, path string) { - targetFile := filepath.Join(path, "db.bak") +func backupDB(t *testing.T, srcPath string, dstPath string) { + targetFile := filepath.Join(dstPath, "db.bak") t.Logf("Saving the DB file to %s", targetFile) - err := copyFile(db.Path(), targetFile) + err := copyFile(srcPath, targetFile) require.NoError(t, err) t.Logf("DB file saved to %s", targetFile) } @@ -597,6 +636,7 @@ const ( type historyRecord struct { OperationType OperationType `json:"operationType,omitempty"` Txid int `json:"txid,omitempty"` + Bucket string `json:"bucket,omitempty"` Key string `json:"key,omitempty"` Value []byte `json:"value,omitempty"` } @@ -608,7 +648,12 @@ func (rs historyRecords) Len() int { } func (rs historyRecords) Less(i, j int) bool { - // Sorted by key firstly: all records with the same key are grouped together. + // Sorted by (bucket, key) firstly: all records in the same + // (bucket, key) are grouped together. + bucketCmp := strings.Compare(rs[i].Bucket, rs[j].Bucket) + if bucketCmp != 0 { + return bucketCmp < 0 + } keyCmp := strings.Compare(rs[i].Key, rs[j].Key) if keyCmp != 0 { return keyCmp < 0 @@ -620,7 +665,7 @@ func (rs historyRecords) Less(i, j int) bool { } // Sorted by operation type: put `Read` after other operation types - // if they operate on the same key and have the same txid. + // if they operate on the same (bucket, key) and have the same txid. 
if rs[i].OperationType == Read { return false } @@ -648,36 +693,44 @@ func validateIncrementalTxid(rs historyRecords) error { func validateSequential(rs historyRecords) error { sort.Sort(rs) - lastWriteKeyValueMap := make(map[string]*historyRecord) + type bucketAndKey struct { + bucket string + key string + } + lastWriteKeyValueMap := make(map[bucketAndKey]*historyRecord) for _, rec := range rs { - if v, ok := lastWriteKeyValueMap[rec.Key]; ok { + bk := bucketAndKey{ + bucket: rec.Bucket, + key: rec.Key, + } + if v, ok := lastWriteKeyValueMap[bk]; ok { if rec.OperationType == Write { v.Txid = rec.Txid if rec.Key != noopTxKey { v.Value = rec.Value } } else if rec.OperationType == Delete { - delete(lastWriteKeyValueMap, rec.Key) + delete(lastWriteKeyValueMap, bk) } else { if !reflect.DeepEqual(v.Value, rec.Value) { - return fmt.Errorf("readOperation[txid: %d, key: %s] read %x, \nbut writer[txid: %d, key: %s] wrote %x", - rec.Txid, rec.Key, rec.Value, - v.Txid, v.Key, v.Value) + return fmt.Errorf("readOperation[txid: %d, bucket: %s, key: %s] read %x, \nbut writer[txid: %d] wrote %x", + rec.Txid, rec.Bucket, rec.Key, rec.Value, v.Txid, v.Value) } } } else { if rec.OperationType == Write && rec.Key != noopTxKey { - lastWriteKeyValueMap[rec.Key] = &historyRecord{ + lastWriteKeyValueMap[bk] = &historyRecord{ OperationType: Write, + Bucket: rec.Bucket, Key: rec.Key, Value: rec.Value, Txid: rec.Txid, } } else if rec.OperationType == Read { if len(rec.Value) != 0 { - return fmt.Errorf("expected the first readOperation[txid: %d, key: %s] read nil, \nbut got %x", - rec.Txid, rec.Key, rec.Value) + return fmt.Errorf("expected the first readOperation[txid: %d, bucket: %s, key: %s] read nil, \nbut got %x", + rec.Txid, rec.Bucket, rec.Key, rec.Value) } } } From f483d799ae5c0e4daf43fe02602b3a4774f39a76 Mon Sep 17 00:00:00 2001 From: Marek Siarkowicz Date: Sun, 7 May 2023 08:15:16 +0200 Subject: [PATCH 078/439] Decompose command options Signed-off-by: Marek Siarkowicz --- 
cmd/bbolt/command_surgery_cobra.go | 204 +++++++++++++++++------------ 1 file changed, 118 insertions(+), 86 deletions(-) diff --git a/cmd/bbolt/command_surgery_cobra.go b/cmd/bbolt/command_surgery_cobra.go index eb7073d91..50ea686cf 100644 --- a/cmd/bbolt/command_surgery_cobra.go +++ b/cmd/bbolt/command_surgery_cobra.go @@ -6,6 +6,7 @@ import ( "os" "github.com/spf13/cobra" + "github.com/spf13/pflag" bolt "go.etcd.io/bbolt" "go.etcd.io/bbolt/internal/common" @@ -17,19 +18,6 @@ var ( ErrSurgeryFreelistAlreadyExist = errors.New("the file already has freelist, please consider to abandon the freelist to forcibly rebuild it") ) -type surgeryOptions struct { - surgeryTargetDBFilePath string - surgeryPageId uint64 - surgeryStartElementIdx int - surgeryEndElementIdx int - surgerySourcePageId uint64 - surgeryDestinationPageId uint64 -} - -func defaultSurgeryOptions() surgeryOptions { - return surgeryOptions{} -} - func newSurgeryCobraCommand() *cobra.Command { surgeryCmd := &cobra.Command{ Use: "surgery ", @@ -44,8 +32,23 @@ func newSurgeryCobraCommand() *cobra.Command { return surgeryCmd } +type surgeryBaseOptions struct { + outputDBFilePath string +} + +func (o *surgeryBaseOptions) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&o.outputDBFilePath, "output", o.outputDBFilePath, "path to the filePath db file") +} + +func (o *surgeryBaseOptions) Validate() error { + if o.outputDBFilePath == "" { + return fmt.Errorf("output database path wasn't given, specify output database file path with --output option") + } + return nil +} + func newSurgeryRevertMetaPageCommand() *cobra.Command { - cfg := defaultSurgeryOptions() + var o surgeryBaseOptions revertMetaPageCmd := &cobra.Command{ Use: "revert-meta-page [options]", Short: "Revert the meta page to revert the changes performed by the latest transaction", @@ -59,25 +62,26 @@ func newSurgeryRevertMetaPageCommand() *cobra.Command { return nil }, RunE: func(cmd *cobra.Command, args []string) error { - return 
surgeryRevertMetaPageFunc(args[0], cfg) + if err := o.Validate(); err != nil { + return err + } + return surgeryRevertMetaPageFunc(args[0], o) }, } - - revertMetaPageCmd.Flags().StringVar(&cfg.surgeryTargetDBFilePath, "output", "", "path to the target db file") - + o.AddFlags(revertMetaPageCmd.Flags()) return revertMetaPageCmd } -func surgeryRevertMetaPageFunc(srcDBPath string, cfg surgeryOptions) error { - if err := checkDBPaths(srcDBPath, cfg.surgeryTargetDBFilePath); err != nil { +func surgeryRevertMetaPageFunc(srcDBPath string, cfg surgeryBaseOptions) error { + if _, err := checkSourceDBPath(srcDBPath); err != nil { return err } - if err := common.CopyFile(srcDBPath, cfg.surgeryTargetDBFilePath); err != nil { + if err := common.CopyFile(srcDBPath, cfg.outputDBFilePath); err != nil { return fmt.Errorf("[revert-meta-page] copy file failed: %w", err) } - if err := surgeon.RevertMetaPage(cfg.surgeryTargetDBFilePath); err != nil { + if err := surgeon.RevertMetaPage(cfg.outputDBFilePath); err != nil { return fmt.Errorf("revert-meta-page command failed: %w", err) } @@ -86,8 +90,30 @@ func surgeryRevertMetaPageFunc(srcDBPath string, cfg surgeryOptions) error { return nil } +type surgeryCopyPageOptions struct { + surgeryBaseOptions + sourcePageId uint64 + destinationPageId uint64 +} + +func (o *surgeryCopyPageOptions) AddFlags(fs *pflag.FlagSet) { + o.surgeryBaseOptions.AddFlags(fs) + fs.Uint64VarP(&o.sourcePageId, "from-page", "", o.sourcePageId, "source page Id") + fs.Uint64VarP(&o.destinationPageId, "to-page", "", o.destinationPageId, "destination page Id") +} + +func (o *surgeryCopyPageOptions) Validate() error { + if err := o.surgeryBaseOptions.Validate(); err != nil { + return err + } + if o.sourcePageId == o.destinationPageId { + return fmt.Errorf("'--from-page' and '--to-page' have the same value: %d", o.sourcePageId) + } + return nil +} + func newSurgeryCopyPageCommand() *cobra.Command { - cfg := defaultSurgeryOptions() + var o surgeryCopyPageOptions 
copyPageCmd := &cobra.Command{ Use: "copy-page [options]", Short: "Copy page from the source page Id to the destination page Id", @@ -101,27 +127,22 @@ func newSurgeryCopyPageCommand() *cobra.Command { return nil }, RunE: func(cmd *cobra.Command, args []string) error { - return surgeryCopyPageFunc(args[0], cfg) + if err := o.Validate(); err != nil { + return err + } + return surgeryCopyPageFunc(args[0], o) }, } - - copyPageCmd.Flags().StringVar(&cfg.surgeryTargetDBFilePath, "output", "", "path to the target db file") - copyPageCmd.Flags().Uint64VarP(&cfg.surgerySourcePageId, "from-page", "", 0, "source page Id") - copyPageCmd.Flags().Uint64VarP(&cfg.surgeryDestinationPageId, "to-page", "", 0, "destination page Id") - + o.AddFlags(copyPageCmd.Flags()) return copyPageCmd } -func surgeryCopyPageFunc(srcDBPath string, cfg surgeryOptions) error { - if cfg.surgerySourcePageId == cfg.surgeryDestinationPageId { - return fmt.Errorf("'--from-page' and '--to-page' have the same value: %d", cfg.surgerySourcePageId) - } - - if err := common.CopyFile(srcDBPath, cfg.surgeryTargetDBFilePath); err != nil { +func surgeryCopyPageFunc(srcDBPath string, cfg surgeryCopyPageOptions) error { + if err := common.CopyFile(srcDBPath, cfg.outputDBFilePath); err != nil { return fmt.Errorf("[copy-page] copy file failed: %w", err) } - if err := surgeon.CopyPage(cfg.surgeryTargetDBFilePath, common.Pgid(cfg.surgerySourcePageId), common.Pgid(cfg.surgeryDestinationPageId)); err != nil { + if err := surgeon.CopyPage(cfg.outputDBFilePath, common.Pgid(cfg.sourcePageId), common.Pgid(cfg.destinationPageId)); err != nil { return fmt.Errorf("copy-page command failed: %w", err) } @@ -134,12 +155,36 @@ func surgeryCopyPageFunc(srcDBPath string, cfg surgeryOptions) error { fmt.Fprintf(os.Stdout, "Please consider executing `./bbolt surgery abandon-freelist ...`\n") } - fmt.Fprintf(os.Stdout, "The page %d was successfully copied to page %d\n", cfg.surgerySourcePageId, cfg.surgeryDestinationPageId) + 
fmt.Fprintf(os.Stdout, "The page %d was successfully copied to page %d\n", cfg.sourcePageId, cfg.destinationPageId) + return nil +} + +type surgeryClearPageElementsOptions struct { + surgeryBaseOptions + pageId uint64 + startElementIdx int + endElementIdx int +} + +func (o *surgeryClearPageElementsOptions) AddFlags(fs *pflag.FlagSet) { + o.surgeryBaseOptions.AddFlags(fs) + fs.Uint64VarP(&o.pageId, "pageId", "", o.pageId, "page id") + fs.IntVarP(&o.startElementIdx, "from-index", "", o.startElementIdx, "start element index (included) to clear, starting from 0") + fs.IntVarP(&o.endElementIdx, "to-index", "", o.endElementIdx, "end element index (excluded) to clear, starting from 0, -1 means to the end of page") +} + +func (o *surgeryClearPageElementsOptions) Validate() error { + if err := o.surgeryBaseOptions.Validate(); err != nil { + return err + } + if o.pageId < 2 { + return fmt.Errorf("the pageId must be at least 2, but got %d", o.pageId) + } return nil } func newSurgeryClearPageElementsCommand() *cobra.Command { - cfg := defaultSurgeryOptions() + var o surgeryClearPageElementsOptions clearElementCmd := &cobra.Command{ Use: "clear-page-elements [options]", Short: "Clears elements from the given page, which can be a branch or leaf page", @@ -153,28 +198,22 @@ func newSurgeryClearPageElementsCommand() *cobra.Command { return nil }, RunE: func(cmd *cobra.Command, args []string) error { - return surgeryClearPageElementFunc(args[0], cfg) + if err := o.Validate(); err != nil { + return err + } + return surgeryClearPageElementFunc(args[0], o) }, } - - clearElementCmd.Flags().StringVar(&cfg.surgeryTargetDBFilePath, "output", "", "path to the target db file") - clearElementCmd.Flags().Uint64VarP(&cfg.surgeryPageId, "pageId", "", 0, "page id") - clearElementCmd.Flags().IntVarP(&cfg.surgeryStartElementIdx, "from-index", "", 0, "start element index (included) to clear, starting from 0") - clearElementCmd.Flags().IntVarP(&cfg.surgeryEndElementIdx, "to-index", "", 0, "end 
element index (excluded) to clear, starting from 0, -1 means to the end of page") - + o.AddFlags(clearElementCmd.Flags()) return clearElementCmd } -func surgeryClearPageElementFunc(srcDBPath string, cfg surgeryOptions) error { - if err := common.CopyFile(srcDBPath, cfg.surgeryTargetDBFilePath); err != nil { +func surgeryClearPageElementFunc(srcDBPath string, cfg surgeryClearPageElementsOptions) error { + if err := common.CopyFile(srcDBPath, cfg.outputDBFilePath); err != nil { return fmt.Errorf("[clear-page-element] copy file failed: %w", err) } - if cfg.surgeryPageId < 2 { - return fmt.Errorf("the pageId must be at least 2, but got %d", cfg.surgeryPageId) - } - - needAbandonFreelist, err := surgeon.ClearPageElements(cfg.surgeryTargetDBFilePath, common.Pgid(cfg.surgeryPageId), cfg.surgeryStartElementIdx, cfg.surgeryEndElementIdx, false) + needAbandonFreelist, err := surgeon.ClearPageElements(cfg.outputDBFilePath, common.Pgid(cfg.pageId), cfg.startElementIdx, cfg.endElementIdx, false) if err != nil { return fmt.Errorf("clear-page-element command failed: %w", err) } @@ -184,7 +223,7 @@ func surgeryClearPageElementFunc(srcDBPath string, cfg surgeryOptions) error { fmt.Fprintf(os.Stdout, "Please consider executing `./bbolt surgery abandon-freelist ...`\n") } - fmt.Fprintf(os.Stdout, "All elements in [%d, %d) in page %d were cleared\n", cfg.surgeryStartElementIdx, cfg.surgeryEndElementIdx, cfg.surgeryPageId) + fmt.Fprintf(os.Stdout, "All elements in [%d, %d) in page %d were cleared\n", cfg.startElementIdx, cfg.endElementIdx, cfg.pageId) return nil } @@ -204,7 +243,7 @@ func newSurgeryFreelistCommand() *cobra.Command { } func newSurgeryFreelistAbandonCommand() *cobra.Command { - cfg := defaultSurgeryOptions() + var o surgeryBaseOptions abandonFreelistCmd := &cobra.Command{ Use: "abandon [options]", Short: "Abandon the freelist from both meta pages", @@ -218,21 +257,23 @@ func newSurgeryFreelistAbandonCommand() *cobra.Command { return nil }, RunE: func(cmd *cobra.Command, 
args []string) error { - return surgeryFreelistAbandonFunc(args[0], cfg) + if err := o.Validate(); err != nil { + return err + } + return surgeryFreelistAbandonFunc(args[0], o) }, } - - abandonFreelistCmd.Flags().StringVar(&cfg.surgeryTargetDBFilePath, "output", "", "path to the target db file") + o.AddFlags(abandonFreelistCmd.Flags()) return abandonFreelistCmd } -func surgeryFreelistAbandonFunc(srcDBPath string, cfg surgeryOptions) error { - if err := common.CopyFile(srcDBPath, cfg.surgeryTargetDBFilePath); err != nil { +func surgeryFreelistAbandonFunc(srcDBPath string, cfg surgeryBaseOptions) error { + if err := common.CopyFile(srcDBPath, cfg.outputDBFilePath); err != nil { return fmt.Errorf("[freelist abandon] copy file failed: %w", err) } - if err := surgeon.ClearFreelist(cfg.surgeryTargetDBFilePath); err != nil { + if err := surgeon.ClearFreelist(cfg.outputDBFilePath); err != nil { return fmt.Errorf("abandom-freelist command failed: %w", err) } @@ -241,7 +282,7 @@ func surgeryFreelistAbandonFunc(srcDBPath string, cfg surgeryOptions) error { } func newSurgeryFreelistRebuildCommand() *cobra.Command { - cfg := defaultSurgeryOptions() + var o surgeryBaseOptions rebuildFreelistCmd := &cobra.Command{ Use: "rebuild [options]", Short: "Rebuild the freelist", @@ -255,26 +296,22 @@ func newSurgeryFreelistRebuildCommand() *cobra.Command { return nil }, RunE: func(cmd *cobra.Command, args []string) error { - return surgeryFreelistRebuildFunc(args[0], cfg) + if err := o.Validate(); err != nil { + return err + } + return surgeryFreelistRebuildFunc(args[0], o) }, } - - rebuildFreelistCmd.Flags().StringVar(&cfg.surgeryTargetDBFilePath, "output", "", "path to the target db file") + o.AddFlags(rebuildFreelistCmd.Flags()) return rebuildFreelistCmd } -func surgeryFreelistRebuildFunc(srcDBPath string, cfg surgeryOptions) error { +func surgeryFreelistRebuildFunc(srcDBPath string, cfg surgeryBaseOptions) error { // Ensure source file exists. 
- fi, err := os.Stat(srcDBPath) - if os.IsNotExist(err) { - return fmt.Errorf("source database file %q doesn't exist", srcDBPath) - } else if err != nil { - return fmt.Errorf("failed to open source database file %q: %v", srcDBPath, err) - } - - if cfg.surgeryTargetDBFilePath == "" { - return fmt.Errorf("output database path wasn't given, specify output database file path with --output option") + fi, err := checkSourceDBPath(srcDBPath) + if err != nil { + return err } // make sure the freelist isn't present in the file. @@ -286,12 +323,12 @@ func surgeryFreelistRebuildFunc(srcDBPath string, cfg surgeryOptions) error { return ErrSurgeryFreelistAlreadyExist } - if err := common.CopyFile(srcDBPath, cfg.surgeryTargetDBFilePath); err != nil { + if err := common.CopyFile(srcDBPath, cfg.outputDBFilePath); err != nil { return fmt.Errorf("[freelist rebuild] copy file failed: %w", err) } // bboltDB automatically reconstruct & sync freelist in write mode. - db, err := bolt.Open(cfg.surgeryTargetDBFilePath, fi.Mode(), &bolt.Options{NoFreelistSync: false}) + db, err := bolt.Open(cfg.outputDBFilePath, fi.Mode(), &bolt.Options{NoFreelistSync: false}) if err != nil { return fmt.Errorf("[freelist rebuild] open db file failed: %w", err) } @@ -316,17 +353,12 @@ func readMetaPage(path string) (*common.Meta, error) { return common.LoadPageMeta(buf), nil } -func checkDBPaths(srcPath, dstPath string) error { - _, err := os.Stat(srcPath) +func checkSourceDBPath(srcPath string) (os.FileInfo, error) { + fi, err := os.Stat(srcPath) if os.IsNotExist(err) { - return fmt.Errorf("source database file %q doesn't exist", srcPath) + return nil, fmt.Errorf("source database file %q doesn't exist", srcPath) } else if err != nil { - return fmt.Errorf("failed to open source database file %q: %v", srcPath, err) + return nil, fmt.Errorf("failed to open source database file %q: %v", srcPath, err) } - - if dstPath == "" { - return fmt.Errorf("output database path wasn't given, specify output database file 
path with --output option") - } - - return nil + return fi, nil } From f2d7356f6dbbcc2d365e82f9543cf393b210a774 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 May 2023 15:02:30 +0000 Subject: [PATCH 079/439] build(deps): Bump golang.org/x/sync from 0.1.0 to 0.2.0 Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.1.0 to 0.2.0. - [Commits](https://github.com/golang/sync/compare/v0.1.0...v0.2.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index db1a8d2f1..167808c15 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/spf13/cobra v1.7.0 github.com/stretchr/testify v1.8.2 go.etcd.io/gofail v0.1.0 - golang.org/x/sync v0.1.0 + golang.org/x/sync v0.2.0 golang.org/x/sys v0.7.0 ) diff --git a/go.sum b/go.sum index 889ac46c7..86a5483e4 100644 --- a/go.sum +++ b/go.sum @@ -20,8 +20,8 @@ github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= +golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 
h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From d76c5b8f3e6c4a5866d922b8daa9e7746a8aa3b5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 May 2023 21:52:11 +0000 Subject: [PATCH 080/439] build(deps): Bump golang.org/x/sys from 0.7.0 to 0.8.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.7.0 to 0.8.0. - [Commits](https://github.com/golang/sys/compare/v0.7.0...v0.8.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 167808c15..a9e5183c6 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/stretchr/testify v1.8.2 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.2.0 - golang.org/x/sys v0.7.0 + golang.org/x/sys v0.8.0 ) require ( diff --git a/go.sum b/go.sum index 86a5483e4..6eaa074c4 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From 6e12e088d423604ad0ec0d4377e80d319d59140a Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sun, 7 May 2023 14:07:14 +0800 Subject: [PATCH 081/439] cmd: migrate 'surgery copy-page' command to cobra style command Signed-off-by: Benjamin Wang --- ...nd_surgery_cobra.go => command_surgery.go} | 52 ++++++- ..._cobra_test.go => command_surgery_test.go} | 37 +++++ cmd/bbolt/main.go | 2 - cmd/bbolt/surgery_commands.go | 146 ------------------ cmd/bbolt/surgery_commands_test.go | 47 ------ 5 files changed, 86 insertions(+), 198 deletions(-) rename cmd/bbolt/{command_surgery_cobra.go => command_surgery.go} (86%) rename cmd/bbolt/{command_surgery_cobra_test.go => command_surgery_test.go} (93%) delete mode 100644 cmd/bbolt/surgery_commands.go delete mode 100644 cmd/bbolt/surgery_commands_test.go diff --git a/cmd/bbolt/command_surgery_cobra.go b/cmd/bbolt/command_surgery.go similarity index 86% rename from cmd/bbolt/command_surgery_cobra.go rename to cmd/bbolt/command_surgery.go index 50ea686cf..bb930d9e5 100644 --- a/cmd/bbolt/command_surgery_cobra.go +++ b/cmd/bbolt/command_surgery.go @@ -26,6 +26,7 @@ func newSurgeryCobraCommand() *cobra.Command { surgeryCmd.AddCommand(newSurgeryRevertMetaPageCommand()) surgeryCmd.AddCommand(newSurgeryCopyPageCommand()) + surgeryCmd.AddCommand(newSurgeryClearPageCommand()) surgeryCmd.AddCommand(newSurgeryClearPageElementsCommand()) surgeryCmd.AddCommand(newSurgeryFreelistCommand()) @@ -183,6 +184,54 @@ func (o *surgeryClearPageElementsOptions) Validate() error { return nil } +func newSurgeryClearPageCommand() *cobra.Command { + cfg := defaultSurgeryOptions() + clearPageCmd := &cobra.Command{ + Use: "clear-page [options]", + Short: "Clears all elements from the given page, which can be a branch or leaf page", + Args: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("db file path not provided") + } + if len(args) > 1 { + return errors.New("too many 
arguments") + } + return nil + }, + RunE: func(cmd *cobra.Command, args []string) error { + return surgeryClearPageFunc(args[0], cfg) + }, + } + + clearPageCmd.Flags().StringVar(&cfg.surgeryTargetDBFilePath, "output", "", "path to the target db file") + clearPageCmd.Flags().Uint64VarP(&cfg.surgeryPageId, "pageId", "", 0, "page id") + + return clearPageCmd +} + +func surgeryClearPageFunc(srcDBPath string, cfg surgeryOptions) error { + if err := common.CopyFile(srcDBPath, cfg.surgeryTargetDBFilePath); err != nil { + return fmt.Errorf("[clear-page] copy file failed: %w", err) + } + + if cfg.surgeryPageId < 2 { + return fmt.Errorf("the pageId must be at least 2, but got %d", cfg.surgeryPageId) + } + + needAbandonFreelist, err := surgeon.ClearPage(cfg.surgeryTargetDBFilePath, common.Pgid(cfg.surgeryPageId)) + if err != nil { + return fmt.Errorf("clear-page command failed: %w", err) + } + + if needAbandonFreelist { + fmt.Fprintf(os.Stdout, "WARNING: The clearing has abandoned some pages that are not yet referenced from free list.\n") + fmt.Fprintf(os.Stdout, "Please consider executing `./bbolt surgery abandon-freelist ...`\n") + } + + fmt.Fprintf(os.Stdout, "The page (%d) was cleared\n", cfg.surgeryPageId) + return nil +} + func newSurgeryClearPageElementsCommand() *cobra.Command { var o surgeryClearPageElementsOptions clearElementCmd := &cobra.Command{ @@ -227,9 +276,6 @@ func surgeryClearPageElementFunc(srcDBPath string, cfg surgeryClearPageElementsO return nil } -// TODO(ahrtr): add `bbolt surgery freelist rebuild/check ...` commands, -// and move all `surgery freelist` commands into a separate file, -// e.g command_surgery_freelist.go. 
func newSurgeryFreelistCommand() *cobra.Command { cmd := &cobra.Command{ Use: "freelist ", diff --git a/cmd/bbolt/command_surgery_cobra_test.go b/cmd/bbolt/command_surgery_test.go similarity index 93% rename from cmd/bbolt/command_surgery_cobra_test.go rename to cmd/bbolt/command_surgery_test.go index 5c506a91a..2c99e21cd 100644 --- a/cmd/bbolt/command_surgery_cobra_test.go +++ b/cmd/bbolt/command_surgery_test.go @@ -99,6 +99,43 @@ func TestSurgery_CopyPage(t *testing.T) { assert.Equal(t, pageDataWithoutPageId(srcPageId3Data), pageDataWithoutPageId(dstPageId2Data)) } +// TODO(ahrtr): add test case below for `surgery clear-page` command: +// 1. The page is a branch page. All its children should become free pages. +func TestSurgery_ClearPage(t *testing.T) { + pageSize := 4096 + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize}) + srcPath := db.Path() + + // Insert some sample data + t.Log("Insert some sample data") + err := db.Fill([]byte("data"), 1, 20, + func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) }, + func(tx int, k int) []byte { return make([]byte, 10) }, + ) + require.NoError(t, err) + + defer requireDBNoChange(t, dbData(t, srcPath), srcPath) + + // clear page 3 + t.Log("clear page 3") + rootCmd := main.NewRootCommand() + output := filepath.Join(t.TempDir(), "dstdb") + rootCmd.SetArgs([]string{ + "surgery", "clear-page", srcPath, + "--output", output, + "--pageId", "3", + }) + err = rootCmd.Execute() + require.NoError(t, err) + + t.Log("Verify result") + dstPageId3Data := readPage(t, output, 3, pageSize) + + p := common.LoadPage(dstPageId3Data) + assert.Equal(t, uint16(0), p.Count()) + assert.Equal(t, uint32(0), p.Overflow()) +} + func TestSurgery_ClearPageElements_Without_Overflow(t *testing.T) { testCases := []struct { name string diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 31d0d62b0..6e2e7310e 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -140,8 +140,6 @@ func (m *Main) Run(args 
...string) error { return newPagesCommand(m).Run(args[1:]...) case "stats": return newStatsCommand(m).Run(args[1:]...) - case "surgery": - return newSurgeryCommand(m).Run(args[1:]...) default: return ErrUnknownCommand } diff --git a/cmd/bbolt/surgery_commands.go b/cmd/bbolt/surgery_commands.go deleted file mode 100644 index 64b970ae3..000000000 --- a/cmd/bbolt/surgery_commands.go +++ /dev/null @@ -1,146 +0,0 @@ -package main - -import ( - "errors" - "flag" - "fmt" - "os" - "strconv" - "strings" - - "go.etcd.io/bbolt/internal/common" - "go.etcd.io/bbolt/internal/surgeon" -) - -// surgeryCommand represents the "surgery" command execution. -type surgeryCommand struct { - baseCommand - - srcPath string - dstPath string -} - -// newSurgeryCommand returns a SurgeryCommand. -func newSurgeryCommand(m *Main) *surgeryCommand { - c := &surgeryCommand{} - c.baseCommand = m.baseCommand - return c -} - -// Run executes the `surgery` program. -func (cmd *surgeryCommand) Run(args ...string) error { - // Require a command at the beginning. - if len(args) == 0 || strings.HasPrefix(args[0], "-") { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Execute command. - switch args[0] { - case "help": - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - case "clear-page": - return newClearPageCommand(cmd).Run(args[1:]...) - default: - return ErrUnknownCommand - } -} - -func (cmd *surgeryCommand) parsePathsAndCopyFile(fs *flag.FlagSet) error { - // Require database paths. - cmd.srcPath = fs.Arg(0) - if cmd.srcPath == "" { - return ErrPathRequired - } - - cmd.dstPath = fs.Arg(1) - if cmd.dstPath == "" { - return errors.New("output file required") - } - - // Copy database from SrcPath to DstPath - if err := common.CopyFile(cmd.srcPath, cmd.dstPath); err != nil { - return fmt.Errorf("failed to copy file: %w", err) - } - - return nil -} - -// Usage returns the help message. 
-func (cmd *surgeryCommand) Usage() string { - return strings.TrimLeft(` -Surgery is a command for performing low level update on bbolt databases. - -Usage: - - bbolt surgery command [arguments] - -The commands are: - help print this screen - clear-page clear all elements at the given pageId - -Use "bbolt surgery [command] -h" for more information about a command. -`, "\n") -} - -// clearPageCommand represents the "surgery clear-page" command execution. -type clearPageCommand struct { - *surgeryCommand -} - -// newClearPageCommand returns a clearPageCommand. -func newClearPageCommand(m *surgeryCommand) *clearPageCommand { - c := &clearPageCommand{} - c.surgeryCommand = m - return c -} - -// Run executes the command. -func (cmd *clearPageCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - if err := cmd.parsePathsAndCopyFile(fs); err != nil { - return fmt.Errorf("clearPageCommand failed to parse paths and copy file: %w", err) - } - - // Read page id. - pageId, err := strconv.ParseUint(fs.Arg(2), 10, 64) - if err != nil { - return err - } - - needAbandonFreelist, err := surgeon.ClearPage(cmd.dstPath, common.Pgid(pageId)) - if err != nil { - return fmt.Errorf("clearPageCommand failed: %w", err) - } - - if needAbandonFreelist { - fmt.Fprintf(os.Stdout, "WARNING: The clearing has abandoned some pages that are not yet referenced from free list.\n") - fmt.Fprintf(os.Stdout, "Please consider executing `./bbolt surgery abandon-freelist ...`\n") - } - - fmt.Fprintf(cmd.Stdout, "Page (%d) was cleared\n", pageId) - return nil -} - -// Usage returns the help message. 
-func (cmd *clearPageCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt surgery clear-page SRC DST pageId - -ClearPage copies the database file at SRC to a newly created database -file at DST. Afterwards, it clears all elements in the page at pageId -in DST. - -The original database is left untouched. -`, "\n") -} diff --git a/cmd/bbolt/surgery_commands_test.go b/cmd/bbolt/surgery_commands_test.go deleted file mode 100644 index af3b1393e..000000000 --- a/cmd/bbolt/surgery_commands_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package main_test - -import ( - "fmt" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - bolt "go.etcd.io/bbolt" - "go.etcd.io/bbolt/internal/btesting" - "go.etcd.io/bbolt/internal/common" -) - -// TODO(ahrtr): add test case below for `surgery clear-page` command: -// 1. The page is a branch page. All its children should become free pages. -func TestSurgery_ClearPage(t *testing.T) { - pageSize := 4096 - db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize}) - srcPath := db.Path() - - // Insert some sample data - t.Log("Insert some sample data") - err := db.Fill([]byte("data"), 1, 20, - func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) }, - func(tx int, k int) []byte { return make([]byte, 10) }, - ) - require.NoError(t, err) - - defer requireDBNoChange(t, dbData(t, srcPath), srcPath) - - // clear page 3 - t.Log("clear page 3") - dstPath := filepath.Join(t.TempDir(), "dstdb") - m := NewMain() - err = m.Run("surgery", "clear-page", srcPath, dstPath, "3") - require.NoError(t, err) - - // The page 2 should have exactly the same data as page 3. 
- t.Log("Verify result") - dstPageId3Data := readPage(t, dstPath, 3, pageSize) - - p := common.LoadPage(dstPageId3Data) - assert.Equal(t, uint16(0), p.Count()) - assert.Equal(t, uint32(0), p.Overflow()) -} From 383d99079464699fb07cbe01d03ba675e5e87804 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 9 May 2023 08:38:50 +0800 Subject: [PATCH 082/439] cmd: wrap 'surgery clear-page' options into surgeryClearPageOptions Signed-off-by: Benjamin Wang --- cmd/bbolt/command_surgery.go | 62 +++++++++++++++++++++++------------- 1 file changed, 39 insertions(+), 23 deletions(-) diff --git a/cmd/bbolt/command_surgery.go b/cmd/bbolt/command_surgery.go index bb930d9e5..59e525c1c 100644 --- a/cmd/bbolt/command_surgery.go +++ b/cmd/bbolt/command_surgery.go @@ -160,21 +160,17 @@ func surgeryCopyPageFunc(srcDBPath string, cfg surgeryCopyPageOptions) error { return nil } -type surgeryClearPageElementsOptions struct { +type surgeryClearPageOptions struct { surgeryBaseOptions - pageId uint64 - startElementIdx int - endElementIdx int + pageId uint64 } -func (o *surgeryClearPageElementsOptions) AddFlags(fs *pflag.FlagSet) { +func (o *surgeryClearPageOptions) AddFlags(fs *pflag.FlagSet) { o.surgeryBaseOptions.AddFlags(fs) - fs.Uint64VarP(&o.pageId, "pageId", "", o.pageId, "page id") - fs.IntVarP(&o.startElementIdx, "from-index", "", o.startElementIdx, "start element index (included) to clear, starting from 0") - fs.IntVarP(&o.endElementIdx, "to-index", "", o.endElementIdx, "end element index (excluded) to clear, starting from 0, -1 means to the end of page") + fs.Uint64VarP(&o.pageId, "pageId", "", o.pageId, "page Id") } -func (o *surgeryClearPageElementsOptions) Validate() error { +func (o *surgeryClearPageOptions) Validate() error { if err := o.surgeryBaseOptions.Validate(); err != nil { return err } @@ -185,7 +181,7 @@ func (o *surgeryClearPageElementsOptions) Validate() error { } func newSurgeryClearPageCommand() *cobra.Command { - cfg := defaultSurgeryOptions() + var o 
surgeryClearPageOptions clearPageCmd := &cobra.Command{ Use: "clear-page [options]", Short: "Clears all elements from the given page, which can be a branch or leaf page", @@ -199,26 +195,22 @@ func newSurgeryClearPageCommand() *cobra.Command { return nil }, RunE: func(cmd *cobra.Command, args []string) error { - return surgeryClearPageFunc(args[0], cfg) + if err := o.Validate(); err != nil { + return err + } + return surgeryClearPageFunc(args[0], o) }, } - - clearPageCmd.Flags().StringVar(&cfg.surgeryTargetDBFilePath, "output", "", "path to the target db file") - clearPageCmd.Flags().Uint64VarP(&cfg.surgeryPageId, "pageId", "", 0, "page id") - + o.AddFlags(clearPageCmd.Flags()) return clearPageCmd } -func surgeryClearPageFunc(srcDBPath string, cfg surgeryOptions) error { - if err := common.CopyFile(srcDBPath, cfg.surgeryTargetDBFilePath); err != nil { +func surgeryClearPageFunc(srcDBPath string, cfg surgeryClearPageOptions) error { + if err := common.CopyFile(srcDBPath, cfg.outputDBFilePath); err != nil { return fmt.Errorf("[clear-page] copy file failed: %w", err) } - if cfg.surgeryPageId < 2 { - return fmt.Errorf("the pageId must be at least 2, but got %d", cfg.surgeryPageId) - } - - needAbandonFreelist, err := surgeon.ClearPage(cfg.surgeryTargetDBFilePath, common.Pgid(cfg.surgeryPageId)) + needAbandonFreelist, err := surgeon.ClearPage(cfg.outputDBFilePath, common.Pgid(cfg.pageId)) if err != nil { return fmt.Errorf("clear-page command failed: %w", err) } @@ -228,7 +220,31 @@ func surgeryClearPageFunc(srcDBPath string, cfg surgeryOptions) error { fmt.Fprintf(os.Stdout, "Please consider executing `./bbolt surgery abandon-freelist ...`\n") } - fmt.Fprintf(os.Stdout, "The page (%d) was cleared\n", cfg.surgeryPageId) + fmt.Fprintf(os.Stdout, "The page (%d) was cleared\n", cfg.pageId) + return nil +} + +type surgeryClearPageElementsOptions struct { + surgeryBaseOptions + pageId uint64 + startElementIdx int + endElementIdx int +} + +func (o 
*surgeryClearPageElementsOptions) AddFlags(fs *pflag.FlagSet) { + o.surgeryBaseOptions.AddFlags(fs) + fs.Uint64VarP(&o.pageId, "pageId", "", o.pageId, "page id") + fs.IntVarP(&o.startElementIdx, "from-index", "", o.startElementIdx, "start element index (included) to clear, starting from 0") + fs.IntVarP(&o.endElementIdx, "to-index", "", o.endElementIdx, "end element index (excluded) to clear, starting from 0, -1 means to the end of page") +} + +func (o *surgeryClearPageElementsOptions) Validate() error { + if err := o.surgeryBaseOptions.Validate(); err != nil { + return err + } + if o.pageId < 2 { + return fmt.Errorf("the pageId must be at least 2, but got %d", o.pageId) + } return nil } From 8974e912fb58598c5bded04b69092cfb1877ff2b Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 9 May 2023 08:50:17 +0800 Subject: [PATCH 083/439] cmd: check source db path for all surgery commands Signed-off-by: Benjamin Wang --- cmd/bbolt/command_surgery.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/cmd/bbolt/command_surgery.go b/cmd/bbolt/command_surgery.go index 59e525c1c..9f75603c3 100644 --- a/cmd/bbolt/command_surgery.go +++ b/cmd/bbolt/command_surgery.go @@ -139,6 +139,10 @@ func newSurgeryCopyPageCommand() *cobra.Command { } func surgeryCopyPageFunc(srcDBPath string, cfg surgeryCopyPageOptions) error { + if _, err := checkSourceDBPath(srcDBPath); err != nil { + return err + } + if err := common.CopyFile(srcDBPath, cfg.outputDBFilePath); err != nil { return fmt.Errorf("[copy-page] copy file failed: %w", err) } @@ -206,6 +210,10 @@ func newSurgeryClearPageCommand() *cobra.Command { } func surgeryClearPageFunc(srcDBPath string, cfg surgeryClearPageOptions) error { + if _, err := checkSourceDBPath(srcDBPath); err != nil { + return err + } + if err := common.CopyFile(srcDBPath, cfg.outputDBFilePath); err != nil { return fmt.Errorf("[clear-page] copy file failed: %w", err) } @@ -274,6 +282,10 @@ func newSurgeryClearPageElementsCommand() 
*cobra.Command { } func surgeryClearPageElementFunc(srcDBPath string, cfg surgeryClearPageElementsOptions) error { + if _, err := checkSourceDBPath(srcDBPath); err != nil { + return err + } + if err := common.CopyFile(srcDBPath, cfg.outputDBFilePath); err != nil { return fmt.Errorf("[clear-page-element] copy file failed: %w", err) } @@ -331,6 +343,10 @@ func newSurgeryFreelistAbandonCommand() *cobra.Command { } func surgeryFreelistAbandonFunc(srcDBPath string, cfg surgeryBaseOptions) error { + if _, err := checkSourceDBPath(srcDBPath); err != nil { + return err + } + if err := common.CopyFile(srcDBPath, cfg.outputDBFilePath); err != nil { return fmt.Errorf("[freelist abandon] copy file failed: %w", err) } From 6c836cbcaf02184b1750ddbaf7a39af73cb13652 Mon Sep 17 00:00:00 2001 From: Cenk Alti Date: Mon, 15 May 2023 15:58:31 -0400 Subject: [PATCH 084/439] Fix deprecated comment. Correct way of marking a method deprecated is adding a paragraph that starts with "Deprecated:". https://go.dev/blog/godoc https://github.com/golang/go/blob/ff3aefbad4bed0cdd25688329e5cc4f908276a46/src/cmd/api/api.go#L1137 Signed-off-by: Cenk Alti --- tx.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tx.go b/tx.go index 340dda808..5b8e25b8b 100644 --- a/tx.go +++ b/tx.go @@ -330,7 +330,7 @@ func (tx *Tx) close() { // Copy writes the entire database to a writer. // This function exists for backwards compatibility. // -// Deprecated; Use WriteTo() instead. +// Deprecated: Use WriteTo() instead. 
func (tx *Tx) Copy(w io.Writer) error { _, err := tx.WriteTo(w) return err From 065a3e1953625f20c1134fc579b5e07a6095f097 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 16 May 2023 14:04:48 +0800 Subject: [PATCH 085/439] cmd: mark some flags of surgery commands as required Signed-off-by: Benjamin Wang --- cmd/bbolt/command_surgery.go | 7 +++ cmd/bbolt/command_surgery_test.go | 84 +++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+) diff --git a/cmd/bbolt/command_surgery.go b/cmd/bbolt/command_surgery.go index 9f75603c3..9d2931608 100644 --- a/cmd/bbolt/command_surgery.go +++ b/cmd/bbolt/command_surgery.go @@ -39,6 +39,7 @@ type surgeryBaseOptions struct { func (o *surgeryBaseOptions) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&o.outputDBFilePath, "output", o.outputDBFilePath, "path to the filePath db file") + _ = cobra.MarkFlagRequired(fs, "output") } func (o *surgeryBaseOptions) Validate() error { @@ -101,6 +102,8 @@ func (o *surgeryCopyPageOptions) AddFlags(fs *pflag.FlagSet) { o.surgeryBaseOptions.AddFlags(fs) fs.Uint64VarP(&o.sourcePageId, "from-page", "", o.sourcePageId, "source page Id") fs.Uint64VarP(&o.destinationPageId, "to-page", "", o.destinationPageId, "destination page Id") + _ = cobra.MarkFlagRequired(fs, "from-page") + _ = cobra.MarkFlagRequired(fs, "to-page") } func (o *surgeryCopyPageOptions) Validate() error { @@ -172,6 +175,7 @@ type surgeryClearPageOptions struct { func (o *surgeryClearPageOptions) AddFlags(fs *pflag.FlagSet) { o.surgeryBaseOptions.AddFlags(fs) fs.Uint64VarP(&o.pageId, "pageId", "", o.pageId, "page Id") + _ = cobra.MarkFlagRequired(fs, "pageId") } func (o *surgeryClearPageOptions) Validate() error { @@ -244,6 +248,9 @@ func (o *surgeryClearPageElementsOptions) AddFlags(fs *pflag.FlagSet) { fs.Uint64VarP(&o.pageId, "pageId", "", o.pageId, "page id") fs.IntVarP(&o.startElementIdx, "from-index", "", o.startElementIdx, "start element index (included) to clear, starting from 0") fs.IntVarP(&o.endElementIdx, 
"to-index", "", o.endElementIdx, "end element index (excluded) to clear, starting from 0, -1 means to the end of page") + _ = cobra.MarkFlagRequired(fs, "pageId") + _ = cobra.MarkFlagRequired(fs, "from-index") + _ = cobra.MarkFlagRequired(fs, "to-index") } func (o *surgeryClearPageElementsOptions) Validate() error { diff --git a/cmd/bbolt/command_surgery_test.go b/cmd/bbolt/command_surgery_test.go index 2c99e21cd..7e131e420 100644 --- a/cmd/bbolt/command_surgery_test.go +++ b/cmd/bbolt/command_surgery_test.go @@ -674,3 +674,87 @@ func readPage(t *testing.T, path string, pageId int, pageSize int) []byte { func pageDataWithoutPageId(buf []byte) []byte { return buf[8:] } + +func TestSurgeryRequiredFlags(t *testing.T) { + errMsgFmt := `required flag(s) "%s" not set` + testCases := []struct { + name string + args []string + expectedErrMsg string + }{ + // --output is required for all surgery commands + { + name: "no output flag for revert-meta-page", + args: []string{"surgery", "revert-meta-page", "db"}, + expectedErrMsg: fmt.Sprintf(errMsgFmt, "output"), + }, + { + name: "no output flag for copy-page", + args: []string{"surgery", "copy-page", "db", "--from-page", "3", "--to-page", "2"}, + expectedErrMsg: fmt.Sprintf(errMsgFmt, "output"), + }, + { + name: "no output flag for clear-page", + args: []string{"surgery", "clear-page", "db", "--pageId", "3"}, + expectedErrMsg: fmt.Sprintf(errMsgFmt, "output"), + }, + { + name: "no output flag for clear-page-element", + args: []string{"surgery", "clear-page-elements", "db", "--pageId", "4", "--from-index", "3", "--to-index", "5"}, + expectedErrMsg: fmt.Sprintf(errMsgFmt, "output"), + }, + { + name: "no output flag for freelist abandon", + args: []string{"surgery", "freelist", "abandon", "db"}, + expectedErrMsg: fmt.Sprintf(errMsgFmt, "output"), + }, + { + name: "no output flag for freelist rebuild", + args: []string{"surgery", "freelist", "rebuild", "db"}, + expectedErrMsg: fmt.Sprintf(errMsgFmt, "output"), + }, + // 
--from-page and --to-page are required for 'surgery copy-page' command + { + name: "no from-page flag for copy-page", + args: []string{"surgery", "copy-page", "db", "--output", "db", "--to-page", "2"}, + expectedErrMsg: fmt.Sprintf(errMsgFmt, "from-page"), + }, + { + name: "no to-page flag for copy-page", + args: []string{"surgery", "copy-page", "db", "--output", "db", "--from-page", "2"}, + expectedErrMsg: fmt.Sprintf(errMsgFmt, "to-page"), + }, + // --pageId is required for 'surgery clear-page' command + { + name: "no pageId flag for clear-page", + args: []string{"surgery", "clear-page", "db", "--output", "db"}, + expectedErrMsg: fmt.Sprintf(errMsgFmt, "pageId"), + }, + // --pageId, --from-index and --to-index are required for 'surgery clear-page-element' command + { + name: "no pageId flag for clear-page-element", + args: []string{"surgery", "clear-page-elements", "db", "--output", "newdb", "--from-index", "3", "--to-index", "5"}, + expectedErrMsg: fmt.Sprintf(errMsgFmt, "pageId"), + }, + { + name: "no from-index flag for clear-page-element", + args: []string{"surgery", "clear-page-elements", "db", "--output", "newdb", "--pageId", "2", "--to-index", "5"}, + expectedErrMsg: fmt.Sprintf(errMsgFmt, "from-index"), + }, + { + name: "no to-index flag for clear-page-element", + args: []string{"surgery", "clear-page-elements", "db", "--output", "newdb", "--pageId", "2", "--from-index", "3"}, + expectedErrMsg: fmt.Sprintf(errMsgFmt, "to-index"), + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + rootCmd := main.NewRootCommand() + rootCmd.SetArgs(tc.args) + err := rootCmd.Execute() + require.ErrorContains(t, err, tc.expectedErrMsg) + }) + } +} From c2efe9f0d8c9950a5962b3db6a92edd085529c3c Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 16 May 2023 16:51:06 +0800 Subject: [PATCH 086/439] cmd: split 'surgery freelist' into separate files Signed-off-by: Benjamin Wang --- cmd/bbolt/command_surgery.go | 116 
------------------- cmd/bbolt/command_surgery_freelist.go | 128 +++++++++++++++++++++ cmd/bbolt/command_surgery_freelist_test.go | 103 +++++++++++++++++ cmd/bbolt/command_surgery_test.go | 124 -------------------- cmd/bbolt/utils_test.go | 46 ++++++++ 5 files changed, 277 insertions(+), 240 deletions(-) create mode 100644 cmd/bbolt/command_surgery_freelist.go create mode 100644 cmd/bbolt/command_surgery_freelist_test.go create mode 100644 cmd/bbolt/utils_test.go diff --git a/cmd/bbolt/command_surgery.go b/cmd/bbolt/command_surgery.go index 9d2931608..15fa48cb9 100644 --- a/cmd/bbolt/command_surgery.go +++ b/cmd/bbolt/command_surgery.go @@ -8,7 +8,6 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" - bolt "go.etcd.io/bbolt" "go.etcd.io/bbolt/internal/common" "go.etcd.io/bbolt/internal/guts_cli" "go.etcd.io/bbolt/internal/surgeon" @@ -311,121 +310,6 @@ func surgeryClearPageElementFunc(srcDBPath string, cfg surgeryClearPageElementsO return nil } -func newSurgeryFreelistCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "freelist ", - Short: "freelist related surgery commands", - } - - cmd.AddCommand(newSurgeryFreelistAbandonCommand()) - cmd.AddCommand(newSurgeryFreelistRebuildCommand()) - - return cmd -} - -func newSurgeryFreelistAbandonCommand() *cobra.Command { - var o surgeryBaseOptions - abandonFreelistCmd := &cobra.Command{ - Use: "abandon [options]", - Short: "Abandon the freelist from both meta pages", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return errors.New("db file path not provided") - } - if len(args) > 1 { - return errors.New("too many arguments") - } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) error { - if err := o.Validate(); err != nil { - return err - } - return surgeryFreelistAbandonFunc(args[0], o) - }, - } - o.AddFlags(abandonFreelistCmd.Flags()) - - return abandonFreelistCmd -} - -func surgeryFreelistAbandonFunc(srcDBPath string, cfg surgeryBaseOptions) error { - if 
_, err := checkSourceDBPath(srcDBPath); err != nil { - return err - } - - if err := common.CopyFile(srcDBPath, cfg.outputDBFilePath); err != nil { - return fmt.Errorf("[freelist abandon] copy file failed: %w", err) - } - - if err := surgeon.ClearFreelist(cfg.outputDBFilePath); err != nil { - return fmt.Errorf("abandom-freelist command failed: %w", err) - } - - fmt.Fprintf(os.Stdout, "The freelist was abandoned in both meta pages.\nIt may cause some delay on next startup because bbolt needs to scan the whole db to reconstruct the free list.\n") - return nil -} - -func newSurgeryFreelistRebuildCommand() *cobra.Command { - var o surgeryBaseOptions - rebuildFreelistCmd := &cobra.Command{ - Use: "rebuild [options]", - Short: "Rebuild the freelist", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return errors.New("db file path not provided") - } - if len(args) > 1 { - return errors.New("too many arguments") - } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) error { - if err := o.Validate(); err != nil { - return err - } - return surgeryFreelistRebuildFunc(args[0], o) - }, - } - o.AddFlags(rebuildFreelistCmd.Flags()) - - return rebuildFreelistCmd -} - -func surgeryFreelistRebuildFunc(srcDBPath string, cfg surgeryBaseOptions) error { - // Ensure source file exists. - fi, err := checkSourceDBPath(srcDBPath) - if err != nil { - return err - } - - // make sure the freelist isn't present in the file. - meta, err := readMetaPage(srcDBPath) - if err != nil { - return err - } - if meta.IsFreelistPersisted() { - return ErrSurgeryFreelistAlreadyExist - } - - if err := common.CopyFile(srcDBPath, cfg.outputDBFilePath); err != nil { - return fmt.Errorf("[freelist rebuild] copy file failed: %w", err) - } - - // bboltDB automatically reconstruct & sync freelist in write mode. 
- db, err := bolt.Open(cfg.outputDBFilePath, fi.Mode(), &bolt.Options{NoFreelistSync: false}) - if err != nil { - return fmt.Errorf("[freelist rebuild] open db file failed: %w", err) - } - err = db.Close() - if err != nil { - return fmt.Errorf("[freelist rebuild] close db file failed: %w", err) - } - - fmt.Fprintf(os.Stdout, "The freelist was successfully rebuilt.\n") - return nil -} - func readMetaPage(path string) (*common.Meta, error) { _, activeMetaPageId, err := guts_cli.GetRootPage(path) if err != nil { diff --git a/cmd/bbolt/command_surgery_freelist.go b/cmd/bbolt/command_surgery_freelist.go new file mode 100644 index 000000000..81e2ea9a9 --- /dev/null +++ b/cmd/bbolt/command_surgery_freelist.go @@ -0,0 +1,128 @@ +package main + +import ( + "errors" + "fmt" + "os" + + "github.com/spf13/cobra" + + bolt "go.etcd.io/bbolt" + "go.etcd.io/bbolt/internal/common" + "go.etcd.io/bbolt/internal/surgeon" +) + +func newSurgeryFreelistCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "freelist ", + Short: "freelist related surgery commands", + } + + cmd.AddCommand(newSurgeryFreelistAbandonCommand()) + cmd.AddCommand(newSurgeryFreelistRebuildCommand()) + + return cmd +} + +func newSurgeryFreelistAbandonCommand() *cobra.Command { + var o surgeryBaseOptions + abandonFreelistCmd := &cobra.Command{ + Use: "abandon [options]", + Short: "Abandon the freelist from both meta pages", + Args: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("db file path not provided") + } + if len(args) > 1 { + return errors.New("too many arguments") + } + return nil + }, + RunE: func(cmd *cobra.Command, args []string) error { + if err := o.Validate(); err != nil { + return err + } + return surgeryFreelistAbandonFunc(args[0], o) + }, + } + o.AddFlags(abandonFreelistCmd.Flags()) + + return abandonFreelistCmd +} + +func surgeryFreelistAbandonFunc(srcDBPath string, cfg surgeryBaseOptions) error { + if _, err := checkSourceDBPath(srcDBPath); err != nil 
{ + return err + } + + if err := common.CopyFile(srcDBPath, cfg.outputDBFilePath); err != nil { + return fmt.Errorf("[freelist abandon] copy file failed: %w", err) + } + + if err := surgeon.ClearFreelist(cfg.outputDBFilePath); err != nil { + return fmt.Errorf("abandom-freelist command failed: %w", err) + } + + fmt.Fprintf(os.Stdout, "The freelist was abandoned in both meta pages.\nIt may cause some delay on next startup because bbolt needs to scan the whole db to reconstruct the free list.\n") + return nil +} + +func newSurgeryFreelistRebuildCommand() *cobra.Command { + var o surgeryBaseOptions + rebuildFreelistCmd := &cobra.Command{ + Use: "rebuild [options]", + Short: "Rebuild the freelist", + Args: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("db file path not provided") + } + if len(args) > 1 { + return errors.New("too many arguments") + } + return nil + }, + RunE: func(cmd *cobra.Command, args []string) error { + if err := o.Validate(); err != nil { + return err + } + return surgeryFreelistRebuildFunc(args[0], o) + }, + } + o.AddFlags(rebuildFreelistCmd.Flags()) + + return rebuildFreelistCmd +} + +func surgeryFreelistRebuildFunc(srcDBPath string, cfg surgeryBaseOptions) error { + // Ensure source file exists. + fi, err := checkSourceDBPath(srcDBPath) + if err != nil { + return err + } + + // make sure the freelist isn't present in the file. + meta, err := readMetaPage(srcDBPath) + if err != nil { + return err + } + if meta.IsFreelistPersisted() { + return ErrSurgeryFreelistAlreadyExist + } + + if err := common.CopyFile(srcDBPath, cfg.outputDBFilePath); err != nil { + return fmt.Errorf("[freelist rebuild] copy file failed: %w", err) + } + + // bboltDB automatically reconstruct & sync freelist in write mode. 
+ db, err := bolt.Open(cfg.outputDBFilePath, fi.Mode(), &bolt.Options{NoFreelistSync: false}) + if err != nil { + return fmt.Errorf("[freelist rebuild] open db file failed: %w", err) + } + err = db.Close() + if err != nil { + return fmt.Errorf("[freelist rebuild] close db file failed: %w", err) + } + + fmt.Fprintf(os.Stdout, "The freelist was successfully rebuilt.\n") + return nil +} diff --git a/cmd/bbolt/command_surgery_freelist_test.go b/cmd/bbolt/command_surgery_freelist_test.go new file mode 100644 index 000000000..87f274760 --- /dev/null +++ b/cmd/bbolt/command_surgery_freelist_test.go @@ -0,0 +1,103 @@ +package main_test + +import ( + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + bolt "go.etcd.io/bbolt" + main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/internal/btesting" + "go.etcd.io/bbolt/internal/common" +) + +func TestSurgery_Freelist_Abandon(t *testing.T) { + pageSize := 4096 + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize}) + srcPath := db.Path() + + defer requireDBNoChange(t, dbData(t, srcPath), srcPath) + + rootCmd := main.NewRootCommand() + output := filepath.Join(t.TempDir(), "db") + rootCmd.SetArgs([]string{ + "surgery", "freelist", "abandon", srcPath, + "--output", output, + }) + err := rootCmd.Execute() + require.NoError(t, err) + + meta0 := loadMetaPage(t, output, 0) + assert.Equal(t, common.PgidNoFreelist, meta0.Freelist()) + meta1 := loadMetaPage(t, output, 1) + assert.Equal(t, common.PgidNoFreelist, meta1.Freelist()) +} + +func TestSurgery_Freelist_Rebuild(t *testing.T) { + testCases := []struct { + name string + hasFreelist bool + expectedError error + }{ + { + name: "normal operation", + hasFreelist: false, + expectedError: nil, + }, + { + name: "already has freelist", + hasFreelist: true, + expectedError: main.ErrSurgeryFreelistAlreadyExist, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + 
pageSize := 4096 + db := btesting.MustCreateDBWithOption(t, &bolt.Options{ + PageSize: pageSize, + NoFreelistSync: !tc.hasFreelist, + }) + srcPath := db.Path() + + err := db.Update(func(tx *bolt.Tx) error { + // do nothing + return nil + }) + require.NoError(t, err) + + defer requireDBNoChange(t, dbData(t, srcPath), srcPath) + + // Verify the freelist isn't synced in the beginning + meta := readMetaPage(t, srcPath) + if tc.hasFreelist { + if meta.Freelist() <= 1 || meta.Freelist() >= meta.Pgid() { + t.Fatalf("freelist (%d) isn't in the valid range (1, %d)", meta.Freelist(), meta.Pgid()) + } + } else { + require.Equal(t, common.PgidNoFreelist, meta.Freelist()) + } + + // Execute `surgery freelist rebuild` command + rootCmd := main.NewRootCommand() + output := filepath.Join(t.TempDir(), "db") + rootCmd.SetArgs([]string{ + "surgery", "freelist", "rebuild", srcPath, + "--output", output, + }) + err = rootCmd.Execute() + require.Equal(t, tc.expectedError, err) + + if tc.expectedError == nil { + // Verify the freelist has already been rebuilt. 
+ meta = readMetaPage(t, output) + if meta.Freelist() <= 1 || meta.Freelist() >= meta.Pgid() { + t.Fatalf("freelist (%d) isn't in the valid range (1, %d)", meta.Freelist(), meta.Pgid()) + } + } + }) + } +} diff --git a/cmd/bbolt/command_surgery_test.go b/cmd/bbolt/command_surgery_test.go index 7e131e420..dc8bdabb9 100644 --- a/cmd/bbolt/command_surgery_test.go +++ b/cmd/bbolt/command_surgery_test.go @@ -551,130 +551,6 @@ func testSurgeryClearPageElementsWithOverflow(t *testing.T, startIdx, endIdx int compareDataAfterClearingElement(t, srcPath, output, pageId, false, startIdx, endIdx) } -func TestSurgery_Freelist_Abandon(t *testing.T) { - pageSize := 4096 - db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize}) - srcPath := db.Path() - - defer requireDBNoChange(t, dbData(t, srcPath), srcPath) - - rootCmd := main.NewRootCommand() - output := filepath.Join(t.TempDir(), "db") - rootCmd.SetArgs([]string{ - "surgery", "freelist", "abandon", srcPath, - "--output", output, - }) - err := rootCmd.Execute() - require.NoError(t, err) - - meta0 := loadMetaPage(t, output, 0) - assert.Equal(t, common.PgidNoFreelist, meta0.Freelist()) - meta1 := loadMetaPage(t, output, 1) - assert.Equal(t, common.PgidNoFreelist, meta1.Freelist()) -} - -func loadMetaPage(t *testing.T, dbPath string, pageID uint64) *common.Meta { - _, buf, err := guts_cli.ReadPage(dbPath, 0) - require.NoError(t, err) - return common.LoadPageMeta(buf) -} - -func TestSurgery_Freelist_Rebuild(t *testing.T) { - testCases := []struct { - name string - hasFreelist bool - expectedError error - }{ - { - name: "normal operation", - hasFreelist: false, - expectedError: nil, - }, - { - name: "already has freelist", - hasFreelist: true, - expectedError: main.ErrSurgeryFreelistAlreadyExist, - }, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - pageSize := 4096 - db := btesting.MustCreateDBWithOption(t, &bolt.Options{ - PageSize: pageSize, - NoFreelistSync: 
!tc.hasFreelist, - }) - srcPath := db.Path() - - err := db.Update(func(tx *bolt.Tx) error { - // do nothing - return nil - }) - require.NoError(t, err) - - defer requireDBNoChange(t, dbData(t, srcPath), srcPath) - - // Verify the freelist isn't synced in the beginning - meta := readMetaPage(t, srcPath) - if tc.hasFreelist { - if meta.Freelist() <= 1 || meta.Freelist() >= meta.Pgid() { - t.Fatalf("freelist (%d) isn't in the valid range (1, %d)", meta.Freelist(), meta.Pgid()) - } - } else { - require.Equal(t, common.PgidNoFreelist, meta.Freelist()) - } - - // Execute `surgery freelist rebuild` command - rootCmd := main.NewRootCommand() - output := filepath.Join(t.TempDir(), "db") - rootCmd.SetArgs([]string{ - "surgery", "freelist", "rebuild", srcPath, - "--output", output, - }) - err = rootCmd.Execute() - require.Equal(t, tc.expectedError, err) - - if tc.expectedError == nil { - // Verify the freelist has already been rebuilt. - meta = readMetaPage(t, output) - if meta.Freelist() <= 1 || meta.Freelist() >= meta.Pgid() { - t.Fatalf("freelist (%d) isn't in the valid range (1, %d)", meta.Freelist(), meta.Pgid()) - } - } - }) - } -} - -func readMetaPage(t *testing.T, path string) *common.Meta { - _, activeMetaPageId, err := guts_cli.GetRootPage(path) - require.NoError(t, err) - _, buf, err := guts_cli.ReadPage(path, uint64(activeMetaPageId)) - require.NoError(t, err) - return common.LoadPageMeta(buf) -} - -func readPage(t *testing.T, path string, pageId int, pageSize int) []byte { - dbFile, err := os.Open(path) - require.NoError(t, err) - defer dbFile.Close() - - fi, err := dbFile.Stat() - require.NoError(t, err) - require.GreaterOrEqual(t, fi.Size(), int64((pageId+1)*pageSize)) - - buf := make([]byte, pageSize) - byteRead, err := dbFile.ReadAt(buf, int64(pageId*pageSize)) - require.NoError(t, err) - require.Equal(t, pageSize, byteRead) - - return buf -} - -func pageDataWithoutPageId(buf []byte) []byte { - return buf[8:] -} - func TestSurgeryRequiredFlags(t *testing.T) { 
errMsgFmt := `required flag(s) "%s" not set` testCases := []struct { diff --git a/cmd/bbolt/utils_test.go b/cmd/bbolt/utils_test.go new file mode 100644 index 000000000..5ea11b28a --- /dev/null +++ b/cmd/bbolt/utils_test.go @@ -0,0 +1,46 @@ +package main_test + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" + + "go.etcd.io/bbolt/internal/common" + "go.etcd.io/bbolt/internal/guts_cli" +) + +func loadMetaPage(t *testing.T, dbPath string, pageID uint64) *common.Meta { + _, buf, err := guts_cli.ReadPage(dbPath, 0) + require.NoError(t, err) + return common.LoadPageMeta(buf) +} + +func readMetaPage(t *testing.T, path string) *common.Meta { + _, activeMetaPageId, err := guts_cli.GetRootPage(path) + require.NoError(t, err) + _, buf, err := guts_cli.ReadPage(path, uint64(activeMetaPageId)) + require.NoError(t, err) + return common.LoadPageMeta(buf) +} + +func readPage(t *testing.T, path string, pageId int, pageSize int) []byte { + dbFile, err := os.Open(path) + require.NoError(t, err) + defer dbFile.Close() + + fi, err := dbFile.Stat() + require.NoError(t, err) + require.GreaterOrEqual(t, fi.Size(), int64((pageId+1)*pageSize)) + + buf := make([]byte, pageSize) + byteRead, err := dbFile.ReadAt(buf, int64(pageId*pageSize)) + require.NoError(t, err) + require.Equal(t, pageSize, byteRead) + + return buf +} + +func pageDataWithoutPageId(buf []byte) []byte { + return buf[8:] +} From daff0306460a3714075860bf1f5551153dc93170 Mon Sep 17 00:00:00 2001 From: Cenk Alti Date: Tue, 16 May 2023 14:56:40 -0400 Subject: [PATCH 087/439] Update Tx.Commit documentation Signed-off-by: Cenk Alti --- tx.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tx.go b/tx.go index 5b8e25b8b..a8e4d9d0b 100644 --- a/tx.go +++ b/tx.go @@ -136,7 +136,7 @@ func (tx *Tx) OnCommit(fn func()) { tx.commitHandlers = append(tx.commitHandlers, fn) } -// Commit writes all changes to disk and updates the meta page. 
+// Commit writes all changes to disk, updates the meta page and closes the transaction. // Returns an error if a disk write error occurs, or if Commit is // called on a read-only transaction. func (tx *Tx) Commit() error { From 6ec43f8da3138b746704b000478deddd81faa648 Mon Sep 17 00:00:00 2001 From: Cenk Alti Date: Tue, 16 May 2023 16:04:31 -0400 Subject: [PATCH 088/439] Add bucket retrieve example to README Signed-off-by: Cenk Alti --- README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.md b/README.md index 8c7c70658..052e9ab10 100644 --- a/README.md +++ b/README.md @@ -296,6 +296,17 @@ db.Update(func(tx *bolt.Tx) error { }) ``` +You can retrieve an existing bucket using the `Tx.Bucket()` function: +```go +db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + if b == nil { + return fmt.Errorf("bucket does not exist") + } + return nil +}) +``` + You can also create a bucket only if it doesn't exist by using the `Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this function for all your top-level buckets after you open your database so you can From b3df07c58c12bd44e794fbdab2901e0d03b56f38 Mon Sep 17 00:00:00 2001 From: Cenk Alti Date: Tue, 16 May 2023 16:37:36 -0400 Subject: [PATCH 089/439] Remove unnecessary masking Signed-off-by: Cenk Alti --- bolt_windows.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bolt_windows.go b/bolt_windows.go index 020f1a123..ec21ecb85 100644 --- a/bolt_windows.go +++ b/bolt_windows.go @@ -72,7 +72,7 @@ func mmap(db *DB, sz int) error { return fmt.Errorf("truncate: %s", err) } sizehi = uint32(sz >> 32) - sizelo = uint32(sz) & 0xffffffff + sizelo = uint32(sz) } // Open a file mapping handle. 
From b871a9aa7c68886c90a2387d000b5acdee97ebd4 Mon Sep 17 00:00:00 2001 From: Cenk Alti Date: Tue, 16 May 2023 17:32:42 -0400 Subject: [PATCH 090/439] Rename aix and solaris os specific files Signed-off-by: Cenk Alti --- bolt_unix_aix.go => bolt_aix.go | 0 bolt_unix_solaris.go => bolt_solaris.go | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename bolt_unix_aix.go => bolt_aix.go (100%) rename bolt_unix_solaris.go => bolt_solaris.go (100%) diff --git a/bolt_unix_aix.go b/bolt_aix.go similarity index 100% rename from bolt_unix_aix.go rename to bolt_aix.go diff --git a/bolt_unix_solaris.go b/bolt_solaris.go similarity index 100% rename from bolt_unix_solaris.go rename to bolt_solaris.go From 05286ad2af9e2f4ceb3bd2c4e38a5835d010a0ca Mon Sep 17 00:00:00 2001 From: Cenk Alti Date: Wed, 17 May 2023 15:17:51 -0400 Subject: [PATCH 091/439] Do not create db file if opened read-only Signed-off-by: Cenk Alti --- db.go | 3 ++- db_test.go | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/db.go b/db.go index 1d7cd137f..4b8a4b82d 100644 --- a/db.go +++ b/db.go @@ -196,6 +196,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } else { // always load free pages in write mode db.PreLoadFreelist = true + flag |= os.O_CREATE } db.openFile = options.OpenFile @@ -205,7 +206,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // Open data file and separate sync handler for metadata writes. 
var err error - if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil { + if db.file, err = db.openFile(path, flag, mode); err != nil { _ = db.close() return nil, err } diff --git a/db_test.go b/db_test.go index 153de475e..1b103676f 100644 --- a/db_test.go +++ b/db_test.go @@ -562,6 +562,12 @@ func TestDB_Open_ReadOnly(t *testing.T) { } } +func TestDB_Open_ReadOnly_NoCreate(t *testing.T) { + f := filepath.Join(t.TempDir(), "db") + _, err := bolt.Open(f, 0666, &bolt.Options{ReadOnly: true}) + require.ErrorIs(t, err, os.ErrNotExist) +} + // TestOpen_BigPage checks the database uses bigger pages when // changing PageSize. func TestOpen_BigPage(t *testing.T) { From a78b0c40ed2698e31c749a8c73db5e9bbf6b2424 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 19 May 2023 19:33:21 +0800 Subject: [PATCH 092/439] set page flags directly instead of XOR the value Signed-off-by: Benjamin Wang --- freelist.go | 2 +- internal/common/meta.go | 2 +- internal/common/page.go | 24 ++++++++++-------------- 3 files changed, 12 insertions(+), 16 deletions(-) diff --git a/freelist.go b/freelist.go index 81cb1fd01..2b09e7626 100644 --- a/freelist.go +++ b/freelist.go @@ -302,7 +302,7 @@ func (f *freelist) write(p *common.Page) error { // Combine the old free pgids and pgids waiting on an open transaction. // Update the header flag. - p.FlagsXOR(common.FreelistPageFlag) + p.SetFlags(common.FreelistPageFlag) // The page.count can only hold up to 64k elements so if we overflow that // number then we handle it by putting the size in the first element. diff --git a/internal/common/meta.go b/internal/common/meta.go index b97949a57..4517d3716 100644 --- a/internal/common/meta.go +++ b/internal/common/meta.go @@ -49,7 +49,7 @@ func (m *Meta) Write(p *Page) { // Page id is either going to be 0 or 1 which we can determine by the transaction ID. p.id = Pgid(m.txid % 2) - p.flags |= MetaPageFlag + p.SetFlags(MetaPageFlag) // Calculate the checksum. 
m.checksum = m.Sum64() diff --git a/internal/common/page.go b/internal/common/page.go index 504feb8f3..808484c19 100644 --- a/internal/common/page.go +++ b/internal/common/page.go @@ -58,19 +58,19 @@ func (p *Page) Typ() string { } func (p *Page) IsBranchPage() bool { - return p.flags&BranchPageFlag != 0 + return p.flags == BranchPageFlag } func (p *Page) IsLeafPage() bool { - return p.flags&LeafPageFlag != 0 + return p.flags == LeafPageFlag } func (p *Page) IsMetaPage() bool { - return p.flags&MetaPageFlag != 0 + return p.flags == MetaPageFlag } func (p *Page) IsFreelistPage() bool { - return p.flags&FreelistPageFlag != 0 + return p.flags == FreelistPageFlag } // Meta returns a pointer to the metadata section of the page. @@ -81,10 +81,10 @@ func (p *Page) Meta() *Meta { func (p *Page) FastCheck(id Pgid) { Assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id) // Only one flag of page-type can be set. - Assert(p.flags == BranchPageFlag || - p.flags == LeafPageFlag || - p.flags == MetaPageFlag || - p.flags == FreelistPageFlag, + Assert(p.IsBranchPage() || + p.IsLeafPage() || + p.IsMetaPage() || + p.IsFreelistPage(), "page %v: has unexpected type/flags: %x", p.id, p.flags) } @@ -123,7 +123,7 @@ func (p *Page) BranchPageElements() []branchPageElement { } func (p *Page) FreelistPageCount() (int, int) { - Assert(p.flags == FreelistPageFlag, fmt.Sprintf("can't get freelist page count from a non-freelist page: %2x", p.flags)) + Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page count from a non-freelist page: %2x", p.flags)) // If the page.count is at the max uint16 value (64k) then it's considered // an overflow and the size of the freelist is stored as the first element. 
@@ -141,7 +141,7 @@ func (p *Page) FreelistPageCount() (int, int) { } func (p *Page) FreelistPageIds() []Pgid { - Assert(p.flags == FreelistPageFlag, fmt.Sprintf("can't get freelist page IDs from a non-freelist page: %2x", p.flags)) + Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page IDs from a non-freelist page: %2x", p.flags)) idx, count := p.FreelistPageCount() @@ -185,10 +185,6 @@ func (p *Page) SetFlags(v uint16) { p.flags = v } -func (p *Page) FlagsXOR(v uint16) { - p.flags |= v -} - func (p *Page) Count() uint16 { return p.count } From b1760abba964198d8477b930ec8c38e4f37d1350 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 22 May 2023 15:01:52 +0000 Subject: [PATCH 093/439] build(deps): Bump github.com/stretchr/testify from 1.8.2 to 1.8.3 Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.8.2 to 1.8.3. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.8.2...v1.8.3) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 11 ++--------- 2 files changed, 4 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index a9e5183c6..4ed94ece5 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,8 @@ go 1.19 require ( github.com/spf13/cobra v1.7.0 - github.com/stretchr/testify v1.8.2 + github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.8.3 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.2.0 golang.org/x/sys v0.8.0 @@ -14,6 +15,5 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 6eaa074c4..b6e98b0db 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,4 @@ github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -11,13 +10,8 @@ github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= @@ -26,6 +20,5 @@ golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= From dbbfc0ec548cc4925a1f1b67db6ca2f0e1684d6e Mon Sep 17 00:00:00 2001 From: Mechiel Lukkien Date: Mon, 22 May 2023 14:21:39 +0200 Subject: [PATCH 094/439] add github.com/mjl-/bstore to list of projects using bbolt Signed-off-by: Mechiel Lukkien --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 052e9ab10..f96474e02 100644 --- a/README.md +++ b/README.md @@ -938,6 +938,7 @@ Below is a list of public, open source projects that use Bolt: * [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. * [BoltDB Viewer](https://github.com/zc310/rich_boltdb) - A BoltDB Viewer Can run on Windows、Linux、Android system. 
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. +* [bstore](https://github.com/mjl-/bstore) - Database library storing Go values, with referential/unique/nonzero constraints, indices, automatic schema management with struct tags, and a query API. * [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. * [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining simple tx and key scans. From 3501667d555d96e4e1bc0e215b613fcfe2f0433c Mon Sep 17 00:00:00 2001 From: Mechiel Lukkien Date: Tue, 23 May 2023 09:17:34 +0200 Subject: [PATCH 095/439] Update import paths used to pkg.go.dev and go report card They pointed to github.com/etcd-io/bbolt, which only returns the bbolt version from before the module path change. Some time ago, I caught myself using the older github.com import path, and I suspect it was because of these links. This changes the link from godoc.org to pkg.go.dev, and uses the badge from https://pkg.go.dev/badge/. 
Signed-off-by: Mechiel Lukkien --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 052e9ab10..492b9b376 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ bbolt ===== -[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt) -[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt) +[![Go Report Card](https://goreportcard.com/badge/go.etcd.io/bbolt?style=flat-square)](https://goreportcard.com/report/go.etcd.io/bbolt) +[![Go Reference](https://pkg.go.dev/badge/go.etcd.io/bbolt.svg)](https://pkg.go.dev/go.etcd.io/bbolt) [![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases) [![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE) From 1eaf75a9d9caef18160dec18cc60dc151d527e60 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 24 May 2023 13:32:11 +0800 Subject: [PATCH 096/439] test: add test case `TestConcurrentRepeatableRead` to verify repeatable read Signed-off-by: Benjamin Wang --- concurrent_test.go | 203 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 197 insertions(+), 6 deletions(-) diff --git a/concurrent_test.go b/concurrent_test.go index cfedcd7c6..d58add5d4 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -1,6 +1,7 @@ package bbolt_test import ( + "bytes" crand "crypto/rand" "encoding/hex" "encoding/json" @@ -9,7 +10,6 @@ import ( mrand "math/rand" "os" "path/filepath" - "reflect" "sort" "strings" "sync" @@ -67,7 +67,7 @@ TestConcurrentReadAndWrite verifies: following reading transactions (with txid >= previous writing txid). 3. The txid should never decrease. 
*/ -func TestConcurrentReadAndWrite(t *testing.T) { +func TestConcurrentGenericReadAndWrite(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } @@ -216,7 +216,21 @@ func concurrentReadAndWrite(t *testing.T, func mustCreateDB(t *testing.T, o *bolt.Options) *bolt.DB { f := filepath.Join(t.TempDir(), "db") - t.Logf("Opening bbolt DB at: %s", f) + return mustOpenDB(t, f, o) +} + +func mustReOpenDB(t *testing.T, db *bolt.DB, o *bolt.Options) *bolt.DB { + f := db.Path() + + t.Logf("CLosing bbolt DB at: %s", f) + err := db.Close() + require.NoError(t, err) + + return mustOpenDB(t, f, o) +} + +func mustOpenDB(t *testing.T, dbPath string, o *bolt.Options) *bolt.DB { + t.Logf("Opening bbolt DB at: %s", dbPath) if o == nil { o = bolt.DefaultOptions } @@ -228,7 +242,7 @@ func mustCreateDB(t *testing.T, o *bolt.Options) *bolt.DB { o.FreelistType = freelistType - db, err := bolt.Open(f, 0666, o) + db, err := bolt.Open(dbPath, 0666, o) require.NoError(t, err) return db @@ -409,7 +423,7 @@ func executeRead(db *bolt.DB, bucket []byte, key []byte, readInterval duration) time.Sleep(randomDurationInRange(readInterval.min, readInterval.max)) val := b.Get(key) - if !reflect.DeepEqual(initialVal, val) { + if !bytes.Equal(initialVal, val) { return fmt.Errorf("read different values for the same key (%q), value1: %q, value2: %q", string(key), formatBytes(initialVal), formatBytes(val)) } @@ -713,7 +727,7 @@ func validateSequential(rs historyRecords) error { } else if rec.OperationType == Delete { delete(lastWriteKeyValueMap, bk) } else { - if !reflect.DeepEqual(v.Value, rec.Value) { + if !bytes.Equal(v.Value, rec.Value) { return fmt.Errorf("readOperation[txid: %d, bucket: %s, key: %s] read %x, \nbut writer[txid: %d] wrote %x", rec.Txid, rec.Bucket, rec.Key, rec.Value, v.Txid, v.Value) } @@ -738,3 +752,180 @@ func validateSequential(rs historyRecords) error { return nil } + +func TestConcurrentRepeatableRead(t *testing.T) { + if testing.Short() { + t.Skip("skipping 
test in short mode.") + } + + testCases := []struct { + name string + noFreelistSync bool + freelistType bolt.FreelistType + }{ + // [array] freelist + { + name: "sync array freelist", + noFreelistSync: false, + freelistType: bolt.FreelistArrayType, + }, + { + name: "not sync array freelist", + noFreelistSync: true, + freelistType: bolt.FreelistArrayType, + }, + // [map] freelist + { + name: "sync map freelist", + noFreelistSync: false, + freelistType: bolt.FreelistMapType, + }, + { + name: "not sync map freelist", + noFreelistSync: true, + freelistType: bolt.FreelistMapType, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + + t.Log("Preparing db.") + var ( + bucket = []byte("data") + key = []byte("mykey") + + option = &bolt.Options{ + PageSize: 4096, + NoFreelistSync: tc.noFreelistSync, + FreelistType: tc.freelistType, + } + ) + + db := mustCreateDB(t, option) + defer func() { + db.Close() + }() + + // Create lots of K/V to allocate some pages + err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists(bucket) + if err != nil { + return err + } + for i := 0; i < 1000; i++ { + k := fmt.Sprintf("key_%d", i) + if err := b.Put([]byte(k), make([]byte, 1024)); err != nil { + return err + } + } + return nil + }) + require.NoError(t, err) + + // Remove all K/V to create some free pages + err = db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(bucket) + for i := 0; i < 1000; i++ { + k := fmt.Sprintf("key_%d", i) + if err := b.Delete([]byte(k)); err != nil { + return err + } + } + return b.Put(key, []byte("randomValue")) + }) + require.NoError(t, err) + + db = mustReOpenDB(t, db, option) + + var ( + wg sync.WaitGroup + longRunningReaderCount = 10 + stopCh = make(chan struct{}) + errCh = make(chan error, longRunningReaderCount) + readInterval = duration{5 * time.Millisecond, 10 * time.Millisecond} + + writeOperationCountInBetween = 5 + writeBytes = bytesRange{10, 20} + + testDuration = 10 * time.Second + 
) + + for i := 0; i < longRunningReaderCount; i++ { + readWorkerName := fmt.Sprintf("reader_%d", i) + t.Logf("Starting long running read operation: %s", readWorkerName) + wg.Add(1) + go func() { + defer wg.Done() + rErr := executeLongRunningRead(t, readWorkerName, db, bucket, key, readInterval, stopCh) + if rErr != nil { + errCh <- rErr + } + }() + time.Sleep(500 * time.Millisecond) + + t.Logf("Perform %d write operations after starting a long running read operation", writeOperationCountInBetween) + for j := 0; j < writeOperationCountInBetween; j++ { + _, err := executeWrite(db, bucket, key, writeBytes, 0) + require.NoError(t, err) + } + } + + t.Log("Perform lots of write operations to check whether the long running read operations will read dirty data") + wg.Add(1) + go func() { + defer wg.Done() + cnt := longRunningReaderCount * writeOperationCountInBetween + for i := 0; i < cnt; i++ { + select { + case <-stopCh: + return + default: + } + _, err := executeWrite(db, bucket, key, writeBytes, 0) + require.NoError(t, err) + } + }() + + t.Log("Waiting for result") + select { + case err := <-errCh: + close(stopCh) + t.Errorf("Detected dirty read: %v", err) + case <-time.After(testDuration): + close(stopCh) + } + + wg.Wait() + }) + } +} + +func executeLongRunningRead(t *testing.T, name string, db *bolt.DB, bucket []byte, key []byte, readInterval duration, stopCh chan struct{}) error { + err := db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(bucket) + + initialVal := b.Get(key) + + for { + select { + case <-stopCh: + t.Logf("%q finished.", name) + return nil + default: + } + + time.Sleep(randomDurationInRange(readInterval.min, readInterval.max)) + val := b.Get(key) + + if !bytes.Equal(initialVal, val) { + dirtyReadErr := fmt.Errorf("read different values for the same key (%q), value1: %q, value2: %q", + string(key), formatBytes(initialVal), formatBytes(val)) + return dirtyReadErr + } + } + }) + + return err +} From 99071629bf61b9d79a26c9e58245ec707a5f73a1 Mon Sep 17 
00:00:00 2001 From: Benjamin Wang Date: Thu, 25 May 2023 10:59:05 +0800 Subject: [PATCH 097/439] test: add comments to describe TestConcurrentRepeatableRead Signed-off-by: Benjamin Wang --- concurrent_test.go | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/concurrent_test.go b/concurrent_test.go index d58add5d4..8e93acab4 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -60,7 +60,7 @@ type concurrentConfig struct { } /* -TestConcurrentReadAndWrite verifies: +TestConcurrentGenericReadAndWrite verifies: 1. Repeatable read: a read transaction should always see the same data view during its lifecycle. 2. Any data written by a writing transaction should be visible to any @@ -222,7 +222,7 @@ func mustCreateDB(t *testing.T, o *bolt.Options) *bolt.DB { func mustReOpenDB(t *testing.T, db *bolt.DB, o *bolt.Options) *bolt.DB { f := db.Path() - t.Logf("CLosing bbolt DB at: %s", f) + t.Logf("Closing bbolt DB at: %s", f) err := db.Close() require.NoError(t, err) @@ -753,6 +753,15 @@ func validateSequential(rs historyRecords) error { return nil } +/* +TestConcurrentRepeatableRead verifies repeatable read. The case +intentionally creates a scenario that read and write transactions +are interleaved. It performs several writing operations after starting +each long-running read transaction to ensure it has a larger txid +than previous read transaction. It verifies that bbolt correctly +releases free pages, and will not pollute (e.g. prematurely release) +any pages which are still being used by any read transaction. +*/ func TestConcurrentRepeatableRead(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") @@ -805,6 +814,11 @@ func TestConcurrentRepeatableRead(t *testing.T) { db := mustCreateDB(t, option) defer func() { + // The db will be reopened later, so put `db.Close()` in a function + // to avoid premature evaluation of `db`. 
Note that the execution + // of a deferred function is deferred to the moment the surrounding + // function returns, but the function value and parameters to the + // call are evaluated as usual and saved anew. db.Close() }() @@ -837,6 +851,11 @@ func TestConcurrentRepeatableRead(t *testing.T) { }) require.NoError(t, err) + // bbolt will not release free pages directly after committing + // a writing transaction; instead all pages freed are putting + // into a pending list. Accordingly, the free pages might not + // be able to be reused by following writing transactions. So + // we reopen the db to completely release all free pages. db = mustReOpenDB(t, db, option) var ( From d19b7c8fbaf71df2bc2c7c9ffab554125aa84a47 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 26 May 2023 15:00:45 +0800 Subject: [PATCH 098/439] test: fix 'loadMetaPage' always read the first meta page Signed-off-by: Benjamin Wang --- cmd/bbolt/utils_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/bbolt/utils_test.go b/cmd/bbolt/utils_test.go index 5ea11b28a..7a7fc7c92 100644 --- a/cmd/bbolt/utils_test.go +++ b/cmd/bbolt/utils_test.go @@ -11,7 +11,7 @@ import ( ) func loadMetaPage(t *testing.T, dbPath string, pageID uint64) *common.Meta { - _, buf, err := guts_cli.ReadPage(dbPath, 0) + _, buf, err := guts_cli.ReadPage(dbPath, pageID) require.NoError(t, err) return common.LoadPageMeta(buf) } From fddd3ac7f799cbb22871ad0760ffbf28646d204c Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 26 May 2023 16:13:13 +0800 Subject: [PATCH 099/439] remove field 'filesz' from DB There is no reason to maintain a file size. It's only used when mapping the db or growing the db; obviously neither are hot path. 
Signed-off-by: Benjamin Wang --- db.go | 33 +++++++++++++++++++++------------ db_test.go | 3 ++- 2 files changed, 23 insertions(+), 13 deletions(-) diff --git a/db.go b/db.go index 4b8a4b82d..4a73ce1d1 100644 --- a/db.go +++ b/db.go @@ -119,7 +119,6 @@ type DB struct { dataref []byte // mmap'ed readonly, write throws SEGV data *[maxMapSize]byte datasz int - filesz int // current on disk file size meta0 *common.Meta meta1 *common.Meta pageSize int @@ -402,21 +401,29 @@ func (db *DB) hasSyncedFreelist() bool { return db.meta().Freelist() != common.PgidNoFreelist } +func (db *DB) fileSize() (int, error) { + info, err := db.file.Stat() + if err != nil { + return 0, fmt.Errorf("file stat error: %w", err) + } + sz := int(info.Size()) + if sz < db.pageSize*2 { + return 0, fmt.Errorf("file size too small %d", sz) + } + return sz, nil +} + // mmap opens the underlying memory-mapped file and initializes the meta references. // minsz is the minimum size that the new mmap can be. func (db *DB) mmap(minsz int) (err error) { db.mmaplock.Lock() defer db.mmaplock.Unlock() - info, err := db.file.Stat() + // Ensure the size is at least the minimum size. + fileSize, err := db.fileSize() if err != nil { - return fmt.Errorf("mmap stat error: %s", err) - } else if int(info.Size()) < db.pageSize*2 { - return fmt.Errorf("file size too small") + return err } - - // Ensure the size is at least the minimum size. - fileSize := int(info.Size()) var size = fileSize if size < minsz { size = minsz @@ -610,7 +617,6 @@ func (db *DB) init() error { if err := fdatasync(db); err != nil { return err } - db.filesz = len(buf) return nil } @@ -1120,7 +1126,11 @@ func (db *DB) allocate(txid common.Txid, count int) (*common.Page, error) { // grow grows the size of the database to the given sz. func (db *DB) grow(sz int) error { // Ignore if the new size is less than available file size. 
- if sz <= db.filesz { + fileSize, err := db.fileSize() + if err != nil { + return err + } + if sz <= fileSize { return nil } @@ -1147,13 +1157,12 @@ func (db *DB) grow(sz int) error { } if db.Mlock { // unlock old file and lock new one - if err := db.mrelock(db.filesz, sz); err != nil { + if err := db.mrelock(fileSize, sz); err != nil { return fmt.Errorf("mlock/munlock error: %s", err) } } } - db.filesz = sz return nil } diff --git a/db_test.go b/db_test.go index 1b103676f..5700f8647 100644 --- a/db_test.go +++ b/db_test.go @@ -11,6 +11,7 @@ import ( "os" "path/filepath" "reflect" + "strings" "sync" "testing" "time" @@ -443,7 +444,7 @@ func TestOpen_FileTooSmall(t *testing.T) { } _, err = bolt.Open(path, 0666, nil) - if err == nil || err.Error() != "file size too small" { + if err == nil || !strings.Contains(err.Error(), "file size too small") { t.Fatalf("unexpected error: %s", err) } } From 03e5cc7958c042a1fe88eb0acd0e0e814c08e34b Mon Sep 17 00:00:00 2001 From: Cenk Alti Date: Tue, 30 May 2023 10:04:05 -0400 Subject: [PATCH 100/439] Adjust rebalance threshold with Bucket.FillPercent Signed-off-by: Cenk Alti --- node.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/node.go b/node.go index 2f4d46baf..fe67c3c89 100644 --- a/node.go +++ b/node.go @@ -371,8 +371,8 @@ func (n *node) rebalance() { // Update statistics. n.bucket.tx.stats.IncRebalance(1) - // Ignore if node is above threshold (25%) and has enough keys. - var threshold = n.bucket.tx.db.pageSize / 4 + // Ignore if node is above threshold (25% when FillPercent is set to DefaultFillPercent) and has enough keys. 
+ var threshold = int(float64(n.bucket.tx.db.pageSize)*n.bucket.FillPercent) / 2 if n.size() > threshold && len(n.inodes) > n.minKeys() { return } From 3b3316810d521d4c3745ea94901ad38fe81ed1c1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 15:01:14 +0000 Subject: [PATCH 101/439] build(deps): Bump golangci/golangci-lint-action from 3.4.0 to 3.5.0 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 3.4.0 to 3.5.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/08e2f20817b15149a52b5b3ebe7de50aff2ba8c5...5f1fec7010f6ae3b84ea4f7b2129beb8639b564f) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/tests.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 90eed6e77..8c26947df 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -45,7 +45,7 @@ jobs: ;; esac - name: golangci-lint - uses: golangci/golangci-lint-action@08e2f20817b15149a52b5b3ebe7de50aff2ba8c5 # v3.4.0 + uses: golangci/golangci-lint-action@5f1fec7010f6ae3b84ea4f7b2129beb8639b564f # v3.5.0 test-windows: strategy: @@ -82,7 +82,7 @@ jobs: esac shell: bash - name: golangci-lint - uses: golangci/golangci-lint-action@08e2f20817b15149a52b5b3ebe7de50aff2ba8c5 # v3.4.0 + uses: golangci/golangci-lint-action@5f1fec7010f6ae3b84ea4f7b2129beb8639b564f # v3.5.0 coverage: needs: ["test-linux", "test-windows"] From 1b3c29385aa8310af5b4a022be7c677d3f658584 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Jun 2023 15:01:26 +0000 Subject: [PATCH 102/439] build(deps): Bump 
github.com/stretchr/testify from 1.8.3 to 1.8.4 Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.8.3 to 1.8.4. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.8.3...v1.8.4) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4ed94ece5..2eb58267d 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.19 require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.3 + github.com/stretchr/testify v1.8.4 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.2.0 golang.org/x/sys v0.8.0 diff --git a/go.sum b/go.sum index b6e98b0db..867c96353 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,8 @@ github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= From c8cb5273d9c7f0e4e96a79d11b432b9803d0f43f Mon Sep 17 00:00:00 2001 From: charles-chenzz Date: Thu, 8 Jun 2023 
20:00:09 +0800 Subject: [PATCH 103/439] update go to latest patch release 1.19.10 Signed-off-by: charles-chenzz --- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/tests.yaml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index 5c954fa8e..bb0af19fa 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -11,7 +11,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.9" + go-version: "1.19.10" - run: | make gofail-enable make test-failpoint diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 8c26947df..7d30e9cc2 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -15,7 +15,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.9" + go-version: "1.19.10" - run: make fmt - env: TARGET: ${{ matrix.target }} @@ -66,7 +66,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.9" + go-version: "1.19.10" - run: make fmt - env: TARGET: ${{ matrix.target }} @@ -94,6 +94,6 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.9" + go-version: "1.19.10" - run: make coverage From df4062a4343710b6667842631da29f06d7cd123b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Jun 2023 15:03:11 +0000 Subject: [PATCH 104/439] build(deps): Bump golangci/golangci-lint-action from 3.5.0 to 3.6.0 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 3.5.0 to 3.6.0. 
- [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/5f1fec7010f6ae3b84ea4f7b2129beb8639b564f...639cd343e1d3b897ff35927a75193d57cfcba299) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/tests.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 7d30e9cc2..1ab1be14f 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -45,7 +45,7 @@ jobs: ;; esac - name: golangci-lint - uses: golangci/golangci-lint-action@5f1fec7010f6ae3b84ea4f7b2129beb8639b564f # v3.5.0 + uses: golangci/golangci-lint-action@639cd343e1d3b897ff35927a75193d57cfcba299 # v3.6.0 test-windows: strategy: @@ -82,7 +82,7 @@ jobs: esac shell: bash - name: golangci-lint - uses: golangci/golangci-lint-action@5f1fec7010f6ae3b84ea4f7b2129beb8639b564f # v3.5.0 + uses: golangci/golangci-lint-action@639cd343e1d3b897ff35927a75193d57cfcba299 # v3.6.0 coverage: needs: ["test-linux", "test-windows"] From 3f7812656f9a07c1c78658ec32110c6afae73e90 Mon Sep 17 00:00:00 2001 From: Cenk Alti Date: Sun, 18 Jun 2023 20:27:01 -0400 Subject: [PATCH 105/439] Update Bucket.Get documentation Signed-off-by: Cenk Alti --- bucket.go | 1 + 1 file changed, 1 insertion(+) diff --git a/bucket.go b/bucket.go index b1c1b8586..6a3c48ae8 100644 --- a/bucket.go +++ b/bucket.go @@ -248,6 +248,7 @@ func (b *Bucket) DeleteBucket(key []byte) error { // Get retrieves the value for a key in the bucket. // Returns a nil value if the key does not exist or if the key is a nested bucket. // The returned value is only valid for the life of the transaction. +// The returned memory is owned by bbolt and must never be modified; writing to this memory might corrupt the database. 
func (b *Bucket) Get(key []byte) []byte { k, v, flags := b.Cursor().seek(key) From 11a0cdf6a0fb8f12449eff6b9778f6c2f1f53923 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Jun 2023 15:01:11 +0000 Subject: [PATCH 106/439] build(deps): Bump golang.org/x/sys from 0.8.0 to 0.9.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.8.0 to 0.9.0. - [Commits](https://github.com/golang/sys/compare/v0.8.0...v0.9.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 2eb58267d..8772fa830 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/stretchr/testify v1.8.4 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.2.0 - golang.org/x/sys v0.8.0 + golang.org/x/sys v0.9.0 ) require ( diff --git a/go.sum b/go.sum index 867c96353..d83748ce1 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 4ff523cf64a10596a1582eb8a9d9e6a8f66bd7cc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 19 Jun 2023 18:06:54 +0000 Subject: [PATCH 107/439] build(deps): Bump golang.org/x/sync from 0.2.0 to 0.3.0 Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.2.0 to 0.3.0. - [Commits](https://github.com/golang/sync/compare/v0.2.0...v0.3.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8772fa830..9501166f7 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.4 go.etcd.io/gofail v0.1.0 - golang.org/x/sync v0.2.0 + golang.org/x/sync v0.3.0 golang.org/x/sys v0.9.0 ) diff --git a/go.sum b/go.sum index d83748ce1..735ff6e72 100644 --- a/go.sum +++ b/go.sum @@ -14,8 +14,8 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= 
From 5773a82712125fcc8079e5f0111e956796921db2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Jul 2023 14:44:36 +0000 Subject: [PATCH 108/439] build(deps): Bump golang.org/x/sys from 0.9.0 to 0.10.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.9.0 to 0.10.0. - [Commits](https://github.com/golang/sys/compare/v0.9.0...v0.10.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 9501166f7..6b86a1680 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/stretchr/testify v1.8.4 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.3.0 - golang.org/x/sys v0.9.0 + golang.org/x/sys v0.10.0 ) require ( diff --git a/go.sum b/go.sum index 735ff6e72..ea91fc23b 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s= -golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From e1112bf01a37ea1782cd72b0445d48f1262d0ad9 Mon Sep 17 
00:00:00 2001 From: James Blair Date: Thu, 13 Jul 2023 21:14:38 +1200 Subject: [PATCH 109/439] Updated go to latest patch release 1.19.11. Signed-off-by: James Blair --- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/tests.yaml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index bb0af19fa..09820f3a4 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -11,7 +11,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.10" + go-version: "1.19.11" - run: | make gofail-enable make test-failpoint diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 1ab1be14f..16ad7e664 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -15,7 +15,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.10" + go-version: "1.19.11" - run: make fmt - env: TARGET: ${{ matrix.target }} @@ -66,7 +66,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.10" + go-version: "1.19.11" - run: make fmt - env: TARGET: ${{ matrix.target }} @@ -94,6 +94,6 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.10" + go-version: "1.19.11" - run: make coverage From df89d0a3834158ca2ec051df19ee82b44fbe7273 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 13 Jul 2023 16:33:07 +0100 Subject: [PATCH 110/439] Add binary file /bbolt into .gitignore Signed-off-by: Benjamin Wang --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 9fa948ebf..fdc9164cc 100644 --- a/.gitignore +++ b/.gitignore @@ -6,5 +6,6 @@ cover.out cover-*.out /.idea *.iml +/bbolt /cmd/bbolt/bbolt From c1a02c4913c4fc77ada7058ae1016f37b2575335 Mon Sep 17 00:00:00 2001 From: Ishan Tyagi Date: Mon, 17 Jul 2023 09:07:24 +0530 Subject: 
[PATCH 111/439] updated bbolt stats help. Signed-off-by: ishan16696 --- cmd/bbolt/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 6e2e7310e..026cf8680 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -829,9 +829,9 @@ The following errors can be reported: The page type is not "meta", "leaf", "branch", or "freelist". No errors should occur in your database. However, if for some reason you -experience corruption, please submit a ticket to the Bolt project page: +experience corruption, please submit a ticket to the etcd-io/bbolt project page: - https://github.com/boltdb/bolt/issues + https://github.com/etcd-io/bbolt/issues `, "\n") } From 7cab6be2ae419b6ca22762a8402b26670d4e27a0 Mon Sep 17 00:00:00 2001 From: Ishan Tyagi Date: Wed, 26 Jul 2023 00:27:33 +0530 Subject: [PATCH 112/439] Documentation: standardised examples in README.md to use file mode:0600 Signed-off-by: Ishan Tyagi --- README.md | 4 ++-- db.go | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index a23ddf927..254f02785 100644 --- a/README.md +++ b/README.md @@ -101,7 +101,7 @@ To use bbolt as an embedded key-value store, import as: ```go import bolt "go.etcd.io/bbolt" -db, err := bolt.Open(path, 0666, nil) +db, err := bolt.Open(path, 0600, nil) if err != nil { return err } @@ -664,7 +664,7 @@ uses a shared lock to allow multiple processes to read from the database but it will block any processes from opening the database in read-write mode. ```go -db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) +db, err := bolt.Open("my.db", 0600, &bolt.Options{ReadOnly: true}) if err != nil { log.Fatal(err) } diff --git a/db.go b/db.go index 4a73ce1d1..0d35861d8 100644 --- a/db.go +++ b/db.go @@ -164,9 +164,10 @@ func (db *DB) String() string { return fmt.Sprintf("DB<%q>", db.path) } -// Open creates and opens a database at the given path. 
-// If the file does not exist then it will be created automatically. +// Open creates and opens a database at the given path with a given file mode. +// If the file does not exist then it will be created automatically with a given file mode. // Passing in nil options will cause Bolt to open the database with the default options. +// Note: For read/write transactions, ensure the owner has write permission on the created/opened database file, e.g. 0600 func Open(path string, mode os.FileMode, options *Options) (*DB, error) { db := &DB{ opened: true, From c0b6749ca4ebdd8bbd9e6c63811fe25b9539c64d Mon Sep 17 00:00:00 2001 From: Ishan Tyagi Date: Sat, 29 Jul 2023 18:06:19 +0530 Subject: [PATCH 113/439] Fix tests to open a bbolt database with file mode:0600 instead of 0666. Signed-off-by: Ishan Tyagi --- bucket_test.go | 6 ++--- cmd/bbolt/main_test.go | 2 +- concurrent_test.go | 2 +- cursor_test.go | 4 ++-- db_test.go | 34 ++++++++++++++-------------- internal/btesting/btesting.go | 4 ++-- tests/failpoint/db_failpoint_test.go | 10 ++++---- tx_test.go | 10 ++++---- 8 files changed, 36 insertions(+), 36 deletions(-) diff --git a/bucket_test.go b/bucket_test.go index 74d85e429..21becb330 100644 --- a/bucket_test.go +++ b/bucket_test.go @@ -1885,7 +1885,7 @@ func TestBucket_Delete_Quick(t *testing.T) { func ExampleBucket_Put() { // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0600, nil) if err != nil { log.Fatal(err) } @@ -1928,7 +1928,7 @@ func ExampleBucket_Put() { func ExampleBucket_Delete() { // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0600, nil) if err != nil { log.Fatal(err) } @@ -1986,7 +1986,7 @@ func ExampleBucket_Delete() { func ExampleBucket_ForEach() { // Open the database. 
- db, err := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0600, nil) if err != nil { log.Fatal(err) } diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index ffed33cfd..dddacd8d0 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -583,7 +583,7 @@ func fillBucket(b *bolt.Bucket, prefix []byte) error { } func chkdb(path string) ([]byte, error) { - db, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true}) + db, err := bolt.Open(path, 0600, &bolt.Options{ReadOnly: true}) if err != nil { return nil, err } diff --git a/concurrent_test.go b/concurrent_test.go index 8e93acab4..3995c0466 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -242,7 +242,7 @@ func mustOpenDB(t *testing.T, dbPath string, o *bolt.Options) *bolt.DB { o.FreelistType = freelistType - db, err := bolt.Open(dbPath, 0666, o) + db, err := bolt.Open(dbPath, 0600, o) require.NoError(t, err) return db diff --git a/cursor_test.go b/cursor_test.go index 20e661424..42e2cd6c0 100644 --- a/cursor_test.go +++ b/cursor_test.go @@ -744,7 +744,7 @@ func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) { func ExampleCursor() { // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0600, nil) if err != nil { log.Fatal(err) } @@ -798,7 +798,7 @@ func ExampleCursor() { func ExampleCursor_reverse() { // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0600, nil) if err != nil { log.Fatal(err) } diff --git a/db_test.go b/db_test.go index 5700f8647..3d360857e 100644 --- a/db_test.go +++ b/db_test.go @@ -49,7 +49,7 @@ func TestOpen(t *testing.T) { path := tempfile() defer os.RemoveAll(path) - db, err := bolt.Open(path, 0666, nil) + db, err := bolt.Open(path, 0600, nil) if err != nil { t.Fatal(err) } else if db == nil { @@ -108,7 +108,7 @@ func TestOpen_MultipleGoroutines(t *testing.T) { // Ensure that opening a database with a blank path returns an error. 
func TestOpen_ErrPathRequired(t *testing.T) { - _, err := bolt.Open("", 0666, nil) + _, err := bolt.Open("", 0600, nil) if err == nil { t.Fatalf("expected error") } @@ -116,7 +116,7 @@ func TestOpen_ErrPathRequired(t *testing.T) { // Ensure that opening a database with a bad path returns an error. func TestOpen_ErrNotExists(t *testing.T) { - _, err := bolt.Open(filepath.Join(tempfile(), "bad-path"), 0666, nil) + _, err := bolt.Open(filepath.Join(tempfile(), "bad-path"), 0600, nil) if err == nil { t.Fatal("expected error") } @@ -138,7 +138,7 @@ func TestOpen_ErrInvalid(t *testing.T) { t.Fatal(err) } - if _, err := bolt.Open(path, 0666, nil); err != berrors.ErrInvalid { + if _, err := bolt.Open(path, 0600, nil); err != berrors.ErrInvalid { t.Fatalf("unexpected error: %s", err) } } @@ -174,7 +174,7 @@ func TestOpen_ErrVersionMismatch(t *testing.T) { } // Reopen data file. - if _, err := bolt.Open(path, 0666, nil); err != berrors.ErrVersionMismatch { + if _, err := bolt.Open(path, 0600, nil); err != berrors.ErrVersionMismatch { t.Fatalf("unexpected error: %s", err) } } @@ -210,7 +210,7 @@ func TestOpen_ErrChecksum(t *testing.T) { } // Reopen data file. - if _, err := bolt.Open(path, 0666, nil); err != berrors.ErrChecksum { + if _, err := bolt.Open(path, 0600, nil); err != berrors.ErrChecksum { t.Fatalf("unexpected error: %s", err) } } @@ -366,7 +366,7 @@ func TestOpen_Size_Large(t *testing.T) { } // Reopen database, update, and check size again. 
- db0, err := bolt.Open(path, 0666, nil) + db0, err := bolt.Open(path, 0600, nil) if err != nil { t.Fatal(err) } @@ -396,7 +396,7 @@ func TestOpen_Check(t *testing.T) { path := tempfile() defer os.RemoveAll(path) - db, err := bolt.Open(path, 0666, nil) + db, err := bolt.Open(path, 0600, nil) if err != nil { t.Fatal(err) } @@ -407,7 +407,7 @@ func TestOpen_Check(t *testing.T) { t.Fatal(err) } - db, err = bolt.Open(path, 0666, nil) + db, err = bolt.Open(path, 0600, nil) if err != nil { t.Fatal(err) } @@ -429,7 +429,7 @@ func TestOpen_FileTooSmall(t *testing.T) { path := tempfile() defer os.RemoveAll(path) - db, err := bolt.Open(path, 0666, nil) + db, err := bolt.Open(path, 0600, nil) if err != nil { t.Fatal(err) } @@ -443,7 +443,7 @@ func TestOpen_FileTooSmall(t *testing.T) { t.Fatal(err) } - _, err = bolt.Open(path, 0666, nil) + _, err = bolt.Open(path, 0600, nil) if err == nil || !strings.Contains(err.Error(), "file size too small") { t.Fatalf("unexpected error: %s", err) } @@ -460,7 +460,7 @@ func TestDB_Open_InitialMmapSize(t *testing.T) { initMmapSize := 1 << 30 // 1GB testWriteSize := 1 << 27 // 134MB - db, err := bolt.Open(path, 0666, &bolt.Options{InitialMmapSize: initMmapSize}) + db, err := bolt.Open(path, 0600, &bolt.Options{InitialMmapSize: initMmapSize}) if err != nil { t.Fatal(err) } @@ -533,7 +533,7 @@ func TestDB_Open_ReadOnly(t *testing.T) { f := db.Path() o := &bolt.Options{ReadOnly: true} - readOnlyDB, err := bolt.Open(f, 0666, o) + readOnlyDB, err := bolt.Open(f, 0600, o) if err != nil { panic(err) } @@ -565,7 +565,7 @@ func TestDB_Open_ReadOnly(t *testing.T) { func TestDB_Open_ReadOnly_NoCreate(t *testing.T) { f := filepath.Join(t.TempDir(), "db") - _, err := bolt.Open(f, 0666, &bolt.Options{ReadOnly: true}) + _, err := bolt.Open(f, 0600, &bolt.Options{ReadOnly: true}) require.ErrorIs(t, err, os.ErrNotExist) } @@ -1348,7 +1348,7 @@ func TestDBUnmap(t *testing.T) { func ExampleDB_Update() { // Open the database. 
- db, err := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0600, nil) if err != nil { log.Fatal(err) } @@ -1388,7 +1388,7 @@ func ExampleDB_Update() { func ExampleDB_View() { // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0600, nil) if err != nil { log.Fatal(err) } @@ -1431,7 +1431,7 @@ func ExampleDB_View() { func ExampleDB_Begin() { // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0600, nil) if err != nil { log.Fatal(err) } diff --git a/internal/btesting/btesting.go b/internal/btesting/btesting.go index 4477f5f8a..c83369f09 100644 --- a/internal/btesting/btesting.go +++ b/internal/btesting/btesting.go @@ -56,7 +56,7 @@ func MustOpenDBWithOption(t testing.TB, f string, o *bolt.Options) *DB { o.FreelistType = freelistType - db, err := bolt.Open(f, 0666, o) + db, err := bolt.Open(f, 0600, o) require.NoError(t, err) resDB := &DB{ DB: db, @@ -115,7 +115,7 @@ func (db *DB) MustReopen() { panic("Please call Close() before MustReopen()") } db.t.Logf("Reopening bbolt DB at: %s", db.f) - indb, err := bolt.Open(db.Path(), 0666, db.o) + indb, err := bolt.Open(db.Path(), 0600, db.o) require.NoError(db.t, err) db.DB = indb db.strictModeEnabledDefault() diff --git a/tests/failpoint/db_failpoint_test.go b/tests/failpoint/db_failpoint_test.go index b38b16ac1..8a0d2e308 100644 --- a/tests/failpoint/db_failpoint_test.go +++ b/tests/failpoint/db_failpoint_test.go @@ -22,7 +22,7 @@ func TestFailpoint_MapFail(t *testing.T) { }() f := filepath.Join(t.TempDir(), "db") - _, err = bolt.Open(f, 0666, nil) + _, err = bolt.Open(f, 0600, nil) require.Error(t, err) require.ErrorContains(t, err, "map somehow failed") } @@ -36,14 +36,14 @@ func TestFailpoint_UnmapFail_DbClose(t *testing.T) { err := gofail.Enable("unmapError", `return("unmap somehow failed")`) require.NoError(t, err) - _, err = bolt.Open(f, 0666, nil) + _, err = bolt.Open(f, 0600, nil) 
require.Error(t, err) require.ErrorContains(t, err, "unmap somehow failed") //disable the error, and try to reopen the db err = gofail.Disable("unmapError") require.NoError(t, err) - db, err := bolt.Open(f, 0666, &bolt.Options{Timeout: 30 * time.Second}) + db, err := bolt.Open(f, 0600, &bolt.Options{Timeout: 30 * time.Second}) require.NoError(t, err) err = db.Close() require.NoError(t, err) @@ -54,7 +54,7 @@ func TestFailpoint_mLockFail(t *testing.T) { require.NoError(t, err) f := filepath.Join(t.TempDir(), "db") - _, err = bolt.Open(f, 0666, &bolt.Options{Mlock: true}) + _, err = bolt.Open(f, 0600, &bolt.Options{Mlock: true}) require.Error(t, err) require.ErrorContains(t, err, "mlock somehow failed") @@ -62,7 +62,7 @@ func TestFailpoint_mLockFail(t *testing.T) { err = gofail.Disable("mlockError") require.NoError(t, err) - _, err = bolt.Open(f, 0666, &bolt.Options{Mlock: true}) + _, err = bolt.Open(f, 0600, &bolt.Options{Mlock: true}) require.NoError(t, err) } diff --git a/tx_test.go b/tx_test.go index ceda3446a..cc59804b2 100644 --- a/tx_test.go +++ b/tx_test.go @@ -37,7 +37,7 @@ func TestTx_Check_ReadOnly(t *testing.T) { t.Fatal(err) } - readOnlyDB, err := bolt.Open(db.Path(), 0666, &bolt.Options{ReadOnly: true}) + readOnlyDB, err := bolt.Open(db.Path(), 0600, &bolt.Options{ReadOnly: true}) if err != nil { t.Fatal(err) } @@ -645,7 +645,7 @@ func TestTx_CopyFile_Error_Normal(t *testing.T) { func TestTx_Rollback(t *testing.T) { for _, isSyncFreelist := range []bool{false, true} { // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0600, nil) if err != nil { log.Fatal(err) } @@ -798,7 +798,7 @@ func TestTx_releaseRange(t *testing.T) { func ExampleTx_Rollback() { // Open the database. - db, err := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0600, nil) if err != nil { log.Fatal(err) } @@ -852,7 +852,7 @@ func ExampleTx_Rollback() { func ExampleTx_CopyFile() { // Open the database. 
- db, err := bolt.Open(tempfile(), 0666, nil) + db, err := bolt.Open(tempfile(), 0600, nil) if err != nil { log.Fatal(err) } @@ -882,7 +882,7 @@ func ExampleTx_CopyFile() { defer os.Remove(toFile) // Open the cloned database. - db2, err := bolt.Open(toFile, 0666, nil) + db2, err := bolt.Open(toFile, 0600, nil) if err != nil { log.Fatal(err) } From 65759b6c759394c6a7ebef8244d5dbca1f77f4de Mon Sep 17 00:00:00 2001 From: Ishan Tyagi Date: Sat, 29 Jul 2023 18:35:00 +0530 Subject: [PATCH 114/439] Fix bbolt command-line commands to open a bbolt database with file mode:0600 instead of 0666. Fix bbolt command-line command: compact to open a bbolt database with file mode:0400 instead of 0444 Signed-off-by: Ishan Tyagi --- cmd/bbolt/main.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 026cf8680..364b1fe21 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -208,7 +208,7 @@ func (cmd *checkCommand) Run(args ...string) error { } // Open database. - db, err := bolt.Open(path, 0666, &bolt.Options{ + db, err := bolt.Open(path, 0600, &bolt.Options{ ReadOnly: true, PreLoadFreelist: true, }) @@ -284,7 +284,7 @@ func (cmd *infoCommand) Run(args ...string) error { } // Open the database. - db, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true}) + db, err := bolt.Open(path, 0600, &bolt.Options{ReadOnly: true}) if err != nil { return err } @@ -644,7 +644,7 @@ func (cmd *pagesCommand) Run(args ...string) error { } // Open database. - db, err := bolt.Open(path, 0666, &bolt.Options{ + db, err := bolt.Open(path, 0600, &bolt.Options{ ReadOnly: true, PreLoadFreelist: true, }) @@ -737,7 +737,7 @@ func (cmd *statsCommand) Run(args ...string) error { } // Open database. 
- db, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true}) + db, err := bolt.Open(path, 0600, &bolt.Options{ReadOnly: true}) if err != nil { return err } @@ -868,7 +868,7 @@ func (cmd *bucketsCommand) Run(args ...string) error { } // Open database. - db, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true}) + db, err := bolt.Open(path, 0600, &bolt.Options{ReadOnly: true}) if err != nil { return err } @@ -929,7 +929,7 @@ func (cmd *keysCommand) Run(args ...string) error { } // Open database. - db, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true}) + db, err := bolt.Open(path, 0600, &bolt.Options{ReadOnly: true}) if err != nil { return err } @@ -1020,7 +1020,7 @@ func (cmd *getCommand) Run(args ...string) error { } // Open database. - db, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true}) + db, err := bolt.Open(path, 0600, &bolt.Options{ReadOnly: true}) if err != nil { return err } @@ -1097,7 +1097,7 @@ func (cmd *benchCommand) Run(args ...string) error { } // Create database. - db, err := bolt.Open(options.Path, 0666, nil) + db, err := bolt.Open(options.Path, 0600, nil) if err != nil { return err } @@ -1652,7 +1652,7 @@ func (cmd *compactCommand) Run(args ...string) (err error) { initialSize := fi.Size() // Open source database. 
- src, err := bolt.Open(cmd.SrcPath, 0444, &bolt.Options{ReadOnly: true}) + src, err := bolt.Open(cmd.SrcPath, 0400, &bolt.Options{ReadOnly: true}) if err != nil { return err } From 5ddbd0c94e221668fff3f9d234bd6fbb74d9457e Mon Sep 17 00:00:00 2001 From: Marcondes Viana Date: Sat, 29 Jul 2023 17:44:58 -0300 Subject: [PATCH 115/439] tests: add failpoint to simulate lack of disk space Signed-off-by: Marcondes Viana --- tests/failpoint/db_failpoint_test.go | 33 ++++++++++++++++++++++++++++ tx.go | 17 +++++++++----- 2 files changed, 44 insertions(+), 6 deletions(-) diff --git a/tests/failpoint/db_failpoint_test.go b/tests/failpoint/db_failpoint_test.go index 8a0d2e308..c1da5b583 100644 --- a/tests/failpoint/db_failpoint_test.go +++ b/tests/failpoint/db_failpoint_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" + "go.etcd.io/bbolt/errors" "go.etcd.io/bbolt/internal/btesting" gofail "go.etcd.io/gofail/runtime" ) @@ -122,3 +123,35 @@ func TestFailpoint_ResizeFileFail(t *testing.T) { require.NoError(t, err) } + +func TestFailpoint_LackOfDiskSpace(t *testing.T) { + db := btesting.MustCreateDB(t) + + err := gofail.Enable("lackOfDiskSpace", `return("grow somehow failed")`) + require.NoError(t, err) + + tx, err := db.Begin(true) + require.NoError(t, err) + + err = tx.Commit() + require.Error(t, err) + require.ErrorContains(t, err, "grow somehow failed") + + err = tx.Rollback() + require.Error(t, err) + require.ErrorIs(t, err, errors.ErrTxClosed) + + // It should work after disabling the failpoint. 
+ err = gofail.Disable("lackOfDiskSpace") + require.NoError(t, err) + + tx, err = db.Begin(true) + require.NoError(t, err) + + err = tx.Commit() + require.NoError(t, err) + + err = tx.Rollback() + require.Error(t, err) + require.ErrorIs(t, err, errors.ErrTxClosed) +} diff --git a/tx.go b/tx.go index a8e4d9d0b..8b86030c1 100644 --- a/tx.go +++ b/tx.go @@ -1,6 +1,7 @@ package bbolt import ( + "errors" "fmt" "io" "os" @@ -10,7 +11,7 @@ import ( "time" "unsafe" - "go.etcd.io/bbolt/errors" + berrors "go.etcd.io/bbolt/errors" "go.etcd.io/bbolt/internal/common" ) @@ -142,9 +143,9 @@ func (tx *Tx) OnCommit(fn func()) { func (tx *Tx) Commit() error { common.Assert(!tx.managed, "managed tx commit not allowed") if tx.db == nil { - return errors.ErrTxClosed + return berrors.ErrTxClosed } else if !tx.writable { - return errors.ErrTxNotWritable + return berrors.ErrTxNotWritable } // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. @@ -185,6 +186,10 @@ func (tx *Tx) Commit() error { // If the high water mark has moved up then attempt to grow the database. if tx.meta.Pgid() > opgid { + _ = errors.New("") + // gofail: var lackOfDiskSpace string + // tx.rollback() + // return errors.New(lackOfDiskSpace) if err := tx.db.grow(int(tx.meta.Pgid()+1) * tx.db.pageSize); err != nil { tx.rollback() return err @@ -254,7 +259,7 @@ func (tx *Tx) commitFreelist() error { func (tx *Tx) Rollback() error { common.Assert(!tx.managed, "managed tx rollback not allowed") if tx.db == nil { - return errors.ErrTxClosed + return berrors.ErrTxClosed } tx.nonPhysicalRollback() return nil @@ -561,13 +566,13 @@ func (tx *Tx) forEachPageInternal(pgidstack []common.Pgid, fn func(*common.Page, // This is only safe for concurrent use when used by a writable transaction. 
func (tx *Tx) Page(id int) (*common.PageInfo, error) { if tx.db == nil { - return nil, errors.ErrTxClosed + return nil, berrors.ErrTxClosed } else if common.Pgid(id) >= tx.meta.Pgid() { return nil, nil } if tx.db.freelist == nil { - return nil, errors.ErrFreePagesNotLoaded + return nil, berrors.ErrFreePagesNotLoaded } // Build the page info. From 81634ce08c986e1ffada717845f4ec32b2f68693 Mon Sep 17 00:00:00 2001 From: Ishan Tyagi Date: Tue, 1 Aug 2023 13:42:05 +0530 Subject: [PATCH 116/439] Documentation: added a doc for bbolt command-line. Signed-off-by: Ishan Tyagi --- cmd/bbolt/README.md | 382 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 382 insertions(+) create mode 100644 cmd/bbolt/README.md diff --git a/cmd/bbolt/README.md b/cmd/bbolt/README.md new file mode 100644 index 000000000..851f380ba --- /dev/null +++ b/cmd/bbolt/README.md @@ -0,0 +1,382 @@ +# Introduction to bbolt command line + +`bbolt` provides a command line utility for inspecting and manipulating bbolt database files. To install bbolt command-line please refer [here](https://github.com/etcd-io/bbolt#installing) + +**Note**: [etcd](https://github.com/etcd-io/etcd) uses bbolt as its backend storage engine. In this document, we take etcd as an example to demonstrate the usage of bbolt commands. Refer to [install etcd](https://etcd.io/docs/v3.5/install/) for installing etcd. + +1. Start a single member etcd cluster with this command below: + + ```bash + $etcd + ``` + + It will create a directory `default.etcd` by default under current working directory, and the directory structure will look like this: + + ```bash + $tree default.etcd + default.etcd + └── member + ├── snap + │   └── db // this is bbolt database file + └── wal + └── 0000000000000000-0000000000000000.wal + + 3 directories, 2 files + ``` + +2. Put some dummy data using [etcdctl](https://github.com/etcd-io/etcd/tree/main/etcdctl). +3. Stop the etcd instance. 
Note a bbolt database file can only be opened by one read-write process, because it is exclusively locked when opened. + +## Usage + +- `bbolt command [arguments]` + +### help + +- help will print information about that command + + ```bash + $bbolt help + + The commands are: + + bench run synthetic benchmark against bbolt + buckets print a list of buckets + check verifies integrity of bbolt database + compact copies a bbolt database, compacting it in the process + dump print a hexadecimal dump of a single page + get print the value of a key in a bucket + info print basic info + keys print a list of keys in a bucket + help print this screen + page print one or more pages in human readable format + pages print list of pages with their types + page-item print the key and value of a page item. + stats iterate over all pages and generate usage stats + surgery perform surgery on bbolt database + ``` + +- you can use `help` with any command: `bbolt [command] -h` for more information about command. + +## Analyse bbolt database with bbolt command line + +### info + +- `info` print the basic information about the given Bbolt database. +- usage: + `bbolt info [path to the bbolt database]` + + Example: + + ```bash + $bbolt info ~/default.etcd/member/snap/db + Page Size: 4096 + ``` + + - **note**: page size is given in bytes + - Bbolt database is using page size of 4KB + +### buckets + +- `buckets` print a list of buckets of Bbolt database is currently having. Find more information on buckets [here](https://github.com/etcd-io/bbolt#using-buckets) +- usage: + `bbolt buckets [path to the bbolt database]` + + Example: + + ```bash + $bbolt buckets ~/default.etcd/member/snap/db + alarm + auth + authRoles + authUsers + cluster + key + lease + members + members_removed + meta + ``` + + - It means when you start an etcd, it creates these `10` buckets using bbolt database. 
+ +### check + +- `check` opens a database at a given `[PATH]` and runs an exhaustive check to verify that all pages are accessible or are marked as freed. It also verifies that no pages are double referenced. +- usage: + `bbolt check [path to the bbolt database]` + + Example: + + ```bash + $bbolt check ~/default.etcd/member/snap/db + ok + ``` + + - It returns `ok` as our database file `db` is not corrupted. + +### stats + +- To gather essential statistics about the bbolt database: `stats` performs an extensive search of the database to track every page reference. It starts at the current meta page and recursively iterates through every accessible bucket. +- usage: + `bbolt stats [path to the bbolt database]` + + Example: + + ```bash + $bbolt stats ~/default.etcd/member/snap/db + Aggregate statistics for 10 buckets + + Page count statistics + Number of logical branch pages: 0 + Number of physical branch overflow pages: 0 + Number of logical leaf pages: 0 + Number of physical leaf overflow pages: 0 + Tree statistics + Number of keys/value pairs: 11 + Number of levels in B+tree: 1 + Page size utilization + Bytes allocated for physical branch pages: 0 + Bytes actually used for branch data: 0 (0%) + Bytes allocated for physical leaf pages: 0 + Bytes actually used for leaf data: 0 (0%) + Bucket statistics + Total number of buckets: 10 + Total number on inlined buckets: 10 (100%) + Bytes used for inlined buckets: 780 (0%) + ``` + +### pages + +- Pages prints a table of pages with their type (meta, leaf, branch, freelist). +- The `meta` will store the metadata information of database. +- The `leaf` and `branch` pages will show a key count in the `items` column. +- The `freelist` will show the number of free pages, which are free for writing again. +- The `overflow` column shows the number of blocks that the page spills over into. 
+- usage: + `bbolt pages [path to the bbolt database]` + + Example: + + ```bash + $bbolt pages ~/default.etcd/member/snap/db + ID TYPE ITEMS OVRFLW + ======== ========== ====== ====== + 0 meta 0 + 1 meta 0 + 2 free + 3 leaf 10 + 4 freelist 2 + 5 free + ``` + +### page + +- Page prints one or more pages in human readable format. +- usage: + + ```bash + bolt page [path to the bbolt database] pageid [pageid...] + or: bolt page --all [path to the bbolt database] + + Additional options include: + + --all + prints all pages (only skips pages that were considered successful overflow pages) + --format-value=auto|ascii-encoded|hex|bytes|redacted (default: auto) + prints values (on the leaf page) using the given format + ``` + + Example: + + ```bash + $bbolt page ~/default.etcd/member/snap/db 3 + Page ID: 3 + Page Type: leaf + Total Size: 4096 bytes + Overflow pages: 0 + Item Count: 10 + + "alarm": + "auth": + "authRoles": + "authUsers": + "cluster": + "key": + "lease": + "members": + "members_removed": + "meta": + ``` + + - It prints information of page `page ID: 3` + +### page-item + +- page-item prints a page item's key and value. +- usage: + + ```bash + bolt page-item [options] [path to the bbolt database] + Additional options include: + + --key-only + Print only the key + --value-only + Print only the value + --format + Output format. One of: auto|ascii-encoded|hex|bytes|redacted (default=ascii-encoded) + ``` + + Example: + + ```bash + $bbolt page-item --key-only ~/default.etcd/member/snap/db 3 7 + "members" + ``` + + - It returns the key as `--key-only` flag is passed of `pageID: 3` and `itemID: 7` + +### dump + +- Dump prints a hexadecimal dump of one or more given pages. +- usage: + `bolt dump [path to the bbolt database] [pageid...]` + +### keys + +- Print a list of keys in the given bucket. +- usage: + + ```bash + bolt keys [path to the bbolt database] [BucketName] + + Additional options include: + --format + Output format. 
One of: auto|ascii-encoded|hex|bytes|redacted (default=bytes) + ``` + + Example 1: + + ```bash + $bbolt keys ~/default.etcd/member/snap/db meta + confState + consistent_index + term + ``` + + - It list all the keys in bucket: `meta` + + Example 2: + + ```bash + $bbolt keys ~/default.etcd/member/snap/db members + 8e9e05c52164694d + ``` + + - It list all the keys in `members` bucket which is a `memberId` of etcd cluster member. + - In this case we are running a single member etcd cluster, hence only `one memberId` is present. If we would have run a `3` member etcd cluster then it will return a `3 memberId` as `3 cluster members` would have been present in `members` bucket. + +### get + +- Print the value of the given key in the given bucket. +- usage: + + ```bash + bolt get [path to the bbolt database] [BucketName] [Key] + + Additional options include: + --format + Output format. One of: auto|ascii-encoded|hex|bytes|redacted (default=bytes) + --parse-format + Input format (of key). One of: ascii-encoded|hex (default=ascii-encoded)" + ``` + + Example 1: + + ```bash + $bbolt get --format=hex ~/default.etcd/member/snap/db meta term + 0000000000000004 + ``` + + - It returns the value present in bucket: `meta` for key: `term` in hexadecimal format. + + Example 2: + + ```bash + $bbolt get ~/default.etcd/member/snap/db members 8e9e05c52164694d + {"id":10276657743932975437,"peerURLs":["http://localhost:2380"],"name":"default","clientURLs":["http://localhost:2379"]} + ``` + + - It returns the value present in bucket: `members` for key: `8e9e05c52164694d`. + +### compact + +- Compact opens a database at given `[Source Path]` and walks it recursively, copying keys as they are found from all buckets, to a newly created database at `[Destination Path]`. The original database is left untouched. 
+- usage: + + ```bash + bbolt compact [options] -o [Destination Path] [Source Path] + + Additional options include: + + -tx-max-size NUM + Specifies the maximum size of individual transactions. + Defaults to 64KB + ``` + + Example: + + ```bash + $bbolt compact -o ~/db.compact ~/default.etcd/member/snap/db + 16805888 -> 32768 bytes (gain=512.88x) + ``` + + - It will create a compacted database file: `db.compact` at given path. + +### bench + +- run synthetic benchmark against bbolt database. +- usage: + + ```bash + Usage: + -batch-size int + + -blockprofile string + + -count int + (default 1000) + -cpuprofile string + + -fill-percent float + (default 0.5) + -key-size int + (default 8) + -memprofile string + + -no-sync + + -path string + + -profile-mode string + (default "rw") + -read-mode string + (default "seq") + -value-size int + (default 32) + -work + + -write-mode string + (default "seq") + ``` + + Example: + + ```bash + $bbolt bench ~/default.etcd/member/snap/db -batch-size 400 -key-size 16 + # Write 68.523572ms (68.523µs/op) (14593 op/sec) + # Read 1.000015152s (11ns/op) (90909090 op/sec) + ``` + + - It runs a benchmark with batch size of `400` and with key size of `16` while for others parameters default value is taken. From 51a9dde31ee2fc077a47ae6cf0538db3bca564a9 Mon Sep 17 00:00:00 2001 From: John Jiang Date: Wed, 2 Aug 2023 00:26:37 +0800 Subject: [PATCH 117/439] gitignore: ignore .DS_Store on macOS Signed-off-by: John Jiang --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index fdc9164cc..ed4d259db 100644 --- a/.gitignore +++ b/.gitignore @@ -8,4 +8,5 @@ cover-*.out *.iml /bbolt /cmd/bbolt/bbolt +.DS_Store From 84572b19e4ca7c558a8e58f6e5b8031cf0395d9a Mon Sep 17 00:00:00 2001 From: James Blair Date: Sun, 6 Aug 2023 08:44:54 +1200 Subject: [PATCH 118/439] Updated go to latest patch release 1.19.12. 
Signed-off-by: James Blair --- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/tests.yaml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index 09820f3a4..44c0309d2 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -11,7 +11,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.11" + go-version: "1.19.12" - run: | make gofail-enable make test-failpoint diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 16ad7e664..b11e4c6fa 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -15,7 +15,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.11" + go-version: "1.19.12" - run: make fmt - env: TARGET: ${{ matrix.target }} @@ -66,7 +66,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.11" + go-version: "1.19.12" - run: make fmt - env: TARGET: ${{ matrix.target }} @@ -94,6 +94,6 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.11" + go-version: "1.19.12" - run: make coverage From 3ed248c4eba0b80c26d96916ae4af4d3e52673f7 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 27 Jun 2023 16:48:51 +0100 Subject: [PATCH 119/439] Improve CreateBucketIfNotExists to avoid double searching the same key Benchmark with this change: BenchmarkBucket_CreateBucketIfNotExists-10 123 9573035 ns/op 17930 B/op 37 allocs/op Benchmark with old implementnation: BenchmarkBucket_CreateBucketIfNotExists-10 121 10474415 ns/op 18147 B/op 46 allocs/op Signed-off-by: Benjamin Wang --- bucket.go | 54 ++++++++++++++++++++++++++++++++++++++++++++------ bucket_test.go | 28 ++++++++++++++++++++++++++ 2 files changed, 76 insertions(+), 6 deletions(-) diff --git a/bucket.go b/bucket.go index 6a3c48ae8..f8741dcb6 100644 --- 
a/bucket.go +++ b/bucket.go @@ -190,13 +190,55 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { // Returns an error if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { - child, err := b.CreateBucket(key) - if err == errors.ErrBucketExists { - return b.Bucket(key), nil - } else if err != nil { - return nil, err + if b.tx.db == nil { + return nil, errors.ErrTxClosed + } else if !b.tx.writable { + return nil, errors.ErrTxNotWritable + } else if len(key) == 0 { + return nil, errors.ErrBucketNameRequired + } + + if b.buckets != nil { + if child := b.buckets[string(key)]; child != nil { + return child, nil + } + } + + // Move cursor to correct position. + c := b.Cursor() + k, v, flags := c.seek(key) + + // Return an error if there is an existing non-bucket key. + if bytes.Equal(key, k) { + if (flags & common.BucketLeafFlag) != 0 { + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(key)] = child + } + + return child, nil + } + return nil, errors.ErrIncompatibleValue + } + + // Create empty, inline bucket. + var bucket = Bucket{ + InBucket: &common.InBucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, } - return child, nil + var value = bucket.write() + + // Insert into node. + key = cloneBytes(key) + c.node().put(key, key, value, 0, common.BucketLeafFlag) + + // Since subbuckets are not allowed on inline buckets, we need to + // dereference the inline page, if it exists. This will cause the bucket + // to be treated as a regular, non-inline bucket for the rest of the tx. + b.page = nil + + return b.Bucket(key), nil } // DeleteBucket deletes a bucket at the given key. 
diff --git a/bucket_test.go b/bucket_test.go index 21becb330..b60a1b912 100644 --- a/bucket_test.go +++ b/bucket_test.go @@ -1883,6 +1883,34 @@ func TestBucket_Delete_Quick(t *testing.T) { } } +func BenchmarkBucket_CreateBucketIfNotExists(b *testing.B) { + db := btesting.MustCreateDB(b) + defer db.MustClose() + + const bucketCount = 1_000_000 + + err := db.Update(func(tx *bolt.Tx) error { + for i := 0; i < bucketCount; i++ { + bucketName := fmt.Sprintf("bucket_%d", i) + _, berr := tx.CreateBucket([]byte(bucketName)) + require.NoError(b, berr) + } + return nil + }) + require.NoError(b, err) + + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + err := db.Update(func(tx *bolt.Tx) error { + _, berr := tx.CreateBucketIfNotExists([]byte("bucket_100")) + return berr + }) + require.NoError(b, err) + } +} + func ExampleBucket_Put() { // Open the database. db, err := bolt.Open(tempfile(), 0600, nil) From 2bbf2e9972a816443d130ebc2093c350903f9560 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Aug 2023 14:37:14 +0000 Subject: [PATCH 120/439] build(deps): Bump golang.org/x/sys from 0.10.0 to 0.11.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.10.0 to 0.11.0. - [Commits](https://github.com/golang/sys/compare/v0.10.0...v0.11.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6b86a1680..4d9ce5e5a 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/stretchr/testify v1.8.4 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.3.0 - golang.org/x/sys v0.10.0 + golang.org/x/sys v0.11.0 ) require ( diff --git a/go.sum b/go.sum index ea91fc23b..1056a1077 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 71a59caf31ca42c3deb8967ceee0a4804d220bf4 Mon Sep 17 00:00:00 2001 From: Evgenii Stratonikov Date: Wed, 9 Aug 2023 16:28:14 +0300 Subject: [PATCH 121/439] bucket: allow to allocate key on stack in Put() As per `go build -gcflags -m ./... 
2>&1`: Old behaviour: ``` ./bucket.go:148:31: leaking param: key ./bucket.go:192:42: leaking param: key ./bucket.go:271:22: leaking param: key ``` Now: ``` ./bucket.go:148:31: key does not escape ./bucket.go:192:42: key does not escape ./bucket.go:271:22: key does not escape ``` Signed-off-by: Evgenii Stratonikov --- bucket.go | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/bucket.go b/bucket.go index f8741dcb6..f9f23812f 100644 --- a/bucket.go +++ b/bucket.go @@ -175,15 +175,17 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { var value = bucket.write() // Insert into node. - key = cloneBytes(key) - c.node().put(key, key, value, 0, common.BucketLeafFlag) + // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent + // it from being marked as leaking, and accordingly cannot be allocated on stack. + newKey := cloneBytes(key) + c.node().put(newKey, newKey, value, 0, common.BucketLeafFlag) // Since subbuckets are not allowed on inline buckets, we need to // dereference the inline page, if it exists. This will cause the bucket // to be treated as a regular, non-inline bucket for the rest of the tx. b.page = nil - return b.Bucket(key), nil + return b.Bucket(newKey), nil } // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. @@ -230,15 +232,17 @@ func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { var value = bucket.write() // Insert into node. - key = cloneBytes(key) - c.node().put(key, key, value, 0, common.BucketLeafFlag) + // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent + // it from being marked as leaking, and accordingly cannot be allocated on stack. + newKey := cloneBytes(key) + c.node().put(newKey, newKey, value, 0, common.BucketLeafFlag) // Since subbuckets are not allowed on inline buckets, we need to // dereference the inline page, if it exists. 
This will cause the bucket // to be treated as a regular, non-inline bucket for the rest of the tx. b.page = nil - return b.Bucket(key), nil + return b.Bucket(newKey), nil } // DeleteBucket deletes a bucket at the given key. @@ -333,8 +337,10 @@ func (b *Bucket) Put(key []byte, value []byte) error { } // Insert into node. - key = cloneBytes(key) - c.node().put(key, key, value, 0, 0) + // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent + // it from being marked as leaking, and accordingly cannot be allocated on stack. + newKey := cloneBytes(key) + c.node().put(newKey, newKey, value, 0, 0) return nil } From a05608cf2949ab1e46dc05b8f1ea02564ef0d464 Mon Sep 17 00:00:00 2001 From: Ishan Tyagi Date: Fri, 11 Aug 2023 18:16:31 +0530 Subject: [PATCH 122/439] Added bbolt command line version flag to get runtime information. Signed-off-by: Ishan Tyagi --- cmd/bbolt/README.md | 16 ++++++++++++++++ cmd/bbolt/command_root.go | 1 + cmd/bbolt/command_version.go | 24 ++++++++++++++++++++++++ cmd/bbolt/main.go | 1 + version/version.go | 6 ++++++ 5 files changed, 48 insertions(+) create mode 100644 cmd/bbolt/command_version.go create mode 100644 version/version.go diff --git a/cmd/bbolt/README.md b/cmd/bbolt/README.md index 851f380ba..8c7785456 100644 --- a/cmd/bbolt/README.md +++ b/cmd/bbolt/README.md @@ -40,6 +40,7 @@ The commands are: + version prints the current version of bbolt bench run synthetic benchmark against bbolt buckets print a list of buckets check verifies integrity of bbolt database @@ -60,6 +61,21 @@ ## Analyse bbolt database with bbolt command line +### version + +- `version` print the current version information of bbolt command-line. +- usage: + `bbolt version` + + Example: + + ```bash + $bbolt version + bbolt version: 1.3.7 + Go Version: go1.20.7 + Go OS/Arch: darwin/arm64 + ``` + ### info - `info` print the basic information about the given Bbolt database. 
diff --git a/cmd/bbolt/command_root.go b/cmd/bbolt/command_root.go index b960df898..31a174080 100644 --- a/cmd/bbolt/command_root.go +++ b/cmd/bbolt/command_root.go @@ -17,6 +17,7 @@ func NewRootCommand() *cobra.Command { } rootCmd.AddCommand( + newVersionCobraCommand(), newSurgeryCobraCommand(), ) diff --git a/cmd/bbolt/command_version.go b/cmd/bbolt/command_version.go new file mode 100644 index 000000000..4434c515f --- /dev/null +++ b/cmd/bbolt/command_version.go @@ -0,0 +1,24 @@ +package main + +import ( + "fmt" + "runtime" + + "github.com/spf13/cobra" + "go.etcd.io/bbolt/version" +) + +func newVersionCobraCommand() *cobra.Command { + versionCmd := &cobra.Command{ + Use: "version", + Short: "print the current version of bbolt", + Long: "print the current version of bbolt", + Run: func(cmd *cobra.Command, args []string) { + fmt.Printf("bbolt Version: %s\n", version.Version) + fmt.Printf("Go Version: %s\n", runtime.Version()) + fmt.Printf("Go OS/Arch: %s/%s\n", runtime.GOOS, runtime.GOARCH) + }, + } + + return versionCmd +} diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 364b1fe21..7115471c0 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -156,6 +156,7 @@ Usage: The commands are: + version print the current version of bbolt bench run synthetic benchmark against bbolt buckets print a list of buckets check verifies integrity of bbolt database diff --git a/version/version.go b/version/version.go new file mode 100644 index 000000000..b0387d599 --- /dev/null +++ b/version/version.go @@ -0,0 +1,6 @@ +package version + +var ( + // Version shows the last bbolt binary version released. 
+ Version = "1.3.7" +) From 393e10e840a35089ae0496762fd8a72adbeb6ab6 Mon Sep 17 00:00:00 2001 From: guoguangwu Date: Fri, 18 Aug 2023 22:02:56 +0800 Subject: [PATCH 123/439] fix Continuing typo Signed-off-by: guoguangwu --- cmd/bbolt/page_command.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/bbolt/page_command.go b/cmd/bbolt/page_command.go index c608d8460..7a6ec5b9b 100644 --- a/cmd/bbolt/page_command.go +++ b/cmd/bbolt/page_command.go @@ -71,7 +71,7 @@ func (cmd *pageCommand) printPages(pageIDs []uint64, path string, formatValue *s } _, err2 := cmd.printPage(path, pageID, *formatValue) if err2 != nil { - fmt.Fprintf(cmd.Stdout, "Prining page %d failed: %s. Continuuing...\n", pageID, err2) + fmt.Fprintf(cmd.Stdout, "Prining page %d failed: %s. Continuing...\n", pageID, err2) } } } @@ -90,7 +90,7 @@ func (cmd *pageCommand) printAllPages(path string, formatValue *string) { } overflow, err2 := cmd.printPage(path, pageID, *formatValue) if err2 != nil { - fmt.Fprintf(cmd.Stdout, "Prining page %d failed: %s. Continuuing...\n", pageID, err2) + fmt.Fprintf(cmd.Stdout, "Prining page %d failed: %s. Continuing...\n", pageID, err2) pageID++ } else { pageID += uint64(overflow) + 1 From ed506da9fde36d2b3995b8c3210610005ce0a3cf Mon Sep 17 00:00:00 2001 From: James Blair Date: Sat, 19 Aug 2023 21:25:39 +1200 Subject: [PATCH 124/439] Update to golang 1.20 minor release. 
Signed-off-by: James Blair --- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/tests.yaml | 6 +++--- go.mod | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index 44c0309d2..c6c9fc4f7 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -11,7 +11,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.12" + go-version: "1.20.7" - run: | make gofail-enable make test-failpoint diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index b11e4c6fa..7214c7b10 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -15,7 +15,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.12" + go-version: "1.20.7" - run: make fmt - env: TARGET: ${{ matrix.target }} @@ -66,7 +66,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.12" + go-version: "1.20.7" - run: make fmt - env: TARGET: ${{ matrix.target }} @@ -94,6 +94,6 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: "1.19.12" + go-version: "1.20.7" - run: make coverage diff --git a/go.mod b/go.mod index 4d9ce5e5a..c1e93919b 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module go.etcd.io/bbolt -go 1.19 +go 1.20 require ( github.com/spf13/cobra v1.7.0 From 4154f9c642333237ac659a560a30bfb37802e4c2 Mon Sep 17 00:00:00 2001 From: James Blair Date: Sat, 19 Aug 2023 22:16:51 +1200 Subject: [PATCH 125/439] Remove deprecated rand.Seed calls. From go 1.20 if Seed is not called, the generator will be seeded randomly at program startup. 
Signed-off-by: James Blair --- cmd/bbolt/main_test.go | 1 - freelist_test.go | 1 - simulation_test.go | 2 -- 3 files changed, 4 deletions(-) diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index dddacd8d0..dbc7de762 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -461,7 +461,6 @@ func TestCompactCommand_Run(t *testing.T) { if err := binary.Read(crypto.Reader, binary.BigEndian, &s); err != nil { t.Fatal(err) } - rand.Seed(s) dstdb := btesting.MustCreateDB(t) dstdb.Close() diff --git a/freelist_test.go b/freelist_test.go index 0989dadcd..7297055b4 100644 --- a/freelist_test.go +++ b/freelist_test.go @@ -322,7 +322,6 @@ func benchmark_FreelistRelease(b *testing.B, size int) { } func randomPgids(n int) []common.Pgid { - rand.Seed(42) pgids := make(common.Pgids, n) for i := range pgids { pgids[i] = common.Pgid(rand.Int63()) diff --git a/simulation_test.go b/simulation_test.go index 037b7183c..6f4d5b236 100644 --- a/simulation_test.go +++ b/simulation_test.go @@ -35,8 +35,6 @@ func testSimulate(t *testing.T, openOption *bolt.Options, round, threadCount, pa t.Skip("skipping test in short mode.") } - rand.Seed(int64(qseed)) - // A list of operations that readers and writers can perform. var readerHandlers = []simulateHandler{simulateGetHandler} var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler} From ca13fd8a2dbb8bb17282b7d8574c8ac2635372c9 Mon Sep 17 00:00:00 2001 From: James Blair Date: Sat, 19 Aug 2023 22:17:45 +1200 Subject: [PATCH 126/439] Use crypto/rand.Read instead of deprecated math/rand.Read. 
Signed-off-by: James Blair --- manydbs_test.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/manydbs_test.go b/manydbs_test.go index 48bc21171..595c81b28 100644 --- a/manydbs_test.go +++ b/manydbs_test.go @@ -1,8 +1,8 @@ package bbolt import ( + "crypto/rand" "fmt" - "math/rand" "os" "path/filepath" "testing" @@ -46,7 +46,10 @@ func createAndPutKeys(t *testing.T) { } var key [16]byte - rand.Read(key[:]) + _, rerr := rand.Read(key[:]) + if rerr != nil { + return rerr + } if err := nodes.Put(key[:], nil); err != nil { return err } From 41c23856b2593985e167feb50866c14856a8e400 Mon Sep 17 00:00:00 2001 From: Jeremy Date: Mon, 21 Aug 2023 19:11:34 +0800 Subject: [PATCH 127/439] delete unnecessary error info Signed-off-by: Jeremy --- errors.go | 6 ------ errors/errors.go | 4 ---- 2 files changed, 10 deletions(-) diff --git a/errors.go b/errors.go index 28ca48d84..4d7cd8001 100644 --- a/errors.go +++ b/errors.go @@ -10,12 +10,6 @@ var ( // Deprecated: Use the error variables defined in the bbolt/errors package. ErrDatabaseNotOpen = errors.ErrDatabaseNotOpen - // ErrDatabaseOpen is returned when opening a database that is - // already open. - // - // Deprecated: Use the error variables defined in the bbolt/errors package. - ErrDatabaseOpen = errors.ErrDatabaseOpen - // ErrInvalid is returned when both meta pages on a database are invalid. // This typically occurs when a file is not a bolt database. // diff --git a/errors/errors.go b/errors/errors.go index 88fdc31ac..9598cbd8a 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -10,10 +10,6 @@ var ( // is opened or after it is closed. ErrDatabaseNotOpen = errors.New("database not open") - // ErrDatabaseOpen is returned when opening a database that is - // already open. - ErrDatabaseOpen = errors.New("database already open") - // ErrInvalid is returned when both meta pages on a database are invalid. // This typically occurs when a file is not a bolt database. 
ErrInvalid = errors.New("invalid database") From aafd6b26fd0b74a1227c1f1efba2f87c144111c3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Aug 2023 14:46:26 +0000 Subject: [PATCH 128/439] build(deps): Bump golangci/golangci-lint-action from 3.6.0 to 3.7.0 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 3.6.0 to 3.7.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/639cd343e1d3b897ff35927a75193d57cfcba299...3a919529898de77ec3da873e3063ca4b10e7f5cc) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/tests.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 7214c7b10..ca34b44be 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -45,7 +45,7 @@ jobs: ;; esac - name: golangci-lint - uses: golangci/golangci-lint-action@639cd343e1d3b897ff35927a75193d57cfcba299 # v3.6.0 + uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 test-windows: strategy: @@ -82,7 +82,7 @@ jobs: esac shell: bash - name: golangci-lint - uses: golangci/golangci-lint-action@639cd343e1d3b897ff35927a75193d57cfcba299 # v3.6.0 + uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 coverage: needs: ["test-linux", "test-windows"] From 3c56d130e0a143544419abe315f20f125a8962bc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Sep 2023 14:13:38 +0000 Subject: [PATCH 129/439] build(deps): Bump golang.org/x/sys from 0.11.0 to 0.12.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.11.0 to 
0.12.0. - [Commits](https://github.com/golang/sys/compare/v0.11.0...v0.12.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c1e93919b..50262c34e 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/stretchr/testify v1.8.4 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.3.0 - golang.org/x/sys v0.11.0 + golang.org/x/sys v0.12.0 ) require ( diff --git a/go.sum b/go.sum index 1056a1077..e073e6439 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= -golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 686a2a3551d4e2216917a2c0877d84e05cfe2869 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Sep 2023 15:03:22 +0000 Subject: [PATCH 130/439] build(deps): Bump actions/checkout from 3 to 4 Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4. 
- [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/tests.yaml | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index c6c9fc4f7..02074ba45 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -8,7 +8,7 @@ jobs: os: [ubuntu-latest] runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-go@v4 with: go-version: "1.20.7" diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index ca34b44be..6e2743fdb 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -12,7 +12,7 @@ jobs: - linux-amd64-unit-test-4-cpu-race runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-go@v4 with: go-version: "1.20.7" @@ -63,7 +63,7 @@ jobs: #- windows-amd64-unit-test-4-cpu-race runs-on: windows-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-go@v4 with: go-version: "1.20.7" @@ -91,7 +91,7 @@ jobs: os: [ubuntu-latest, windows-latest] runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-go@v4 with: go-version: "1.20.7" From 6340c70371bc539c8c8bda2437d17bec65fe7f6f Mon Sep 17 00:00:00 2001 From: Nuno Cruces Date: Wed, 13 Sep 2023 14:22:30 +0100 Subject: [PATCH 131/439] Documentation: remove note that Options.Timeout is Linux/macOS only This seems to work on all platforms. E.g. 
on Windows it seems to work since 1cb787ee7bfebef44baf98158967a37735e65790. Signed-off-by: Nuno Cruces --- db.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/db.go b/db.go index 0d35861d8..65ef209df 100644 --- a/db.go +++ b/db.go @@ -1208,8 +1208,7 @@ func (db *DB) freepages() []common.Pgid { // Options represents the options that can be set when opening a database. type Options struct { // Timeout is the amount of time to wait to obtain a file lock. - // When set to zero it will wait indefinitely. This option is only - // available on Darwin and Linux. + // When set to zero it will wait indefinitely. Timeout time.Duration // Sets the DB.NoGrowSync flag before memory mapping the file. From 18ebb974510c2c48f905fd4721f07a20173c6962 Mon Sep 17 00:00:00 2001 From: Allen Ray Date: Wed, 13 Sep 2023 11:00:29 -0400 Subject: [PATCH 132/439] Update to go1.21.1 Signed-off-by: Allen Ray --- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/tests.yaml | 6 +++--- cmd/bbolt/README.md | 2 +- freelist.go | 6 ++---- go.mod | 2 +- internal/common/page.go | 12 +++++------- internal/common/unsafe.go | 12 ------------ 7 files changed, 13 insertions(+), 29 deletions(-) diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index 02074ba45..41da8afe5 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -11,7 +11,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-go@v4 with: - go-version: "1.20.7" + go-version: "1.21.1" - run: | make gofail-enable make test-failpoint diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index 6e2743fdb..8b4d41e6b 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -15,7 +15,7 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-go@v4 with: - go-version: "1.20.7" + go-version: "1.21.1" - run: make fmt - env: TARGET: ${{ matrix.target }} @@ -66,7 +66,7 @@ jobs: - uses: 
actions/checkout@v4 - uses: actions/setup-go@v4 with: - go-version: "1.20.7" + go-version: "1.21.1" - run: make fmt - env: TARGET: ${{ matrix.target }} @@ -94,6 +94,6 @@ jobs: - uses: actions/checkout@v4 - uses: actions/setup-go@v4 with: - go-version: "1.20.7" + go-version: "1.21.1" - run: make coverage diff --git a/cmd/bbolt/README.md b/cmd/bbolt/README.md index 8c7785456..a3e7c169f 100644 --- a/cmd/bbolt/README.md +++ b/cmd/bbolt/README.md @@ -72,7 +72,7 @@ ```bash $bbolt version bbolt version: 1.3.7 - Go Version: go1.20.7 + Go Version: go1.21.1 Go OS/Arch: darwin/arm64 ``` diff --git a/freelist.go b/freelist.go index 2b09e7626..29ac16c72 100644 --- a/freelist.go +++ b/freelist.go @@ -311,15 +311,13 @@ func (f *freelist) write(p *common.Page) error { p.SetCount(uint16(l)) } else if l < 0xFFFF { p.SetCount(uint16(l)) - var ids []common.Pgid data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - common.UnsafeSlice(unsafe.Pointer(&ids), data, l) + ids := unsafe.Slice((*common.Pgid)(data), l) f.copyall(ids) } else { p.SetCount(0xFFFF) - var ids []common.Pgid data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - common.UnsafeSlice(unsafe.Pointer(&ids), data, l+1) + ids := unsafe.Slice((*common.Pgid)(data), l+1) ids[0] = common.Pgid(l) f.copyall(ids[1:]) } diff --git a/go.mod b/go.mod index 50262c34e..7445c9559 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module go.etcd.io/bbolt -go 1.20 +go 1.21 require ( github.com/spf13/cobra v1.7.0 diff --git a/internal/common/page.go b/internal/common/page.go index 808484c19..ee808967c 100644 --- a/internal/common/page.go +++ b/internal/common/page.go @@ -13,6 +13,7 @@ const MinKeysPerPage = 2 const BranchPageElementSize = unsafe.Sizeof(branchPageElement{}) const LeafPageElementSize = unsafe.Sizeof(leafPageElement{}) +const pgidSize = unsafe.Sizeof(Pgid(0)) const ( BranchPageFlag = 0x01 @@ -99,9 +100,8 @@ func (p *Page) LeafPageElements() []leafPageElement { if p.count == 0 { return nil } - var elems 
[]leafPageElement data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - UnsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) + elems := unsafe.Slice((*leafPageElement)(data), int(p.count)) return elems } @@ -116,9 +116,8 @@ func (p *Page) BranchPageElements() []branchPageElement { if p.count == 0 { return nil } - var elems []branchPageElement data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - UnsafeSlice(unsafe.Pointer(&elems), data, int(p.count)) + elems := unsafe.Slice((*branchPageElement)(data), int(p.count)) return elems } @@ -149,9 +148,8 @@ func (p *Page) FreelistPageIds() []Pgid { return nil } - var ids []Pgid - data := UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(ids[0]), idx) - UnsafeSlice(unsafe.Pointer(&ids), data, count) + data := UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), pgidSize, idx) + ids := unsafe.Slice((*Pgid)(data), count) return ids } diff --git a/internal/common/unsafe.go b/internal/common/unsafe.go index c1970ba3c..9b77dd7b2 100644 --- a/internal/common/unsafe.go +++ b/internal/common/unsafe.go @@ -1,7 +1,6 @@ package common import ( - "reflect" "unsafe" ) @@ -26,14 +25,3 @@ func UnsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { // all), so this is believed to be correct. return (*[pageMaxAllocSize]byte)(UnsafeAdd(base, offset))[i:j:j] } - -// UnsafeSlice modifies the data, len, and cap of a slice variable pointed to by -// the slice parameter. This helper should be used over other direct -// manipulation of reflect.SliceHeader to prevent misuse, namely, converting -// from reflect.SliceHeader to a Go slice type. -func UnsafeSlice(slice, data unsafe.Pointer, len int) { - s := (*reflect.SliceHeader)(slice) - s.Data = uintptr(data) - s.Cap = len - s.Len = len -} From e1400df84a16fb40366deac6a7d2714a3e1ffe45 Mon Sep 17 00:00:00 2001 From: James Blair Date: Mon, 18 Sep 2023 21:21:21 +1200 Subject: [PATCH 133/439] Add OWNERS file for bbolt. 
Signed-off-by: James Blair --- OWNERS | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 OWNERS diff --git a/OWNERS b/OWNERS new file mode 100644 index 000000000..412b742ed --- /dev/null +++ b/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - ahrtr # Benjamin Wang + - mitake # Hitoshi Mitake + - serathius # Marek Siarkowicz + - ptabor # Piotr Tabor + - spzala # Sahdev Zala From f3bb36466ac65b51d831a6eabcd78dde5f57feb7 Mon Sep 17 00:00:00 2001 From: liwei Date: Thu, 21 Sep 2023 09:13:58 +0800 Subject: [PATCH 134/439] feat: adpater Android 14 Signed-off-by: liwei --- bolt_android.go | 90 +++++++++++++++++++++++++++++++++++++++++++++++++ bolt_unix.go | 4 +-- 2 files changed, 92 insertions(+), 2 deletions(-) create mode 100644 bolt_android.go diff --git a/bolt_android.go b/bolt_android.go new file mode 100644 index 000000000..11890f0d7 --- /dev/null +++ b/bolt_android.go @@ -0,0 +1,90 @@ +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. +func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } + for { + // Attempt to obtain an exclusive lock. + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. 
+func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + err = unix.Madvise(b, syscall.MADV_RANDOM) + if err != nil && err != syscall.ENOSYS { + // Ignore not implemented error in kernel because it still works. + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. + err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/bolt_unix.go b/bolt_unix.go index deb7e7ca2..aaa48d241 100644 --- a/bolt_unix.go +++ b/bolt_unix.go @@ -1,5 +1,5 @@ -//go:build !windows && !plan9 && !solaris && !aix -// +build !windows,!plan9,!solaris,!aix +//go:build !windows && !plan9 && !solaris && !aix && !android +// +build !windows,!plan9,!solaris,!aix,!android package bbolt From 5d246f221c53d8846d1d0a76be05e719f9e2071d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 14:12:37 +0000 Subject: [PATCH 135/439] build(deps): Bump golang.org/x/sys from 0.12.0 to 0.13.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.12.0 to 0.13.0. 
- [Commits](https://github.com/golang/sys/compare/v0.12.0...v0.13.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7445c9559..928797f08 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/stretchr/testify v1.8.4 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.3.0 - golang.org/x/sys v0.12.0 + golang.org/x/sys v0.13.0 ) require ( diff --git a/go.sum b/go.sum index e073e6439..f39b4026e 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From eb6ec9879eb0270a5878eb7dc68730b976e00d8d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 15:29:34 +0000 Subject: [PATCH 136/439] build(deps): Bump golang.org/x/sync from 0.3.0 to 0.4.0 Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.3.0 to 0.4.0. 
- [Commits](https://github.com/golang/sync/compare/v0.3.0...v0.4.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 928797f08..d6fc2fbef 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.4 go.etcd.io/gofail v0.1.0 - golang.org/x/sync v0.3.0 + golang.org/x/sync v0.4.0 golang.org/x/sys v0.13.0 ) diff --git a/go.sum b/go.sum index f39b4026e..68124a57b 100644 --- a/go.sum +++ b/go.sum @@ -14,8 +14,8 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= +golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From a812bcb85c059911a41e7d9baebef76e28a482e6 Mon Sep 17 00:00:00 2001 From: James Blair Date: Wed, 18 Oct 2023 17:36:36 +1300 Subject: [PATCH 137/439] Complete migration to owners file. 
Signed-off-by: James Blair --- MAINTAINERS | 19 ------------------- 1 file changed, 19 deletions(-) delete mode 100644 MAINTAINERS diff --git a/MAINTAINERS b/MAINTAINERS deleted file mode 100644 index 56342b5b7..000000000 --- a/MAINTAINERS +++ /dev/null @@ -1,19 +0,0 @@ -# The official list of maintainers and reviewers for the project maintenance. -# -# Refer to the GOVERNANCE.md in etcd repository for description of the roles. -# -# Names should be added to this file like so: -# Individual's name (@GITHUB_HANDLE) pkg:* -# Individual's name (@GITHUB_HANDLE) pkg:* -# -# Please keep the list sorted. - -# MAINTAINERS -Benjamin Wang (ahrtr@) #owner/#domain-expert -Hitoshi Mitake (@mitake) -Marek Siarkowicz (@serathius) -Piotr Tabor (@ptabor) #owner/#domain-expert -Sahdev Zala (@spzala) - -# REVIEWERS - From 31914ea6e813f38b511f37fb6fa14a12b1f15499 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Mon, 17 Jul 2023 14:33:02 +0100 Subject: [PATCH 138/439] cmd: add meta page related surgery commands Two command: - surgery meta validate - surgery meta update Signed-off-by: Benjamin Wang --- cmd/bbolt/command_surgery.go | 21 +- cmd/bbolt/command_surgery_meta.go | 292 +++++++++++++++++++++++++ cmd/bbolt/command_surgery_meta_test.go | 126 +++++++++++ internal/common/meta.go | 8 + internal/common/types.go | 2 +- 5 files changed, 442 insertions(+), 7 deletions(-) create mode 100644 cmd/bbolt/command_surgery_meta.go create mode 100644 cmd/bbolt/command_surgery_meta_test.go diff --git a/cmd/bbolt/command_surgery.go b/cmd/bbolt/command_surgery.go index 15fa48cb9..129ae459d 100644 --- a/cmd/bbolt/command_surgery.go +++ b/cmd/bbolt/command_surgery.go @@ -28,6 +28,7 @@ func newSurgeryCobraCommand() *cobra.Command { surgeryCmd.AddCommand(newSurgeryClearPageCommand()) surgeryCmd.AddCommand(newSurgeryClearPageElementsCommand()) surgeryCmd.AddCommand(newSurgeryFreelistCommand()) + surgeryCmd.AddCommand(newSurgeryMetaCommand()) return surgeryCmd } @@ -311,15 +312,23 @@ func 
surgeryClearPageElementFunc(srcDBPath string, cfg surgeryClearPageElementsO } func readMetaPage(path string) (*common.Meta, error) { - _, activeMetaPageId, err := guts_cli.GetRootPage(path) + pageSize, _, err := guts_cli.ReadPageAndHWMSize(path) if err != nil { - return nil, fmt.Errorf("read root page failed: %w", err) + return nil, fmt.Errorf("read Page size failed: %w", err) } - _, buf, err := guts_cli.ReadPage(path, uint64(activeMetaPageId)) - if err != nil { - return nil, fmt.Errorf("read active mage page failed: %w", err) + + m := make([]*common.Meta, 2) + for i := 0; i < 2; i++ { + m[i], _, err = ReadMetaPageAt(path, uint32(i), uint32(pageSize)) + if err != nil { + return nil, fmt.Errorf("read meta page %d failed: %w", i, err) + } + } + + if m[0].Txid() > m[1].Txid() { + return m[0], nil } - return common.LoadPageMeta(buf), nil + return m[1], nil } func checkSourceDBPath(srcPath string) (os.FileInfo, error) { diff --git a/cmd/bbolt/command_surgery_meta.go b/cmd/bbolt/command_surgery_meta.go new file mode 100644 index 000000000..36cf33ce5 --- /dev/null +++ b/cmd/bbolt/command_surgery_meta.go @@ -0,0 +1,292 @@ +package main + +import ( + "errors" + "fmt" + "io" + "os" + "strconv" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "go.etcd.io/bbolt/internal/common" +) + +const ( + metaFieldPageSize = "pageSize" + metaFieldRoot = "root" + metaFieldFreelist = "freelist" + metaFieldPgid = "pgid" +) + +func newSurgeryMetaCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "meta ", + Short: "meta page related surgery commands", + } + + cmd.AddCommand(newSurgeryMetaValidateCommand()) + cmd.AddCommand(newSurgeryMetaUpdateCommand()) + + return cmd +} + +func newSurgeryMetaValidateCommand() *cobra.Command { + metaValidateCmd := &cobra.Command{ + Use: "validate [options]", + Short: "Validate both meta pages", + Args: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("db file path not provided") + } + if 
len(args) > 1 { + return errors.New("too many arguments") + } + return nil + }, + RunE: func(cmd *cobra.Command, args []string) error { + return surgeryMetaValidateFunc(args[0]) + }, + } + return metaValidateCmd +} + +func surgeryMetaValidateFunc(srcDBPath string) error { + if _, err := checkSourceDBPath(srcDBPath); err != nil { + return err + } + + var pageSize uint32 + + for i := 0; i <= 1; i++ { + m, _, err := ReadMetaPageAt(srcDBPath, uint32(i), pageSize) + if err != nil { + return fmt.Errorf("read meta page %d failed: %w", i, err) + } + if mValidateErr := m.Validate(); mValidateErr != nil { + fmt.Fprintf(os.Stdout, "WARNING: The meta page %d isn't valid: %v!\n", i, mValidateErr) + } else { + fmt.Fprintf(os.Stdout, "The meta page %d is valid!\n", i) + } + + pageSize = m.PageSize() + } + + return nil +} + +type surgeryMetaUpdateOptions struct { + surgeryBaseOptions + fields []string + metaPageId uint32 +} + +var allowedMetaUpdateFields = map[string]struct{}{ + metaFieldPageSize: {}, + metaFieldRoot: {}, + metaFieldFreelist: {}, + metaFieldPgid: {}, +} + +// AddFlags sets the flags for `meta update` command. 
+// Example: --fields root:16,freelist:8 --fields pgid:128 --fields txid:1234 +// Result: []string{"root:16", "freelist:8", "pgid:128", "txid:1234"} +func (o *surgeryMetaUpdateOptions) AddFlags(fs *pflag.FlagSet) { + o.surgeryBaseOptions.AddFlags(fs) + fs.StringSliceVarP(&o.fields, "fields", "", o.fields, "comma separated list of fields (supported fields: pageSize, root, freelist, pgid and txid) to be updated, and each item is a colon-separated key-value pair") + fs.Uint32VarP(&o.metaPageId, "meta-page", "", o.metaPageId, "the meta page ID to operate on, valid values are 0 and 1") +} + +func (o *surgeryMetaUpdateOptions) Validate() error { + if err := o.surgeryBaseOptions.Validate(); err != nil { + return err + } + + if o.metaPageId > 1 { + return fmt.Errorf("invalid meta page id: %d", o.metaPageId) + } + + for _, field := range o.fields { + kv := strings.Split(field, ":") + if len(kv) != 2 { + return fmt.Errorf("invalid key-value pair: %s", field) + } + + if _, ok := allowedMetaUpdateFields[kv[0]]; !ok { + return fmt.Errorf("field %q isn't allowed to be updated", kv[0]) + } + + if _, err := strconv.ParseUint(kv[1], 10, 64); err != nil { + return fmt.Errorf("invalid value %q for field %q", kv[1], kv[0]) + } + } + + return nil +} + +func newSurgeryMetaUpdateCommand() *cobra.Command { + var o surgeryMetaUpdateOptions + metaUpdateCmd := &cobra.Command{ + Use: "update [options]", + Short: "Update fields in meta pages", + Args: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("db file path not provided") + } + if len(args) > 1 { + return errors.New("too many arguments") + } + return nil + }, + RunE: func(cmd *cobra.Command, args []string) error { + if err := o.Validate(); err != nil { + return err + } + return surgeryMetaUpdateFunc(args[0], o) + }, + } + o.AddFlags(metaUpdateCmd.Flags()) + return metaUpdateCmd +} + +func surgeryMetaUpdateFunc(srcDBPath string, cfg surgeryMetaUpdateOptions) error { + if _, err := 
checkSourceDBPath(srcDBPath); err != nil { + return err + } + + if err := common.CopyFile(srcDBPath, cfg.outputDBFilePath); err != nil { + return fmt.Errorf("[meta update] copy file failed: %w", err) + } + + // read the page size from the first meta page if we want to edit the second meta page. + var pageSize uint32 + if cfg.metaPageId == 1 { + m0, _, err := ReadMetaPageAt(cfg.outputDBFilePath, 0, pageSize) + if err != nil { + return fmt.Errorf("read the first meta page failed: %w", err) + } + pageSize = m0.PageSize() + } + + // update the specified meta page + m, buf, err := ReadMetaPageAt(cfg.outputDBFilePath, cfg.metaPageId, pageSize) + if err != nil { + return fmt.Errorf("read meta page %d failed: %w", cfg.metaPageId, err) + } + mChanged := updateMetaField(m, parseFields(cfg.fields)) + if mChanged { + if err := writeMetaPageAt(cfg.outputDBFilePath, buf, cfg.metaPageId, pageSize); err != nil { + return fmt.Errorf("[meta update] write meta page %d failed: %w", cfg.metaPageId, err) + } + } + + if cfg.metaPageId == 1 && pageSize != m.PageSize() { + fmt.Fprintf(os.Stdout, "WARNING: The page size (%d) in the first meta page doesn't match the second meta page (%d)\n", pageSize, m.PageSize()) + } + + // Display results + if !mChanged { + fmt.Fprintln(os.Stdout, "Nothing changed!") + } + + if mChanged { + fmt.Fprintf(os.Stdout, "The meta page %d has been updated!\n", cfg.metaPageId) + } + + return nil +} + +func parseFields(fields []string) map[string]uint64 { + fieldsMap := make(map[string]uint64) + for _, field := range fields { + kv := strings.SplitN(field, ":", 2) + val, _ := strconv.ParseUint(kv[1], 10, 64) + fieldsMap[kv[0]] = val + } + return fieldsMap +} + +func updateMetaField(m *common.Meta, fields map[string]uint64) bool { + changed := false + for key, val := range fields { + switch key { + case metaFieldPageSize: + m.SetPageSize(uint32(val)) + case metaFieldRoot: + m.SetRootBucket(common.NewInBucket(common.Pgid(val), 0)) + case metaFieldFreelist: + 
m.SetFreelist(common.Pgid(val)) + case metaFieldPgid: + m.SetPgid(common.Pgid(val)) + } + + changed = true + } + + if m.Magic() != common.Magic { + m.SetMagic(common.Magic) + changed = true + } + if m.Version() != common.Version { + m.SetVersion(common.Version) + changed = true + } + if m.Flags() != common.MetaPageFlag { + m.SetFlags(common.MetaPageFlag) + changed = true + } + + newChecksum := m.Sum64() + if m.Checksum() != newChecksum { + m.SetChecksum(newChecksum) + changed = true + } + + return changed +} + +func ReadMetaPageAt(dbPath string, metaPageId uint32, pageSize uint32) (*common.Meta, []byte, error) { + if metaPageId > 1 { + return nil, nil, fmt.Errorf("invalid metaPageId: %d", metaPageId) + } + + f, err := os.OpenFile(dbPath, os.O_RDONLY, 0444) + if err != nil { + return nil, nil, err + } + defer f.Close() + + // The meta page is just 64 bytes, and definitely less than 1024 bytes, + // so it's fine to only read 1024 bytes. Note we don't care about the + // pageSize when reading the first meta page, because we always read the + // file starting from offset 0. Actually the passed pageSize is 0 when + // reading the first meta page in the `surgery meta update` command. 
+ buf := make([]byte, 1024) + n, err := f.ReadAt(buf, int64(metaPageId*pageSize)) + if n == len(buf) && (err == nil || err == io.EOF) { + return common.LoadPageMeta(buf), buf, nil + } + + return nil, nil, err +} + +func writeMetaPageAt(dbPath string, buf []byte, metaPageId uint32, pageSize uint32) error { + if metaPageId > 1 { + return fmt.Errorf("invalid metaPageId: %d", metaPageId) + } + + f, err := os.OpenFile(dbPath, os.O_RDWR, 0666) + if err != nil { + return err + } + defer f.Close() + + n, err := f.WriteAt(buf, int64(metaPageId*pageSize)) + if n == len(buf) && (err == nil || err == io.EOF) { + return nil + } + + return err +} diff --git a/cmd/bbolt/command_surgery_meta_test.go b/cmd/bbolt/command_surgery_meta_test.go new file mode 100644 index 000000000..399cad18c --- /dev/null +++ b/cmd/bbolt/command_surgery_meta_test.go @@ -0,0 +1,126 @@ +package main_test + +import ( + "fmt" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + bolt "go.etcd.io/bbolt" + main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/internal/btesting" + "go.etcd.io/bbolt/internal/common" +) + +func TestSurgery_Meta_Validate(t *testing.T) { + pageSize := 4096 + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize}) + srcPath := db.Path() + + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + // validate the meta pages + rootCmd := main.NewRootCommand() + rootCmd.SetArgs([]string{ + "surgery", "meta", "validate", srcPath, + }) + err := rootCmd.Execute() + require.NoError(t, err) + + // TODD: add one more case that the validation may fail. We need to + // make the command output configurable, so that test cases can set + // a customized io.Writer. 
+} + +func TestSurgery_Meta_Update(t *testing.T) { + testCases := []struct { + name string + root common.Pgid + freelist common.Pgid + pgid common.Pgid + }{ + { + name: "root changed", + root: 50, + }, + { + name: "freelist changed", + freelist: 40, + }, + { + name: "pgid changed", + pgid: 600, + }, + { + name: "both root and freelist changed", + root: 45, + freelist: 46, + }, + { + name: "both pgid and freelist changed", + pgid: 256, + freelist: 47, + }, + { + name: "all fields changed", + root: 43, + freelist: 62, + pgid: 256, + }, + } + + for _, tc := range testCases { + for i := 0; i <= 1; i++ { + tc := tc + metaPageId := uint32(i) + + t.Run(tc.name, func(t *testing.T) { + pageSize := 4096 + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize}) + srcPath := db.Path() + + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + var fields []string + if tc.root != 0 { + fields = append(fields, fmt.Sprintf("root:%d", tc.root)) + } + if tc.freelist != 0 { + fields = append(fields, fmt.Sprintf("freelist:%d", tc.freelist)) + } + if tc.pgid != 0 { + fields = append(fields, fmt.Sprintf("pgid:%d", tc.pgid)) + } + + rootCmd := main.NewRootCommand() + output := filepath.Join(t.TempDir(), "db") + rootCmd.SetArgs([]string{ + "surgery", "meta", "update", srcPath, + "--output", output, + "--meta-page", fmt.Sprintf("%d", metaPageId), + "--fields", strings.Join(fields, ","), + }) + err := rootCmd.Execute() + require.NoError(t, err) + + m, _, err := main.ReadMetaPageAt(output, metaPageId, 4096) + require.NoError(t, err) + + require.Equal(t, common.Magic, m.Magic()) + require.Equal(t, common.Version, m.Version()) + + if tc.root != 0 { + require.Equal(t, tc.root, m.RootBucket().RootPage()) + } + if tc.freelist != 0 { + require.Equal(t, tc.freelist, m.Freelist()) + } + if tc.pgid != 0 { + require.Equal(t, tc.pgid, m.Pgid()) + } + }) + } + } +} diff --git a/internal/common/meta.go b/internal/common/meta.go index 4517d3716..055388604 100644 --- 
a/internal/common/meta.go +++ b/internal/common/meta.go @@ -72,6 +72,10 @@ func (m *Meta) SetMagic(v uint32) { m.magic = v } +func (m *Meta) Version() uint32 { + return m.version +} + func (m *Meta) SetVersion(v uint32) { m.version = v } @@ -136,6 +140,10 @@ func (m *Meta) DecTxid() { m.txid -= 1 } +func (m *Meta) Checksum() uint64 { + return m.checksum +} + func (m *Meta) SetChecksum(v uint64) { m.checksum = v } diff --git a/internal/common/types.go b/internal/common/types.go index 04b920302..8ad8279a0 100644 --- a/internal/common/types.go +++ b/internal/common/types.go @@ -10,7 +10,7 @@ import ( const MaxMmapStep = 1 << 30 // 1GB // Version represents the data file format version. -const Version = 2 +const Version uint32 = 2 // Magic represents a marker value to indicate that a file is a Bolt DB. const Magic uint32 = 0xED0CDAED From 26f89a595140f163a4e8a7c86b689990f6335788 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Mon, 23 Oct 2023 13:13:18 +0100 Subject: [PATCH 139/439] ensure the stats is always 64bit aligned The first word in an allocated struct can be relied upon to be 64-bit aligned. Refer to https://pkg.go.dev/sync/atomic#pkg-note-BUG. Signed-off-by: Benjamin Wang --- db.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/db.go b/db.go index 65ef209df..b8487573e 100644 --- a/db.go +++ b/db.go @@ -36,6 +36,12 @@ const ( // All data access is performed through transactions which can be obtained through the DB. // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. type DB struct { + // Put `stats` at the first field to ensure it's 64-bit aligned. Note that + // the first word in an allocated struct can be relied upon to be 64-bit + // aligned. Refer to https://pkg.go.dev/sync/atomic#pkg-note-BUG. Also + // refer to discussion in https://github.com/etcd-io/bbolt/issues/577. + stats Stats + // When enabled, the database will perform a Check() after every commit. 
// A panic is issued if the database is in an inconsistent state. This // flag has a large performance impact so it should only be used for @@ -125,7 +131,6 @@ type DB struct { opened bool rwtx *Tx txs []*Tx - stats Stats freelist *freelist freelistLoad sync.Once @@ -1275,6 +1280,12 @@ var DefaultOptions = &Options{ // Stats represents statistics about the database. type Stats struct { + // Put `TxStats` at the first field to ensure it's 64-bit aligned. Note + // that the first word in an allocated struct can be relied upon to be + // 64-bit aligned. Refer to https://pkg.go.dev/sync/atomic#pkg-note-BUG. + // Also refer to discussion in https://github.com/etcd-io/bbolt/issues/577. + TxStats TxStats // global, ongoing stats. + // Freelist stats FreePageN int // total number of free pages on the freelist PendingPageN int // total number of pending pages on the freelist @@ -1284,8 +1295,6 @@ type Stats struct { // Transaction stats TxN int // total number of started read transactions OpenTxN int // number of currently open read transactions - - TxStats TxStats // global, ongoing stats. } // Sub calculates and returns the difference between two sets of database stats. From dfa2d79a8e62b9eff2832bea1f2e7520d3f7caab Mon Sep 17 00:00:00 2001 From: xinglong Date: Tue, 22 Jun 2021 14:09:22 +0800 Subject: [PATCH 140/439] Record the amount of free pages instead of hashmapFreeCount calculation Signed-off-by: xing0821 <54933318+xing0821@users.noreply.github.com> --- db.go | 9 +++++++++ freelist.go | 1 + freelist_hmap.go | 10 +++++++++- 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/db.go b/db.go index 65ef209df..93a3bb329 100644 --- a/db.go +++ b/db.go @@ -15,6 +15,9 @@ import ( "go.etcd.io/bbolt/internal/common" ) +// When enabled, the database will perform assert function to check the slow-path code +var assertVerify = os.Getenv("BBOLT_VERIFY") == "true" + // The time elapsed between consecutive file locking attempts. 
const flockRetryTimeout = 50 * time.Millisecond @@ -1309,3 +1312,9 @@ type Info struct { Data uintptr PageSize int } + +func _assertVerify(conditionFunc func() bool, msg string, v ...interface{}) { + if assertVerify && !conditionFunc() { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} diff --git a/freelist.go b/freelist.go index 29ac16c72..731d75c46 100644 --- a/freelist.go +++ b/freelist.go @@ -30,6 +30,7 @@ type freelist struct { freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size forwardMap map[common.Pgid]uint64 // key is start pgid, value is its span size backwardMap map[common.Pgid]uint64 // key is end pgid, value is its span size + freePagesCount uint64 // count of free pages(hashmap version) allocate func(txid common.Txid, n int) common.Pgid // the freelist allocate func free_count func() int // the function which gives you free page number mergeSpans func(ids common.Pgids) // the mergeSpan func diff --git a/freelist_hmap.go b/freelist_hmap.go index 57e1e950b..39f75eed4 100644 --- a/freelist_hmap.go +++ b/freelist_hmap.go @@ -8,7 +8,11 @@ import ( // hashmapFreeCount returns count of free pages(hashmap version) func (f *freelist) hashmapFreeCount() int { - // use the forwardMap to get the total count + _assertVerify(func() bool { return int(f.freePagesCount) == f.hashmapFreeCountSlow() }, "freePagesCount is out of sync with free pages map") + return int(f.freePagesCount) +} + +func (f *freelist) hashmapFreeCountSlow() int { count := 0 for _, size := range f.forwardMap { count += int(size) @@ -142,6 +146,7 @@ func (f *freelist) addSpan(start common.Pgid, size uint64) { } f.freemaps[size][start] = struct{}{} + f.freePagesCount += size } func (f *freelist) delSpan(start common.Pgid, size uint64) { @@ -151,6 +156,7 @@ func (f *freelist) delSpan(start common.Pgid, size uint64) { if len(f.freemaps[size]) == 0 { delete(f.freemaps, size) } + f.freePagesCount -= size } // 
initial from pgids using when use hashmap version @@ -162,6 +168,8 @@ func (f *freelist) init(pgids []common.Pgid) { size := uint64(1) start := pgids[0] + // reset the counter when freelist init + f.freePagesCount = 0 if !sort.SliceIsSorted([]common.Pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { panic("pgids not sorted") From 804c83609e40dce815f427601510ff6765f90880 Mon Sep 17 00:00:00 2001 From: ncabatoff Date: Thu, 26 Oct 2023 09:04:25 -0400 Subject: [PATCH 141/439] Rework assertion to follow etcd approach. Enable assertions in tests. Signed-off-by: ncabatoff --- Makefile | 16 +++++----- db.go | 9 ------ freelist_hmap.go | 6 +++- freelist_test.go | 1 + internal/common/utils.go | 7 ---- internal/common/verify.go | 67 +++++++++++++++++++++++++++++++++++++++ 6 files changed, 81 insertions(+), 25 deletions(-) create mode 100644 internal/common/verify.go diff --git a/Makefile b/Makefile index 1ff13c133..9ee21cae4 100644 --- a/Makefile +++ b/Makefile @@ -42,14 +42,14 @@ lint: .PHONY: test test: @echo "hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT} - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./internal/... - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT} + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./internal/... + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt @echo "array freelist test" - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT} - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./internal/... - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT} + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./internal/... 
+ BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt .PHONY: coverage coverage: @@ -76,8 +76,8 @@ install-gofail: .PHONY: test-failpoint test-failpoint: @echo "[failpoint] hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint @echo "[failpoint] array freelist test" - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint diff --git a/db.go b/db.go index 93a3bb329..65ef209df 100644 --- a/db.go +++ b/db.go @@ -15,9 +15,6 @@ import ( "go.etcd.io/bbolt/internal/common" ) -// When enabled, the database will perform assert function to check the slow-path code -var assertVerify = os.Getenv("BBOLT_VERIFY") == "true" - // The time elapsed between consecutive file locking attempts. const flockRetryTimeout = 50 * time.Millisecond @@ -1312,9 +1309,3 @@ type Info struct { Data uintptr PageSize int } - -func _assertVerify(conditionFunc func() bool, msg string, v ...interface{}) { - if assertVerify && !conditionFunc() { - panic(fmt.Sprintf("assertion failed: "+msg, v...)) - } -} diff --git a/freelist_hmap.go b/freelist_hmap.go index 39f75eed4..0d38976a1 100644 --- a/freelist_hmap.go +++ b/freelist_hmap.go @@ -8,7 +8,11 @@ import ( // hashmapFreeCount returns count of free pages(hashmap version) func (f *freelist) hashmapFreeCount() int { - _assertVerify(func() bool { return int(f.freePagesCount) == f.hashmapFreeCountSlow() }, "freePagesCount is out of sync with free pages map") + common.Verify(func() { + expectedFreePageCount := f.hashmapFreeCountSlow() + common.Assert(int(f.freePagesCount) == expectedFreePageCount, + "freePagesCount (%d) is out of sync with free pages map (%d)", f.freePagesCount, expectedFreePageCount) + }) return int(f.freePagesCount) } diff --git 
a/freelist_test.go b/freelist_test.go index 7297055b4..5cf40bd1c 100644 --- a/freelist_test.go +++ b/freelist_test.go @@ -448,6 +448,7 @@ func Test_freelist_hashmapGetFreePageIDs(t *testing.T) { val = rand.Int31n(1000) fm[common.Pgid(i)] = uint64(val) i += val + f.freePagesCount += uint64(val) } f.forwardMap = fm diff --git a/internal/common/utils.go b/internal/common/utils.go index c94e5c6bf..bdf82a7b0 100644 --- a/internal/common/utils.go +++ b/internal/common/utils.go @@ -7,13 +7,6 @@ import ( "unsafe" ) -// Assert will panic with a given formatted message if the given condition is false. -func Assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assertion failed: "+msg, v...)) - } -} - func LoadBucket(buf []byte) *InBucket { return (*InBucket)(unsafe.Pointer(&buf[0])) } diff --git a/internal/common/verify.go b/internal/common/verify.go new file mode 100644 index 000000000..eac95e263 --- /dev/null +++ b/internal/common/verify.go @@ -0,0 +1,67 @@ +// Copied from https://github.com/etcd-io/etcd/blob/main/client/pkg/verify/verify.go +package common + +import ( + "fmt" + "os" + "strings" +) + +const ENV_VERIFY = "BBOLT_VERIFY" + +type VerificationType string + +const ( + ENV_VERIFY_VALUE_ALL VerificationType = "all" + ENV_VERIFY_VALUE_ASSERT VerificationType = "assert" +) + +func getEnvVerify() string { + return strings.ToLower(os.Getenv(ENV_VERIFY)) +} + +func IsVerificationEnabled(verification VerificationType) bool { + env := getEnvVerify() + return env == string(ENV_VERIFY_VALUE_ALL) || env == strings.ToLower(string(verification)) +} + +// EnableVerifications sets `ENV_VERIFY` and returns a function that +// can be used to bring the original settings. 
+func EnableVerifications(verification VerificationType) func() { + previousEnv := getEnvVerify() + os.Setenv(ENV_VERIFY, string(verification)) + return func() { + os.Setenv(ENV_VERIFY, previousEnv) + } +} + +// EnableAllVerifications enables verification and returns a function +// that can be used to bring the original settings. +func EnableAllVerifications() func() { + return EnableVerifications(ENV_VERIFY_VALUE_ALL) +} + +// DisableVerifications unsets `ENV_VERIFY` and returns a function that +// can be used to bring the original settings. +func DisableVerifications() func() { + previousEnv := getEnvVerify() + os.Unsetenv(ENV_VERIFY) + return func() { + os.Setenv(ENV_VERIFY, previousEnv) + } +} + +// Verify performs verification if the assertions are enabled. +// In the default setup running in tests and skipped in the production code. +func Verify(f func()) { + if IsVerificationEnabled(ENV_VERIFY_VALUE_ASSERT) { + f() + } +} + +// Assert will panic with a given formatted message if the given condition is false. +func Assert(condition bool, msg string, v ...any) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} From 34b7ee830e37f20d90e9092ab7dc42c70a9639ba Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 26 Oct 2023 16:20:06 +0100 Subject: [PATCH 142/439] Add changelog items for 1.3.8 Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.3.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/CHANGELOG/CHANGELOG-1.3.md b/CHANGELOG/CHANGELOG-1.3.md index c13749df4..43dce8610 100644 --- a/CHANGELOG/CHANGELOG-1.3.md +++ b/CHANGELOG/CHANGELOG-1.3.md @@ -1,5 +1,17 @@ Note that we start to track changes starting from v1.3.7. +
+## v1.3.8(2023-10-26) + +### BoltDB +- Fix [db.close() doesn't unlock the db file if db.munnmap() fails](https://github.com/etcd-io/bbolt/pull/439). +- [Avoid syscall.Syscall use on OpenBSD](https://github.com/etcd-io/bbolt/pull/406). +- Fix [rollback panicking after mlock failed or both meta pages corrupted](https://github.com/etcd-io/bbolt/pull/444). +- Fix [bbolt panicking due to 64bit unaligned on arm32](https://github.com/etcd-io/bbolt/pull/584). + +### CMD +- [Update the usage of surgery command](https://github.com/etcd-io/bbolt/pull/411). +
## v1.3.7(2023-01-31) From 3be121f8b41730cbcce2e8b3616a241cec4c9a15 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 26 Oct 2023 19:36:49 +0100 Subject: [PATCH 143/439] update changelog format error Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.3.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG/CHANGELOG-1.3.md b/CHANGELOG/CHANGELOG-1.3.md index 43dce8610..2f7c96e75 100644 --- a/CHANGELOG/CHANGELOG-1.3.md +++ b/CHANGELOG/CHANGELOG-1.3.md @@ -1,6 +1,7 @@ Note that we start to track changes starting from v1.3.7.
+ ## v1.3.8(2023-10-26) ### BoltDB From c90f37ee4afac2a212932d2d4770604355e8bd7c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Nov 2023 14:37:38 +0000 Subject: [PATCH 144/439] build(deps): Bump golang.org/x/sys from 0.13.0 to 0.14.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.13.0 to 0.14.0. - [Commits](https://github.com/golang/sys/compare/v0.13.0...v0.14.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d6fc2fbef..82624a930 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/stretchr/testify v1.8.4 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.4.0 - golang.org/x/sys v0.13.0 + golang.org/x/sys v0.14.0 ) require ( diff --git a/go.sum b/go.sum index 68124a57b..e5c9efe4f 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 
839fc1cb7e3015cadca5941656d3e54e829627ca Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Nov 2023 14:37:53 +0000 Subject: [PATCH 145/439] build(deps): Bump github.com/spf13/cobra from 1.7.0 to 1.8.0 Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.7.0 to 1.8.0. - [Release notes](https://github.com/spf13/cobra/releases) - [Commits](https://github.com/spf13/cobra/compare/v1.7.0...v1.8.0) --- updated-dependencies: - dependency-name: github.com/spf13/cobra dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index d6fc2fbef..7ae2820df 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module go.etcd.io/bbolt go 1.21 require ( - github.com/spf13/cobra v1.7.0 + github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.4 go.etcd.io/gofail v0.1.0 diff --git a/go.sum b/go.sum index 68124a57b..f0bb5f77f 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,4 @@ -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -6,8 +6,8 @@ github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLf github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra 
v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= From 393ee7b1f2fbc3b43ca0cf48b18c5b3a4d32ea92 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Nov 2023 09:39:37 +0000 Subject: [PATCH 146/439] build(deps): Bump golang.org/x/sync from 0.4.0 to 0.5.0 Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.4.0 to 0.5.0. - [Commits](https://github.com/golang/sync/compare/v0.4.0...v0.5.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ddacc8bcb..26f2d2f52 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.4 go.etcd.io/gofail v0.1.0 - golang.org/x/sync v0.4.0 + golang.org/x/sync v0.5.0 golang.org/x/sys v0.14.0 ) diff --git a/go.sum b/go.sum index 16fb32152..204bc990c 100644 --- a/go.sum +++ b/go.sum @@ -14,8 +14,8 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= -golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= -golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From f56cd264018c9622d02d51d881a96dc8ad4678fa Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Mon, 6 Nov 2023 19:19:12 +0100 Subject: [PATCH 147/439] fix: change pageItemCommand default format auto Signed-off-by: Mustafa Elbehery --- cmd/bbolt/main.go | 4 +- cmd/bbolt/main_test.go | 104 ++++++++++++++++++++++++++++++----------- 2 files changed, 78 insertions(+), 30 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 7115471c0..357d25ffd 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -456,7 +456,7 @@ func (cmd *pageItemCommand) Run(args ...string) error { fs := 
flag.NewFlagSet("", flag.ContinueOnError) fs.BoolVar(&options.keyOnly, "key-only", false, "Print only the key") fs.BoolVar(&options.valueOnly, "value-only", false, "Print only the value") - fs.StringVar(&options.format, "format", "ascii-encoded", "Output format. One of: "+FORMAT_MODES) + fs.StringVar(&options.format, "format", "auto", "Output format. One of: "+FORMAT_MODES) fs.BoolVar(&options.help, "h", false, "") if err := fs.Parse(args); err != nil { return err @@ -606,7 +606,7 @@ Additional options include: --value-only Print only the value --format - Output format. One of: `+FORMAT_MODES+` (default=ascii-encoded) + Output format. One of: `+FORMAT_MODES+` (default=auto) page-item prints a page item key and value. `, "\n") diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index dbc7de762..fbf1eac31 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -4,6 +4,7 @@ import ( "bytes" crypto "crypto/rand" "encoding/binary" + "encoding/hex" "fmt" "io" "math/rand" @@ -14,12 +15,12 @@ import ( "testing" "go.etcd.io/bbolt/internal/btesting" + "go.etcd.io/bbolt/internal/guts_cli" "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" main "go.etcd.io/bbolt/cmd/bbolt" - "go.etcd.io/bbolt/internal/guts_cli" ) // Ensure the "info" command can print information about a database. 
@@ -135,35 +136,76 @@ func TestPageCommand_Run(t *testing.T) { } func TestPageItemCommand_Run(t *testing.T) { - db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 4096}) - srcPath := db.Path() - - // Insert some sample data - t.Log("Insert some sample data") - err := db.Fill([]byte("data"), 1, 100, - func(tx int, k int) []byte { return []byte(fmt.Sprintf("key_%d", k)) }, - func(tx int, k int) []byte { return []byte(fmt.Sprintf("value_%d", k)) }, - ) - require.NoError(t, err) - - defer requireDBNoChange(t, dbData(t, srcPath), srcPath) + testCases := []struct { + name string + printable bool + itemId string + expectedKey string + expectedValue string + }{ + { + name: "printable items", + printable: true, + itemId: "0", + expectedKey: "key_0", + expectedValue: "value_0", + }, + { + name: "non printable items", + printable: false, + itemId: "0", + expectedKey: hex.EncodeToString(convertInt64IntoBytes(0 + 1)), + expectedValue: hex.EncodeToString(convertInt64IntoBytes(0 + 2)), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 4096}) + srcPath := db.Path() + + t.Log("Insert some sample data") + err := db.Update(func(tx *bolt.Tx) error { + b, bErr := tx.CreateBucketIfNotExists([]byte("data")) + if bErr != nil { + return bErr + } - meta := readMetaPage(t, srcPath) - leafPageId := 0 - for i := 2; i < int(meta.Pgid()); i++ { - p, _, err := guts_cli.ReadPage(srcPath, uint64(i)) - require.NoError(t, err) - if p.IsLeafPage() && p.Count() > 1 { - leafPageId = int(p.Id()) - } - } - require.NotEqual(t, 0, leafPageId) + for i := 0; i < 100; i++ { + if tc.printable { + if bErr = b.Put([]byte(fmt.Sprintf("key_%d", i)), []byte(fmt.Sprintf("value_%d", i))); bErr != nil { + return bErr + } + } else { + k, v := convertInt64IntoBytes(int64(i+1)), convertInt64IntoBytes(int64(i+2)) + if bErr = b.Put(k, v); bErr != nil { + return bErr + } + } + } + return nil + }) + 
require.NoError(t, err) + defer requireDBNoChange(t, dbData(t, srcPath), srcPath) + + meta := readMetaPage(t, srcPath) + leafPageId := 0 + for i := 2; i < int(meta.Pgid()); i++ { + p, _, err := guts_cli.ReadPage(srcPath, uint64(i)) + require.NoError(t, err) + if p.IsLeafPage() && p.Count() > 1 { + leafPageId = int(p.Id()) + } + } + require.NotEqual(t, 0, leafPageId) - m := NewMain() - err = m.Run("page-item", db.Path(), fmt.Sprintf("%d", leafPageId), "0") - require.NoError(t, err) - if !strings.Contains(m.Stdout.String(), "key_0") || !strings.Contains(m.Stdout.String(), "value_0") { - t.Fatalf("Unexpected output:\n%s\n", m.Stdout.String()) + m := NewMain() + err = m.Run("page-item", db.Path(), fmt.Sprintf("%d", leafPageId), tc.itemId) + require.NoError(t, err) + if !strings.Contains(m.Stdout.String(), tc.expectedKey) || !strings.Contains(m.Stdout.String(), tc.expectedValue) { + t.Fatalf("Unexpected output:\n%s\n", m.Stdout.String()) + } + }) } } @@ -629,3 +671,9 @@ func requireDBNoChange(t *testing.T, oldData []byte, filePath string) { noChange := bytes.Equal(oldData, newData) require.True(t, noChange) } + +func convertInt64IntoBytes(num int64) []byte { + buf := make([]byte, binary.MaxVarintLen64) + n := binary.PutVarint(buf, num) + return buf[:n] +} From 987e20b0940f006443b4f849942d5c84ce9f457a Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Mon, 6 Nov 2023 19:35:46 +0100 Subject: [PATCH 148/439] fix: change getCommand default format auto Signed-off-by: Mustafa Elbehery --- cmd/bbolt/main.go | 4 +-- cmd/bbolt/main_test.go | 79 ++++++++++++++++++++++++++++-------------- 2 files changed, 55 insertions(+), 28 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 357d25ffd..3f21a42ff 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -994,7 +994,7 @@ func (cmd *getCommand) Run(args ...string) error { var parseFormat string var format string fs.StringVar(&parseFormat, "parse-format", "ascii-encoded", "Input format. 
One of: ascii-encoded|hex (default: ascii-encoded)") - fs.StringVar(&format, "format", "bytes", "Output format. One of: "+FORMAT_MODES+" (default: bytes)") + fs.StringVar(&format, "format", "auto", "Output format. One of: "+FORMAT_MODES+" (default: auto)") help := fs.Bool("h", false, "") if err := fs.Parse(args); err != nil { return err @@ -1062,7 +1062,7 @@ Print the value of the given key in the given (sub)bucket. Additional options include: --format - Output format. One of: `+FORMAT_MODES+` (default=bytes) + Output format. One of: `+FORMAT_MODES+` (default=auto) --parse-format Input format (of key). One of: ascii-encoded|hex (default=ascii-encoded)" `, "\n") diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index fbf1eac31..abc57ccac 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -17,6 +17,7 @@ import ( "go.etcd.io/bbolt/internal/btesting" "go.etcd.io/bbolt/internal/guts_cli" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" @@ -355,38 +356,64 @@ func TestKeysCommand_Run(t *testing.T) { // Ensure the "get" command can print the value of a key in a bucket. 
func TestGetCommand_Run(t *testing.T) { - db := btesting.MustCreateDB(t) + testCases := []struct { + name string + printable bool + testBucket string + testKey string + expectedValue string + }{ + { + name: "printable data", + printable: true, + testBucket: "foo", + testKey: "foo-1", + expectedValue: "val-foo-1\n", + }, + { + name: "non printable data", + printable: false, + testBucket: "bar", + testKey: "100001", + expectedValue: hex.EncodeToString(convertInt64IntoBytes(100001)) + "\n", + }, + } - if err := db.Update(func(tx *bolt.Tx) error { - for _, name := range []string{"foo", "bar"} { - b, err := tx.CreateBucket([]byte(name)) - if err != nil { - return err - } - for i := 0; i < 3; i++ { - key := fmt.Sprintf("%s-%d", name, i) - val := fmt.Sprintf("val-%s-%d", name, i) - if err := b.Put([]byte(key), []byte(val)); err != nil { + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + db := btesting.MustCreateDB(t) + + if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucket([]byte(tc.testBucket)) + if err != nil { return err } + if tc.printable { + val := fmt.Sprintf("val-%s", tc.testKey) + if err := b.Put([]byte(tc.testKey), []byte(val)); err != nil { + return err + } + } else { + if err := b.Put([]byte(tc.testKey), convertInt64IntoBytes(100001)); err != nil { + return err + } + } + return nil + }); err != nil { + t.Fatal(err) } - } - return nil - }); err != nil { - t.Fatal(err) - } - db.Close() + db.Close() - defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) - - expected := "val-foo-1\n" + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) - // Run the command. - m := NewMain() - if err := m.Run("get", db.Path(), "foo", "foo-1"); err != nil { - t.Fatal(err) - } else if actual := m.Stdout.String(); actual != expected { - t.Fatalf("unexpected stdout:\n\n%s", actual) + // Run the command. 
+ m := NewMain() + if err := m.Run("get", db.Path(), tc.testBucket, tc.testKey); err != nil { + t.Fatal(err) + } + actual := m.Stdout.String() + assert.Equal(t, tc.expectedValue, actual) + }) } } From 09dd42f057e73fd5a2f61d9d65da7508558763f8 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Mon, 6 Nov 2023 19:28:59 +0100 Subject: [PATCH 149/439] fix:change keysCommand default format auto Signed-off-by: Mustafa Elbehery --- cmd/bbolt/main.go | 4 +- cmd/bbolt/main_test.go | 91 ++++++++++++++++++++++++++++++------------ 2 files changed, 67 insertions(+), 28 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 3f21a42ff..7ba8c8a7f 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -909,7 +909,7 @@ func newKeysCommand(m *Main) *keysCommand { func (cmd *keysCommand) Run(args ...string) error { // Parse flags. fs := flag.NewFlagSet("", flag.ContinueOnError) - optionsFormat := fs.String("format", "bytes", "Output format. One of: "+FORMAT_MODES+" (default: bytes)") + optionsFormat := fs.String("format", "auto", "Output format. One of: "+FORMAT_MODES+" (default: auto)") help := fs.Bool("h", false, "") if err := fs.Parse(args); err != nil { return err @@ -969,7 +969,7 @@ Print a list of keys in the given (sub)bucket. Additional options include: --format - Output format. One of: `+FORMAT_MODES+` (default=bytes) + Output format. One of: `+FORMAT_MODES+` (default=auto) Print a list of keys in the given bucket. `, "\n") diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index abc57ccac..e137db3e9 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -320,37 +320,68 @@ func TestBucketsCommand_Run(t *testing.T) { // Ensure the "keys" command can print a list of keys for a bucket. 
func TestKeysCommand_Run(t *testing.T) { - db := btesting.MustCreateDB(t) + testCases := []struct { + name string + printable bool + testBucket string + expected string + }{ + { + name: "printable keys", + printable: true, + testBucket: "foo", + expected: "foo-0\nfoo-1\nfoo-2\n", + }, + { + name: "non printable keys", + printable: false, + testBucket: "bar", + expected: convertInt64KeysIntoHexString(100001, 100002, 100003), + }, + } - if err := db.Update(func(tx *bolt.Tx) error { - for _, name := range []string{"foo", "bar"} { - b, err := tx.CreateBucket([]byte(name)) - if err != nil { - return err - } - for i := 0; i < 3; i++ { - key := fmt.Sprintf("%s-%d", name, i) - if err := b.Put([]byte(key), []byte{0}); err != nil { - return err + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Logf("creating test database for subtest '%s'", tc.name) + db := btesting.MustCreateDB(t) + + err := db.Update(func(tx *bolt.Tx) error { + t.Logf("creating test bucket %s", tc.testBucket) + b, bErr := tx.CreateBucketIfNotExists([]byte(tc.testBucket)) + if bErr != nil { + return fmt.Errorf("error creating test bucket %q: %v", tc.testBucket, bErr) } - } - } - return nil - }); err != nil { - t.Fatal(err) - } - db.Close() - defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + t.Logf("inserting test data into test bucket %s", tc.testBucket) + if tc.printable { + for i := 0; i < 3; i++ { + key := fmt.Sprintf("%s-%d", tc.testBucket, i) + if pErr := b.Put([]byte(key), []byte{0}); pErr != nil { + return pErr + } + } + } else { + for i := 100001; i < 100004; i++ { + k := convertInt64IntoBytes(int64(i)) + if pErr := b.Put(k, []byte{0}); pErr != nil { + return pErr + } + } + } + return nil + }) + require.NoError(t, err) + db.Close() - expected := "foo-0\nfoo-1\nfoo-2\n" + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) - // Run the command. 
- m := NewMain() - if err := m.Run("keys", db.Path(), "foo"); err != nil { - t.Fatal(err) - } else if actual := m.Stdout.String(); actual != expected { - t.Fatalf("unexpected stdout:\n\n%s", actual) + t.Log("running Keys cmd") + m := NewMain() + kErr := m.Run("keys", db.Path(), tc.testBucket) + require.NoError(t, kErr) + actual := m.Stdout.String() + assert.Equal(t, tc.expected, actual) + }) } } @@ -704,3 +735,11 @@ func convertInt64IntoBytes(num int64) []byte { n := binary.PutVarint(buf, num) return buf[:n] } + +func convertInt64KeysIntoHexString(nums ...int64) string { + var res []string + for _, num := range nums { + res = append(res, hex.EncodeToString(convertInt64IntoBytes(num))) + } + return strings.Join(res, "\n") + "\n" // last newline char +} From 4d3043150334576c2e48b66c3b1c55594ffc8a74 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Wed, 8 Nov 2023 06:54:35 -0800 Subject: [PATCH 150/439] Enable arm64 runner Signed-off-by: Ivan Valdes --- .github/workflows/tests_arm64.yaml | 48 ++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 .github/workflows/tests_arm64.yaml diff --git a/.github/workflows/tests_arm64.yaml b/.github/workflows/tests_arm64.yaml new file mode 100644 index 000000000..d6ecbc673 --- /dev/null +++ b/.github/workflows/tests_arm64.yaml @@ -0,0 +1,48 @@ +name: Tests ARM64 +on: [push, pull_request] +jobs: + test-linux: + strategy: + fail-fast: false + matrix: + target: + - linux-arm64-unit-test-1-cpu + - linux-arm64-unit-test-2-cpu + - linux-arm64-unit-test-4-cpu + - linux-arm64-unit-test-4-cpu-race + runs-on: actuated-arm64-8cpu-32gb + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v4 + with: + go-version: "1.21.1" + - run: make fmt + - env: + TARGET: ${{ matrix.target }} + run: | + case "${TARGET}" in + linux-arm64-unit-test-1-cpu) + CPU=1 make test + ;; + linux-arm64-unit-test-2-cpu) + CPU=2 make test + ;; + linux-arm64-unit-test-4-cpu) + CPU=4 make test + ;; + linux-arm64-unit-test-4-cpu-race) 
+ # XXX: By default, the Github Action runner will terminate the process + # if it has high resource usage. Try to use GOGC to limit memory and + # cpu usage here to prevent unexpected terminating. It can be replaced + # with GOMEMLIMIT=2048MiB if the go-version is updated to >=1.19.x. + # + # REF: https://github.com/actions/runner-images/issues/6680#issuecomment-1335778010 + GOGC=30 CPU=4 ENABLE_RACE=true make test + ;; + *) + echo "Failed to find target" + exit 1 + ;; + esac + - name: golangci-lint + uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 From 6db6adcc12cb28a61639ade035d3aa14b64b22f9 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Wed, 8 Nov 2023 18:10:26 +0100 Subject: [PATCH 151/439] fix: cmd readme default format Signed-off-by: Mustafa Elbehery --- cmd/bbolt/README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/bbolt/README.md b/cmd/bbolt/README.md index a3e7c169f..813c9feed 100644 --- a/cmd/bbolt/README.md +++ b/cmd/bbolt/README.md @@ -241,7 +241,7 @@ --value-only Print only the value --format - Output format. One of: auto|ascii-encoded|hex|bytes|redacted (default=ascii-encoded) + Output format. One of: auto|ascii-encoded|hex|bytes|redacted (default=auto) ``` Example: @@ -269,7 +269,7 @@ Additional options include: --format - Output format. One of: auto|ascii-encoded|hex|bytes|redacted (default=bytes) + Output format. One of: auto|ascii-encoded|hex|bytes|redacted (default=auto) ``` Example 1: @@ -303,7 +303,7 @@ Additional options include: --format - Output format. One of: auto|ascii-encoded|hex|bytes|redacted (default=bytes) + Output format. One of: auto|ascii-encoded|hex|bytes|redacted (default=auto) --parse-format Input format (of key). 
One of: ascii-encoded|hex (default=ascii-encoded)" ``` From e12728ab62738b5606bc934675a729ada5dbfcdc Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Thu, 9 Nov 2023 11:17:39 +0100 Subject: [PATCH 152/439] add: reusable github workflow template Signed-off-by: Mustafa Elbehery --- .github/workflows/failpoint_test.yaml | 1 - .github/workflows/tests-template.yml | 57 +++++++++++++++ .github/workflows/tests.yaml | 99 --------------------------- .github/workflows/tests_amd64.yaml | 16 +++++ .github/workflows/tests_arm64.yaml | 50 ++------------ .github/workflows/tests_windows.yml | 49 +++++++++++++ 6 files changed, 127 insertions(+), 145 deletions(-) create mode 100644 .github/workflows/tests-template.yml delete mode 100644 .github/workflows/tests.yaml create mode 100644 .github/workflows/tests_amd64.yaml create mode 100644 .github/workflows/tests_windows.yml diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index 41da8afe5..f2c3576ff 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -15,4 +15,3 @@ jobs: - run: | make gofail-enable make test-failpoint - diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml new file mode 100644 index 000000000..568a6b01c --- /dev/null +++ b/.github/workflows/tests-template.yml @@ -0,0 +1,57 @@ +--- +name: Reusable unit test Workflow +on: + workflow_call: + inputs: + runs-on: + required: false + type: string + default: "['ubuntu-latest']" +permissions: read-all + +jobs: + test-linux: + strategy: + fail-fast: false + matrix: + target: + - linux-unit-test-1-cpu + - linux-unit-test-2-cpu + - linux-unit-test-4-cpu + - linux-unit-test-4-cpu-race + runs-on: ${{ fromJson(inputs.runs-on) }} + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v4 + with: + go-version: "1.21.1" + - run: make fmt + - env: + TARGET: ${{ matrix.target }} + run: | + case "${TARGET}" in + linux-unit-test-1-cpu) + CPU=1 make test + 
;; + linux-unit-test-2-cpu) + CPU=2 make test + ;; + linux-unit-test-4-cpu) + CPU=4 make test + ;; + linux-unit-test-4-cpu-race) + # XXX: By default, the Github Action runner will terminate the process + # if it has high resource usage. Try to use GOGC to limit memory and + # cpu usage here to prevent unexpected terminating. It can be replaced + # with GOMEMLIMIT=2048MiB if the go-version is updated to >=1.19.x. + # + # REF: https://github.com/actions/runner-images/issues/6680#issuecomment-1335778010 + GOGC=30 CPU=4 ENABLE_RACE=true make test + ;; + *) + echo "Failed to find target" + exit 1 + ;; + esac + - name: golangci-lint + uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml deleted file mode 100644 index 8b4d41e6b..000000000 --- a/.github/workflows/tests.yaml +++ /dev/null @@ -1,99 +0,0 @@ -name: Tests -on: [push, pull_request] -jobs: - test-linux: - strategy: - fail-fast: false - matrix: - target: - - linux-amd64-unit-test-1-cpu - - linux-amd64-unit-test-2-cpu - - linux-amd64-unit-test-4-cpu - - linux-amd64-unit-test-4-cpu-race - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 - with: - go-version: "1.21.1" - - run: make fmt - - env: - TARGET: ${{ matrix.target }} - run: | - case "${TARGET}" in - linux-amd64-unit-test-1-cpu) - CPU=1 make test - ;; - linux-amd64-unit-test-2-cpu) - CPU=2 make test - ;; - linux-amd64-unit-test-4-cpu) - CPU=4 make test - ;; - linux-amd64-unit-test-4-cpu-race) - # XXX: By default, the Github Action runner will terminate the process - # if it has high resource usage. Try to use GOGC to limit memory and - # cpu usage here to prevent unexpected terminating. It can be replaced - # with GOMEMLIMIT=2048MiB if the go-version is updated to >=1.19.x. 
- # - # REF: https://github.com/actions/runner-images/issues/6680#issuecomment-1335778010 - GOGC=30 CPU=4 ENABLE_RACE=true make test - ;; - *) - echo "Failed to find target" - exit 1 - ;; - esac - - name: golangci-lint - uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 - - test-windows: - strategy: - fail-fast: false - matrix: - target: - - windows-amd64-unit-test-4-cpu - # FIXME(fuweid): - # - # The windows will throws the following error when enable race. - # We skip it until we have solution. - # - # ThreadSanitizer failed to allocate 0x000200000000 (8589934592) bytes at 0x0400c0000000 (error code: 1455) - # - #- windows-amd64-unit-test-4-cpu-race - runs-on: windows-latest - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 - with: - go-version: "1.21.1" - - run: make fmt - - env: - TARGET: ${{ matrix.target }} - run: | - case "${TARGET}" in - windows-amd64-unit-test-4-cpu) - CPU=4 make test - ;; - *) - echo "Failed to find target" - exit 1 - ;; - esac - shell: bash - - name: golangci-lint - uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 - - coverage: - needs: ["test-linux", "test-windows"] - strategy: - matrix: - os: [ubuntu-latest, windows-latest] - runs-on: ${{ matrix.os }} - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 - with: - go-version: "1.21.1" - - run: make coverage - diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml new file mode 100644 index 000000000..6b51bcab8 --- /dev/null +++ b/.github/workflows/tests_amd64.yaml @@ -0,0 +1,16 @@ +name: Tests AMD64 +permissions: read-all +on: [push, pull_request] +jobs: + test-linux-amd64: + uses: ./.github/workflows/tests-template.yml + + coverage: + needs: ["test-linux-amd64"] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v4 + with: + go-version: "1.21.1" + - run: make coverage diff --git 
a/.github/workflows/tests_arm64.yaml b/.github/workflows/tests_arm64.yaml index d6ecbc673..79db51792 100644 --- a/.github/workflows/tests_arm64.yaml +++ b/.github/workflows/tests_arm64.yaml @@ -1,48 +1,8 @@ name: Tests ARM64 +permissions: read-all on: [push, pull_request] jobs: - test-linux: - strategy: - fail-fast: false - matrix: - target: - - linux-arm64-unit-test-1-cpu - - linux-arm64-unit-test-2-cpu - - linux-arm64-unit-test-4-cpu - - linux-arm64-unit-test-4-cpu-race - runs-on: actuated-arm64-8cpu-32gb - steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v4 - with: - go-version: "1.21.1" - - run: make fmt - - env: - TARGET: ${{ matrix.target }} - run: | - case "${TARGET}" in - linux-arm64-unit-test-1-cpu) - CPU=1 make test - ;; - linux-arm64-unit-test-2-cpu) - CPU=2 make test - ;; - linux-arm64-unit-test-4-cpu) - CPU=4 make test - ;; - linux-arm64-unit-test-4-cpu-race) - # XXX: By default, the Github Action runner will terminate the process - # if it has high resource usage. Try to use GOGC to limit memory and - # cpu usage here to prevent unexpected terminating. It can be replaced - # with GOMEMLIMIT=2048MiB if the go-version is updated to >=1.19.x. 
- # - # REF: https://github.com/actions/runner-images/issues/6680#issuecomment-1335778010 - GOGC=30 CPU=4 ENABLE_RACE=true make test - ;; - *) - echo "Failed to find target" - exit 1 - ;; - esac - - name: golangci-lint - uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 + test-linux-arm64: + uses: ./.github/workflows/tests-template.yml + with: + runs-on: "['actuated-arm64-4cpu-16gb']" diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml new file mode 100644 index 000000000..4e45e0dad --- /dev/null +++ b/.github/workflows/tests_windows.yml @@ -0,0 +1,49 @@ +name: Tests +on: [push, pull_request] +jobs: + test-windows: + strategy: + fail-fast: false + matrix: + target: + - windows-amd64-unit-test-4-cpu + # FIXME(fuweid): + # + # The windows will throws the following error when enable race. + # We skip it until we have solution. + # + # ThreadSanitizer failed to allocate 0x000200000000 (8589934592) bytes at 0x0400c0000000 (error code: 1455) + # + #- windows-amd64-unit-test-4-cpu-race + runs-on: windows-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v4 + with: + go-version: "1.21.1" + - run: make fmt + - env: + TARGET: ${{ matrix.target }} + run: | + case "${TARGET}" in + windows-amd64-unit-test-4-cpu) + CPU=4 make test + ;; + *) + echo "Failed to find target" + exit 1 + ;; + esac + shell: bash + - name: golangci-lint + uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 + + coverage: + needs: ["test-windows" ] + runs-on: windows-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-go@v4 + with: + go-version: "1.21.1" + - run: make coverage From f62e83b46dc764631c19361c6ded56e360de2144 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Mon, 13 Nov 2023 15:09:23 +0100 Subject: [PATCH 153/439] refactor goversion into one location Signed-off-by: Mustafa Elbehery --- .github/workflows/failpoint_test.yaml | 4 +++- 
.github/workflows/tests-template.yml | 4 +++- .github/workflows/tests_amd64.yaml | 4 +++- .github/workflows/tests_windows.yml | 8 ++++++-- .go-version | 1 + 5 files changed, 16 insertions(+), 5 deletions(-) create mode 100644 .go-version diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index f2c3576ff..46cafab6c 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -9,9 +9,11 @@ jobs: runs-on: ${{ matrix.os }} steps: - uses: actions/checkout@v4 + - id: goversion + run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@v4 with: - go-version: "1.21.1" + go-version: ${{ steps.goversion.outputs.goversion }} - run: | make gofail-enable make test-failpoint diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 568a6b01c..0306ca066 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -22,9 +22,11 @@ jobs: runs-on: ${{ fromJson(inputs.runs-on) }} steps: - uses: actions/checkout@v4 + - id: goversion + run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@v4 with: - go-version: "1.21.1" + go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt - env: TARGET: ${{ matrix.target }} diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml index 6b51bcab8..5e04962cc 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -10,7 +10,9 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + - id: goversion + run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@v4 with: - go-version: "1.21.1" + go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 4e45e0dad..d0c0adf13 100644 --- a/.github/workflows/tests_windows.yml +++ 
b/.github/workflows/tests_windows.yml @@ -18,9 +18,11 @@ jobs: runs-on: windows-latest steps: - uses: actions/checkout@v4 + - id: goversion + run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@v4 with: - go-version: "1.21.1" + go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt - env: TARGET: ${{ matrix.target }} @@ -43,7 +45,9 @@ jobs: runs-on: windows-latest steps: - uses: actions/checkout@v4 + - id: goversion + run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@v4 with: - go-version: "1.21.1" + go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage diff --git a/.go-version b/.go-version new file mode 100644 index 000000000..284497740 --- /dev/null +++ b/.go-version @@ -0,0 +1 @@ +1.21.1 From e6a186d631c547f674036b65d291d98546e65615 Mon Sep 17 00:00:00 2001 From: arjunmalhotra1 Date: Mon, 13 Nov 2023 19:53:47 -0600 Subject: [PATCH 154/439] etcd: upgrade go version from 1.21.4 To keep etcd projects up to date with the latest patch releases & incorporate the latest security updates. 
Signed-off-by: arjunmalhotra1 --- .go-version | 2 +- cmd/bbolt/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index 284497740..20a1265cf 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.21.1 +1.21.4 diff --git a/cmd/bbolt/README.md b/cmd/bbolt/README.md index 813c9feed..acd416c3d 100644 --- a/cmd/bbolt/README.md +++ b/cmd/bbolt/README.md @@ -72,7 +72,7 @@ ```bash $bbolt version bbolt version: 1.3.7 - Go Version: go1.21.1 + Go Version: go1.21.4 Go OS/Arch: darwin/arm64 ``` From a7cb1eeffefaf432150ccd2028d5459d57bad41e Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Wed, 15 Nov 2023 10:58:09 -0800 Subject: [PATCH 155/439] github wokflow: use large runner for race tests Remove the resource limits from `GOGC`, and set the GitHub workflow to run on a large runners for race tests (ubuntu 8 cores for amd64 and coverage tests; actuated 8 cores for arm64). Signed-off-by: Ivan Valdes --- .github/workflows/tests-template.yml | 22 ++++++++-------------- .github/workflows/tests_amd64.yaml | 11 +++++++++-- .github/workflows/tests_arm64.yaml | 7 ++++++- 3 files changed, 23 insertions(+), 17 deletions(-) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 0306ca066..401f6659b 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -6,7 +6,11 @@ on: runs-on: required: false type: string - default: "['ubuntu-latest']" + default: ubuntu-latest + targets: + required: false + type: string + default: "['linux-unit-test-1-cpu','linux-unit-test-2-cpu','linux-unit-test-4-cpu']" permissions: read-all jobs: @@ -14,12 +18,8 @@ jobs: strategy: fail-fast: false matrix: - target: - - linux-unit-test-1-cpu - - linux-unit-test-2-cpu - - linux-unit-test-4-cpu - - linux-unit-test-4-cpu-race - runs-on: ${{ fromJson(inputs.runs-on) }} + target: ${{ fromJSON(inputs.targets) }} + runs-on: ${{ inputs.runs-on }} steps: - uses: actions/checkout@v4 - id: 
goversion @@ -42,13 +42,7 @@ jobs: CPU=4 make test ;; linux-unit-test-4-cpu-race) - # XXX: By default, the Github Action runner will terminate the process - # if it has high resource usage. Try to use GOGC to limit memory and - # cpu usage here to prevent unexpected terminating. It can be replaced - # with GOMEMLIMIT=2048MiB if the go-version is updated to >=1.19.x. - # - # REF: https://github.com/actions/runner-images/issues/6680#issuecomment-1335778010 - GOGC=30 CPU=4 ENABLE_RACE=true make test + CPU=4 ENABLE_RACE=true make test ;; *) echo "Failed to find target" diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml index 5e04962cc..c81774650 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -4,10 +4,17 @@ on: [push, pull_request] jobs: test-linux-amd64: uses: ./.github/workflows/tests-template.yml + test-linux-amd64-race: + uses: ./.github/workflows/tests-template.yml + with: + runs-on: ubuntu-latest-8-cores + targets: "['linux-unit-test-4-cpu-race']" coverage: - needs: ["test-linux-amd64"] - runs-on: ubuntu-latest + needs: + - test-linux-amd64 + - test-linux-amd64-race + runs-on: ubuntu-latest-8-cores steps: - uses: actions/checkout@v4 - id: goversion diff --git a/.github/workflows/tests_arm64.yaml b/.github/workflows/tests_arm64.yaml index 79db51792..bb1cbb82e 100644 --- a/.github/workflows/tests_arm64.yaml +++ b/.github/workflows/tests_arm64.yaml @@ -5,4 +5,9 @@ jobs: test-linux-arm64: uses: ./.github/workflows/tests-template.yml with: - runs-on: "['actuated-arm64-4cpu-16gb']" + runs-on: actuated-arm64-4cpu-16gb + test-linux-arm64-race: + uses: ./.github/workflows/tests-template.yml + with: + runs-on: actuated-arm64-8cpu-16gb + targets: "['linux-unit-test-4-cpu-race']" From 607a0c3d08d799a5bb9f1faac859f55de8c12c82 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Mon, 20 Nov 2023 19:58:00 -0800 Subject: [PATCH 156/439] github workflow: only run arm64 for etcd-io/bbolt Skip running arm64 
jobs that use actuated's runners for commits done in forked repositories. These jobs will not run and eventually fail in GitHub actions. Signed-off-by: Ivan Valdes --- .github/workflows/tests-template.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 401f6659b..72aa26199 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -15,6 +15,8 @@ permissions: read-all jobs: test-linux: + # this is to prevent arm64 jobs from running at forked projects + if: ${{ github.repository == 'etcd-io/bbolt' || startsWith(inputs.runs-on, 'ubuntu') }} strategy: fail-fast: false matrix: From b5aaaeba54becb92f7e6928ef01e11fb6f27f022 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Thu, 9 Nov 2023 15:30:31 +0100 Subject: [PATCH 157/439] fix: refactor findingLastBucket into a func Signed-off-by: Mustafa Elbehery --- cmd/bbolt/main.go | 42 ++++++++++++++++++++++-------------------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 7ba8c8a7f..ea284539e 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -939,19 +939,13 @@ func (cmd *keysCommand) Run(args ...string) error { // Print keys. return db.View(func(tx *bolt.Tx) error { // Find bucket. - var lastbucket *bolt.Bucket = tx.Bucket([]byte(buckets[0])) - if lastbucket == nil { - return berrors.ErrBucketNotFound - } - for _, bucket := range buckets[1:] { - lastbucket = lastbucket.Bucket([]byte(bucket)) - if lastbucket == nil { - return berrors.ErrBucketNotFound - } + lastBucket, err := findLastBucket(tx, buckets) + if err != nil { + return err } // Iterate over each key. - return lastbucket.ForEach(func(key, _ []byte) error { + return lastBucket.ForEach(func(key, _ []byte) error { return writelnBytes(cmd.Stdout, key, *optionsFormat) }) }) @@ -1030,19 +1024,13 @@ func (cmd *getCommand) Run(args ...string) error { // Print value. 
return db.View(func(tx *bolt.Tx) error { // Find bucket. - var lastbucket *bolt.Bucket = tx.Bucket([]byte(buckets[0])) - if lastbucket == nil { - return berrors.ErrBucketNotFound - } - for _, bucket := range buckets[1:] { - lastbucket = lastbucket.Bucket([]byte(bucket)) - if lastbucket == nil { - return berrors.ErrBucketNotFound - } + lastBucket, err := findLastBucket(tx, buckets) + if err != nil { + return err } // Find value for given key. - val := lastbucket.Get(key) + val := lastBucket.Get(key) if val == nil { return fmt.Errorf("Error %w for key: %q hex: \"%x\"", ErrKeyNotFound, key, string(key)) } @@ -1718,3 +1706,17 @@ func (_ cmdKvStringer) ValueToString(value []byte) string { func CmdKvStringer() bolt.KVStringer { return cmdKvStringer{} } + +func findLastBucket(tx *bolt.Tx, bucketNames []string) (*bolt.Bucket, error) { + var lastbucket *bolt.Bucket = tx.Bucket([]byte(bucketNames[0])) + if lastbucket == nil { + return nil, berrors.ErrBucketNotFound + } + for _, bucket := range bucketNames[1:] { + lastbucket = lastbucket.Bucket([]byte(bucket)) + if lastbucket == nil { + return nil, berrors.ErrBucketNotFound + } + } + return lastbucket, nil +} From 445a07b6cb837d1196f9fceb7655900031ca093f Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Sun, 26 Nov 2023 21:03:26 +0800 Subject: [PATCH 158/439] tx.go: introduce failpoint before fdatasync It can be used for power failure cases. Signed-off-by: Wei Fu --- tx.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tx.go b/tx.go index 8b86030c1..4e1f454f5 100644 --- a/tx.go +++ b/tx.go @@ -475,6 +475,7 @@ func (tx *Tx) write() error { // Ignore file sync if flag is set on DB. 
if !tx.db.NoSync || common.IgnoreNoSync { + // gofail: var beforeSyncDataPages struct{} if err := fdatasync(tx.db); err != nil { return err } @@ -512,6 +513,7 @@ func (tx *Tx) writeMeta() error { return err } if !tx.db.NoSync || common.IgnoreNoSync { + // gofail: var beforeSyncMetaPage struct{} if err := fdatasync(tx.db); err != nil { return err } From 7d93161fbd712457c9aadc47c4017dd9a8b804a1 Mon Sep 17 00:00:00 2001 From: caojiamingalan Date: Tue, 23 May 2023 20:33:24 -0500 Subject: [PATCH 159/439] add logger Signed-off-by: caojiamingalan --- db.go | 13 ++++++ logger.go | 130 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ tx.go | 4 ++ 3 files changed, 147 insertions(+) create mode 100644 logger.go diff --git a/db.go b/db.go index b8487573e..2c5c694b3 100644 --- a/db.go +++ b/db.go @@ -116,6 +116,9 @@ type DB struct { // Supported only on Unix via mlock/munlock syscalls. Mlock bool + // Logger is the logger used for bbolt. + Logger Logger + path string openFile func(string, int, os.FileMode) (*os.File, error) file *os.File @@ -194,6 +197,12 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { db.MaxBatchDelay = common.DefaultMaxBatchDelay db.AllocSize = common.DefaultAllocSize + if options.Logger == nil { + db.Logger = getDiscardLogger() + } else { + db.Logger = options.Logger + } + flag := os.O_RDWR if options.ReadOnly { flag = os.O_RDONLY @@ -293,6 +302,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } // Mark the database as opened and return. + db.Logger.Debug("bbolt opened successfully") return db, nil } @@ -1268,6 +1278,9 @@ type Options struct { // It prevents potential page faults, however // used memory can't be reclaimed. (UNIX only) Mlock bool + + // Logger is the logger used for bbolt. + Logger Logger } // DefaultOptions represent the options used if nil options are passed into Open(). 
diff --git a/logger.go b/logger.go new file mode 100644 index 000000000..29418aba8 --- /dev/null +++ b/logger.go @@ -0,0 +1,130 @@ +// Copyright 2015 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bbolt + +import ( + "fmt" + "io" + "log" + "os" + "sync" +) + +type Logger interface { + Debug(v ...interface{}) + Debugf(format string, v ...interface{}) + + Error(v ...interface{}) + Errorf(format string, v ...interface{}) + + Info(v ...interface{}) + Infof(format string, v ...interface{}) + + Warning(v ...interface{}) + Warningf(format string, v ...interface{}) + + Fatal(v ...interface{}) + Fatalf(format string, v ...interface{}) + + Panic(v ...interface{}) + Panicf(format string, v ...interface{}) +} + +func getDiscardLogger() Logger { + bboltLoggerMu.Lock() + defer bboltLoggerMu.Unlock() + return discardLogger +} + +var ( + discardLogger = &DefaultLogger{Logger: log.New(io.Discard, "", 0)} + bboltLoggerMu sync.Mutex +) + +const ( + calldepth = 2 +) + +// DefaultLogger is a default implementation of the Logger interface. 
+type DefaultLogger struct { + *log.Logger + debug bool +} + +func (l *DefaultLogger) EnableTimestamps() { + l.SetFlags(l.Flags() | log.Ldate | log.Ltime) +} + +func (l *DefaultLogger) EnableDebug() { + l.debug = true +} + +func (l *DefaultLogger) Debug(v ...interface{}) { + if l.debug { + _ = l.Output(calldepth, header("DEBUG", fmt.Sprint(v...))) + } +} + +func (l *DefaultLogger) Debugf(format string, v ...interface{}) { + if l.debug { + _ = l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...))) + } +} + +func (l *DefaultLogger) Info(v ...interface{}) { + _ = l.Output(calldepth, header("INFO", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Infof(format string, v ...interface{}) { + _ = l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Error(v ...interface{}) { + _ = l.Output(calldepth, header("ERROR", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Errorf(format string, v ...interface{}) { + _ = l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Warning(v ...interface{}) { + _ = l.Output(calldepth, header("WARN", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Warningf(format string, v ...interface{}) { + _ = l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Fatal(v ...interface{}) { + _ = l.Output(calldepth, header("FATAL", fmt.Sprint(v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Fatalf(format string, v ...interface{}) { + _ = l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Panic(v ...interface{}) { + l.Logger.Panic(v...) +} + +func (l *DefaultLogger) Panicf(format string, v ...interface{}) { + l.Logger.Panicf(format, v...) 
+} + +func header(lvl, msg string) string { + return fmt.Sprintf("%s: %s", lvl, msg) +} diff --git a/tx.go b/tx.go index 4e1f454f5..a71dc06d4 100644 --- a/tx.go +++ b/tx.go @@ -32,6 +32,7 @@ type Tx struct { pages map[common.Pgid]*common.Page stats TxStats commitHandlers []func() + Logger Logger // WriteFlag specifies the flag for write-related methods like WriteTo(). // Tx opens the database file with the specified flag to copy the data. @@ -56,6 +57,8 @@ func (tx *Tx) init(db *DB) { tx.root.InBucket = &common.InBucket{} *tx.root.InBucket = *(tx.meta.RootBucket()) + tx.Logger = db.Logger + // Increment the transaction id and add a page cache for writable transactions. if tx.writable { tx.pages = make(map[common.Pgid]*common.Page) @@ -142,6 +145,7 @@ func (tx *Tx) OnCommit(fn func()) { // called on a read-only transaction. func (tx *Tx) Commit() error { common.Assert(!tx.managed, "managed tx commit not allowed") + tx.Logger.Debugf("committing tx") if tx.db == nil { return berrors.ErrTxClosed } else if !tx.writable { From 5ce15f0a8ac73512c5eac76a45d4d9c4a1fc192f Mon Sep 17 00:00:00 2001 From: caojiamingalan Date: Thu, 25 May 2023 21:37:14 -0500 Subject: [PATCH 160/439] add tx.ID() in log. Change the position of log. If we call tx.ID() before testing the nullity of tx.db, it may panic. Signed-off-by: caojiamingalan --- logger.go | 17 ----------------- tx.go | 2 +- 2 files changed, 1 insertion(+), 18 deletions(-) diff --git a/logger.go b/logger.go index 29418aba8..d27b26081 100644 --- a/logger.go +++ b/logger.go @@ -1,17 +1,3 @@ -// Copyright 2015 The etcd Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package bbolt import ( @@ -43,14 +29,11 @@ type Logger interface { } func getDiscardLogger() Logger { - bboltLoggerMu.Lock() - defer bboltLoggerMu.Unlock() return discardLogger } var ( discardLogger = &DefaultLogger{Logger: log.New(io.Discard, "", 0)} - bboltLoggerMu sync.Mutex ) const ( diff --git a/tx.go b/tx.go index a71dc06d4..f6cebf720 100644 --- a/tx.go +++ b/tx.go @@ -145,13 +145,13 @@ func (tx *Tx) OnCommit(fn func()) { // called on a read-only transaction. func (tx *Tx) Commit() error { common.Assert(!tx.managed, "managed tx commit not allowed") - tx.Logger.Debugf("committing tx") if tx.db == nil { return berrors.ErrTxClosed } else if !tx.writable { return berrors.ErrTxNotWritable } + tx.Logger.Infof("Committing transaction %d", tx.ID()) // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. // Rebalance nodes which have had deletions. 
From 9e8668ed74f8e41cd117b6da52054164ca014933 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Mon, 27 Nov 2023 17:22:15 +0100 Subject: [PATCH 161/439] logger: add logger to bbolt Signed-off-by: Mustafa Elbehery --- logger.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/logger.go b/logger.go index d27b26081..fb250894a 100644 --- a/logger.go +++ b/logger.go @@ -1,11 +1,11 @@ package bbolt +// See https://github.com/etcd-io/raft/blob/main/logger.go import ( "fmt" "io" "log" "os" - "sync" ) type Logger interface { From bc9ab2f7462805ed402371222aa8ce478cbe483b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Dec 2023 14:28:54 +0000 Subject: [PATCH 162/439] build(deps): Bump golang.org/x/sys from 0.14.0 to 0.15.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.14.0 to 0.15.0. - [Commits](https://github.com/golang/sys/compare/v0.14.0...v0.15.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 26f2d2f52..e597edd70 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/stretchr/testify v1.8.4 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.5.0 - golang.org/x/sys v0.14.0 + golang.org/x/sys v0.15.0 ) require ( diff --git a/go.sum b/go.sum index 204bc990c..6634d6b66 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 9dd229558c4a044e200cf3b7ff1daf17d1970fc5 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Tue, 5 Dec 2023 19:34:16 -0800 Subject: [PATCH 163/439] Update go version to 1.21.5 Signed-off-by: Ivan Valdes --- .go-version | 2 +- cmd/bbolt/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index 20a1265cf..ce2dd5357 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.21.4 +1.21.5 diff --git a/cmd/bbolt/README.md b/cmd/bbolt/README.md index acd416c3d..047b4977a 100644 --- a/cmd/bbolt/README.md +++ b/cmd/bbolt/README.md @@ -72,7 +72,7 @@ ```bash $bbolt 
version bbolt version: 1.3.7 - Go Version: go1.21.4 + Go Version: go1.21.5 Go OS/Arch: darwin/arm64 ``` From 173f1cfa7403b43bddb7ec3b8f94d8e66beb069f Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 17 Nov 2023 20:20:04 +0000 Subject: [PATCH 164/439] update doc to clarify the behavior when removing key/value pair during iteration Signed-off-by: Benjamin Wang --- README.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.md b/README.md index 254f02785..06eec0e9c 100644 --- a/README.md +++ b/README.md @@ -445,6 +445,11 @@ When you have iterated to the end of the cursor then `Next()` will return a before calling `Next()` or `Prev()`. If you do not seek to a position then these functions will return a `nil` key. +If you remove key/value pairs during iteration, the cursor may automatically +move to the next position if present in current node each time removing a key. +When you call `c.Next()` after removing a key, it may skip one key/value pair. +Refer to [pull/611](https://github.com/etcd-io/bbolt/pull/611) to get more detailed info. + During iteration, if the key is non-`nil` but the value is `nil`, that means the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to access the sub-bucket. @@ -869,6 +874,12 @@ Here are a few things to note when evaluating and using Bolt: to grow. However, it's important to note that deleting large chunks of data will not allow you to reclaim that space on disk. +* Removing key/values pairs in a bucket during iteration on the bucket using + cursor may not work properly. Each time when removing a key/value pair, the + cursor may automatically move to the next position if present. When users + call `c.Next()` after removing a key, it may skip one key/value pair. + Refer to https://github.com/etcd-io/bbolt/pull/611 for more detailed info. + For more information on page allocation, [see this comment][page-allocation]. 
[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 From b05bec2fe3f4de1650daf868e507e686d86770a5 Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Tue, 5 Dec 2023 21:29:07 +0800 Subject: [PATCH 165/439] tests/*: introduce go-dmflakey This commit is to add go-binding for go-dmflakey. It's used to simulate powerfailure with common filesystems. Signed-off-by: Wei Fu --- .github/workflows/failpoint_test.yaml | 1 + Makefile | 3 + tests/dmflakey/dmflakey.go | 322 ++++++++++++++++++++++++++ tests/dmflakey/dmflakey_test.go | 200 ++++++++++++++++ tests/dmflakey/dmsetup.go | 105 +++++++++ tests/dmflakey/loopback.go | 91 ++++++++ 6 files changed, 722 insertions(+) create mode 100644 tests/dmflakey/dmflakey.go create mode 100644 tests/dmflakey/dmflakey_test.go create mode 100644 tests/dmflakey/dmsetup.go create mode 100644 tests/dmflakey/loopback.go diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index 46cafab6c..f5b79a9d4 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -15,5 +15,6 @@ jobs: with: go-version: ${{ steps.goversion.outputs.goversion }} - run: | + sudo make root-test make gofail-enable make test-failpoint diff --git a/Makefile b/Makefile index 9ee21cae4..bab533445 100644 --- a/Makefile +++ b/Makefile @@ -81,3 +81,6 @@ test-failpoint: @echo "[failpoint] array freelist test" BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint +.PHONY: root-test # run tests that require root +root-test: + go test -v ${TESTFLAGS} ./tests/dmflakey -test.root diff --git a/tests/dmflakey/dmflakey.go b/tests/dmflakey/dmflakey.go new file mode 100644 index 000000000..d9bdf99a0 --- /dev/null +++ b/tests/dmflakey/dmflakey.go @@ -0,0 +1,322 @@ +//go:build linux + +package dmflakey + +import ( + "errors" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +type featCfg struct { + // 
SyncFS attempts to synchronize filesystem before inject failure. + syncFS bool + // interval is used to determine the up time for feature. + // + // For AllowWrites, it means that the device is available for `interval` seconds. + // For Other features, the device exhibits unreliable behaviour for + // `interval` seconds. + interval time.Duration +} + +// Default values. +const ( + // defaultImgSize is the default size for filesystem image. + defaultImgSize int64 = 1024 * 1024 * 1024 * 10 // 10 GiB + // defaultInterval is the default interval for the up time of feature. + defaultInterval = 2 * time.Minute +) + +// defaultFeatCfg is the default setting for flakey feature. +var defaultFeatCfg = featCfg{interval: defaultInterval} + +// FeatOpt is used to configure failure feature. +type FeatOpt func(*featCfg) + +// WithIntervalFeatOpt updates the up time for the feature. +func WithIntervalFeatOpt(interval time.Duration) FeatOpt { + return func(cfg *featCfg) { + cfg.interval = interval + } +} + +// WithSyncFSFeatOpt is to determine if the caller wants to synchronize +// filesystem before inject failure. +func WithSyncFSFeatOpt(syncFS bool) FeatOpt { + return func(cfg *featCfg) { + cfg.syncFS = syncFS + } +} + +// Flakey is to inject failure into device. +type Flakey interface { + // DevicePath returns the flakey device path. + DevicePath() string + + // Filesystem returns filesystem's type. + Filesystem() FSType + + // AllowWrites allows write I/O. + AllowWrites(opts ...FeatOpt) error + + // DropWrites drops all write I/O silently. + DropWrites(opts ...FeatOpt) error + + // ErrorWrites drops all write I/O and returns error. + ErrorWrites(opts ...FeatOpt) error + + // Teardown releases the flakey device. + Teardown() error +} + +// FSType represents the filesystem name. +type FSType string + +// Supported filesystems. +const ( + FSTypeEXT4 FSType = "ext4" + FSTypeXFS FSType = "xfs" +) + +// InitFlakey creates an filesystem on a loopback device and returns Flakey on it. 
+// +// The device-mapper device will be /dev/mapper/$flakeyDevice. And the filesystem +// image will be created at $dataStorePath/$flakeyDevice.img. By default, the +// device is available for 2 minutes and size is 10 GiB. +func InitFlakey(flakeyDevice, dataStorePath string, fsType FSType) (_ Flakey, retErr error) { + imgPath := filepath.Join(dataStorePath, fmt.Sprintf("%s.img", flakeyDevice)) + if err := createEmptyFSImage(imgPath, fsType); err != nil { + return nil, err + } + defer func() { + if retErr != nil { + os.RemoveAll(imgPath) + } + }() + + loopDevice, err := attachToLoopDevice(imgPath) + if err != nil { + return nil, err + } + defer func() { + if retErr != nil { + _ = detachLoopDevice(loopDevice) + } + }() + + imgSize, err := getBlkSize(loopDevice) + if err != nil { + return nil, err + } + + if err := newFlakeyDevice(flakeyDevice, loopDevice, defaultInterval); err != nil { + return nil, err + } + + return &flakey{ + fsType: fsType, + imgPath: imgPath, + imgSize: imgSize, + + loopDevice: loopDevice, + flakeyDevice: flakeyDevice, + }, nil +} + +type flakey struct { + fsType FSType + imgPath string + imgSize int64 + + loopDevice string + flakeyDevice string +} + +// DevicePath returns the flakey device path. +func (f *flakey) DevicePath() string { + return fmt.Sprintf("/dev/mapper/%s", f.flakeyDevice) +} + +// Filesystem returns filesystem's type. +func (f *flakey) Filesystem() FSType { + return f.fsType +} + +// AllowWrites allows write I/O. +func (f *flakey) AllowWrites(opts ...FeatOpt) error { + var o = defaultFeatCfg + for _, opt := range opts { + opt(&o) + } + + // NOTE: Table parameters + // + // 0 imgSize flakey [ []] + // + // Mandatory parameters: + // + // : Full pathname to the underlying block-device, or a "major:minor" device-number. + // : Starting sector within the device. + // : Number of seconds device is available. + // : Number of seconds device returns errors. 
+ // + // Optional: + // + // If no feature parameters are present, during the periods of unreliability, all I/O returns errors. + // + // For AllowWrites, the device will handle data correctly in `interval` seconds. + // + // REF: https://docs.kernel.org/admin-guide/device-mapper/dm-flakey.html. + table := fmt.Sprintf("0 %d flakey %s 0 %d 0", + f.imgSize, f.loopDevice, int(o.interval.Seconds())) + + return reloadFlakeyDevice(f.flakeyDevice, o.syncFS, table) +} + +// DropWrites drops all write I/O silently. +func (f *flakey) DropWrites(opts ...FeatOpt) error { + var o = defaultFeatCfg + for _, opt := range opts { + opt(&o) + } + + // NOTE: Table parameters + // + // 0 imgSize flakey [ []] + // + // Mandatory parameters: + // + // : Full pathname to the underlying block-device, or a "major:minor" device-number. + // : Starting sector within the device. + // : Number of seconds device is available. + // : Number of seconds device returns errors. + // + // Optional: + // + // : How many arguments (length of ) + // + // For DropWrites, + // + // num_features: 1 (there is only one argument) + // feature_arguments: drop_writes + // + // The Device will drop all the writes into disk in `interval` seconds. + // Read I/O is handled correctly. + // + // For example, the application calls fsync, all the dirty pages will + // be flushed into disk ideally. But during DropWrites, device will + // ignore all the data and return successfully. It can be used to + // simulate data-loss after power failure. + // + // REF: https://docs.kernel.org/admin-guide/device-mapper/dm-flakey.html. + table := fmt.Sprintf("0 %d flakey %s 0 0 %d 1 drop_writes", + f.imgSize, f.loopDevice, int(o.interval.Seconds())) + + return reloadFlakeyDevice(f.flakeyDevice, o.syncFS, table) +} + +// ErrorWrites drops all write I/O and returns error. 
+func (f *flakey) ErrorWrites(opts ...FeatOpt) error { + var o = defaultFeatCfg + for _, opt := range opts { + opt(&o) + } + + // NOTE: Table parameters + // + // 0 imgSize flakey [ []] + // + // Mandatory parameters: + // + // : Full pathname to the underlying block-device, or a "major:minor" device-number. + // : Starting sector within the device. + // : Number of seconds device is available. + // : Number of seconds device returns errors. + // + // Optional: + // + // : How many arguments (length of ) + // + // For ErrorWrites, + // + // num_features: 1 (there is only one argument) + // feature_arguments: error_writes + // + // The Device will drop all the writes into disk in `interval` seconds + // and return failure to caller. Read I/O is handled correctly. + // + // REF: https://docs.kernel.org/admin-guide/device-mapper/dm-flakey.html. + table := fmt.Sprintf("0 %d flakey %s 0 0 %d 1 error_writes", + f.imgSize, f.loopDevice, int(o.interval.Seconds())) + + return reloadFlakeyDevice(f.flakeyDevice, o.syncFS, table) +} + +// Teardown releases the flakey device. +func (f *flakey) Teardown() error { + if err := deleteFlakeyDevice(f.flakeyDevice); err != nil { + if !strings.Contains(err.Error(), "No such device or address") { + return err + } + } + if err := detachLoopDevice(f.loopDevice); err != nil { + if !errors.Is(err, unix.ENXIO) { + return err + } + } + return os.RemoveAll(f.imgPath) +} + +// createEmptyFSImage creates empty filesystem on dataStorePath folder with +// default size - 10 GiB. 
+func createEmptyFSImage(imgPath string, fsType FSType) error { + if err := validateFSType(fsType); err != nil { + return err + } + + mkfs, err := exec.LookPath(fmt.Sprintf("mkfs.%s", fsType)) + if err != nil { + return fmt.Errorf("failed to ensure mkfs.%s: %w", fsType, err) + } + + if _, err := os.Stat(imgPath); err == nil { + return fmt.Errorf("failed to create image because %s already exists", imgPath) + } + + f, err := os.Create(imgPath) + if err != nil { + return fmt.Errorf("failed to create image %s: %w", imgPath, err) + } + + if err = func() error { + defer f.Close() + + return f.Truncate(defaultImgSize) + }(); err != nil { + return fmt.Errorf("failed to truncate image %s with %v bytes: %w", + imgPath, defaultImgSize, err) + } + + output, err := exec.Command(mkfs, imgPath).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to mkfs.%s on %s (out: %s): %w", + fsType, imgPath, string(output), err) + } + return nil +} + +// validateFSType validates the fs type input. +func validateFSType(fsType FSType) error { + switch fsType { + case FSTypeEXT4, FSTypeXFS: + return nil + default: + return fmt.Errorf("unsupported filesystem %s", fsType) + } +} diff --git a/tests/dmflakey/dmflakey_test.go b/tests/dmflakey/dmflakey_test.go new file mode 100644 index 000000000..2cc1f8ea6 --- /dev/null +++ b/tests/dmflakey/dmflakey_test.go @@ -0,0 +1,200 @@ +//go:build linux + +package dmflakey + +import ( + "errors" + "flag" + "fmt" + "os" + "os/exec" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/sys/unix" +) + +var enableRoot bool + +func init() { + flag.BoolVar(&enableRoot, "test.root", false, "enable tests that require root") +} + +func TestMain(m *testing.M) { + flag.Parse() + requiresRoot() + os.Exit(m.Run()) +} + +func requiresRoot() { + if !enableRoot { + fmt.Fprintln(os.Stderr, "Skip tests that require root") + os.Exit(0) + } + + if os.Getuid() != 0 { + 
fmt.Fprintln(os.Stderr, "This test must be run as root.") + os.Exit(1) + } +} + +func TestBasic(t *testing.T) { + tmpDir := t.TempDir() + + flakey, err := InitFlakey("go-dmflakey", tmpDir, FSTypeEXT4) + require.NoError(t, err, "init flakey") + defer func() { + assert.NoError(t, flakey.Teardown()) + }() + + target := filepath.Join(tmpDir, "root") + require.NoError(t, os.MkdirAll(target, 0600)) + + require.NoError(t, mount(target, flakey.DevicePath(), "")) + defer func() { + assert.NoError(t, unmount(target)) + }() + + file := filepath.Join(target, "test") + assert.NoError(t, writeFile(file, []byte("hello, world"), 0600, true)) + + assert.NoError(t, unmount(target)) + + assert.NoError(t, flakey.Teardown()) +} + +func TestDropWrites(t *testing.T) { + flakey, root := initFlakey(t, FSTypeEXT4) + + // commit=1000 is to delay commit triggered by writeback thread + require.NoError(t, mount(root, flakey.DevicePath(), "commit=1000")) + + // ensure testdir/f1 is synced. + target := filepath.Join(root, "testdir") + require.NoError(t, os.MkdirAll(target, 0600)) + + f1 := filepath.Join(target, "f1") + assert.NoError(t, writeFile(f1, []byte("hello, world from f1"), 0600, false)) + require.NoError(t, syncfs(f1)) + + // testdir/f2 is created but without fsync + f2 := filepath.Join(target, "f2") + assert.NoError(t, writeFile(f2, []byte("hello, world from f2"), 0600, false)) + + // simulate power failure + assert.NoError(t, flakey.DropWrites()) + assert.NoError(t, unmount(root)) + assert.NoError(t, flakey.AllowWrites()) + require.NoError(t, mount(root, flakey.DevicePath(), "")) + + data, err := os.ReadFile(f1) + assert.NoError(t, err) + assert.Equal(t, "hello, world from f1", string(data)) + + _, err = os.ReadFile(f2) + assert.True(t, errors.Is(err, os.ErrNotExist)) +} + +func TestErrorWrites(t *testing.T) { + flakey, root := initFlakey(t, FSTypeEXT4) + + // commit=1000 is to delay commit triggered by writeback thread + require.NoError(t, mount(root, flakey.DevicePath(), 
"commit=1000")) + + // inject IO failure on write + assert.NoError(t, flakey.ErrorWrites()) + + f1 := filepath.Join(root, "f1") + err := writeFile(f1, []byte("hello, world during failpoint"), 0600, true) + assert.ErrorContains(t, err, "input/output error") + + // resume + assert.NoError(t, flakey.AllowWrites()) + err = writeFile(f1, []byte("hello, world"), 0600, true) + assert.NoError(t, err) + + assert.NoError(t, unmount(root)) + require.NoError(t, mount(root, flakey.DevicePath(), "")) + + data, err := os.ReadFile(f1) + assert.NoError(t, err) + assert.Equal(t, "hello, world", string(data)) +} + +func initFlakey(t *testing.T, fsType FSType) (_ Flakey, root string) { + tmpDir := t.TempDir() + + target := filepath.Join(tmpDir, "root") + require.NoError(t, os.MkdirAll(target, 0600)) + + flakey, err := InitFlakey("go-dmflakey", tmpDir, FSTypeEXT4) + require.NoError(t, err, "init flakey") + + t.Cleanup(func() { + assert.NoError(t, unmount(target)) + assert.NoError(t, flakey.Teardown()) + }) + return flakey, target +} + +func writeFile(name string, data []byte, perm os.FileMode, sync bool) error { + f, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + defer f.Close() + + if _, err = f.Write(data); err != nil { + return err + } + + if sync { + return f.Sync() + } + return nil +} + +func syncfs(file string) error { + f, err := os.Open(file) + if err != nil { + return fmt.Errorf("failed to open %s: %w", file, err) + } + defer f.Close() + + _, _, errno := unix.Syscall(unix.SYS_SYNCFS, uintptr(f.Fd()), 0, 0) + if errno != 0 { + return errno + } + return nil +} + +func mount(target string, devPath string, opt string) error { + args := []string{"-o", opt, devPath, target} + + output, err := exec.Command("mount", args...).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to mount (args: %v) (out: %s): %w", + args, string(output), err) + } + return nil +} + +func unmount(target string) error { + for i := 0; i < 
50; i++ { + if err := unix.Unmount(target, 0); err != nil { + switch err { + case unix.EBUSY: + time.Sleep(500 * time.Millisecond) + continue + case unix.EINVAL: + default: + return fmt.Errorf("failed to umount %s: %w", target, err) + } + } + return nil + } + return unix.EBUSY +} diff --git a/tests/dmflakey/dmsetup.go b/tests/dmflakey/dmsetup.go new file mode 100644 index 000000000..d1fe69876 --- /dev/null +++ b/tests/dmflakey/dmsetup.go @@ -0,0 +1,105 @@ +//go:build linux + +package dmflakey + +import ( + "fmt" + "os" + "os/exec" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// newFlakeyDevice creates flakey device. +// +// REF: https://docs.kernel.org/admin-guide/device-mapper/dm-flakey.html +func newFlakeyDevice(flakeyDevice, loopDevice string, interval time.Duration) error { + loopSize, err := getBlkSize(loopDevice) + if err != nil { + return fmt.Errorf("failed to get the size of the loop device %s: %w", loopDevice, err) + } + + // The flakey device will be available in interval.Seconds(). + table := fmt.Sprintf("0 %d flakey %s 0 %d 0", + loopSize, loopDevice, int(interval.Seconds())) + + args := []string{"create", flakeyDevice, "--table", table} + + output, err := exec.Command("dmsetup", args...).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to create flakey device %s with table %s (out: %s): %w", + flakeyDevice, table, string(output), err) + } + return nil +} + +// reloadFlakeyDevice reloads the flakey device with feature table. 
+func reloadFlakeyDevice(flakeyDevice string, syncFS bool, table string) (retErr error) { + args := []string{"suspend", "--nolockfs", flakeyDevice} + if syncFS { + args[1] = flakeyDevice + args = args[:len(args)-1] + } + + output, err := exec.Command("dmsetup", args...).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to suspend flakey device %s (out: %s): %w", + flakeyDevice, string(output), err) + } + + defer func() { + output, derr := exec.Command("dmsetup", "resume", flakeyDevice).CombinedOutput() + if derr != nil { + derr = fmt.Errorf("failed to resume flakey device %s (out: %s): %w", + flakeyDevice, string(output), derr) + } + + if retErr == nil { + retErr = derr + } + }() + + output, err = exec.Command("dmsetup", "load", flakeyDevice, "--table", table).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to reload flakey device %s with table (%s) (out: %s): %w", + flakeyDevice, table, string(output), err) + } + return nil +} + +// removeFlakeyDevice removes flakey device. +func deleteFlakeyDevice(flakeyDevice string) error { + output, err := exec.Command("dmsetup", "remove", flakeyDevice).CombinedOutput() + if err != nil { + return fmt.Errorf("failed to remove flakey device %s (out: %s): %w", + flakeyDevice, string(output), err) + } + return nil +} + +// getBlkSize64 gets device size in bytes (BLKGETSIZE64). +// +// REF: https://man7.org/linux/man-pages/man8/blockdev.8.html +func getBlkSize64(device string) (int64, error) { + deviceFd, err := os.Open(device) + if err != nil { + return 0, fmt.Errorf("failed to open device %s: %w", device, err) + } + defer deviceFd.Close() + + var size int64 + if _, _, err := unix.Syscall(unix.SYS_IOCTL, deviceFd.Fd(), unix.BLKGETSIZE64, uintptr(unsafe.Pointer(&size))); err != 0 { + return 0, fmt.Errorf("failed to get block size: %w", err) + } + return size, nil +} + +// getBlkSize gets size in 512-byte sectors (BLKGETSIZE64 / 512). 
+// +// REF: https://man7.org/linux/man-pages/man8/blockdev.8.html +func getBlkSize(device string) (int64, error) { + size, err := getBlkSize64(device) + return size / 512, err +} diff --git a/tests/dmflakey/loopback.go b/tests/dmflakey/loopback.go new file mode 100644 index 000000000..35e82cf8f --- /dev/null +++ b/tests/dmflakey/loopback.go @@ -0,0 +1,91 @@ +//go:build linux + +package dmflakey + +import ( + "errors" + "fmt" + "os" + "time" + + "golang.org/x/sys/unix" +) + +const ( + loopControlDevice = "/dev/loop-control" + loopDevicePattern = "/dev/loop%d" + + maxRetryToAttach = 50 +) + +// attachToLoopDevice associates free loop device with backing file. +// +// There might have race condition. It needs to retry when it runs into EBUSY. +// +// REF: https://man7.org/linux/man-pages/man4/loop.4.html +func attachToLoopDevice(backingFile string) (string, error) { + backingFd, err := os.OpenFile(backingFile, os.O_RDWR, 0) + if err != nil { + return "", fmt.Errorf("failed to open loop device's backing file %s: %w", + backingFile, err) + } + defer backingFd.Close() + + for i := 0; i < maxRetryToAttach; i++ { + loop, err := getFreeLoopDevice() + if err != nil { + return "", fmt.Errorf("failed to get free loop device: %w", err) + } + + err = func() error { + loopFd, err := os.OpenFile(loop, os.O_RDWR, 0) + if err != nil { + return err + } + defer loopFd.Close() + + return unix.IoctlSetInt(int(loopFd.Fd()), + unix.LOOP_SET_FD, int(backingFd.Fd())) + }() + if err != nil { + if errors.Is(err, unix.EBUSY) { + time.Sleep(500 * time.Millisecond) + continue + } + return "", err + } + return loop, nil + } + return "", fmt.Errorf("failed to associate free loop device with backing file %s after retry %v", + backingFile, maxRetryToAttach) +} + +// detachLoopDevice disassociates the loop device from any backing file. 
+// +// REF: https://man7.org/linux/man-pages/man4/loop.4.html +func detachLoopDevice(loopDevice string) error { + loopFd, err := os.Open(loopDevice) + if err != nil { + return fmt.Errorf("failed to open loop %s: %w", loopDevice, err) + } + defer loopFd.Close() + + return unix.IoctlSetInt(int(loopFd.Fd()), unix.LOOP_CLR_FD, 0) +} + +// getFreeLoopbackDevice allocates or finds a free loop device for use. +// +// REF: https://man7.org/linux/man-pages/man4/loop.4.html +func getFreeLoopDevice() (string, error) { + control, err := os.OpenFile(loopControlDevice, os.O_RDWR, 0) + if err != nil { + return "", fmt.Errorf("failed to open %s: %w", loopControlDevice, err) + } + + idx, err := unix.IoctlRetInt(int(control.Fd()), unix.LOOP_CTL_GET_FREE) + control.Close() + if err != nil { + return "", fmt.Errorf("failed to get free loop device number: %w", err) + } + return fmt.Sprintf(loopDevicePattern, idx), nil +} From 300e72ab8a0d3b730dbafb3b5463f00615bbfc9b Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Sat, 25 Nov 2023 17:02:28 +0800 Subject: [PATCH 166/439] tests/robustness: init with powerfailure case Add `Robustness Test` pipeline for robustness test cases. 
Signed-off-by: Wei Fu --- .github/workflows/failpoint_test.yaml | 1 - .github/workflows/robustness_test.yaml | 18 +++ Makefile | 5 +- tests/dmflakey/dmflakey_test.go | 22 +-- tests/robustness/main_test.go | 17 +++ tests/robustness/powerfailure_test.go | 194 +++++++++++++++++++++++++ tests/utils/helpers.go | 26 ++++ 7 files changed, 261 insertions(+), 22 deletions(-) create mode 100644 .github/workflows/robustness_test.yaml create mode 100644 tests/robustness/main_test.go create mode 100644 tests/robustness/powerfailure_test.go create mode 100644 tests/utils/helpers.go diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index f5b79a9d4..46cafab6c 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -15,6 +15,5 @@ jobs: with: go-version: ${{ steps.goversion.outputs.goversion }} - run: | - sudo make root-test make gofail-enable make test-failpoint diff --git a/.github/workflows/robustness_test.yaml b/.github/workflows/robustness_test.yaml new file mode 100644 index 000000000..9aca5249e --- /dev/null +++ b/.github/workflows/robustness_test.yaml @@ -0,0 +1,18 @@ +name: Robustness Test +on: [push, pull_request] +permissions: read-all +jobs: + test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - id: goversion + run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" + - uses: actions/setup-go@v4 + with: + go-version: ${{ steps.goversion.outputs.goversion }} + - run: | + make gofail-enable + # build bbolt with failpoint + go install ./cmd/bbolt + sudo -E PATH=$PATH make test-robustness diff --git a/Makefile b/Makefile index bab533445..f43b25b20 100644 --- a/Makefile +++ b/Makefile @@ -81,6 +81,7 @@ test-failpoint: @echo "[failpoint] array freelist test" BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint -.PHONY: root-test # run tests that require root -root-test: +.PHONY: test-robustness # Running robustness tests 
requires root permission +test-robustness: go test -v ${TESTFLAGS} ./tests/dmflakey -test.root + go test -v ${TESTFLAGS} ./tests/robustness -test.root diff --git a/tests/dmflakey/dmflakey_test.go b/tests/dmflakey/dmflakey_test.go index 2cc1f8ea6..41c66db8d 100644 --- a/tests/dmflakey/dmflakey_test.go +++ b/tests/dmflakey/dmflakey_test.go @@ -12,35 +12,19 @@ import ( "testing" "time" + testutils "go.etcd.io/bbolt/tests/utils" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/sys/unix" ) -var enableRoot bool - -func init() { - flag.BoolVar(&enableRoot, "test.root", false, "enable tests that require root") -} - func TestMain(m *testing.M) { flag.Parse() - requiresRoot() + testutils.RequiresRoot() os.Exit(m.Run()) } -func requiresRoot() { - if !enableRoot { - fmt.Fprintln(os.Stderr, "Skip tests that require root") - os.Exit(0) - } - - if os.Getuid() != 0 { - fmt.Fprintln(os.Stderr, "This test must be run as root.") - os.Exit(1) - } -} - func TestBasic(t *testing.T) { tmpDir := t.TempDir() diff --git a/tests/robustness/main_test.go b/tests/robustness/main_test.go new file mode 100644 index 000000000..d83f32700 --- /dev/null +++ b/tests/robustness/main_test.go @@ -0,0 +1,17 @@ +//go:build linux + +package robustness + +import ( + "flag" + "os" + "testing" + + testutils "go.etcd.io/bbolt/tests/utils" +) + +func TestMain(m *testing.M) { + flag.Parse() + testutils.RequiresRoot() + os.Exit(m.Run()) +} diff --git a/tests/robustness/powerfailure_test.go b/tests/robustness/powerfailure_test.go new file mode 100644 index 000000000..a1d0bc598 --- /dev/null +++ b/tests/robustness/powerfailure_test.go @@ -0,0 +1,194 @@ +//go:build linux + +package robustness + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "os" + "os/exec" + "path" + "path/filepath" + "strings" + "testing" + "time" + + "go.etcd.io/bbolt/tests/dmflakey" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/sys/unix" +) 
+ +// TestRestartFromPowerFailure is to test data after unexpected power failure. +func TestRestartFromPowerFailure(t *testing.T) { + flakey := initFlakeyDevice(t, t.Name(), dmflakey.FSTypeEXT4, "") + root := flakey.RootFS() + + dbPath := filepath.Join(root, "boltdb") + + args := []string{"bbolt", "bench", + "-work", // keep the database + "-path", dbPath, + "-count=1000000000", + "-batch-size=5", // separate total count into multiple truncation + } + + logPath := filepath.Join(t.TempDir(), fmt.Sprintf("%s.log", t.Name())) + logFd, err := os.Create(logPath) + require.NoError(t, err) + defer logFd.Close() + + fpURL := "127.0.0.1:12345" + + cmd := exec.Command(args[0], args[1:]...) + cmd.Stdout = logFd + cmd.Stderr = logFd + cmd.Env = append(cmd.Env, "GOFAIL_HTTP="+fpURL) + t.Logf("start %s", strings.Join(args, " ")) + require.NoError(t, cmd.Start(), "args: %v", args) + + errCh := make(chan error, 1) + go func() { + errCh <- cmd.Wait() + }() + + defer func() { + if t.Failed() { + logData, err := os.ReadFile(logPath) + assert.NoError(t, err) + t.Logf("dump log:\n: %s", string(logData)) + } + }() + + time.Sleep(time.Duration(time.Now().UnixNano()%5+1) * time.Second) + t.Logf("simulate power failure") + + activeFailpoint(t, fpURL, "beforeSyncMetaPage", "panic") + + select { + case <-time.After(10 * time.Second): + t.Error("bbolt should stop with panic in seconds") + assert.NoError(t, cmd.Process.Kill()) + case err := <-errCh: + require.Error(t, err) + } + require.NoError(t, flakey.PowerFailure("")) + + st, err := os.Stat(dbPath) + require.NoError(t, err) + t.Logf("db size: %d", st.Size()) + + t.Logf("verify data") + output, err := exec.Command("bbolt", "check", dbPath).CombinedOutput() + require.NoError(t, err, "bbolt check output: %s", string(output)) +} + +// activeFailpoint actives the failpoint by http. 
+func activeFailpoint(t *testing.T, targetUrl string, fpName, fpVal string) { + u, err := url.Parse("http://" + path.Join(targetUrl, fpName)) + require.NoError(t, err, "parse url %s", targetUrl) + + req, err := http.NewRequest("PUT", u.String(), bytes.NewBuffer([]byte(fpVal))) + require.NoError(t, err) + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + data, err := io.ReadAll(resp.Body) + require.NoError(t, err) + require.Equal(t, 204, resp.StatusCode, "response body: %s", string(data)) +} + +// FlakeyDevice extends dmflakey.Flakey interface. +type FlakeyDevice interface { + // RootFS returns root filesystem. + RootFS() string + + // PowerFailure simulates power failure with drop all the writes. + PowerFailure(mntOpt string) error + + dmflakey.Flakey +} + +// initFlakeyDevice returns FlakeyDevice instance with a given filesystem. +func initFlakeyDevice(t *testing.T, name string, fsType dmflakey.FSType, mntOpt string) FlakeyDevice { + imgDir := t.TempDir() + + flakey, err := dmflakey.InitFlakey(name, imgDir, fsType) + require.NoError(t, err, "init flakey %s", name) + t.Cleanup(func() { + assert.NoError(t, flakey.Teardown()) + }) + + rootDir := t.TempDir() + err = unix.Mount(flakey.DevicePath(), rootDir, string(fsType), 0, mntOpt) + require.NoError(t, err, "init rootfs on %s", rootDir) + + t.Cleanup(func() { assert.NoError(t, unmountAll(rootDir)) }) + + return &flakeyT{ + Flakey: flakey, + + rootDir: rootDir, + mntOpt: mntOpt, + } +} + +type flakeyT struct { + dmflakey.Flakey + + rootDir string + mntOpt string +} + +// RootFS returns root filesystem. +func (f *flakeyT) RootFS() string { + return f.rootDir +} + +// PowerFailure simulates power failure with drop all the writes. 
+func (f *flakeyT) PowerFailure(mntOpt string) error { + if err := f.DropWrites(); err != nil { + return fmt.Errorf("failed to drop_writes: %w", err) + } + + if err := unmountAll(f.rootDir); err != nil { + return fmt.Errorf("failed to unmount rootfs %s: %w", f.rootDir, err) + } + + if mntOpt == "" { + mntOpt = f.mntOpt + } + + if err := f.AllowWrites(); err != nil { + return fmt.Errorf("failed to allow_writes: %w", err) + } + + if err := unix.Mount(f.DevicePath(), f.rootDir, string(f.Filesystem()), 0, mntOpt); err != nil { + return fmt.Errorf("failed to mount rootfs %s: %w", f.rootDir, err) + } + return nil +} + +func unmountAll(target string) error { + for i := 0; i < 50; i++ { + if err := unix.Unmount(target, 0); err != nil { + switch err { + case unix.EBUSY: + time.Sleep(500 * time.Millisecond) + continue + case unix.EINVAL: + return nil + default: + return fmt.Errorf("failed to umount %s: %w", target, err) + } + } + continue + } + return fmt.Errorf("failed to umount %s: %w", target, unix.EBUSY) +} diff --git a/tests/utils/helpers.go b/tests/utils/helpers.go new file mode 100644 index 000000000..f9c87f6e5 --- /dev/null +++ b/tests/utils/helpers.go @@ -0,0 +1,26 @@ +package utils + +import ( + "flag" + "fmt" + "os" +) + +var enableRoot bool + +func init() { + flag.BoolVar(&enableRoot, "test.root", false, "enable tests that require root") +} + +// RequiresRoot requires root and the test.root flag has been set. +func RequiresRoot() { + if !enableRoot { + fmt.Fprintln(os.Stderr, "Skip tests that require root") + os.Exit(0) + } + + if os.Getuid() != 0 { + fmt.Fprintln(os.Stderr, "This test must be run as root.") + os.Exit(1) + } +} From 5ff325a6a3dda494964f780f6a8d3f0fe487d84c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Dec 2023 14:46:21 +0000 Subject: [PATCH 167/439] build(deps): Bump actions/setup-go from 4 to 5 Bumps [actions/setup-go](https://github.com/actions/setup-go) from 4 to 5. 
- [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/robustness_test.yaml | 2 +- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_amd64.yaml | 2 +- .github/workflows/tests_windows.yml | 4 ++-- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index 46cafab6c..ea7e44425 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -11,7 +11,7 @@ jobs: - uses: actions/checkout@v4 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: | diff --git a/.github/workflows/robustness_test.yaml b/.github/workflows/robustness_test.yaml index 9aca5249e..b1980ac65 100644 --- a/.github/workflows/robustness_test.yaml +++ b/.github/workflows/robustness_test.yaml @@ -8,7 +8,7 @@ jobs: - uses: actions/checkout@v4 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: | diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 72aa26199..74d6aa1d4 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@v4 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt diff 
--git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml index c81774650..c4cc01ac9 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -19,7 +19,7 @@ jobs: - uses: actions/checkout@v4 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index d0c0adf13..de6a8569b 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@v4 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt @@ -47,7 +47,7 @@ jobs: - uses: actions/checkout@v4 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@v4 + - uses: actions/setup-go@v5 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage From 82cf0eda8ce45a6ec9c06afd212a99a61a5b082d Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Thu, 7 Dec 2023 14:56:53 -0800 Subject: [PATCH 168/439] github workflow: enable workflow approval Allow to run GitHub workflows for Pull Requests that have the ok-to-test label. 
Signed-off-by: Ivan Valdes --- .github/workflows/gh-workflow-approve.yaml | 41 ++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 .github/workflows/gh-workflow-approve.yaml diff --git a/.github/workflows/gh-workflow-approve.yaml b/.github/workflows/gh-workflow-approve.yaml new file mode 100644 index 000000000..dcdff6dbc --- /dev/null +++ b/.github/workflows/gh-workflow-approve.yaml @@ -0,0 +1,41 @@ +--- +name: Approve GitHub Workflows + +on: + pull_request_target: + types: + - labeled + - synchronize + branches: + - main + +jobs: + approve: + name: Approve ok-to-test + if: contains(github.event.pull_request.labels.*.name, 'ok-to-test') + runs-on: ubuntu-latest + permissions: + actions: write + steps: + - name: Update PR + uses: actions/github-script@d7906e4ad0b1822421a7e6a35d5ca353c962f410 # v6.4.1 + continue-on-error: true + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + debug: ${{ secrets.ACTIONS_RUNNER_DEBUG == 'true' }} + script: | + const result = await github.rest.actions.listWorkflowRunsForRepo({ + owner: context.repo.owner, + repo: context.repo.repo, + event: "pull_request", + status: "action_required", + head_sha: context.payload.pull_request.head.sha, + per_page: 100 + }); + for (var run of result.data.workflow_runs) { + await github.rest.actions.approveWorkflowRun({ + owner: context.repo.owner, + repo: context.repo.repo, + run_id: run.id + }); + } From 324df9cd264b7f2c4504ea61758a4ea89373e4ce Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Tue, 12 Dec 2023 19:44:03 +0800 Subject: [PATCH 169/439] *: introduce failpoint beforeBucketPut Signed-off-by: Wei Fu --- bucket.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bucket.go b/bucket.go index f9f23812f..4aa04bea6 100644 --- a/bucket.go +++ b/bucket.go @@ -336,6 +336,8 @@ func (b *Bucket) Put(key []byte, value []byte) error { return errors.ErrIncompatibleValue } + // gofail: var beforeBucketPut struct{} + // Insert into node. 
// Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent // it from being marked as leaking, and accordingly cannot be allocated on stack. From 1b080787075bb3ec06a414c754da9ef66bda4071 Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Tue, 12 Dec 2023 21:40:08 +0800 Subject: [PATCH 170/439] tests/robustness: add issue72 reproducer Signed-off-by: Wei Fu --- tests/failpoint/db_failpoint_test.go | 115 +++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) diff --git a/tests/failpoint/db_failpoint_test.go b/tests/failpoint/db_failpoint_test.go index c1da5b583..3255ba21c 100644 --- a/tests/failpoint/db_failpoint_test.go +++ b/tests/failpoint/db_failpoint_test.go @@ -155,3 +155,118 @@ func TestFailpoint_LackOfDiskSpace(t *testing.T) { require.Error(t, err) require.ErrorIs(t, err, errors.ErrTxClosed) } + +// TestIssue72 reproduces issue 72. +// +// When bbolt is processing a `Put` invocation, the key might be concurrently +// updated by the application which calls the `Put` API (although it shouldn't). +// It might lead to a situation that bbolt use an old key to find a proper +// position to insert the key/value pair, but actually inserts a new key. +// Eventually it might break the rule that all keys should be sorted. In a +// worse case, it might cause page elements to point to already freed pages. 
+// +// REF: https://github.com/etcd-io/bbolt/issues/72 +func TestIssue72(t *testing.T) { + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 4096}) + + bucketName := []byte(t.Name()) + err := db.Update(func(tx *bolt.Tx) error { + _, txerr := tx.CreateBucket(bucketName) + return txerr + }) + require.NoError(t, err) + + // The layout is like: + // + // +--+--+--+ + // +------+1 |3 |10+---+ + // | +-++--+--+ | + // | | | + // | | | + // +v-+--+ +v-+--+ +-v+--+--+ + // |1 |2 | |3 |4 | |10|11|12| + // +--+--+ +--+--+ +--+--+--+ + // + err = db.Update(func(tx *bolt.Tx) error { + bk := tx.Bucket(bucketName) + + for _, id := range []int{1, 2, 3, 4, 10, 11, 12} { + if txerr := bk.Put(idToBytes(id), make([]byte, 1000)); txerr != nil { + return txerr + } + } + return nil + }) + require.NoError(t, err) + + require.NoError(t, gofail.Enable("beforeBucketPut", `sleep(5000)`)) + + // +--+--+--+ + // +------+1 |3 |1 +---+ + // | +-++--+--+ | + // | | | + // | | | + // +v-+--+ +v-+--+ +-v+--+--+--+ + // |1 |2 | |3 |4 | |1 |10|11|12| + // +--+--+ +--+--+ +--+--+--+--+ + // + key := idToBytes(13) + updatedKey := idToBytes(1) + err = db.Update(func(tx *bolt.Tx) error { + bk := tx.Bucket(bucketName) + + go func() { + time.Sleep(3 * time.Second) + copy(key, updatedKey) + }() + return bk.Put(key, make([]byte, 100)) + }) + require.NoError(t, err) + + require.NoError(t, gofail.Disable("beforeBucketPut")) + + // bbolt inserts 100 into last branch page. Since there are two `1` + // keys in branch, spill operation will update first `1` pointer and + // then last one won't be updated and continues to point to freed page. 
+ // + // + // +--+--+--+ + // +---------------+1 |3 |1 +---------+ + // | +--++-+--+ | + // | | | + // | | | + // | +--+--+ +v-+--+ +-----v-----+ + // | |1 |2 | |3 |4 | |freed page | + // | +--+--+ +--+--+ +-----------+ + // | + // +v-+--+--+--+---+ + // |1 |10|11|12|100| + // +--+--+--+--+---+ + err = db.Update(func(tx *bolt.Tx) error { + return tx.Bucket(bucketName).Put(idToBytes(100), make([]byte, 100)) + }) + require.NoError(t, err) + + defer func() { + if r := recover(); r != nil { + t.Logf("panic info:\n %v", r) + } + }() + + // Add more keys to ensure branch node to spill. + err = db.Update(func(tx *bolt.Tx) error { + bk := tx.Bucket(bucketName) + + for _, id := range []int{101, 102, 103, 104, 105} { + if txerr := bk.Put(idToBytes(id), make([]byte, 1000)); txerr != nil { + return txerr + } + } + return nil + }) + require.NoError(t, err) +} + +func idToBytes(id int) []byte { + return []byte(fmt.Sprintf("%010d", id)) +} From a05ec68aaafcf77e22b9da83bd4069cad8cba39d Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Tue, 12 Dec 2023 21:53:05 +0800 Subject: [PATCH 171/439] bucket: copy key before Put Application might change key value after seeking and before real put. This unexpected behaviour could corrupt database. When users file issue, maintainers doesn't know application behaviour. It could be caused by data race. This patch is to prevent such case and save maintainers' time. Signed-off-by: Wei Fu --- bucket.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/bucket.go b/bucket.go index 4aa04bea6..8e1a98ad6 100644 --- a/bucket.go +++ b/bucket.go @@ -327,21 +327,22 @@ func (b *Bucket) Put(key []byte, value []byte) error { return errors.ErrValueTooLarge } + // Insert into node. + // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent + // it from being marked as leaking, and accordingly cannot be allocated on stack. + newKey := cloneBytes(key) + // Move cursor to correct position. 
c := b.Cursor() - k, _, flags := c.seek(key) + k, _, flags := c.seek(newKey) // Return an error if there is an existing key with a bucket value. - if bytes.Equal(key, k) && (flags&common.BucketLeafFlag) != 0 { + if bytes.Equal(newKey, k) && (flags&common.BucketLeafFlag) != 0 { return errors.ErrIncompatibleValue } // gofail: var beforeBucketPut struct{} - // Insert into node. - // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent - // it from being marked as leaking, and accordingly cannot be allocated on stack. - newKey := cloneBytes(key) c.node().put(newKey, newKey, value, 0, 0) return nil From a9cde4d6c81495c3cb7a29c2e8807da05b4a7a77 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Sat, 16 Dec 2023 13:34:33 -0800 Subject: [PATCH 172/439] github actions: add workflow telemetry to collect metrics Signed-off-by: Ivan Valdes --- .github/workflows/tests-template.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 74d6aa1d4..4d4c2e739 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -23,6 +23,8 @@ jobs: target: ${{ fromJSON(inputs.targets) }} runs-on: ${{ inputs.runs-on }} steps: + - name: Collect Workflow Telemetry + uses: catchpoint/workflow-telemetry-action@v1 - uses: actions/checkout@v4 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" From 62d80260de277168b2d59779bfb03c6ebfda08f4 Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Sun, 17 Dec 2023 21:47:49 +0800 Subject: [PATCH 173/439] *: copy key before comparing during CreateBucket It's follow-up of #637. 
Signed-off-by: Wei Fu --- bucket.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/bucket.go b/bucket.go index 8e1a98ad6..ba9d319e4 100644 --- a/bucket.go +++ b/bucket.go @@ -154,12 +154,17 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { return nil, errors.ErrBucketNameRequired } + // Insert into node. + // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent + // it from being marked as leaking, and accordingly cannot be allocated on stack. + newKey := cloneBytes(key) + // Move cursor to correct position. c := b.Cursor() - k, _, flags := c.seek(key) + k, _, flags := c.seek(newKey) // Return an error if there is an existing key. - if bytes.Equal(key, k) { + if bytes.Equal(newKey, k) { if (flags & common.BucketLeafFlag) != 0 { return nil, errors.ErrBucketExists } @@ -174,10 +179,6 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { } var value = bucket.write() - // Insert into node. - // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent - // it from being marked as leaking, and accordingly cannot be allocated on stack. - newKey := cloneBytes(key) c.node().put(newKey, newKey, value, 0, common.BucketLeafFlag) // Since subbuckets are not allowed on inline buckets, we need to From ac2258bb131224611a0a67f5b3e88f8aa0056b22 Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Sun, 17 Dec 2023 22:00:18 +0800 Subject: [PATCH 174/439] copy key before seeking in CreateBucketIfNotExists It's follow-up of #637. Signed-off-by: Wei Fu --- bucket.go | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/bucket.go b/bucket.go index ba9d319e4..0d60a35e3 100644 --- a/bucket.go +++ b/bucket.go @@ -201,22 +201,27 @@ func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { return nil, errors.ErrBucketNameRequired } + // Insert into node. 
+ // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent + // it from being marked as leaking, and accordingly cannot be allocated on stack. + newKey := cloneBytes(key) + if b.buckets != nil { - if child := b.buckets[string(key)]; child != nil { + if child := b.buckets[string(newKey)]; child != nil { return child, nil } } // Move cursor to correct position. c := b.Cursor() - k, v, flags := c.seek(key) + k, v, flags := c.seek(newKey) // Return an error if there is an existing non-bucket key. - if bytes.Equal(key, k) { + if bytes.Equal(newKey, k) { if (flags & common.BucketLeafFlag) != 0 { var child = b.openBucket(v) if b.buckets != nil { - b.buckets[string(key)] = child + b.buckets[string(newKey)] = child } return child, nil @@ -232,10 +237,6 @@ func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { } var value = bucket.write() - // Insert into node. - // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent - // it from being marked as leaking, and accordingly cannot be allocated on stack. - newKey := cloneBytes(key) c.node().put(newKey, newKey, value, 0, common.BucketLeafFlag) // Since subbuckets are not allowed on inline buckets, we need to From 70aa6e651d9d0eb08603d8a8c96b0260fef5e4a7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 14:30:46 +0000 Subject: [PATCH 175/439] build(deps): Bump actions/github-script from 6.4.1 to 7.0.1 Bumps [actions/github-script](https://github.com/actions/github-script) from 6.4.1 to 7.0.1. - [Release notes](https://github.com/actions/github-script/releases) - [Commits](https://github.com/actions/github-script/compare/d7906e4ad0b1822421a7e6a35d5ca353c962f410...60a0d83039c74a4aee543508d2ffcb1c3799cdea) --- updated-dependencies: - dependency-name: actions/github-script dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/gh-workflow-approve.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/gh-workflow-approve.yaml b/.github/workflows/gh-workflow-approve.yaml index dcdff6dbc..fa1fdd12d 100644 --- a/.github/workflows/gh-workflow-approve.yaml +++ b/.github/workflows/gh-workflow-approve.yaml @@ -18,7 +18,7 @@ jobs: actions: write steps: - name: Update PR - uses: actions/github-script@d7906e4ad0b1822421a7e6a35d5ca353c962f410 # v6.4.1 + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 continue-on-error: true with: github-token: ${{ secrets.GITHUB_TOKEN }} From e4ab08e221901c98349255c26f2262ef80bfac92 Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Mon, 18 Dec 2023 23:44:18 +0800 Subject: [PATCH 176/439] CHANGELOG: update for v1.3.9 - [bucket: allow to allocate key on stack in Put()](https://github.com/etcd-io/bbolt/pull/550) - [bucket.Put: copy key before seek](https://github.com/etcd-io/bbolt/pull/637) - [copy key before comparing during CreateBucket or CreateBucketIfNotExists](https://github.com/etcd-io/bbolt/pull/641) Signed-off-by: Wei Fu --- CHANGELOG/CHANGELOG-1.3.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG/CHANGELOG-1.3.md b/CHANGELOG/CHANGELOG-1.3.md index 2f7c96e75..ee1eedec9 100644 --- a/CHANGELOG/CHANGELOG-1.3.md +++ b/CHANGELOG/CHANGELOG-1.3.md @@ -1,5 +1,10 @@ Note that we start to track changes starting from v1.3.7. +## v1.3.9(TBD) + +### BoltDB +- [Clone the key before operating data in bucket against the key](https://github.com/etcd-io/bbolt/pull/639) +
## v1.3.8(2023-10-26) From 052862c2612b6c686c8535aecea8bff6da39828b Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Thu, 21 Dec 2023 23:24:18 +0800 Subject: [PATCH 177/439] OWNERS: add fuweid as reviewer Signed-off-by: Wei Fu --- OWNERS | 2 ++ 1 file changed, 2 insertions(+) diff --git a/OWNERS b/OWNERS index 412b742ed..7f8eb86ab 100644 --- a/OWNERS +++ b/OWNERS @@ -6,3 +6,5 @@ approvers: - serathius # Marek Siarkowicz - ptabor # Piotr Tabor - spzala # Sahdev Zala +reviewers: + - fuweid # Wei Fu From 23c97d55e67b31641a01ca8c6ff0b4c1a10d4cba Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 27 Dec 2023 15:32:54 +0000 Subject: [PATCH 178/439] refactor the implementation of check Signed-off-by: Benjamin Wang --- db.go | 2 +- tx_check.go | 62 ++++++++++++++++++++++++++++------------------------- 2 files changed, 34 insertions(+), 30 deletions(-) diff --git a/db.go b/db.go index 2c5c694b3..39fce45ea 100644 --- a/db.go +++ b/db.go @@ -1206,7 +1206,7 @@ func (db *DB) freepages() []common.Pgid { panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) } }() - tx.checkBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech) + tx.recursivelyCheckBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech) close(ech) // TODO: If check bucket reported any corruptions (ech) we shouldn't proceed to freeing the pages. diff --git a/tx_check.go b/tx_check.go index cc08013e8..ed8840ec0 100644 --- a/tx_check.go +++ b/tx_check.go @@ -57,7 +57,7 @@ func (tx *Tx) check(kvStringer KVStringer, ch chan error) { } // Recursively check buckets. - tx.checkBucket(&tx.root, reachable, freed, kvStringer, ch) + tx.recursivelyCheckBucket(&tx.root, reachable, freed, kvStringer, ch) // Ensure all pages below high water mark are either reachable or freed. 
for i := common.Pgid(0); i < tx.meta.Pgid(); i++ { @@ -71,7 +71,7 @@ func (tx *Tx) check(kvStringer KVStringer, ch chan error) { close(ch) } -func (tx *Tx) checkBucket(b *Bucket, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, +func (tx *Tx) recursivelyCheckBucket(b *Bucket, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, kvStringer KVStringer, ch chan error) { // Ignore inline buckets. if b.RootPage() == 0 { @@ -80,52 +80,56 @@ func (tx *Tx) checkBucket(b *Bucket, reachable map[common.Pgid]*common.Page, fre // Check every page used by this bucket. b.tx.forEachPage(b.RootPage(), func(p *common.Page, _ int, stack []common.Pgid) { - if p.Id() > tx.meta.Pgid() { - ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.Id()), int(b.tx.meta.Pgid()), stack) - } - - // Ensure each page is only referenced once. - for i := common.Pgid(0); i <= common.Pgid(p.Overflow()); i++ { - var id = p.Id() + i - if _, ok := reachable[id]; ok { - ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack) - } - reachable[id] = p - } - - // We should only encounter un-freed leaf and branch pages. - if freed[p.Id()] { - ch <- fmt.Errorf("page %d: reachable freed", int(p.Id())) - } else if !p.IsBranchPage() && !p.IsLeafPage() { - ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.Id()), p.Typ(), stack) - } + verifyPageReachable(p, tx.meta.Pgid(), stack, reachable, freed, ch) }) - tx.recursivelyCheckPages(b.RootPage(), kvStringer.KeyToString, ch) + tx.recursivelyCheckPageKeyOrder(b.RootPage(), kvStringer.KeyToString, ch) // Check each bucket within this bucket. 
_ = b.ForEachBucket(func(k []byte) error { if child := b.Bucket(k); child != nil { - tx.checkBucket(child, reachable, freed, kvStringer, ch) + tx.recursivelyCheckBucket(child, reachable, freed, kvStringer, ch) } return nil }) } -// recursivelyCheckPages confirms database consistency with respect to b-tree +func verifyPageReachable(p *common.Page, hwm common.Pgid, stack []common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, ch chan error) { + if p.Id() > hwm { + ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.Id()), int(hwm), stack) + } + + // Ensure each page is only referenced once. + for i := common.Pgid(0); i <= common.Pgid(p.Overflow()); i++ { + var id = p.Id() + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.Id()] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.Id())) + } else if !p.IsBranchPage() && !p.IsLeafPage() { + ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.Id()), p.Typ(), stack) + } +} + +// recursivelyCheckPageKeyOrder verifies database consistency with respect to b-tree // key order constraints: // - keys on pages must be sorted // - keys on children pages are between 2 consecutive keys on the parent's branch page). 
-func (tx *Tx) recursivelyCheckPages(pgId common.Pgid, keyToString func([]byte) string, ch chan error) { - tx.recursivelyCheckPagesInternal(pgId, nil, nil, nil, keyToString, ch) +func (tx *Tx) recursivelyCheckPageKeyOrder(pgId common.Pgid, keyToString func([]byte) string, ch chan error) { + tx.recursivelyCheckPageKeyOrderInternal(pgId, nil, nil, nil, keyToString, ch) } -// recursivelyCheckPagesInternal verifies that all keys in the subtree rooted at `pgid` are: +// recursivelyCheckPageKeyOrderInternal verifies that all keys in the subtree rooted at `pgid` are: // - >=`minKeyClosed` (can be nil) // - <`maxKeyOpen` (can be nil) // - Are in right ordering relationship to their parents. // `pagesStack` is expected to contain IDs of pages from the tree root to `pgid` for the clean debugging message. -func (tx *Tx) recursivelyCheckPagesInternal( +func (tx *Tx) recursivelyCheckPageKeyOrderInternal( pgId common.Pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []common.Pgid, keyToString func([]byte) string, ch chan error) (maxKeyInSubtree []byte) { @@ -143,7 +147,7 @@ func (tx *Tx) recursivelyCheckPagesInternal( if i < len(p.BranchPageElements())-1 { maxKey = p.BranchPageElement(uint16(i + 1)).Key() } - maxKeyInSubtree = tx.recursivelyCheckPagesInternal(elem.Pgid(), elem.Key(), maxKey, pagesStack, keyToString, ch) + maxKeyInSubtree = tx.recursivelyCheckPageKeyOrderInternal(elem.Pgid(), elem.Key(), maxKey, pagesStack, keyToString, ch) runningMin = maxKeyInSubtree } return maxKeyInSubtree From 7cab486ea2ac6874a2b6f60f6da857f25a9c834f Mon Sep 17 00:00:00 2001 From: James Blair Date: Thu, 28 Dec 2023 09:09:23 +1300 Subject: [PATCH 179/439] Reduce arm64 runner memory allocation to 8GB. Based on memory consumption statistics for historic runs. 
Signed-off-by: James Blair --- .github/workflows/tests_arm64.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests_arm64.yaml b/.github/workflows/tests_arm64.yaml index bb1cbb82e..9ab60789f 100644 --- a/.github/workflows/tests_arm64.yaml +++ b/.github/workflows/tests_arm64.yaml @@ -5,9 +5,9 @@ jobs: test-linux-arm64: uses: ./.github/workflows/tests-template.yml with: - runs-on: actuated-arm64-4cpu-16gb + runs-on: actuated-arm64-4cpu-8gb test-linux-arm64-race: uses: ./.github/workflows/tests-template.yml with: - runs-on: actuated-arm64-8cpu-16gb + runs-on: actuated-arm64-8cpu-8gb targets: "['linux-unit-test-4-cpu-race']" From 53450a9f2be4a9427c6fb1d987a4d139543f39aa Mon Sep 17 00:00:00 2001 From: James Blair Date: Thu, 28 Dec 2023 09:12:31 +1300 Subject: [PATCH 180/439] Fix yamllint errors in workflow files. Signed-off-by: James Blair --- .github/workflows/failpoint_test.yaml | 1 + .github/workflows/tests_amd64.yaml | 1 + .github/workflows/tests_arm64.yaml | 1 + .github/workflows/tests_windows.yml | 5 +++-- 4 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index ea7e44425..4de9c5008 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -1,3 +1,4 @@ +--- name: Failpoint test on: [push, pull_request] permissions: read-all diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml index c4cc01ac9..c174565ca 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -1,3 +1,4 @@ +--- name: Tests AMD64 permissions: read-all on: [push, pull_request] diff --git a/.github/workflows/tests_arm64.yaml b/.github/workflows/tests_arm64.yaml index 9ab60789f..6498c0c0f 100644 --- a/.github/workflows/tests_arm64.yaml +++ b/.github/workflows/tests_arm64.yaml @@ -1,3 +1,4 @@ +--- name: Tests ARM64 permissions: read-all on: [push, pull_request] diff 
--git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index de6a8569b..cf9b73f9d 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -1,3 +1,4 @@ +--- name: Tests on: [push, pull_request] jobs: @@ -14,7 +15,7 @@ jobs: # # ThreadSanitizer failed to allocate 0x000200000000 (8589934592) bytes at 0x0400c0000000 (error code: 1455) # - #- windows-amd64-unit-test-4-cpu-race + # - windows-amd64-unit-test-4-cpu-race runs-on: windows-latest steps: - uses: actions/checkout@v4 @@ -41,7 +42,7 @@ jobs: uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 coverage: - needs: ["test-windows" ] + needs: ["test-windows"] runs-on: windows-latest steps: - uses: actions/checkout@v4 From 2cd6213e4aef8b1b06175ac36c690fd4a96c6caf Mon Sep 17 00:00:00 2001 From: James Blair Date: Thu, 28 Dec 2023 09:14:03 +1300 Subject: [PATCH 181/439] Remove workflow telemetry to reduce noise. Signed-off-by: James Blair --- .github/workflows/tests-template.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 4d4c2e739..74d6aa1d4 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -23,8 +23,6 @@ jobs: target: ${{ fromJSON(inputs.targets) }} runs-on: ${{ inputs.runs-on }} steps: - - name: Collect Workflow Telemetry - uses: catchpoint/workflow-telemetry-action@v1 - uses: actions/checkout@v4 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" From 4c7075efe6290fdf65bb9af2d5d9a80942f379be Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 7 Dec 2023 16:03:05 +0000 Subject: [PATCH 182/439] add log messages Signed-off-by: Benjamin Wang --- bucket.go | 60 +++++++++++++++++++++++++--- db.go | 114 ++++++++++++++++++++++++++++++++++++++++++------------ tx.go | 48 +++++++++++++++++------ 3 files changed, 180 insertions(+), 42 deletions(-) diff --git a/bucket.go 
b/bucket.go index 0d60a35e3..d9b384a2a 100644 --- a/bucket.go +++ b/bucket.go @@ -145,7 +145,16 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // CreateBucket creates a new bucket at the given key and returns the new bucket. // Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { +func (b *Bucket) CreateBucket(key []byte) (rb *Bucket, err error) { + lg := b.tx.db.Logger() + lg.Debugf("Creating bucket %q", string(key)) + defer func() { + if err != nil { + lg.Errorf("Creating bucket %q failed: %v", string(key), err) + } else { + lg.Debugf("Creating bucket %q successfully", string(key)) + } + }() if b.tx.db == nil { return nil, errors.ErrTxClosed } else if !b.tx.writable { @@ -192,7 +201,17 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. // Returns an error if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { +func (b *Bucket) CreateBucketIfNotExists(key []byte) (rb *Bucket, err error) { + lg := b.tx.db.Logger() + lg.Debugf("Creating bucket if not exist %q", string(key)) + defer func() { + if err != nil { + lg.Errorf("Creating bucket if not exist %q failed: %v", string(key), err) + } else { + lg.Debugf("Creating bucket if not exist %q successfully", string(key)) + } + }() + if b.tx.db == nil { return nil, errors.ErrTxClosed } else if !b.tx.writable { @@ -249,7 +268,17 @@ func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { // DeleteBucket deletes a bucket at the given key. // Returns an error if the bucket does not exist, or if the key represents a non-bucket value. 
-func (b *Bucket) DeleteBucket(key []byte) error { +func (b *Bucket) DeleteBucket(key []byte) (err error) { + lg := b.tx.db.Logger() + lg.Debugf("Deleting bucket %q", string(key)) + defer func() { + if err != nil { + lg.Errorf("Deleting bucket %q failed: %v", string(key), err) + } else { + lg.Debugf("Deleting bucket %q successfully", string(key)) + } + }() + if b.tx.db == nil { return errors.ErrTxClosed } else if !b.Writable() { @@ -269,7 +298,7 @@ func (b *Bucket) DeleteBucket(key []byte) error { // Recursively delete all child buckets. child := b.Bucket(key) - err := child.ForEachBucket(func(k []byte) error { + err = child.ForEachBucket(func(k []byte) error { if err := child.DeleteBucket(k); err != nil { return fmt.Errorf("delete bucket: %s", err) } @@ -316,7 +345,16 @@ func (b *Bucket) Get(key []byte) []byte { // If the key exist then its previous value will be overwritten. // Supplied value must remain valid for the life of the transaction. // Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. -func (b *Bucket) Put(key []byte, value []byte) error { +func (b *Bucket) Put(key []byte, value []byte) (err error) { + lg := b.tx.db.Logger() + lg.Debugf("Putting key %q", string(key)) + defer func() { + if err != nil { + lg.Errorf("Putting key %q failed: %v", string(key), err) + } else { + lg.Debugf("Putting key %q successfully", string(key)) + } + }() if b.tx.db == nil { return errors.ErrTxClosed } else if !b.Writable() { @@ -353,7 +391,17 @@ func (b *Bucket) Put(key []byte, value []byte) error { // Delete removes a key from the bucket. // If the key does not exist then nothing is done and a nil error is returned. // Returns an error if the bucket was created from a read-only transaction. 
-func (b *Bucket) Delete(key []byte) error { +func (b *Bucket) Delete(key []byte) (err error) { + lg := b.tx.db.Logger() + lg.Debugf("Deleting key %q", string(key)) + defer func() { + if err != nil { + lg.Errorf("Deleting key %q failed: %v", string(key), err) + } else { + lg.Debugf("Deleting key %q successfully", string(key)) + } + }() + if b.tx.db == nil { return errors.ErrTxClosed } else if !b.Writable() { diff --git a/db.go b/db.go index 39fce45ea..db9dbafed 100644 --- a/db.go +++ b/db.go @@ -116,8 +116,7 @@ type DB struct { // Supported only on Unix via mlock/munlock syscalls. Mlock bool - // Logger is the logger used for bbolt. - Logger Logger + logger Logger path string openFile func(string, int, os.FileMode) (*os.File, error) @@ -176,10 +175,11 @@ func (db *DB) String() string { // If the file does not exist then it will be created automatically with a given file mode. // Passing in nil options will cause Bolt to open the database with the default options. // Note: For read/write transactions, ensure the owner has write permission on the created/opened database file, e.g. 0600 -func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - db := &DB{ +func Open(path string, mode os.FileMode, options *Options) (db *DB, err error) { + db = &DB{ opened: true, } + // Set default options if no options are provided. 
if options == nil { options = DefaultOptions @@ -198,11 +198,21 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { db.AllocSize = common.DefaultAllocSize if options.Logger == nil { - db.Logger = getDiscardLogger() + db.logger = getDiscardLogger() } else { - db.Logger = options.Logger + db.logger = options.Logger } + lg := db.Logger() + lg.Infof("Opening db file (%s) with mode %x and with options: %s", path, mode, options) + defer func() { + if err != nil { + lg.Errorf("Opening bbolt db (%s) failed: %v", path, err) + } else { + lg.Infof("Opening bbolt db (%s) successfully", path) + } + }() + flag := os.O_RDWR if options.ReadOnly { flag = os.O_RDONLY @@ -219,9 +229,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } // Open data file and separate sync handler for metadata writes. - var err error if db.file, err = db.openFile(path, flag, mode); err != nil { _ = db.close() + lg.Errorf("failed to open db file (%s): %v", path, err) return nil, err } db.path = db.file.Name() @@ -233,8 +243,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // if !options.ReadOnly. // The database file is locked using the shared lock (more than one process may // hold a lock at the same time) otherwise (options.ReadOnly is set). - if err := flock(db, !db.readOnly, options.Timeout); err != nil { + if err = flock(db, !db.readOnly, options.Timeout); err != nil { _ = db.close() + lg.Errorf("failed to lock db file (%s), readonly: %t, error: %v", path, db.readOnly, err) return nil, err } @@ -247,23 +258,24 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } // Initialize the database if it doesn't exist. 
- if info, err := db.file.Stat(); err != nil { + if info, statErr := db.file.Stat(); statErr != nil { _ = db.close() - return nil, err + lg.Errorf("failed to get db file's stats (%s): %v", path, err) + return nil, statErr } else if info.Size() == 0 { // Initialize new files with meta pages. - if err := db.init(); err != nil { + if err = db.init(); err != nil { // clean up file descriptor on initialization fail _ = db.close() + lg.Errorf("failed to initialize db file (%s): %v", path, err) return nil, err } } else { // try to get the page size from the metadata pages - if pgSize, err := db.getPageSize(); err == nil { - db.pageSize = pgSize - } else { + if db.pageSize, err = db.getPageSize(); err != nil { _ = db.close() - return nil, berrors.ErrInvalid + lg.Errorf("failed to get page size from db file (%s): %v", path, err) + return nil, err } } @@ -275,8 +287,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } // Memory map the data file. - if err := db.mmap(options.InitialMmapSize); err != nil { + if err = db.mmap(options.InitialMmapSize); err != nil { _ = db.close() + lg.Errorf("failed to map db file (%s): %v", path, err) return nil, err } @@ -291,18 +304,18 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // Flush freelist when transitioning from no sync to sync so // NoFreelistSync unaware boltdb can open the db later. if !db.NoFreelistSync && !db.hasSyncedFreelist() { - tx, err := db.Begin(true) + tx, txErr := db.Begin(true) if tx != nil { - err = tx.Commit() + txErr = tx.Commit() } - if err != nil { + if txErr != nil { + lg.Errorf("starting readwrite transaction failed: %v", txErr) _ = db.close() - return nil, err + return nil, txErr } } // Mark the database as opened and return. 
- db.Logger.Debug("bbolt opened successfully") return db, nil } @@ -435,9 +448,13 @@ func (db *DB) mmap(minsz int) (err error) { db.mmaplock.Lock() defer db.mmaplock.Unlock() + lg := db.Logger() + // Ensure the size is at least the minimum size. - fileSize, err := db.fileSize() + var fileSize int + fileSize, err = db.fileSize() if err != nil { + lg.Errorf("getting file size failed: %w", err) return err } var size = fileSize @@ -446,6 +463,7 @@ func (db *DB) mmap(minsz int) (err error) { } size, err = db.mmapSize(size) if err != nil { + lg.Errorf("getting map size failed: %w", err) return err } @@ -470,6 +488,7 @@ func (db *DB) mmap(minsz int) (err error) { // gofail: var mapError string // return errors.New(mapError) if err = mmap(db, size); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] mmap failed, size: %d, error: %v", runtime.GOOS, runtime.GOARCH, size, err) return err } @@ -500,6 +519,7 @@ func (db *DB) mmap(minsz int) (err error) { err0 := db.meta0.Validate() err1 := db.meta1.Validate() if err0 != nil && err1 != nil { + lg.Errorf("both meta pages are invalid, meta0: %v, meta1: %v", err0, err1) return err0 } @@ -522,6 +542,7 @@ func (db *DB) munmap() error { // gofail: var unmapError string // return errors.New(unmapError) if err := munmap(db); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] munmap failed, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, db.datasz, err) return fmt.Errorf("unmap error: " + err.Error()) } @@ -569,6 +590,7 @@ func (db *DB) munlock(fileSize int) error { // gofail: var munlockError string // return errors.New(munlockError) if err := munlock(db, fileSize); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] munlock failed, fileSize: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, fileSize, db.datasz, err) return fmt.Errorf("munlock error: " + err.Error()) } return nil @@ -578,6 +600,7 @@ func (db *DB) mlock(fileSize int) error { // gofail: var mlockError string // return errors.New(mlockError) 
if err := mlock(db, fileSize); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] mlock failed, fileSize: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, fileSize, db.datasz, err) return fmt.Errorf("mlock error: " + err.Error()) } return nil @@ -628,9 +651,11 @@ func (db *DB) init() error { // Write the buffer to our data file. if _, err := db.ops.writeAt(buf, 0); err != nil { + db.Logger().Errorf("writeAt failed: %w", err) return err } if err := fdatasync(db); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err) return err } @@ -713,13 +738,29 @@ func (db *DB) close() error { // // IMPORTANT: You must close read-only transactions after you are finished or // else the database will not reclaim old pages. -func (db *DB) Begin(writable bool) (*Tx, error) { +func (db *DB) Begin(writable bool) (t *Tx, err error) { + db.Logger().Debugf("Starting a new transaction [writable: %t]", writable) + defer func() { + if err != nil { + db.Logger().Errorf("Starting a new transaction [writable: %t] failed: %v", writable, err) + } else { + db.Logger().Debugf("Starting a new transaction [writable: %t] successfully", writable) + } + }() + if writable { return db.beginRWTx() } return db.beginTx() } +func (db *DB) Logger() Logger { + if db == nil || db.logger == nil { + return getDiscardLogger() + } + return db.logger +} + func (db *DB) beginTx() (*Tx, error) { // Lock the meta pages while we initialize the transaction. We obtain // the meta lock before the mmap lock because that's the order that the @@ -1053,7 +1094,18 @@ func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { // // This is not necessary under normal operation, however, if you use NoSync // then it allows you to force the database file to sync against the disk. 
-func (db *DB) Sync() error { return fdatasync(db) } +func (db *DB) Sync() (err error) { + db.Logger().Debug("Syncing bbolt db (%s)", db.path) + defer func() { + if err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] syncing bbolt db (%s) failed: %v", runtime.GOOS, runtime.GOARCH, db.path, err) + } else { + db.Logger().Debugf("Syncing bbolt db (%s) successfully", db.path) + } + }() + + return fdatasync(db) +} // Stats retrieves ongoing performance stats for the database. // This is only updated when a transaction closes. @@ -1142,8 +1194,10 @@ func (db *DB) allocate(txid common.Txid, count int) (*common.Page, error) { // grow grows the size of the database to the given sz. func (db *DB) grow(sz int) error { // Ignore if the new size is less than available file size. + lg := db.Logger() fileSize, err := db.fileSize() if err != nil { + lg.Errorf("getting file size failed: %w", err) return err } if sz <= fileSize { @@ -1165,10 +1219,12 @@ func (db *DB) grow(sz int) error { // gofail: var resizeFileError string // return errors.New(resizeFileError) if err := db.file.Truncate(int64(sz)); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] truncating file failed, size: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, sz, db.datasz, err) return fmt.Errorf("file resize error: %s", err) } } if err := db.file.Sync(); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] syncing file failed, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, db.datasz, err) return fmt.Errorf("file sync error: %s", err) } if db.Mlock { @@ -1283,6 +1339,16 @@ type Options struct { Logger Logger } +func (o *Options) String() string { + if o == nil { + return "{}" + } + + return fmt.Sprintf("{Timeout: %s, NoGrowSync: %t, NoFreelistSync: %t, PreLoadFreelist: %t, FreelistType: %s, ReadOnly: %t, MmapFlags: %x, InitialMmapSize: %d, PageSize: %d, NoSync: %t, OpenFile: %p, Mlock: %t, Logger: %p}", + o.Timeout, o.NoGrowSync, o.NoFreelistSync, o.PreLoadFreelist, o.FreelistType, 
o.ReadOnly, o.MmapFlags, o.InitialMmapSize, o.PageSize, o.NoSync, o.OpenFile, o.Mlock, o.Logger) + +} + // DefaultOptions represent the options used if nil options are passed into Open(). // No timeout is used which will cause Bolt to wait indefinitely for a lock. var DefaultOptions = &Options{ diff --git a/tx.go b/tx.go index f6cebf720..8e624e7b2 100644 --- a/tx.go +++ b/tx.go @@ -5,6 +5,7 @@ import ( "fmt" "io" "os" + "runtime" "sort" "strings" "sync/atomic" @@ -32,7 +33,6 @@ type Tx struct { pages map[common.Pgid]*common.Page stats TxStats commitHandlers []func() - Logger Logger // WriteFlag specifies the flag for write-related methods like WriteTo(). // Tx opens the database file with the specified flag to copy the data. @@ -57,8 +57,6 @@ func (tx *Tx) init(db *DB) { tx.root.InBucket = &common.InBucket{} *tx.root.InBucket = *(tx.meta.RootBucket()) - tx.Logger = db.Logger - // Increment the transaction id and add a page cache for writable transactions. if tx.writable { tx.pages = make(map[common.Pgid]*common.Page) @@ -68,6 +66,9 @@ func (tx *Tx) init(db *DB) { // ID returns the transaction id. func (tx *Tx) ID() int { + if tx == nil || tx.meta == nil { + return -1 + } return int(tx.meta.Txid()) } @@ -143,7 +144,18 @@ func (tx *Tx) OnCommit(fn func()) { // Commit writes all changes to disk, updates the meta page and closes the transaction. // Returns an error if a disk write error occurs, or if Commit is // called on a read-only transaction. 
-func (tx *Tx) Commit() error { +func (tx *Tx) Commit() (err error) { + txId := tx.ID() + lg := tx.db.Logger() + lg.Debugf("Committing transaction %d", txId) + defer func() { + if err != nil { + lg.Errorf("Committing transaction failed: %v", err) + } else { + lg.Debugf("Committing transaction %d successfully", txId) + } + }() + common.Assert(!tx.managed, "managed tx commit not allowed") if tx.db == nil { return berrors.ErrTxClosed @@ -151,7 +163,6 @@ func (tx *Tx) Commit() error { return berrors.ErrTxNotWritable } - tx.Logger.Infof("Committing transaction %d", tx.ID()) // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. // Rebalance nodes which have had deletions. @@ -165,7 +176,8 @@ func (tx *Tx) Commit() error { // spill data onto dirty pages. startTime = time.Now() - if err := tx.root.spill(); err != nil { + if err = tx.root.spill(); err != nil { + lg.Errorf("spilling data onto dirty pages failed: %v", err) tx.rollback() return err } @@ -180,8 +192,9 @@ func (tx *Tx) Commit() error { } if !tx.db.NoFreelistSync { - err := tx.commitFreelist() + err = tx.commitFreelist() if err != nil { + lg.Errorf("committing freelist failed: %v", err) return err } } else { @@ -194,7 +207,8 @@ func (tx *Tx) Commit() error { // gofail: var lackOfDiskSpace string // tx.rollback() // return errors.New(lackOfDiskSpace) - if err := tx.db.grow(int(tx.meta.Pgid()+1) * tx.db.pageSize); err != nil { + if err = tx.db.grow(int(tx.meta.Pgid()+1) * tx.db.pageSize); err != nil { + lg.Errorf("growing db size failed, pgid: %d, pagesize: %d, error: %v", tx.meta.Pgid(), tx.db.pageSize, err) tx.rollback() return err } @@ -202,7 +216,8 @@ func (tx *Tx) Commit() error { // Write dirty pages to disk. 
startTime = time.Now() - if err := tx.write(); err != nil { + if err = tx.write(); err != nil { + lg.Errorf("writing data failed: %v", err) tx.rollback() return err } @@ -212,11 +227,11 @@ func (tx *Tx) Commit() error { ch := tx.Check() var errs []string for { - err, ok := <-ch + chkErr, ok := <-ch if !ok { break } - errs = append(errs, err.Error()) + errs = append(errs, chkErr.Error()) } if len(errs) > 0 { panic("check fail: " + strings.Join(errs, "\n")) @@ -224,7 +239,8 @@ func (tx *Tx) Commit() error { } // Write meta to disk. - if err := tx.writeMeta(); err != nil { + if err = tx.writeMeta(); err != nil { + lg.Errorf("writeMeta failed: %v", err) tx.rollback() return err } @@ -418,8 +434,10 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error { // allocate returns a contiguous block of memory starting at a given page. func (tx *Tx) allocate(count int) (*common.Page, error) { + lg := tx.db.Logger() p, err := tx.db.allocate(tx.meta.Txid(), count) if err != nil { + lg.Errorf("allocating failed, txid: %d, count: %d, error: %v", tx.meta.Txid(), count, err) return nil, err } @@ -436,6 +454,7 @@ func (tx *Tx) allocate(count int) (*common.Page, error) { // write writes any dirty pages to disk. func (tx *Tx) write() error { // Sort pages by id. 
+ lg := tx.db.Logger() pages := make(common.Pages, 0, len(tx.pages)) for _, p := range tx.pages { pages = append(pages, p) @@ -459,6 +478,7 @@ func (tx *Tx) write() error { buf := common.UnsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + lg.Errorf("writeAt failed, offset: %d: %w", offset, err) return err } @@ -481,6 +501,7 @@ func (tx *Tx) write() error { if !tx.db.NoSync || common.IgnoreNoSync { // gofail: var beforeSyncDataPages struct{} if err := fdatasync(tx.db); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err) return err } } @@ -508,17 +529,20 @@ func (tx *Tx) write() error { // writeMeta writes the meta to the disk. func (tx *Tx) writeMeta() error { // Create a temporary buffer for the meta page. + lg := tx.db.Logger() buf := make([]byte, tx.db.pageSize) p := tx.db.pageInBuffer(buf, 0) tx.meta.Write(p) // Write the meta page to file. if _, err := tx.db.ops.writeAt(buf, int64(p.Id())*int64(tx.db.pageSize)); err != nil { + lg.Errorf("writeAt failed, pgid: %d, pageSize: %d, error: %v", p.Id(), tx.db.pageSize, err) return err } if !tx.db.NoSync || common.IgnoreNoSync { // gofail: var beforeSyncMetaPage struct{} if err := fdatasync(tx.db); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err) return err } } From b18e7931c576d68049994964e4c496f0d59a8230 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sun, 31 Dec 2023 14:19:12 +0000 Subject: [PATCH 183/439] move the invariant properties into a common method Signed-off-by: Benjamin Wang --- tx_check.go | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/tx_check.go b/tx_check.go index ed8840ec0..08a1a6eda 100644 --- a/tx_check.go +++ b/tx_check.go @@ -78,12 +78,7 @@ func (tx *Tx) recursivelyCheckBucket(b *Bucket, reachable map[common.Pgid]*commo return } - // Check every page used by this bucket. 
- b.tx.forEachPage(b.RootPage(), func(p *common.Page, _ int, stack []common.Pgid) { - verifyPageReachable(p, tx.meta.Pgid(), stack, reachable, freed, ch) - }) - - tx.recursivelyCheckPageKeyOrder(b.RootPage(), kvStringer.KeyToString, ch) + tx.checkInvariantProperties(b.RootPage(), reachable, freed, kvStringer, ch) // Check each bucket within this bucket. _ = b.ForEachBucket(func(k []byte) error { @@ -94,6 +89,15 @@ func (tx *Tx) recursivelyCheckBucket(b *Bucket, reachable map[common.Pgid]*commo }) } +func (tx *Tx) checkInvariantProperties(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, + kvStringer KVStringer, ch chan error) { + tx.forEachPage(pageId, func(p *common.Page, _ int, stack []common.Pgid) { + verifyPageReachable(p, tx.meta.Pgid(), stack, reachable, freed, ch) + }) + + tx.recursivelyCheckPageKeyOrder(pageId, kvStringer.KeyToString, ch) +} + func verifyPageReachable(p *common.Page, hwm common.Pgid, stack []common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, ch chan error) { if p.Id() > hwm { ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.Id()), int(hwm), stack) From 4c3a80b2c06cb5a39eeabe422f718a1b6cfc7be2 Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Mon, 1 Jan 2024 17:10:05 +0800 Subject: [PATCH 184/439] tests: Update TestRestartFromPowerFailure Update case with a combination of EXT4 filesystem's commit setting and unexpected exit event. That EXT4 filesystem's commit is to sync all its data and metadata every seconds. The kernel can help us sync even if that process has been killed. With different commit setting, we can simulate that case that kernel syncs half part of dirty pages before power failure. And for unexpected exit event, we can kill that process randomly or panic at failpoint instead of fixed code path. 
Signed-off-by: Wei Fu --- tests/dmflakey/dmflakey.go | 5 ++ tests/robustness/powerfailure_test.go | 85 +++++++++++++++++++++++++-- 2 files changed, 85 insertions(+), 5 deletions(-) diff --git a/tests/dmflakey/dmflakey.go b/tests/dmflakey/dmflakey.go index d9bdf99a0..25061a4cb 100644 --- a/tests/dmflakey/dmflakey.go +++ b/tests/dmflakey/dmflakey.go @@ -7,6 +7,7 @@ import ( "fmt" "os" "os/exec" + "path" "path/filepath" "strings" "time" @@ -289,6 +290,10 @@ func createEmptyFSImage(imgPath string, fsType FSType) error { return fmt.Errorf("failed to create image because %s already exists", imgPath) } + if err := os.MkdirAll(path.Dir(imgPath), 0600); err != nil { + return fmt.Errorf("failed to ensure parent directory %s: %w", path.Dir(imgPath), err) + } + f, err := os.Create(imgPath) if err != nil { return fmt.Errorf("failed to create image %s: %w", imgPath, err) diff --git a/tests/robustness/powerfailure_test.go b/tests/robustness/powerfailure_test.go index a1d0bc598..09ae88124 100644 --- a/tests/robustness/powerfailure_test.go +++ b/tests/robustness/powerfailure_test.go @@ -4,8 +4,11 @@ package robustness import ( "bytes" + "crypto/rand" "fmt" "io" + "math" + "math/big" "net/http" "net/url" "os" @@ -23,9 +26,65 @@ import ( "golang.org/x/sys/unix" ) +var panicFailpoints = []string{ + "beforeSyncDataPages", + "beforeSyncMetaPage", + "lackOfDiskSpace", + "mapError", + "resizeFileError", + "unmapError", +} + // TestRestartFromPowerFailure is to test data after unexpected power failure. 
func TestRestartFromPowerFailure(t *testing.T) { - flakey := initFlakeyDevice(t, t.Name(), dmflakey.FSTypeEXT4, "") + for _, tc := range []struct { + name string + du time.Duration + fsMountOpt string + useFailpoint bool + }{ + { + name: "fp_ext4_commit5s", + du: 5 * time.Second, + fsMountOpt: "commit=5", + useFailpoint: true, + }, + { + name: "fp_ext4_commit1s", + du: 10 * time.Second, + fsMountOpt: "commit=1", + useFailpoint: true, + }, + { + name: "fp_ext4_commit1000s", + du: 10 * time.Second, + fsMountOpt: "commit=1000", + useFailpoint: true, + }, + { + name: "kill_ext4_commit5s", + du: 5 * time.Second, + fsMountOpt: "commit=5", + }, + { + name: "kill_ext4_commit1s", + du: 10 * time.Second, + fsMountOpt: "commit=1", + }, + { + name: "kill_ext4_commit1000s", + du: 10 * time.Second, + fsMountOpt: "commit=1000", + }, + } { + t.Run(tc.name, func(t *testing.T) { + doPowerFailure(t, tc.du, tc.fsMountOpt, tc.useFailpoint) + }) + } +} + +func doPowerFailure(t *testing.T, du time.Duration, fsMountOpt string, useFailpoint bool) { + flakey := initFlakeyDevice(t, strings.Replace(t.Name(), "/", "_", -1), dmflakey.FSTypeEXT4, fsMountOpt) root := flakey.RootFS() dbPath := filepath.Join(root, "boltdb") @@ -38,6 +97,8 @@ func TestRestartFromPowerFailure(t *testing.T) { } logPath := filepath.Join(t.TempDir(), fmt.Sprintf("%s.log", t.Name())) + require.NoError(t, os.MkdirAll(path.Dir(logPath), 0600)) + logFd, err := os.Create(logPath) require.NoError(t, err) defer logFd.Close() @@ -64,10 +125,18 @@ func TestRestartFromPowerFailure(t *testing.T) { } }() - time.Sleep(time.Duration(time.Now().UnixNano()%5+1) * time.Second) + time.Sleep(du) t.Logf("simulate power failure") - activeFailpoint(t, fpURL, "beforeSyncMetaPage", "panic") + if useFailpoint { + fpURL = "http://" + fpURL + targetFp := panicFailpoints[randomInt(t, math.MaxInt32)%len(panicFailpoints)] + t.Logf("random pick failpoint: %s", targetFp) + activeFailpoint(t, fpURL, targetFp, "panic") + } else { + t.Log("kill bbolt") + 
assert.NoError(t, cmd.Process.Kill()) + } select { case <-time.After(10 * time.Second): @@ -89,10 +158,10 @@ func TestRestartFromPowerFailure(t *testing.T) { // activeFailpoint actives the failpoint by http. func activeFailpoint(t *testing.T, targetUrl string, fpName, fpVal string) { - u, err := url.Parse("http://" + path.Join(targetUrl, fpName)) + u, err := url.JoinPath(targetUrl, fpName) require.NoError(t, err, "parse url %s", targetUrl) - req, err := http.NewRequest("PUT", u.String(), bytes.NewBuffer([]byte(fpVal))) + req, err := http.NewRequest("PUT", u, bytes.NewBuffer([]byte(fpVal))) require.NoError(t, err) resp, err := http.DefaultClient.Do(req) @@ -192,3 +261,9 @@ func unmountAll(target string) error { } return fmt.Errorf("failed to umount %s: %w", target, unix.EBUSY) } + +func randomInt(t *testing.T, max int) int { + n, err := rand.Int(rand.Reader, big.NewInt(int64(max))) + assert.NoError(t, err) + return int(n.Int64()) +} From c61a3be3e85720c7a8f6fcd6c71c00aea45218c5 Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Mon, 1 Jan 2024 22:49:06 +0800 Subject: [PATCH 185/439] *: introduce nightly run for robustness Signed-off-by: Wei Fu --- .github/workflows/robustness_nightly.yaml | 17 ++++++++++ .github/workflows/robustness_template.yaml | 38 ++++++++++++++++++++++ .github/workflows/robustness_test.yaml | 18 +++------- Makefile | 2 +- 4 files changed, 61 insertions(+), 14 deletions(-) create mode 100644 .github/workflows/robustness_nightly.yaml create mode 100644 .github/workflows/robustness_template.yaml diff --git a/.github/workflows/robustness_nightly.yaml b/.github/workflows/robustness_nightly.yaml new file mode 100644 index 000000000..8b2bdb81e --- /dev/null +++ b/.github/workflows/robustness_nightly.yaml @@ -0,0 +1,17 @@ +--- +name: Robustness Nightly +permissions: read-all +on: + schedule: + - cron: '25 9 * * *' # runs every day at 09:25 UTC + # workflow_dispatch enables manual testing of this job by maintainers + workflow_dispatch: + +jobs: + main: + # GHA 
has a maximum amount of 6h execution time, we try to get done within 3h + uses: ./.github/workflows/robustness_template.yaml + with: + count: 100 + testTimeout: 200m + runs-on: "['ubuntu-latest-8-cores']" diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml new file mode 100644 index 000000000..54ed3b483 --- /dev/null +++ b/.github/workflows/robustness_template.yaml @@ -0,0 +1,38 @@ +--- +name: Reusable Robustness Workflow +on: + workflow_call: + inputs: + count: + required: true + type: number + testTimeout: + required: false + type: string + default: '30m' + runs-on: + required: false + type: string + default: "['ubuntu-latest']" +permissions: read-all + +jobs: + test: + timeout-minutes: 210 + runs-on: ${{ fromJson(inputs.runs-on) }} + steps: + - uses: actions/checkout@v4 + - id: goversion + run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" + - uses: actions/setup-go@v5 + with: + go-version: ${{ steps.goversion.outputs.goversion }} + - name: test-robustness + run: | + set -euo pipefail + + make gofail-enable + + # build bbolt with failpoint + go install ./cmd/bbolt + sudo -E PATH=$PATH make ROBUSTNESS_TESTFLAGS="--count ${{ inputs.count }} --timeout ${{ inputs.testTimeout }} -failfast" test-robustness diff --git a/.github/workflows/robustness_test.yaml b/.github/workflows/robustness_test.yaml index b1980ac65..a96854d4e 100644 --- a/.github/workflows/robustness_test.yaml +++ b/.github/workflows/robustness_test.yaml @@ -3,16 +3,8 @@ on: [push, pull_request] permissions: read-all jobs: test: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - id: goversion - run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@v5 - with: - go-version: ${{ steps.goversion.outputs.goversion }} - - run: | - make gofail-enable - # build bbolt with failpoint - go install ./cmd/bbolt - sudo -E PATH=$PATH make test-robustness + uses: ./.github/workflows/robustness_template.yaml + 
with: + count: 10 + testTimeout: 30m + runs-on: "['ubuntu-latest-8-cores']" diff --git a/Makefile b/Makefile index f43b25b20..b2e95df8e 100644 --- a/Makefile +++ b/Makefile @@ -84,4 +84,4 @@ test-failpoint: .PHONY: test-robustness # Running robustness tests requires root permission test-robustness: go test -v ${TESTFLAGS} ./tests/dmflakey -test.root - go test -v ${TESTFLAGS} ./tests/robustness -test.root + go test -v ${TESTFLAGS} ${ROBUSTNESS_TESTFLAGS} ./tests/robustness -test.root From 27ded38c22bffa309c2b074b8dcdb701fb2de957 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 24 Nov 2023 14:52:08 +0000 Subject: [PATCH 186/439] add MoveBucket to support moving a sub-bucket from one bucket to another bucket Signed-off-by: Benjamin Wang --- bucket.go | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++ tx.go | 18 ++++++++++++++++++ 2 files changed, 71 insertions(+) diff --git a/bucket.go b/bucket.go index d9b384a2a..c1e8d126c 100644 --- a/bucket.go +++ b/bucket.go @@ -322,6 +322,59 @@ func (b *Bucket) DeleteBucket(key []byte) (err error) { return nil } +// MoveBucket moves a sub-bucket from the source bucket to the destination bucket. +// Returns an error if +// 1. the sub-bucket cannot be found in the source bucket; +// 2. or the key already exists in the destination bucket; +// 3. the key represents a non-bucket value. +func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) error { + if b.tx.db == nil || dstBucket.tx.db == nil { + return errors.ErrTxClosed + } else if !dstBucket.Writable() { + return errors.ErrTxNotWritable + } + + // Move cursor to correct position. + c := b.Cursor() + k, v, flags := c.seek(key) + + // Return an error if bucket doesn't exist or is not a bucket. 
+ if !bytes.Equal(key, k) { + return errors.ErrBucketNotFound + } else if (flags & common.BucketLeafFlag) == 0 { + return fmt.Errorf("key %q isn't a bucket in the source bucket: %w", key, errors.ErrIncompatibleValue) + } + + // Do nothing (return true directly) if the source bucket and the + // destination bucket are actually the same bucket. + if b == dstBucket || (b.RootPage() == dstBucket.RootPage() && b.RootPage() != 0) { + return nil + } + + // check whether the key already exists in the destination bucket + curDst := dstBucket.Cursor() + k, _, flags = curDst.seek(key) + + // Return an error if there is an existing key in the destination bucket. + if bytes.Equal(key, k) { + if (flags & common.BucketLeafFlag) != 0 { + return errors.ErrBucketExists + } + return fmt.Errorf("key %q already exists in the target bucket: %w", key, errors.ErrIncompatibleValue) + } + + // remove the sub-bucket from the source bucket + delete(b.buckets, string(key)) + c.node().del(key) + + // add te sub-bucket to the destination bucket + newKey := cloneBytes(key) + newValue := cloneBytes(v) + curDst.node().put(newKey, newKey, newValue, 0, common.BucketLeafFlag) + + return nil +} + // Get retrieves the value for a key in the bucket. // Returns a nil value if the key does not exist or if the key is a nested bucket. // The returned value is only valid for the life of the transaction. diff --git a/tx.go b/tx.go index 8e624e7b2..81913b0fe 100644 --- a/tx.go +++ b/tx.go @@ -127,6 +127,24 @@ func (tx *Tx) DeleteBucket(name []byte) error { return tx.root.DeleteBucket(name) } +// MoveBucket moves a sub-bucket from the source bucket to the destination bucket. +// Returns an error if +// 1. the sub-bucket cannot be found in the source bucket; +// 2. or the key already exists in the destination bucket; +// 3. the key represents a non-bucket value. +// +// If src is nil, it means moving a top level bucket into the target bucket. 
+// If dst is nil, it means converting the child bucket into a top level bucket. +func (tx *Tx) MoveBucket(child []byte, src *Bucket, dst *Bucket) error { + if src == nil { + src = &tx.root + } + if dst == nil { + dst = &tx.root + } + return src.MoveBucket(child, dst) +} + // ForEach executes a function for each bucket in the root. // If the provided function returns an error then the iteration is stopped and // the error is returned to the caller. From ac355dec240ce753edc453e455e1c70ffac83a2b Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 12 Dec 2023 12:49:44 +0100 Subject: [PATCH 187/439] add MoveSubBucket test Signed-off-by: Mustafa Elbehery --- bucket.go | 5 +- errors/errors.go | 6 +- movebucket_test.go | 291 +++++++++++++++++++++++++++++++++++++++++++++ utils_test.go | 46 +++++++ 4 files changed, 345 insertions(+), 3 deletions(-) create mode 100644 movebucket_test.go create mode 100644 utils_test.go diff --git a/bucket.go b/bucket.go index c1e8d126c..78a68f548 100644 --- a/bucket.go +++ b/bucket.go @@ -326,7 +326,8 @@ func (b *Bucket) DeleteBucket(key []byte) (err error) { // Returns an error if // 1. the sub-bucket cannot be found in the source bucket; // 2. or the key already exists in the destination bucket; -// 3. the key represents a non-bucket value. +// 3. or the key represents a non-bucket value; +// 4. the source and destination buckets are the same. func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) error { if b.tx.db == nil || dstBucket.tx.db == nil { return errors.ErrTxClosed @@ -348,7 +349,7 @@ func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) error { // Do nothing (return true directly) if the source bucket and the // destination bucket are actually the same bucket. 
if b == dstBucket || (b.RootPage() == dstBucket.RootPage() && b.RootPage() != 0) { - return nil + return fmt.Errorf("source bucket %s and target bucket %s are the same: %w", b.String(), dstBucket.String(), errors.ErrSameBuckets) } // check whether the key already exists in the destination bucket diff --git a/errors/errors.go b/errors/errors.go index 9598cbd8a..5709bcf2c 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -69,8 +69,12 @@ var ( // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. ErrValueTooLarge = errors.New("value too large") - // ErrIncompatibleValue is returned when trying create or delete a bucket + // ErrIncompatibleValue is returned when trying to create or delete a bucket // on an existing non-bucket key or when trying to create or delete a // non-bucket key on an existing bucket key. ErrIncompatibleValue = errors.New("incompatible value") + + // ErrSameBuckets is returned when trying to move a sub-bucket between + // source and target buckets, while source and target buckets are the same. 
+ ErrSameBuckets = errors.New("the source and target are the same bucket") ) diff --git a/movebucket_test.go b/movebucket_test.go new file mode 100644 index 000000000..21789c40a --- /dev/null +++ b/movebucket_test.go @@ -0,0 +1,291 @@ +package bbolt_test + +import ( + "bytes" + crand "crypto/rand" + "math/rand" + "os" + "testing" + + "go.etcd.io/bbolt" + "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/btesting" + + "github.com/stretchr/testify/require" +) + +func TestTx_MoveBucket(t *testing.T) { + testCases := []struct { + name string + srcBucketPath []string + dstBucketPath []string + bucketToMove string + incompatibleKeyInSrc bool + incompatibleKeyInDst bool + parentSrc bool + parentDst bool + expActErr error + }{ + { + name: "happy path", + srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "sb3ToMove", + incompatibleKeyInSrc: false, + incompatibleKeyInDst: false, + parentSrc: true, + parentDst: false, + expActErr: nil, + }, + { + name: "bucketToMove not exist in srcBucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "sb3ToMove", + incompatibleKeyInSrc: false, + incompatibleKeyInDst: false, + parentSrc: false, + parentDst: false, + expActErr: errors.ErrBucketNotFound, + }, + { + name: "bucketToMove exist in dstBucket", + srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"}, + dstBucketPath: []string{"db1", "db2", "sb3ToMove"}, + bucketToMove: "sb3ToMove", + incompatibleKeyInSrc: false, + incompatibleKeyInDst: false, + parentSrc: true, + parentDst: true, + expActErr: errors.ErrBucketExists, + }, + { + name: "bucketToMove key exist in srcBucket but no subBucket value", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "sb3ToMove", + incompatibleKeyInSrc: true, + incompatibleKeyInDst: false, + parentSrc: true, + parentDst: false, + expActErr: errors.ErrIncompatibleValue, + }, + { + name: "bucketToMove 
key exist in dstBucket but no subBucket value", + srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "sb3ToMove", + incompatibleKeyInSrc: false, + incompatibleKeyInDst: true, + parentSrc: true, + parentDst: true, + expActErr: errors.ErrIncompatibleValue, + }, + { + name: "srcBucket is rootBucket", + srcBucketPath: []string{"", "sb3ToMove"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "sb3ToMove", + incompatibleKeyInSrc: false, + incompatibleKeyInDst: false, + parentSrc: true, + parentDst: false, + expActErr: nil, + }, + { + name: "dstBucket is rootBucket", + srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"}, + dstBucketPath: []string{""}, + bucketToMove: "sb3ToMove", + incompatibleKeyInSrc: false, + incompatibleKeyInDst: false, + parentSrc: true, + parentDst: false, + expActErr: nil, + }, + { + name: "srcBucket is rootBucket and dstBucket is rootBucket", + srcBucketPath: []string{"", "sb3ToMove"}, + dstBucketPath: []string{""}, + bucketToMove: "sb3ToMove", + incompatibleKeyInSrc: false, + incompatibleKeyInDst: false, + parentSrc: false, + parentDst: false, + expActErr: errors.ErrSameBuckets, + }, + } + + for _, tc := range testCases { + + t.Run(tc.name, func(*testing.T) { + db := btesting.MustCreateDBWithOption(t, &bbolt.Options{PageSize: pageSize}) + + dumpBucketBeforeMoving := tempfile() + dumpBucketAfterMoving := tempfile() + + // arrange + if err := db.Update(func(tx *bbolt.Tx) error { + srcBucket := openBuckets(t, tx, tc.incompatibleKeyInSrc, true, false, tc.srcBucketPath...) + dstBucket := openBuckets(t, tx, tc.incompatibleKeyInDst, true, false, tc.dstBucketPath...) 
+ + if tc.incompatibleKeyInSrc { + if pErr := srcBucket.Put([]byte(tc.bucketToMove), []byte("0")); pErr != nil { + t.Fatalf("error inserting key %v, and value %v in bucket %v: %v", tc.bucketToMove, "0", srcBucket, pErr) + } + } + + if tc.incompatibleKeyInDst { + if pErr := dstBucket.Put([]byte(tc.bucketToMove), []byte("0")); pErr != nil { + t.Fatalf("error inserting key %v, and value %v in bucket %v: %v", tc.bucketToMove, "0", dstBucket, pErr) + } + } + + return nil + }); err != nil { + t.Fatal(err) + } + db.MustCheck() + + // act + if err := db.Update(func(tx *bbolt.Tx) error { + srcBucket := openBuckets(t, tx, false, false, tc.parentSrc, tc.srcBucketPath...) + dstBucket := openBuckets(t, tx, false, false, tc.parentDst, tc.dstBucketPath...) + + var bucketToMove *bbolt.Bucket + if srcBucket != nil { + bucketToMove = srcBucket.Bucket([]byte(tc.bucketToMove)) + } else { + bucketToMove = tx.Bucket([]byte(tc.bucketToMove)) + } + + if tc.expActErr == nil && bucketToMove != nil { + if wErr := dumpBucket([]byte(tc.bucketToMove), bucketToMove, dumpBucketBeforeMoving); wErr != nil { + t.Fatalf("error dumping bucket %v to file %v: %v", bucketToMove.String(), dumpBucketBeforeMoving, wErr) + } + } + + mErr := tx.MoveBucket([]byte(tc.bucketToMove), srcBucket, dstBucket) + require.ErrorIs(t, mErr, tc.expActErr) + + return nil + }); err != nil { + t.Fatal(err) + } + db.MustCheck() + + // skip assertion if failure expected + if tc.expActErr != nil { + return + } + + // assert + if err := db.Update(func(tx *bbolt.Tx) error { + var movedBucket *bbolt.Bucket + srcBucket := openBuckets(t, tx, false, false, tc.parentSrc, tc.srcBucketPath...) 
+ + if srcBucket != nil { + if movedBucket = srcBucket.Bucket([]byte(tc.bucketToMove)); movedBucket != nil { + t.Fatalf("expected childBucket %v to be moved from srcBucket %v", tc.bucketToMove, srcBucket) + } + } else { + if movedBucket = tx.Bucket([]byte(tc.bucketToMove)); movedBucket != nil { + t.Fatalf("expected childBucket %v to be moved from root bucket %v", tc.bucketToMove, "root bucket") + } + } + + dstBucket := openBuckets(t, tx, false, false, tc.parentDst, tc.dstBucketPath...) + if dstBucket != nil { + if movedBucket = dstBucket.Bucket([]byte(tc.bucketToMove)); movedBucket == nil { + t.Fatalf("expected childBucket %v to be child of dstBucket %v", tc.bucketToMove, dstBucket) + } + } else { + if movedBucket = tx.Bucket([]byte(tc.bucketToMove)); movedBucket == nil { + t.Fatalf("expected childBucket %v to be child of dstBucket %v", tc.bucketToMove, "root bucket") + } + } + + wErr := dumpBucket([]byte(tc.bucketToMove), movedBucket, dumpBucketAfterMoving) + if wErr != nil { + t.Fatalf("error dumping bucket %v to file %v", movedBucket.String(), dumpBucketAfterMoving) + } + + beforeBucket := readBucketFromFile(t, dumpBucketBeforeMoving) + afterBucket := readBucketFromFile(t, dumpBucketAfterMoving) + + if !bytes.Equal(beforeBucket, afterBucket) { + t.Fatalf("bucket's content before moving is different than after moving") + } + + return nil + }); err != nil { + t.Fatal(err) + } + db.MustCheck() + }) + } +} + +func openBuckets(t testing.TB, tx *bbolt.Tx, incompatibleKey bool, init bool, parent bool, paths ...string) *bbolt.Bucket { + t.Helper() + + var bk *bbolt.Bucket + var err error + + idx := len(paths) - 1 + for i, key := range paths { + if len(key) == 0 { + if !init { + break + } + continue + } + if (incompatibleKey && i == idx) || (parent && i == idx) { + continue + } + if bk == nil { + bk, err = tx.CreateBucketIfNotExists([]byte(key)) + } else { + bk, err = bk.CreateBucketIfNotExists([]byte(key)) + } + if err != nil { + t.Fatalf("error creating bucket %v: %v", 
key, err) + } + if init { + insertRandKeysValuesBucket(t, bk, rand.Intn(4096)) + } + } + + return bk +} + +func readBucketFromFile(t testing.TB, tmpFile string) []byte { + data, err := os.ReadFile(tmpFile) + if err != nil { + t.Fatalf("error reading temp file %v", tmpFile) + } + + return data +} + +func insertRandKeysValuesBucket(t testing.TB, bk *bbolt.Bucket, n int) { + var min, max = 1, 1024 + + for i := 0; i < n; i++ { + // generate rand key/value length + keyLength := rand.Intn(max-min) + min + valLength := rand.Intn(max-min) + min + + keyData := make([]byte, keyLength) + valData := make([]byte, valLength) + + _, err := crand.Read(keyData) + require.NoError(t, err) + + _, err = crand.Read(valData) + require.NoError(t, err) + + err = bk.Put(keyData, valData) + require.NoError(t, err) + } +} diff --git a/utils_test.go b/utils_test.go new file mode 100644 index 000000000..867109493 --- /dev/null +++ b/utils_test.go @@ -0,0 +1,46 @@ +package bbolt_test + +import ( + bolt "go.etcd.io/bbolt" + "go.etcd.io/bbolt/internal/common" +) + +// `dumpBucket` dumps all the data, including both key/value data +// and child buckets, from the source bucket into the target db file. 
+func dumpBucket(srcBucketName []byte, srcBucket *bolt.Bucket, dstFilename string) error { + common.Assert(len(srcBucketName) != 0, "source bucket name can't be empty") + common.Assert(srcBucket != nil, "the source bucket can't be nil") + common.Assert(len(dstFilename) != 0, "the target file path can't be empty") + + dstDB, err := bolt.Open(dstFilename, 0600, nil) + if err != nil { + return err + } + + return dstDB.Update(func(tx *bolt.Tx) error { + dstBucket, err := tx.CreateBucket(srcBucketName) + if err != nil { + return err + } + return cloneBucket(srcBucket, dstBucket) + }) +} + +func cloneBucket(src *bolt.Bucket, dst *bolt.Bucket) error { + return src.ForEach(func(k, v []byte) error { + if v == nil { + srcChild := src.Bucket(k) + dstChild, err := dst.CreateBucket(k) + if err != nil { + return err + } + if err = dstChild.SetSequence(srcChild.Sequence()); err != nil { + return err + } + + return cloneBucket(srcChild, dstChild) + } + + return dst.Put(k, v) + }) +} From 0bd26bc48ce36b29006eb94983e09f7a9f1aa03d Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 2 Jan 2024 13:35:38 +0000 Subject: [PATCH 188/439] Refactor test case TestTx_MoveBucket and add log for MoveBucket Signed-off-by: Benjamin Wang --- movebucket_test.go | 376 ++++++++++++++++++++++----------------------- utils_test.go | 1 + 2 files changed, 181 insertions(+), 196 deletions(-) diff --git a/movebucket_test.go b/movebucket_test.go index 21789c40a..b89b9602f 100644 --- a/movebucket_test.go +++ b/movebucket_test.go @@ -1,10 +1,10 @@ package bbolt_test import ( - "bytes" crand "crypto/rand" "math/rand" "os" + "path/filepath" "testing" "go.etcd.io/bbolt" @@ -16,259 +16,243 @@ import ( func TestTx_MoveBucket(t *testing.T) { testCases := []struct { - name string - srcBucketPath []string - dstBucketPath []string - bucketToMove string - incompatibleKeyInSrc bool - incompatibleKeyInDst bool - parentSrc bool - parentDst bool - expActErr error + name string + srcBucketPath []string + dstBucketPath 
[]string + bucketToMove string + bucketExistInSrc bool + bucketExistInDst bool + hasIncompatibleKeyInSrc bool + hasIncompatibleKeyInDst bool + expectedErr error }{ + // normal cases { - name: "happy path", - srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"}, - dstBucketPath: []string{"db1", "db2"}, - bucketToMove: "sb3ToMove", - incompatibleKeyInSrc: false, - incompatibleKeyInDst: false, - parentSrc: true, - parentDst: false, - expActErr: nil, + name: "normal case", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: nil, }, { - name: "bucketToMove not exist in srcBucket", - srcBucketPath: []string{"sb1", "sb2"}, - dstBucketPath: []string{"db1", "db2"}, - bucketToMove: "sb3ToMove", - incompatibleKeyInSrc: false, - incompatibleKeyInDst: false, - parentSrc: false, - parentDst: false, - expActErr: errors.ErrBucketNotFound, + name: "the source and target bucket share the same grandparent", + srcBucketPath: []string{"grandparent", "sb2"}, + dstBucketPath: []string{"grandparent", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: nil, }, { - name: "bucketToMove exist in dstBucket", - srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"}, - dstBucketPath: []string{"db1", "db2", "sb3ToMove"}, - bucketToMove: "sb3ToMove", - incompatibleKeyInSrc: false, - incompatibleKeyInDst: false, - parentSrc: true, - parentDst: true, - expActErr: errors.ErrBucketExists, + name: "bucketToMove is a top level bucket", + srcBucketPath: []string{}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: nil, }, { - name: 
"bucketToMove key exist in srcBucket but no subBucket value", - srcBucketPath: []string{"sb1", "sb2"}, - dstBucketPath: []string{"db1", "db2"}, - bucketToMove: "sb3ToMove", - incompatibleKeyInSrc: true, - incompatibleKeyInDst: false, - parentSrc: true, - parentDst: false, - expActErr: errors.ErrIncompatibleValue, + name: "convert bucketToMove to a top level bucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: nil, }, + // negative cases { - name: "bucketToMove key exist in dstBucket but no subBucket value", - srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"}, - dstBucketPath: []string{"db1", "db2"}, - bucketToMove: "sb3ToMove", - incompatibleKeyInSrc: false, - incompatibleKeyInDst: true, - parentSrc: true, - parentDst: true, - expActErr: errors.ErrIncompatibleValue, + name: "bucketToMove not exist in source bucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: false, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: errors.ErrBucketNotFound, }, { - name: "srcBucket is rootBucket", - srcBucketPath: []string{"", "sb3ToMove"}, - dstBucketPath: []string{"db1", "db2"}, - bucketToMove: "sb3ToMove", - incompatibleKeyInSrc: false, - incompatibleKeyInDst: false, - parentSrc: true, - parentDst: false, - expActErr: nil, + name: "bucketToMove exist in target bucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: true, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: errors.ErrBucketExists, }, { - name: "dstBucket is rootBucket", - srcBucketPath: []string{"sb1", "sb2", "sb3ToMove"}, - 
dstBucketPath: []string{""}, - bucketToMove: "sb3ToMove", - incompatibleKeyInSrc: false, - incompatibleKeyInDst: false, - parentSrc: true, - parentDst: false, - expActErr: nil, + name: "incompatible key exist in source bucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: false, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: true, + hasIncompatibleKeyInDst: false, + expectedErr: errors.ErrIncompatibleValue, }, { - name: "srcBucket is rootBucket and dstBucket is rootBucket", - srcBucketPath: []string{"", "sb3ToMove"}, - dstBucketPath: []string{""}, - bucketToMove: "sb3ToMove", - incompatibleKeyInSrc: false, - incompatibleKeyInDst: false, - parentSrc: false, - parentDst: false, - expActErr: errors.ErrSameBuckets, + name: "incompatible key exist in target bucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: true, + expectedErr: errors.ErrIncompatibleValue, + }, + { + name: "the source and target are the same bucket", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"sb1", "sb2"}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: errors.ErrSameBuckets, + }, + { + name: "both the source and target are the root bucket", + srcBucketPath: []string{}, + dstBucketPath: []string{}, + bucketToMove: "bucketToMove", + bucketExistInSrc: true, + bucketExistInDst: false, + hasIncompatibleKeyInSrc: false, + hasIncompatibleKeyInDst: false, + expectedErr: errors.ErrSameBuckets, }, } for _, tc := range testCases { t.Run(tc.name, func(*testing.T) { - db := btesting.MustCreateDBWithOption(t, &bbolt.Options{PageSize: pageSize}) + db := btesting.MustCreateDBWithOption(t, 
&bbolt.Options{PageSize: 4096}) - dumpBucketBeforeMoving := tempfile() - dumpBucketAfterMoving := tempfile() + dumpBucketBeforeMoving := filepath.Join(t.TempDir(), "dbBeforeMove") + dumpBucketAfterMoving := filepath.Join(t.TempDir(), "dbAfterMove") - // arrange - if err := db.Update(func(tx *bbolt.Tx) error { - srcBucket := openBuckets(t, tx, tc.incompatibleKeyInSrc, true, false, tc.srcBucketPath...) - dstBucket := openBuckets(t, tx, tc.incompatibleKeyInDst, true, false, tc.dstBucketPath...) + t.Log("Creating sample db and populate some data") + err := db.Update(func(tx *bbolt.Tx) error { + srcBucket := prepareBuckets(t, tx, tc.srcBucketPath...) + dstBucket := prepareBuckets(t, tx, tc.dstBucketPath...) - if tc.incompatibleKeyInSrc { - if pErr := srcBucket.Put([]byte(tc.bucketToMove), []byte("0")); pErr != nil { - t.Fatalf("error inserting key %v, and value %v in bucket %v: %v", tc.bucketToMove, "0", srcBucket, pErr) - } + if tc.bucketExistInSrc { + _ = createBucketAndPopulateData(t, tx, srcBucket, tc.bucketToMove) } - if tc.incompatibleKeyInDst { - if pErr := dstBucket.Put([]byte(tc.bucketToMove), []byte("0")); pErr != nil { - t.Fatalf("error inserting key %v, and value %v in bucket %v: %v", tc.bucketToMove, "0", dstBucket, pErr) - } + if tc.bucketExistInDst { + _ = createBucketAndPopulateData(t, tx, dstBucket, tc.bucketToMove) } - return nil - }); err != nil { - t.Fatal(err) - } - db.MustCheck() - - // act - if err := db.Update(func(tx *bbolt.Tx) error { - srcBucket := openBuckets(t, tx, false, false, tc.parentSrc, tc.srcBucketPath...) - dstBucket := openBuckets(t, tx, false, false, tc.parentDst, tc.dstBucketPath...) 
+ if tc.hasIncompatibleKeyInSrc { + putErr := srcBucket.Put([]byte(tc.bucketToMove), []byte("bar")) + require.NoError(t, putErr) + } - var bucketToMove *bbolt.Bucket - if srcBucket != nil { - bucketToMove = srcBucket.Bucket([]byte(tc.bucketToMove)) - } else { - bucketToMove = tx.Bucket([]byte(tc.bucketToMove)) + if tc.hasIncompatibleKeyInDst { + putErr := dstBucket.Put([]byte(tc.bucketToMove), []byte("bar")) + require.NoError(t, putErr) } - if tc.expActErr == nil && bucketToMove != nil { - if wErr := dumpBucket([]byte(tc.bucketToMove), bucketToMove, dumpBucketBeforeMoving); wErr != nil { - t.Fatalf("error dumping bucket %v to file %v: %v", bucketToMove.String(), dumpBucketBeforeMoving, wErr) - } + return nil + }) + require.NoError(t, err) + + t.Log("Moving bucket") + err = db.Update(func(tx *bbolt.Tx) error { + srcBucket := prepareBuckets(t, tx, tc.srcBucketPath...) + dstBucket := prepareBuckets(t, tx, tc.dstBucketPath...) + + if tc.expectedErr == nil { + t.Logf("Dump the bucket to %s before moving it", dumpBucketBeforeMoving) + bk := openBucket(tx, srcBucket, tc.bucketToMove) + dumpErr := dumpBucket([]byte(tc.bucketToMove), bk, dumpBucketBeforeMoving) + require.NoError(t, dumpErr) } mErr := tx.MoveBucket([]byte(tc.bucketToMove), srcBucket, dstBucket) - require.ErrorIs(t, mErr, tc.expActErr) + require.Equal(t, tc.expectedErr, mErr) + + if tc.expectedErr == nil { + t.Logf("Dump the bucket to %s after moving it", dumpBucketAfterMoving) + bk := openBucket(tx, dstBucket, tc.bucketToMove) + dumpErr := dumpBucket([]byte(tc.bucketToMove), bk, dumpBucketAfterMoving) + require.NoError(t, dumpErr) + } return nil - }); err != nil { - t.Fatal(err) - } - db.MustCheck() + }) + require.NoError(t, err) // skip assertion if failure expected - if tc.expActErr != nil { + if tc.expectedErr != nil { return } - // assert - if err := db.Update(func(tx *bbolt.Tx) error { - var movedBucket *bbolt.Bucket - srcBucket := openBuckets(t, tx, false, false, tc.parentSrc, tc.srcBucketPath...) 
- - if srcBucket != nil { - if movedBucket = srcBucket.Bucket([]byte(tc.bucketToMove)); movedBucket != nil { - t.Fatalf("expected childBucket %v to be moved from srcBucket %v", tc.bucketToMove, srcBucket) - } - } else { - if movedBucket = tx.Bucket([]byte(tc.bucketToMove)); movedBucket != nil { - t.Fatalf("expected childBucket %v to be moved from root bucket %v", tc.bucketToMove, "root bucket") - } - } - - dstBucket := openBuckets(t, tx, false, false, tc.parentDst, tc.dstBucketPath...) - if dstBucket != nil { - if movedBucket = dstBucket.Bucket([]byte(tc.bucketToMove)); movedBucket == nil { - t.Fatalf("expected childBucket %v to be child of dstBucket %v", tc.bucketToMove, dstBucket) - } - } else { - if movedBucket = tx.Bucket([]byte(tc.bucketToMove)); movedBucket == nil { - t.Fatalf("expected childBucket %v to be child of dstBucket %v", tc.bucketToMove, "root bucket") - } - } - - wErr := dumpBucket([]byte(tc.bucketToMove), movedBucket, dumpBucketAfterMoving) - if wErr != nil { - t.Fatalf("error dumping bucket %v to file %v", movedBucket.String(), dumpBucketAfterMoving) - } - - beforeBucket := readBucketFromFile(t, dumpBucketBeforeMoving) - afterBucket := readBucketFromFile(t, dumpBucketAfterMoving) - - if !bytes.Equal(beforeBucket, afterBucket) { - t.Fatalf("bucket's content before moving is different than after moving") - } - - return nil - }); err != nil { - t.Fatal(err) - } - db.MustCheck() + t.Log("Verifying the bucket should be identical before and after being moved") + dataBeforeMove, err := os.ReadFile(dumpBucketBeforeMoving) + require.NoError(t, err) + dataAfterMove, err := os.ReadFile(dumpBucketAfterMoving) + require.NoError(t, err) + require.Equal(t, dataBeforeMove, dataAfterMove) }) } } -func openBuckets(t testing.TB, tx *bbolt.Tx, incompatibleKey bool, init bool, parent bool, paths ...string) *bbolt.Bucket { - t.Helper() - +// prepareBuckets opens the bucket chain. 
For each bucket in the chain, +// open it if existed, otherwise create it and populate sample data. +func prepareBuckets(t testing.TB, tx *bbolt.Tx, buckets ...string) *bbolt.Bucket { var bk *bbolt.Bucket - var err error - idx := len(paths) - 1 - for i, key := range paths { - if len(key) == 0 { - if !init { - break - } - continue - } - if (incompatibleKey && i == idx) || (parent && i == idx) { - continue - } - if bk == nil { - bk, err = tx.CreateBucketIfNotExists([]byte(key)) + for _, key := range buckets { + if childBucket := openBucket(tx, bk, key); childBucket == nil { + bk = createBucketAndPopulateData(t, tx, bk, key) } else { - bk, err = bk.CreateBucketIfNotExists([]byte(key)) - } - if err != nil { - t.Fatalf("error creating bucket %v: %v", key, err) - } - if init { - insertRandKeysValuesBucket(t, bk, rand.Intn(4096)) + bk = childBucket } } - return bk } -func readBucketFromFile(t testing.TB, tmpFile string) []byte { - data, err := os.ReadFile(tmpFile) - if err != nil { - t.Fatalf("error reading temp file %v", tmpFile) +func openBucket(tx *bbolt.Tx, bk *bbolt.Bucket, bucketToOpen string) *bbolt.Bucket { + if bk == nil { + return tx.Bucket([]byte(bucketToOpen)) + } + return bk.Bucket([]byte(bucketToOpen)) +} + +func createBucketAndPopulateData(t testing.TB, tx *bbolt.Tx, bk *bbolt.Bucket, bucketName string) *bbolt.Bucket { + if bk == nil { + newBucket, err := tx.CreateBucket([]byte(bucketName)) + require.NoError(t, err, "failed to create bucket %s", bucketName) + populateSampleDataInBucket(t, newBucket, rand.Intn(4096)) + return newBucket } - return data + newBucket, err := bk.CreateBucket([]byte(bucketName)) + require.NoError(t, err, "failed to create bucket %s", bucketName) + populateSampleDataInBucket(t, bk, rand.Intn(4096)) + return newBucket } -func insertRandKeysValuesBucket(t testing.TB, bk *bbolt.Bucket, n int) { +func populateSampleDataInBucket(t testing.TB, bk *bbolt.Bucket, n int) { var min, max = 1, 1024 for i := 0; i < n; i++ { diff --git 
a/utils_test.go b/utils_test.go index 867109493..1a4f23939 100644 --- a/utils_test.go +++ b/utils_test.go @@ -16,6 +16,7 @@ func dumpBucket(srcBucketName []byte, srcBucket *bolt.Bucket, dstFilename string if err != nil { return err } + defer dstDB.Close() return dstDB.Update(func(tx *bolt.Tx) error { dstBucket, err := tx.CreateBucket(srcBucketName) From 886eccbdf505b939898b39aa1889c93cede58dc0 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 2 Jan 2024 14:11:09 +0000 Subject: [PATCH 189/439] Add log into MoveBucket and clone the key Signed-off-by: Benjamin Wang --- bucket.go | 48 ++++++++++++++++++++++++++++++---------------- movebucket_test.go | 2 +- 2 files changed, 33 insertions(+), 17 deletions(-) diff --git a/bucket.go b/bucket.go index 78a68f548..9fbc9766c 100644 --- a/bucket.go +++ b/bucket.go @@ -285,19 +285,21 @@ func (b *Bucket) DeleteBucket(key []byte) (err error) { return errors.ErrTxNotWritable } + newKey := cloneBytes(key) + // Move cursor to correct position. c := b.Cursor() - k, _, flags := c.seek(key) + k, _, flags := c.seek(newKey) // Return an error if bucket doesn't exist or is not a bucket. - if !bytes.Equal(key, k) { + if !bytes.Equal(newKey, k) { return errors.ErrBucketNotFound } else if (flags & common.BucketLeafFlag) == 0 { return errors.ErrIncompatibleValue } // Recursively delete all child buckets. - child := b.Bucket(key) + child := b.Bucket(newKey) err = child.ForEachBucket(func(k []byte) error { if err := child.DeleteBucket(k); err != nil { return fmt.Errorf("delete bucket: %s", err) @@ -309,7 +311,7 @@ func (b *Bucket) DeleteBucket(key []byte) (err error) { } // Remove cached copy. - delete(b.buckets, string(key)) + delete(b.buckets, string(newKey)) // Release all bucket pages to freelist. child.nodes = nil @@ -317,7 +319,7 @@ func (b *Bucket) DeleteBucket(key []byte) (err error) { child.free() // Delete the node if we have a matching key. 
- c.node().del(key) + c.node().del(newKey) return nil } @@ -328,48 +330,62 @@ func (b *Bucket) DeleteBucket(key []byte) (err error) { // 2. or the key already exists in the destination bucket; // 3. or the key represents a non-bucket value; // 4. the source and destination buckets are the same. -func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) error { +func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) (err error) { + lg := b.tx.db.Logger() + lg.Debugf("Moving bucket %q", string(key)) + defer func() { + if err != nil { + lg.Errorf("Moving bucket %q failed: %v", string(key), err) + } else { + lg.Debugf("Moving bucket %q successfully", string(key)) + } + }() + if b.tx.db == nil || dstBucket.tx.db == nil { return errors.ErrTxClosed } else if !dstBucket.Writable() { return errors.ErrTxNotWritable } + newKey := cloneBytes(key) + // Move cursor to correct position. c := b.Cursor() - k, v, flags := c.seek(key) + k, v, flags := c.seek(newKey) // Return an error if bucket doesn't exist or is not a bucket. - if !bytes.Equal(key, k) { + if !bytes.Equal(newKey, k) { return errors.ErrBucketNotFound } else if (flags & common.BucketLeafFlag) == 0 { - return fmt.Errorf("key %q isn't a bucket in the source bucket: %w", key, errors.ErrIncompatibleValue) + lg.Errorf("An incompatible key %s exists in the source bucket", string(newKey)) + return errors.ErrIncompatibleValue } // Do nothing (return true directly) if the source bucket and the // destination bucket are actually the same bucket. 
if b == dstBucket || (b.RootPage() == dstBucket.RootPage() && b.RootPage() != 0) { - return fmt.Errorf("source bucket %s and target bucket %s are the same: %w", b.String(), dstBucket.String(), errors.ErrSameBuckets) + lg.Errorf("The source bucket (%s) and the target bucket (%s) are the same bucket", b.String(), dstBucket.String()) + return errors.ErrSameBuckets } // check whether the key already exists in the destination bucket curDst := dstBucket.Cursor() - k, _, flags = curDst.seek(key) + k, _, flags = curDst.seek(newKey) // Return an error if there is an existing key in the destination bucket. - if bytes.Equal(key, k) { + if bytes.Equal(newKey, k) { if (flags & common.BucketLeafFlag) != 0 { return errors.ErrBucketExists } - return fmt.Errorf("key %q already exists in the target bucket: %w", key, errors.ErrIncompatibleValue) + lg.Errorf("An incompatible key %s exists in the target bucket", string(newKey)) + return errors.ErrIncompatibleValue } // remove the sub-bucket from the source bucket - delete(b.buckets, string(key)) - c.node().del(key) + delete(b.buckets, string(newKey)) + c.node().del(newKey) // add te sub-bucket to the destination bucket - newKey := cloneBytes(key) newValue := cloneBytes(v) curDst.node().put(newKey, newKey, newValue, 0, common.BucketLeafFlag) diff --git a/movebucket_test.go b/movebucket_test.go index b89b9602f..0b60d95bd 100644 --- a/movebucket_test.go +++ b/movebucket_test.go @@ -248,7 +248,7 @@ func createBucketAndPopulateData(t testing.TB, tx *bbolt.Tx, bk *bbolt.Bucket, b newBucket, err := bk.CreateBucket([]byte(bucketName)) require.NoError(t, err, "failed to create bucket %s", bucketName) - populateSampleDataInBucket(t, bk, rand.Intn(4096)) + populateSampleDataInBucket(t, newBucket, rand.Intn(4096)) return newBucket } From 49eb212fa8ab67709ea460df01982504cf7fa4a1 Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Thu, 4 Jan 2024 14:40:21 +0800 Subject: [PATCH 190/439] tests/robustness: switch to kill if no panic after 10sec If file doesn't 
grow in 10 sec, bbolt won't trigger the following errors: * lackOfDiskSpace * mapError * resizeFileError * unmapError We should switch to kill instead of waiting for panic. In order to trigger these errors, we should increase value size to 512. Signed-off-by: Wei Fu --- tests/robustness/powerfailure_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/robustness/powerfailure_test.go b/tests/robustness/powerfailure_test.go index 09ae88124..4b150ccdc 100644 --- a/tests/robustness/powerfailure_test.go +++ b/tests/robustness/powerfailure_test.go @@ -94,6 +94,7 @@ func doPowerFailure(t *testing.T, du time.Duration, fsMountOpt string, useFailpo "-path", dbPath, "-count=1000000000", "-batch-size=5", // separate total count into multiple truncation + "-value-size=512", } logPath := filepath.Join(t.TempDir(), fmt.Sprintf("%s.log", t.Name())) @@ -140,7 +141,7 @@ func doPowerFailure(t *testing.T, du time.Duration, fsMountOpt string, useFailpo select { case <-time.After(10 * time.Second): - t.Error("bbolt should stop with panic in seconds") + t.Log("bbolt is supposed to be already stopped, but actually not yet; forcibly kill it") assert.NoError(t, cmd.Process.Kill()) case err := <-errCh: require.Error(t, err) From 68ab1a12074467ed2b3c842a6fefeb89379687a8 Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Thu, 4 Jan 2024 15:55:40 +0800 Subject: [PATCH 191/439] *: run test-robustness with build Add new recipe to build bbolt command so that we can ensure that test-robustness recipe is using bbolt with gofail. Since test-robustness requires root, move `sudo` into test-robustness recipe. Otherwise, both bbolt binary and gofail code belongs to root user. 
Signed-off-by: Wei Fu --- .github/workflows/robustness_template.yaml | 6 +----- Makefile | 15 ++++++++++++--- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml index 54ed3b483..f99ea6689 100644 --- a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -31,8 +31,4 @@ jobs: run: | set -euo pipefail - make gofail-enable - - # build bbolt with failpoint - go install ./cmd/bbolt - sudo -E PATH=$PATH make ROBUSTNESS_TESTFLAGS="--count ${{ inputs.count }} --timeout ${{ inputs.testTimeout }} -failfast" test-robustness + ROBUSTNESS_TESTFLAGS="--count ${{ inputs.count }} --timeout ${{ inputs.testTimeout }} -failfast" make test-robustness diff --git a/Makefile b/Makefile index b2e95df8e..b0d019802 100644 --- a/Makefile +++ b/Makefile @@ -61,6 +61,15 @@ coverage: TEST_FREELIST_TYPE=array go test -v -timeout ${TESTFLAGS_TIMEOUT} \ -coverprofile cover-freelist-array.out -covermode atomic +BOLT_CMD=bbolt + +build: + go build -o bin/${BOLT_CMD} ./cmd/${BOLT_CMD} + +.PHONY: clean +clean: # Clean binaries + rm -f ./bin/${BOLT_CMD} + .PHONY: gofail-enable gofail-enable: install-gofail gofail enable . 
@@ -82,6 +91,6 @@ test-failpoint: BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint .PHONY: test-robustness # Running robustness tests requires root permission -test-robustness: - go test -v ${TESTFLAGS} ./tests/dmflakey -test.root - go test -v ${TESTFLAGS} ${ROBUSTNESS_TESTFLAGS} ./tests/robustness -test.root +test-robustness: gofail-enable build + sudo env PATH=$$PATH go test -v ${TESTFLAGS} ./tests/dmflakey -test.root + sudo env PATH=$(PWD)/bin:$$PATH go test -v ${TESTFLAGS} ${ROBUSTNESS_TESTFLAGS} ./tests/robustness -test.root From 32f6f6646f9cc37cda826c1de980d9f2d22f7866 Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Wed, 3 Jan 2024 22:10:53 +0800 Subject: [PATCH 192/439] .github: enable robustness on arm64 Signed-off-by: Wei Fu --- .github/workflows/robustness_nightly.yaml | 9 ++++++++- .github/workflows/robustness_template.yaml | 1 + .github/workflows/robustness_test.yaml | 8 +++++++- 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/.github/workflows/robustness_nightly.yaml b/.github/workflows/robustness_nightly.yaml index 8b2bdb81e..96a519afa 100644 --- a/.github/workflows/robustness_nightly.yaml +++ b/.github/workflows/robustness_nightly.yaml @@ -8,10 +8,17 @@ on: workflow_dispatch: jobs: - main: + amd64: # GHA has a maximum amount of 6h execution time, we try to get done within 3h uses: ./.github/workflows/robustness_template.yaml with: count: 100 testTimeout: 200m runs-on: "['ubuntu-latest-8-cores']" + arm64: + # GHA has a maximum amount of 6h execution time, we try to get done within 3h + uses: ./.github/workflows/robustness_template.yaml + with: + count: 100 + testTimeout: 200m + runs-on: "['actuated-arm64-4cpu-8gb']" diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml index f99ea6689..e9b9a38d7 100644 --- a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -30,5 +30,6 @@ jobs: - name: test-robustness 
run: | set -euo pipefail + sudo apt-get install -y dmsetup ROBUSTNESS_TESTFLAGS="--count ${{ inputs.count }} --timeout ${{ inputs.testTimeout }} -failfast" make test-robustness diff --git a/.github/workflows/robustness_test.yaml b/.github/workflows/robustness_test.yaml index a96854d4e..4d6afd9e8 100644 --- a/.github/workflows/robustness_test.yaml +++ b/.github/workflows/robustness_test.yaml @@ -2,9 +2,15 @@ name: Robustness Test on: [push, pull_request] permissions: read-all jobs: - test: + amd64: uses: ./.github/workflows/robustness_template.yaml with: count: 10 testTimeout: 30m runs-on: "['ubuntu-latest-8-cores']" + arm64: + uses: ./.github/workflows/robustness_template.yaml + with: + count: 10 + testTimeout: 30m + runs-on: "['actuated-arm64-4cpu-8gb']" From 9cec34d9c6e0df5cf7306d593a5b87d1cb9fbc55 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 5 Jan 2024 10:09:21 +0000 Subject: [PATCH 193/439] move the closing of channel into the same method of creating the channel Signed-off-by: Benjamin Wang --- tx_check.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tx_check.go b/tx_check.go index 08a1a6eda..da4ff3c47 100644 --- a/tx_check.go +++ b/tx_check.go @@ -27,11 +27,15 @@ func (tx *Tx) Check(options ...CheckOption) <-chan error { } ch := make(chan error) - go tx.check(chkConfig.kvStringer, ch) + go func() { + // Close the channel to signal completion. + defer close(ch) + tx.check(chkConfig, ch) + }() return ch } -func (tx *Tx) check(kvStringer KVStringer, ch chan error) { +func (tx *Tx) check(cfg checkConfig, ch chan error) { // Force loading free list if opened in ReadOnly mode. tx.db.loadFreelist() @@ -57,7 +61,7 @@ func (tx *Tx) check(kvStringer KVStringer, ch chan error) { } // Recursively check buckets. 
- tx.recursivelyCheckBucket(&tx.root, reachable, freed, kvStringer, ch) + tx.recursivelyCheckBucket(&tx.root, reachable, freed, cfg.kvStringer, ch) // Ensure all pages below high water mark are either reachable or freed. for i := common.Pgid(0); i < tx.meta.Pgid(); i++ { @@ -66,9 +70,6 @@ func (tx *Tx) check(kvStringer KVStringer, ch chan error) { ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) } } - - // Close the channel to signal completion. - close(ch) } func (tx *Tx) recursivelyCheckBucket(b *Bucket, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, From f9c9a172dd944ad6f57787649a5dbaa7ab662e34 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Jan 2024 14:42:18 +0000 Subject: [PATCH 194/439] build(deps): Bump golang.org/x/sys from 0.15.0 to 0.16.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.15.0 to 0.16.0. - [Commits](https://github.com/golang/sys/compare/v0.15.0...v0.16.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e597edd70..b12bf4aaf 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/stretchr/testify v1.8.4 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.5.0 - golang.org/x/sys v0.15.0 + golang.org/x/sys v0.16.0 ) require ( diff --git a/go.sum b/go.sum index 6634d6b66..dd05a6fdc 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 927052356575468f43202914e840643a1f169a13 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Jan 2024 17:05:07 +0000 Subject: [PATCH 195/439] build(deps): Bump golang.org/x/sync from 0.5.0 to 0.6.0 Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.5.0 to 0.6.0. - [Commits](https://github.com/golang/sync/compare/v0.5.0...v0.6.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b12bf4aaf..c3a0db424 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.4 go.etcd.io/gofail v0.1.0 - golang.org/x/sync v0.5.0 + golang.org/x/sync v0.6.0 golang.org/x/sys v0.16.0 ) diff --git a/go.sum b/go.sum index dd05a6fdc..26d0e638d 100644 --- a/go.sum +++ b/go.sum @@ -14,8 +14,8 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= -golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= -golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From 204e8bd9d9567f3b35d655475a703a7f54d3643f Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 10 Jan 2024 15:38:20 +0000 Subject: [PATCH 196/439] Update bbolt version to 1.4.0-alpha.0 Signed-off-by: Benjamin Wang --- version/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version/version.go b/version/version.go index b0387d599..af945d771 100644 --- a/version/version.go +++ b/version/version.go @@ -2,5 +2,5 @@ package version var ( // Version shows the last bbolt binary version released. 
- Version = "1.3.7" + Version = "1.4.0-alpha.0" ) From 019c34e51f940334d41a0c1cc1a50e7ba6d0f782 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 11 Jan 2024 13:03:58 +0000 Subject: [PATCH 197/439] add method Inspect to inspect bucket structure Also added a related command: bbolt inspect db The outputed etcd data structure: { "name": "root", "keyN": 0, "children": [ { "name": "alarm", "keyN": 0 }, { "name": "auth", "keyN": 2 }, { "name": "authRoles", "keyN": 1 }, { "name": "authUsers", "keyN": 1 }, { "name": "cluster", "keyN": 1 }, { "name": "key", "keyN": 1285 }, { "name": "lease", "keyN": 2 }, { "name": "members", "keyN": 1 }, { "name": "members_removed", "keyN": 0 }, { "name": "meta", "keyN": 3 } ] } Signed-off-by: Benjamin Wang --- bucket.go | 30 +++++++++ bucket_test.go | 105 ++++++++++++++++++++++++++++++ cmd/bbolt/README.md | 55 ++++++++++++++++ cmd/bbolt/command_inspect.go | 55 ++++++++++++++++ cmd/bbolt/command_inspect_test.go | 27 ++++++++ cmd/bbolt/command_root.go | 1 + cmd/bbolt/command_surgery.go | 10 --- cmd/bbolt/main.go | 1 + cmd/bbolt/utils.go | 16 +++++ tx.go | 5 ++ 10 files changed, 295 insertions(+), 10 deletions(-) create mode 100644 cmd/bbolt/command_inspect.go create mode 100644 cmd/bbolt/command_inspect_test.go create mode 100644 cmd/bbolt/utils.go diff --git a/bucket.go b/bucket.go index 9fbc9766c..f87a1b19b 100644 --- a/bucket.go +++ b/bucket.go @@ -392,6 +392,30 @@ func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) (err error) { return nil } +// Inspect returns the structure of the bucket. 
+func (b *Bucket) Inspect() BucketStructure { + return b.recursivelyInspect([]byte("root")) +} + +func (b *Bucket) recursivelyInspect(name []byte) BucketStructure { + bs := BucketStructure{Name: string(name)} + + keyN := 0 + c := b.Cursor() + for k, _, flags := c.first(); k != nil; k, _, flags = c.next() { + if flags&common.BucketLeafFlag != 0 { + childBucket := b.Bucket(k) + childBS := childBucket.recursivelyInspect(k) + bs.Children = append(bs.Children, childBS) + } else { + keyN++ + } + } + bs.KeyN = keyN + + return bs +} + // Get retrieves the value for a key in the bucket. // Returns a nil value if the key does not exist or if the key is a nested bucket. // The returned value is only valid for the life of the transaction. @@ -955,3 +979,9 @@ func cloneBytes(v []byte) []byte { copy(clone, v) return clone } + +type BucketStructure struct { + Name string `json:"name"` // name of the bucket + KeyN int `json:"keyN"` // number of key/value pairs + Children []BucketStructure `json:"buckets,omitempty"` // child buckets +} diff --git a/bucket_test.go b/bucket_test.go index b60a1b912..3255e7b89 100644 --- a/bucket_test.go +++ b/bucket_test.go @@ -1623,6 +1623,111 @@ func TestBucket_Stats_Nested(t *testing.T) { } } +func TestBucket_Inspect(t *testing.T) { + db := btesting.MustCreateDB(t) + + expectedStructure := bolt.BucketStructure{ + Name: "root", + KeyN: 0, + Children: []bolt.BucketStructure{ + { + Name: "b1", + KeyN: 3, + Children: []bolt.BucketStructure{ + { + Name: "b1_1", + KeyN: 6, + }, + { + Name: "b1_2", + KeyN: 7, + }, + { + Name: "b1_3", + KeyN: 8, + }, + }, + }, + { + Name: "b2", + KeyN: 4, + Children: []bolt.BucketStructure{ + { + Name: "b2_1", + KeyN: 10, + }, + { + Name: "b2_2", + KeyN: 12, + Children: []bolt.BucketStructure{ + { + Name: "b2_2_1", + KeyN: 2, + }, + { + Name: "b2_2_2", + KeyN: 3, + }, + }, + }, + { + Name: "b2_3", + KeyN: 11, + }, + }, + }, + }, + } + + type bucketItem struct { + b *bolt.Bucket + bs bolt.BucketStructure + } + + 
t.Log("Populating the database") + err := db.Update(func(tx *bolt.Tx) error { + queue := []bucketItem{ + { + b: nil, + bs: expectedStructure, + }, + } + + for len(queue) > 0 { + item := queue[0] + queue = queue[1:] + + if item.b != nil { + for i := 0; i < item.bs.KeyN; i++ { + err := item.b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))) + require.NoError(t, err) + } + + for _, child := range item.bs.Children { + childBucket, err := item.b.CreateBucket([]byte(child.Name)) + require.NoError(t, err) + queue = append(queue, bucketItem{b: childBucket, bs: child}) + } + } else { + for _, child := range item.bs.Children { + childBucket, err := tx.CreateBucket([]byte(child.Name)) + require.NoError(t, err) + queue = append(queue, bucketItem{b: childBucket, bs: child}) + } + } + } + return nil + }) + require.NoError(t, err) + + t.Log("Inspecting the database") + _ = db.View(func(tx *bolt.Tx) error { + actualStructure := tx.Inspect() + assert.Equal(t, expectedStructure, actualStructure) + return nil + }) +} + // Ensure a large bucket can calculate stats. func TestBucket_Stats_Large(t *testing.T) { if testing.Short() { diff --git a/cmd/bbolt/README.md b/cmd/bbolt/README.md index 047b4977a..41aa151ec 100644 --- a/cmd/bbolt/README.md +++ b/cmd/bbolt/README.md @@ -162,6 +162,61 @@ Bytes used for inlined buckets: 780 (0%) ``` +### inspect +- `inspect` inspect the structure of the database. 
+- Usage: `bbolt inspect [path to the bbolt database]` + + Example: +```bash +$ ./bbolt inspect ~/default.etcd/member/snap/db +{ + "name": "root", + "keyN": 0, + "buckets": [ + { + "name": "alarm", + "keyN": 0 + }, + { + "name": "auth", + "keyN": 2 + }, + { + "name": "authRoles", + "keyN": 1 + }, + { + "name": "authUsers", + "keyN": 1 + }, + { + "name": "cluster", + "keyN": 1 + }, + { + "name": "key", + "keyN": 1285 + }, + { + "name": "lease", + "keyN": 2 + }, + { + "name": "members", + "keyN": 1 + }, + { + "name": "members_removed", + "keyN": 0 + }, + { + "name": "meta", + "keyN": 3 + } + ] +} +``` + ### pages - Pages prints a table of pages with their type (meta, leaf, branch, freelist). diff --git a/cmd/bbolt/command_inspect.go b/cmd/bbolt/command_inspect.go new file mode 100644 index 000000000..68cbe53f6 --- /dev/null +++ b/cmd/bbolt/command_inspect.go @@ -0,0 +1,55 @@ +package main + +import ( + "encoding/json" + "errors" + "fmt" + "os" + + "github.com/spf13/cobra" + + bolt "go.etcd.io/bbolt" +) + +func newInspectCobraCommand() *cobra.Command { + inspectCmd := &cobra.Command{ + Use: "inspect", + Short: "inspect the structure of the database", + Args: func(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return errors.New("db file path not provided") + } + if len(args) > 1 { + return errors.New("too many arguments") + } + return nil + }, + RunE: func(cmd *cobra.Command, args []string) error { + return inspectFunc(args[0]) + }, + } + + return inspectCmd +} + +func inspectFunc(srcDBPath string) error { + if _, err := checkSourceDBPath(srcDBPath); err != nil { + return err + } + + db, err := bolt.Open(srcDBPath, 0600, &bolt.Options{ReadOnly: true}) + if err != nil { + return err + } + defer db.Close() + + return db.View(func(tx *bolt.Tx) error { + bs := tx.Inspect() + out, err := json.MarshalIndent(bs, "", " ") + if err != nil { + return err + } + fmt.Fprintln(os.Stdout, string(out)) + return nil + }) +} diff --git 
a/cmd/bbolt/command_inspect_test.go b/cmd/bbolt/command_inspect_test.go new file mode 100644 index 000000000..f1ec8de73 --- /dev/null +++ b/cmd/bbolt/command_inspect_test.go @@ -0,0 +1,27 @@ +package main_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + bolt "go.etcd.io/bbolt" + main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/internal/btesting" +) + +func TestInspect(t *testing.T) { + pageSize := 4096 + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: pageSize}) + srcPath := db.Path() + db.Close() + + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + rootCmd := main.NewRootCommand() + rootCmd.SetArgs([]string{ + "inspect", srcPath, + }) + err := rootCmd.Execute() + require.NoError(t, err) +} diff --git a/cmd/bbolt/command_root.go b/cmd/bbolt/command_root.go index 31a174080..b69a619ed 100644 --- a/cmd/bbolt/command_root.go +++ b/cmd/bbolt/command_root.go @@ -19,6 +19,7 @@ func NewRootCommand() *cobra.Command { rootCmd.AddCommand( newVersionCobraCommand(), newSurgeryCobraCommand(), + newInspectCobraCommand(), ) return rootCmd diff --git a/cmd/bbolt/command_surgery.go b/cmd/bbolt/command_surgery.go index 129ae459d..b0ecd9025 100644 --- a/cmd/bbolt/command_surgery.go +++ b/cmd/bbolt/command_surgery.go @@ -330,13 +330,3 @@ func readMetaPage(path string) (*common.Meta, error) { } return m[1], nil } - -func checkSourceDBPath(srcPath string) (os.FileInfo, error) { - fi, err := os.Stat(srcPath) - if os.IsNotExist(err) { - return nil, fmt.Errorf("source database file %q doesn't exist", srcPath) - } else if err != nil { - return nil, fmt.Errorf("failed to open source database file %q: %v", srcPath, err) - } - return fi, nil -} diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index ea284539e..121fd4da9 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -170,6 +170,7 @@ The commands are: pages print list of pages with their types page-item print the key and value of a page item. 
stats iterate over all pages and generate usage stats + inspect inspect the structure of the database surgery perform surgery on bbolt database Use "bbolt [command] -h" for more information about a command. diff --git a/cmd/bbolt/utils.go b/cmd/bbolt/utils.go new file mode 100644 index 000000000..71f1a3d8c --- /dev/null +++ b/cmd/bbolt/utils.go @@ -0,0 +1,16 @@ +package main + +import ( + "fmt" + "os" +) + +func checkSourceDBPath(srcPath string) (os.FileInfo, error) { + fi, err := os.Stat(srcPath) + if os.IsNotExist(err) { + return nil, fmt.Errorf("source database file %q doesn't exist", srcPath) + } else if err != nil { + return nil, fmt.Errorf("failed to open source database file %q: %v", srcPath, err) + } + return fi, nil +} diff --git a/tx.go b/tx.go index 81913b0fe..950d06151 100644 --- a/tx.go +++ b/tx.go @@ -100,6 +100,11 @@ func (tx *Tx) Stats() TxStats { return tx.stats } +// Inspect returns the structure of the database. +func (tx *Tx) Inspect() BucketStructure { + return tx.root.Inspect() +} + // Bucket retrieves a bucket by name. // Returns nil if the bucket does not exist. // The bucket instance is only valid for the lifetime of the transaction. From ae4ae4a0f90dacbd63faea9a6ee82b9c67bd205f Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Fri, 12 Jan 2024 10:08:00 +0800 Subject: [PATCH 198/439] README.md: introduce known issue section The users might run into data corrupted issues caused by underlay filesystem. It's out of scope for bboltdb maintainers to fix filesystem issue. But the section to track known issues can help users and contributors to analyse root cause when they run into data corrupted issues. Signed-off-by: Wei Fu --- README.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/README.md b/README.md index 06eec0e9c..ff619b291 100644 --- a/README.md +++ b/README.md @@ -69,6 +69,7 @@ New minor versions may add additional features to the API. 
- [LMDB](#lmdb) - [Caveats & Limitations](#caveats--limitations) - [Reading the Source](#reading-the-source) + - [Known Issues](#known-issues) - [Other Projects Using Bolt](#other-projects-using-bolt) ## Getting Started @@ -934,6 +935,19 @@ The best places to start are the main entry points into Bolt: If you have additional notes that could be helpful for others, please submit them via pull request. +## Known Issues + +- bbolt might run into data corruption issue on Linux when the feature + [ext4: fast commit](https://lwn.net/Articles/842385/), which was introduced in + linux kernel version v5.10, is enabled. The fixes to the issue were included in + linux kernel version v5.17, please refer to links below, + + * [ext4: fast commit may miss tracking unwritten range during ftruncate](https://lore.kernel.org/linux-ext4/20211223032337.5198-3-yinxin.x@bytedance.com/) + * [ext4: fast commit may not fallback for ineligible commit](https://lore.kernel.org/lkml/202201091544.W5HHEXAp-lkp@intel.com/T/#ma0768815e4b5f671e9e451d578256ef9a76fe30e) + * [ext4 updates for 5.17](https://lore.kernel.org/lkml/YdyxjTFaLWif6BCM@mit.edu/) + + Please also refer to the discussion in https://github.com/etcd-io/bbolt/issues/562. 
+ ## Other Projects Using Bolt From 7555f264f5b3f8cb428ac65e8c4a52c08883c922 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 9 Jan 2024 10:23:39 +0100 Subject: [PATCH 199/439] prevent MoveBucket from moving a bucket across two different db files Signed-off-by: Mustafa Elbehery --- bucket.go | 7 ++- errors/errors.go | 4 ++ movebucket_test.go | 123 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 133 insertions(+), 1 deletion(-) diff --git a/bucket.go b/bucket.go index f87a1b19b..2f1d71048 100644 --- a/bucket.go +++ b/bucket.go @@ -343,10 +343,15 @@ func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) (err error) { if b.tx.db == nil || dstBucket.tx.db == nil { return errors.ErrTxClosed - } else if !dstBucket.Writable() { + } else if !b.Writable() || !dstBucket.Writable() { return errors.ErrTxNotWritable } + if b.tx.db.Path() != dstBucket.tx.db.Path() || b.tx != dstBucket.tx { + lg.Errorf("The source and target buckets are not in the same db file, source bucket in %s and target bucket in %s", b.tx.db.Path(), dstBucket.tx.db.Path()) + return errors.ErrDifferentDB + } + newKey := cloneBytes(key) // Move cursor to correct position. diff --git a/errors/errors.go b/errors/errors.go index 5709bcf2c..e5428b9d6 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -77,4 +77,8 @@ var ( // ErrSameBuckets is returned when trying to move a sub-bucket between // source and target buckets, while source and target buckets are the same. ErrSameBuckets = errors.New("the source and target are the same bucket") + + // ErrDifferentDB is returned when trying to move a sub-bucket between + // source and target buckets, while source and target buckets are in different database files. 
+ ErrDifferentDB = errors.New("the source and target buckets are in different database files") ) diff --git a/movebucket_test.go b/movebucket_test.go index 0b60d95bd..a04e24c9c 100644 --- a/movebucket_test.go +++ b/movebucket_test.go @@ -216,6 +216,129 @@ func TestTx_MoveBucket(t *testing.T) { } } +func TestBucket_MoveBucket_DiffDB(t *testing.T) { + srcBucketPath := []string{"sb1", "sb2"} + dstBucketPath := []string{"db1", "db2"} + bucketToMove := "bucketToMove" + + var srcBucket *bbolt.Bucket + + t.Log("Creating source bucket and populate some data") + srcDB := btesting.MustCreateDBWithOption(t, &bbolt.Options{PageSize: 4096}) + err := srcDB.Update(func(tx *bbolt.Tx) error { + srcBucket = prepareBuckets(t, tx, srcBucketPath...) + return nil + }) + require.NoError(t, err) + defer func() { + require.NoError(t, srcDB.Close()) + }() + + t.Log("Creating target bucket and populate some data") + dstDB := btesting.MustCreateDBWithOption(t, &bbolt.Options{PageSize: 4096}) + err = dstDB.Update(func(tx *bbolt.Tx) error { + prepareBuckets(t, tx, dstBucketPath...) + return nil + }) + require.NoError(t, err) + defer func() { + require.NoError(t, dstDB.Close()) + }() + + t.Log("Reading source bucket in a separate RWTx") + sTx, sErr := srcDB.Begin(true) + require.NoError(t, sErr) + defer func() { + require.NoError(t, sTx.Rollback()) + }() + srcBucket = prepareBuckets(t, sTx, srcBucketPath...) + + t.Log("Moving the sub-bucket in a separate RWTx") + err = dstDB.Update(func(tx *bbolt.Tx) error { + dstBucket := prepareBuckets(t, tx, dstBucketPath...) 
+ mErr := srcBucket.MoveBucket([]byte(bucketToMove), dstBucket) + require.Equal(t, errors.ErrDifferentDB, mErr) + + return nil + }) + require.NoError(t, err) +} + +func TestBucket_MoveBucket_DiffTx(t *testing.T) { + testCases := []struct { + name string + srcBucketPath []string + dstBucketPath []string + isSrcReadonlyTx bool + isDstReadonlyTx bool + bucketToMove string + expectedErr error + }{ + { + name: "src is RWTx and target is RTx", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + isSrcReadonlyTx: true, + isDstReadonlyTx: false, + bucketToMove: "bucketToMove", + expectedErr: errors.ErrTxNotWritable, + }, + { + name: "src is RTx and target is RWTx", + srcBucketPath: []string{"sb1", "sb2"}, + dstBucketPath: []string{"db1", "db2"}, + isSrcReadonlyTx: false, + isDstReadonlyTx: true, + bucketToMove: "bucketToMove", + expectedErr: errors.ErrTxNotWritable, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var srcBucket *bbolt.Bucket + var dstBucket *bbolt.Bucket + + t.Log("Creating source and target buckets and populate some data") + db := btesting.MustCreateDBWithOption(t, &bbolt.Options{PageSize: 4096}) + err := db.Update(func(tx *bbolt.Tx) error { + srcBucket = prepareBuckets(t, tx, tc.srcBucketPath...) + dstBucket = prepareBuckets(t, tx, tc.dstBucketPath...) + return nil + }) + require.NoError(t, err) + defer func() { + require.NoError(t, db.Close()) + }() + + t.Log("Opening source bucket in a separate Tx") + sTx, sErr := db.Begin(tc.isSrcReadonlyTx) + require.NoError(t, sErr) + defer func() { + require.NoError(t, sTx.Rollback()) + }() + srcBucket = prepareBuckets(t, sTx, tc.srcBucketPath...) + + t.Log("Opening target bucket in a separate Tx") + dTx, dErr := db.Begin(tc.isDstReadonlyTx) + require.NoError(t, dErr) + defer func() { + require.NoError(t, dTx.Rollback()) + }() + dstBucket = prepareBuckets(t, dTx, tc.dstBucketPath...) 
+ + t.Log("Moving the sub-bucket") + err = db.View(func(tx *bbolt.Tx) error { + mErr := srcBucket.MoveBucket([]byte(tc.bucketToMove), dstBucket) + require.Equal(t, tc.expectedErr, mErr) + + return nil + }) + require.NoError(t, err) + }) + } +} + // prepareBuckets opens the bucket chain. For each bucket in the chain, // open it if existed, otherwise create it and populate sample data. func prepareBuckets(t testing.TB, tx *bbolt.Tx, buckets ...string) *bbolt.Bucket { From 6b4b05d3957ede374d0d2f04bdf6514894a2a3b2 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 12 Jan 2024 16:08:21 +0000 Subject: [PATCH 200/439] add changelog for v1.4.0-alpha.0 Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.4.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 CHANGELOG/CHANGELOG-1.4.md diff --git a/CHANGELOG/CHANGELOG-1.4.md b/CHANGELOG/CHANGELOG-1.4.md new file mode 100644 index 000000000..cbec652dd --- /dev/null +++ b/CHANGELOG/CHANGELOG-1.4.md @@ -0,0 +1,20 @@ + +
+ +## v1.4.0-alpha.0(2024-01-12) + +### BoltDB +- [Improve the performance of hashmapGetFreePageIDs](https://github.com/etcd-io/bbolt/pull/419) +- [Improve CreateBucketIfNotExists to avoid double searching the same key](https://github.com/etcd-io/bbolt/pull/532) +- [Support Android platform](https://github.com/etcd-io/bbolt/pull/571) +- [Record the count of free page to improve the performance of hashmapFreeCount](https://github.com/etcd-io/bbolt/pull/585) +- [Add logger to bbolt](https://github.com/etcd-io/bbolt/issues/509) +- [Support moving bucket inside the same db](https://github.com/etcd-io/bbolt/pull/635) +- [Support inspecting database structure](https://github.com/etcd-io/bbolt/pull/674) + +### CMD +- [Add `surgery clear-page-elements` command](https://github.com/etcd-io/bbolt/pull/417) +- [Add `surgery abandon-freelist` command](https://github.com/etcd-io/bbolt/pull/443) +- [Add `bbolt version` command](https://github.com/etcd-io/bbolt/pull/552) +- [Add `bbolt inspect` command](https://github.com/etcd-io/bbolt/pull/674) +- [Add `--no-sync` option to `bbolt compact` command](https://github.com/etcd-io/bbolt/pull/290) From af1fd0d5a6856d9671d3141e6480f701a5537438 Mon Sep 17 00:00:00 2001 From: mojighahar Date: Sat, 13 Jan 2024 13:56:22 +0330 Subject: [PATCH 201/439] add portainer to bolt users Signed-off-by: Mojtaba Ghahari --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index ff619b291..237aaac6b 100644 --- a/README.md +++ b/README.md @@ -994,6 +994,7 @@ Below is a list of public, open source projects that use Bolt: * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. * [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. * [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage. 
+* [Portainer](https://github.com/portainer/portainer) - A lightweight service delivery platform for containerized applications that can be used to manage Docker, Swarm, Kubernetes and ACI environments. * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. * [Rain](https://github.com/cenkalti/rain) - BitTorrent client and library. * [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi. From a984bde424b7a89070bbb453b1b9aff81f0fb7f1 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Thu, 18 Jan 2024 15:02:08 -0800 Subject: [PATCH 202/439] dependency: update go version to 1.21.6 Signed-off-by: Ivan Valdes --- .go-version | 2 +- cmd/bbolt/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index ce2dd5357..c262b1f0d 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.21.5 +1.21.6 diff --git a/cmd/bbolt/README.md b/cmd/bbolt/README.md index 41aa151ec..a07584934 100644 --- a/cmd/bbolt/README.md +++ b/cmd/bbolt/README.md @@ -72,7 +72,7 @@ ```bash $bbolt version bbolt version: 1.3.7 - Go Version: go1.21.5 + Go Version: go1.21.6 Go OS/Arch: darwin/arm64 ``` From 248e6f8a1545269a59c74011cf8b232f3a4d6023 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sun, 31 Dec 2023 14:13:48 +0000 Subject: [PATCH 203/439] enhance check functionality to support checking starting from a pageId Signed-off-by: Benjamin Wang --- tx_check.go | 74 ++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 65 insertions(+), 9 deletions(-) diff --git a/tx_check.go b/tx_check.go index da4ff3c47..a7fa99c7e 100644 --- a/tx_check.go +++ b/tx_check.go @@ -60,15 +60,63 @@ func (tx *Tx) check(cfg checkConfig, ch chan error) { } } - // Recursively check buckets. 
- tx.recursivelyCheckBucket(&tx.root, reachable, freed, cfg.kvStringer, ch) - - // Ensure all pages below high water mark are either reachable or freed. - for i := common.Pgid(0); i < tx.meta.Pgid(); i++ { - _, isReachable := reachable[i] - if !isReachable && !freed[i] { - ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + if cfg.pageId == 0 { + // Check the whole db file, starting from the root bucket and + // recursively check all child buckets. + tx.recursivelyCheckBucket(&tx.root, reachable, freed, cfg.kvStringer, ch) + + // Ensure all pages below high water mark are either reachable or freed. + for i := common.Pgid(0); i < tx.meta.Pgid(); i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } + } + } else { + // Check the db file starting from a specified pageId. + if cfg.pageId < 2 || cfg.pageId >= uint(tx.meta.Pgid()) { + ch <- fmt.Errorf("page ID (%d) out of range [%d, %d)", cfg.pageId, 2, tx.meta.Pgid()) + return + } + + tx.recursivelyCheckPage(common.Pgid(cfg.pageId), reachable, freed, cfg.kvStringer, ch) + } +} + +func (tx *Tx) recursivelyCheckPage(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, + kvStringer KVStringer, ch chan error) { + tx.checkInvariantProperties(pageId, reachable, freed, kvStringer, ch) + tx.recursivelyCheckBucketInPage(pageId, reachable, freed, kvStringer, ch) +} + +func (tx *Tx) recursivelyCheckBucketInPage(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, + kvStringer KVStringer, ch chan error) { + p := tx.page(pageId) + + switch { + case p.IsBranchPage(): + for i := range p.BranchPageElements() { + elem := p.BranchPageElement(uint16(i)) + tx.recursivelyCheckBucketInPage(elem.Pgid(), reachable, freed, kvStringer, ch) + } + case p.IsLeafPage(): + for i := range p.LeafPageElements() { + elem := p.LeafPageElement(uint16(i)) + if elem.Flags()&common.BucketLeafFlag != 0 { 
+ + inBkt := common.NewInBucket(pageId, 0) + tmpBucket := Bucket{ + InBucket: &inBkt, + rootNode: &node{isLeaf: p.IsLeafPage()}, + FillPercent: DefaultFillPercent, + } + if child := tmpBucket.Bucket(elem.Key()); child != nil { + tx.recursivelyCheckBucket(&tmpBucket, reachable, freed, kvStringer, ch) + } + } } + default: + ch <- fmt.Errorf("unexpected page type (flags: %x) for pgId:%d", p.Flags(), pageId) } } @@ -167,7 +215,7 @@ func (tx *Tx) recursivelyCheckPageKeyOrderInternal( return p.LeafPageElement(p.Count() - 1).Key() } default: - ch <- fmt.Errorf("unexpected page type for pgId:%d", pgId) + ch <- fmt.Errorf("unexpected page type (flags: %x) for pgId:%d", p.Flags(), pgId) } return maxKeyInSubtree } @@ -202,6 +250,7 @@ func verifyKeyOrder(pgId common.Pgid, pageType string, index int, key []byte, pr type checkConfig struct { kvStringer KVStringer + pageId uint } type CheckOption func(options *checkConfig) @@ -212,6 +261,13 @@ func WithKVStringer(kvStringer KVStringer) CheckOption { } } +// WithPageId sets a page ID from which the check command starts to check +func WithPageId(pageId uint) CheckOption { + return func(c *checkConfig) { + c.pageId = pageId + } +} + // KVStringer allows to prepare human-readable diagnostic messages. 
type KVStringer interface { KeyToString([]byte) string From 20e961112dd4f84be7d0c108652b686c12cb0f01 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Sat, 27 Jan 2024 14:38:56 +0100 Subject: [PATCH 204/439] add test check page Signed-off-by: Mustafa Elbehery --- tx_check_test.go | 144 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 144 insertions(+) create mode 100644 tx_check_test.go diff --git a/tx_check_test.go b/tx_check_test.go new file mode 100644 index 000000000..18afb0862 --- /dev/null +++ b/tx_check_test.go @@ -0,0 +1,144 @@ +package bbolt_test + +import ( + "bytes" + "encoding/binary" + "fmt" + "math/rand" + "testing" + "unsafe" + + "github.com/stretchr/testify/require" + + "go.etcd.io/bbolt" + "go.etcd.io/bbolt/internal/btesting" + "go.etcd.io/bbolt/internal/common" + "go.etcd.io/bbolt/internal/guts_cli" +) + +func TestTx_Check_CorruptPage(t *testing.T) { + bucketKey := "testBucket" + + t.Log("Creating db file.") + db := btesting.MustCreateDBWithOption(t, &bbolt.Options{PageSize: pageSize}) + defer func() { + require.NoError(t, db.Close()) + }() + + uErr := db.Update(func(tx *bbolt.Tx) error { + t.Logf("Creating bucket '%v'.", bucketKey) + b, bErr := tx.CreateBucketIfNotExists([]byte(bucketKey)) + require.NoError(t, bErr) + t.Logf("Generating random data in bucket '%v'.", bucketKey) + generateSampleDataInBucket(t, b, pageSize, 3) + return nil + }) + require.NoError(t, uErr) + + t.Logf("Corrupting random leaf page in bucket '%v'.", bucketKey) + victimPageId, validPageIds := corruptLeafPage(t, db.DB) + + t.Log("Running consistency check.") + vErr := db.View(func(tx *bbolt.Tx) error { + var cErrs []error + + t.Log("Check corrupted page.") + errChan := tx.Check(bbolt.WithPageId(uint(victimPageId))) + for cErr := range errChan { + cErrs = append(cErrs, cErr) + } + require.Greater(t, len(cErrs), 0) + + t.Log("Check valid pages.") + cErrs = cErrs[:0] + for _, pgId := range validPageIds { + errChan = tx.Check(bbolt.WithPageId(uint(pgId))) + 
for cErr := range errChan { + cErrs = append(cErrs, cErr) + } + require.Equal(t, 0, len(cErrs)) + } + return nil + }) + require.NoError(t, vErr) +} + +// corruptLeafPage write an invalid leafPageElement into the victim page. +func corruptLeafPage(t testing.TB, db *bbolt.DB) (victimPageId common.Pgid, validPageIds []common.Pgid) { + t.Helper() + victimPageId, validPageIds = findVictimPageId(t, db) + victimPage, victimBuf, err := guts_cli.ReadPage(db.Path(), uint64(victimPageId)) + require.NoError(t, err) + require.True(t, victimPage.IsLeafPage()) + require.True(t, victimPage.Count() > 0) + // Dumping random bytes in victim page for corruption. + copy(victimBuf[32:], generateCorruptionBytes(t)) + // Write the corrupt page to db file. + err = guts_cli.WritePage(db.Path(), victimBuf) + require.NoError(t, err) + return victimPageId, validPageIds +} + +// findVictimPageId finds all the leaf pages of a bucket and picks a random leaf page as a victim to be corrupted. +func findVictimPageId(t testing.TB, db *bbolt.DB) (victimPageId common.Pgid, validPageIds []common.Pgid) { + t.Helper() + // Read DB's RootPage. + rootPageId, _, err := guts_cli.GetRootPage(db.Path()) + require.NoError(t, err) + rootPage, _, err := guts_cli.ReadPage(db.Path(), uint64(rootPageId)) + require.NoError(t, err) + require.True(t, rootPage.IsLeafPage()) + require.Equal(t, 1, len(rootPage.LeafPageElements())) + // Find Bucket's RootPage. + lpe := rootPage.LeafPageElement(uint16(0)) + require.Equal(t, uint32(common.BranchPageFlag), lpe.Flags()) + k := lpe.Key() + require.Equal(t, "testBucket", string(k)) + bucketRootPageId := lpe.Bucket().RootPage() + // Read Bucket's RootPage. 
+ bucketRootPage, _, err := guts_cli.ReadPage(db.Path(), uint64(bucketRootPageId)) + require.NoError(t, err) + require.Equal(t, uint16(common.BranchPageFlag), bucketRootPage.Flags()) + // Retrieve Bucket's PageIds + var bucketPageIds []common.Pgid + for _, bpe := range bucketRootPage.BranchPageElements() { + bucketPageIds = append(bucketPageIds, bpe.Pgid()) + } + randomIdx := rand.Intn(len(bucketPageIds)) + victimPageId = bucketPageIds[randomIdx] + validPageIds = append(bucketPageIds[:randomIdx], bucketPageIds[randomIdx+1:]...) + return victimPageId, validPageIds +} + +// generateSampleDataInBucket fill in sample data into given bucket to create the given +// number of leafPages. To control the number of leafPages, sample data are generated in order. +func generateSampleDataInBucket(t testing.TB, bk *bbolt.Bucket, pageSize int, lPages int) { + t.Helper() + maxBytesInPage := int(bk.FillPercent * float64(pageSize)) + currentKey := 1 + currentVal := 100 + for i := 0; i < lPages; i++ { + currentSize := common.PageHeaderSize + for { + err := bk.Put([]byte(fmt.Sprintf("key_%d", currentKey)), []byte(fmt.Sprintf("val_%d", currentVal))) + require.NoError(t, err) + currentSize += common.LeafPageElementSize + unsafe.Sizeof(currentKey) + unsafe.Sizeof(currentVal) + if int(currentSize) >= maxBytesInPage { + break + } + currentKey++ + currentVal++ + } + } +} + +// generateCorruptionBytes returns random bytes to corrupt a page. +// It inserts a page element which violates the btree key order if no panic is expected. 
+func generateCorruptionBytes(t testing.TB) []byte { + t.Helper() + invalidLPE := common.NewLeafPageElement(0, 0, 0, 0) + var buf bytes.Buffer + err := binary.Write(&buf, binary.BigEndian, invalidLPE) + require.NoError(t, err) + return buf.Bytes() +} From 3405ebb2152e8a2ddbf408267e83624b3c036cb2 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Mon, 29 Jan 2024 20:18:55 +0100 Subject: [PATCH 205/439] fix cli cmds panics Signed-off-by: Mustafa Elbehery --- cmd/bbolt/main.go | 9 +++++++++ cmd/bbolt/main_test.go | 27 +++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 121fd4da9..3ec567c25 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -55,6 +55,9 @@ var ( // ErrKeyNotFound is returned when a key is not found. ErrKeyNotFound = errors.New("key not found") + + // ErrNotEnoughArgs is returned with a cmd is being executed with fewer arguments. + ErrNotEnoughArgs = errors.New("not enough arguments") ) func main() { @@ -921,6 +924,9 @@ func (cmd *keysCommand) Run(args ...string) error { // Require database path and bucket. relevantArgs := fs.Args() + if len(relevantArgs) < 2 { + return ErrNotEnoughArgs + } path, buckets := relevantArgs[0], relevantArgs[1:] if path == "" { return ErrPathRequired @@ -1000,6 +1006,9 @@ func (cmd *getCommand) Run(args ...string) error { // Require database path, bucket and key. 
relevantArgs := fs.Args() + if len(relevantArgs) < 3 { + return ErrNotEnoughArgs + } path, buckets := relevantArgs[0], relevantArgs[1:len(relevantArgs)-1] key, err := parseBytes(relevantArgs[len(relevantArgs)-1], parseFormat) if err != nil { diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index e137db3e9..b980e468a 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -649,6 +649,33 @@ func TestCompactCommand_Run(t *testing.T) { } } +func TestCommands_Run_NoArgs(t *testing.T) { + testCases := []struct { + name string + cmd string + expErr error + }{ + { + name: "get", + cmd: "get", + expErr: main.ErrNotEnoughArgs, + }, + { + name: "keys", + cmd: "keys", + expErr: main.ErrNotEnoughArgs, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + m := NewMain() + err := m.Run(tc.cmd) + require.ErrorIs(t, err, main.ErrNotEnoughArgs) + }) + } +} + func fillBucket(b *bolt.Bucket, prefix []byte) error { n := 10 + rand.Intn(50) for i := 0; i < n; i++ { From ad96767fd13c2fd70407cead39b61fb6d12b30fb Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 31 Jan 2024 19:13:51 +0000 Subject: [PATCH 206/439] Add changelog item for release-1.3 to cover the fix for bbolt keys and get commands Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.3.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG/CHANGELOG-1.3.md b/CHANGELOG/CHANGELOG-1.3.md index ee1eedec9..5dfd7fb4c 100644 --- a/CHANGELOG/CHANGELOG-1.3.md +++ b/CHANGELOG/CHANGELOG-1.3.md @@ -5,6 +5,9 @@ Note that we start to track changes starting from v1.3.7. ### BoltDB - [Clone the key before operating data in bucket against the key](https://github.com/etcd-io/bbolt/pull/639) +### CMD +- [Fix `bbolt keys` and `bbolt get` to prevent them from panicking when no parameter provided](https://github.com/etcd-io/bbolt/pull/683) +
## v1.3.8(2023-10-26) From 29d1e3d29169a28109784a1ca5d6b72261559bce Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sat, 3 Feb 2024 16:37:29 +0000 Subject: [PATCH 207/439] refactor/simplify the test case TestTx_Check_CorruptPage Signed-off-by: Benjamin Wang --- tx_check_test.go | 111 ++++++++++++++++------------------------------- 1 file changed, 38 insertions(+), 73 deletions(-) diff --git a/tx_check_test.go b/tx_check_test.go index 18afb0862..194cec32b 100644 --- a/tx_check_test.go +++ b/tx_check_test.go @@ -1,12 +1,9 @@ package bbolt_test import ( - "bytes" - "encoding/binary" "fmt" "math/rand" "testing" - "unsafe" "github.com/stretchr/testify/require" @@ -17,26 +14,19 @@ import ( ) func TestTx_Check_CorruptPage(t *testing.T) { - bucketKey := "testBucket" - t.Log("Creating db file.") - db := btesting.MustCreateDBWithOption(t, &bbolt.Options{PageSize: pageSize}) - defer func() { - require.NoError(t, db.Close()) - }() - - uErr := db.Update(func(tx *bbolt.Tx) error { - t.Logf("Creating bucket '%v'.", bucketKey) - b, bErr := tx.CreateBucketIfNotExists([]byte(bucketKey)) - require.NoError(t, bErr) - t.Logf("Generating random data in bucket '%v'.", bucketKey) - generateSampleDataInBucket(t, b, pageSize, 3) - return nil - }) - require.NoError(t, uErr) + db := btesting.MustCreateDBWithOption(t, &bbolt.Options{PageSize: 4096}) + + // Each page can hold roughly 20 key/values pair, so 100 such + // key/value pairs will consume about 5 leaf pages. 
+ err := db.Fill([]byte("data"), 1, 100, + func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) }, + func(tx int, k int) []byte { return make([]byte, 100) }, + ) + require.NoError(t, err) - t.Logf("Corrupting random leaf page in bucket '%v'.", bucketKey) - victimPageId, validPageIds := corruptLeafPage(t, db.DB) + t.Log("Corrupting random leaf page.") + victimPageId, validPageIds := corruptRandomLeafPage(t, db.DB) t.Log("Running consistency check.") vErr := db.View(func(tx *bbolt.Tx) error { @@ -61,45 +51,53 @@ func TestTx_Check_CorruptPage(t *testing.T) { return nil }) require.NoError(t, vErr) + t.Log("All check passed") + + // Manually close the db, otherwise the PostTestCleanup will + // check the db again and accordingly fail the test. + db.MustClose() } -// corruptLeafPage write an invalid leafPageElement into the victim page. -func corruptLeafPage(t testing.TB, db *bbolt.DB) (victimPageId common.Pgid, validPageIds []common.Pgid) { - t.Helper() - victimPageId, validPageIds = findVictimPageId(t, db) +// corruptRandomLeafPage corrupts one random leaf page. +func corruptRandomLeafPage(t testing.TB, db *bbolt.DB) (victimPageId common.Pgid, validPageIds []common.Pgid) { + victimPageId, validPageIds = pickupRandomLeafPage(t, db) victimPage, victimBuf, err := guts_cli.ReadPage(db.Path(), uint64(victimPageId)) require.NoError(t, err) require.True(t, victimPage.IsLeafPage()) - require.True(t, victimPage.Count() > 0) - // Dumping random bytes in victim page for corruption. - copy(victimBuf[32:], generateCorruptionBytes(t)) + require.True(t, victimPage.Count() > 1) + + // intentionally make the second key < the first key. + element := victimPage.LeafPageElement(1) + key := element.Key() + key[0] = 0 + // Write the corrupt page to db file. err = guts_cli.WritePage(db.Path(), victimBuf) require.NoError(t, err) return victimPageId, validPageIds } -// findVictimPageId finds all the leaf pages of a bucket and picks a random leaf page as a victim to be corrupted. 
-func findVictimPageId(t testing.TB, db *bbolt.DB) (victimPageId common.Pgid, validPageIds []common.Pgid) { - t.Helper() - // Read DB's RootPage. +// pickupRandomLeafPage picks up a random leaf page. +func pickupRandomLeafPage(t testing.TB, db *bbolt.DB) (victimPageId common.Pgid, validPageIds []common.Pgid) { + // Read DB's RootPage, which should be a leaf page. rootPageId, _, err := guts_cli.GetRootPage(db.Path()) require.NoError(t, err) rootPage, _, err := guts_cli.ReadPage(db.Path(), uint64(rootPageId)) require.NoError(t, err) require.True(t, rootPage.IsLeafPage()) - require.Equal(t, 1, len(rootPage.LeafPageElements())) - // Find Bucket's RootPage. + + // The leaf page contains only one item, namely the bucket + require.Equal(t, uint16(1), rootPage.Count()) lpe := rootPage.LeafPageElement(uint16(0)) - require.Equal(t, uint32(common.BranchPageFlag), lpe.Flags()) - k := lpe.Key() - require.Equal(t, "testBucket", string(k)) + require.True(t, lpe.IsBucketEntry()) + + // The bucket should be pointing to a branch page bucketRootPageId := lpe.Bucket().RootPage() - // Read Bucket's RootPage. bucketRootPage, _, err := guts_cli.ReadPage(db.Path(), uint64(bucketRootPageId)) require.NoError(t, err) - require.Equal(t, uint16(common.BranchPageFlag), bucketRootPage.Flags()) - // Retrieve Bucket's PageIds + require.True(t, bucketRootPage.IsBranchPage()) + + // Retrieve all the leaf pages included in the branch page, and pick up random one from them. var bucketPageIds []common.Pgid for _, bpe := range bucketRootPage.BranchPageElements() { bucketPageIds = append(bucketPageIds, bpe.Pgid()) @@ -109,36 +107,3 @@ func findVictimPageId(t testing.TB, db *bbolt.DB) (victimPageId common.Pgid, val validPageIds = append(bucketPageIds[:randomIdx], bucketPageIds[randomIdx+1:]...) return victimPageId, validPageIds } - -// generateSampleDataInBucket fill in sample data into given bucket to create the given -// number of leafPages. 
To control the number of leafPages, sample data are generated in order. -func generateSampleDataInBucket(t testing.TB, bk *bbolt.Bucket, pageSize int, lPages int) { - t.Helper() - maxBytesInPage := int(bk.FillPercent * float64(pageSize)) - currentKey := 1 - currentVal := 100 - for i := 0; i < lPages; i++ { - currentSize := common.PageHeaderSize - for { - err := bk.Put([]byte(fmt.Sprintf("key_%d", currentKey)), []byte(fmt.Sprintf("val_%d", currentVal))) - require.NoError(t, err) - currentSize += common.LeafPageElementSize + unsafe.Sizeof(currentKey) + unsafe.Sizeof(currentVal) - if int(currentSize) >= maxBytesInPage { - break - } - currentKey++ - currentVal++ - } - } -} - -// generateCorruptionBytes returns random bytes to corrupt a page. -// It inserts a page element which violates the btree key order if no panic is expected. -func generateCorruptionBytes(t testing.TB) []byte { - t.Helper() - invalidLPE := common.NewLeafPageElement(0, 0, 0, 0) - var buf bytes.Buffer - err := binary.Write(&buf, binary.BigEndian, invalidLPE) - require.NoError(t, err) - return buf.Bytes() -} From 2b1ee6c191e44a21c29768b9574f35f448738288 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sun, 4 Feb 2024 09:53:00 +0000 Subject: [PATCH 208/439] Continue to enhance check functionality and add one more case to cover the nested bucket case Signed-off-by: Benjamin Wang --- tx_check.go | 6 +-- tx_check_test.go | 121 ++++++++++++++++++++++++++++++++++------------- 2 files changed, 92 insertions(+), 35 deletions(-) diff --git a/tx_check.go b/tx_check.go index a7fa99c7e..1a6d8e2dc 100644 --- a/tx_check.go +++ b/tx_check.go @@ -102,16 +102,16 @@ func (tx *Tx) recursivelyCheckBucketInPage(pageId common.Pgid, reachable map[com case p.IsLeafPage(): for i := range p.LeafPageElements() { elem := p.LeafPageElement(uint16(i)) - if elem.Flags()&common.BucketLeafFlag != 0 { - + if elem.IsBucketEntry() { inBkt := common.NewInBucket(pageId, 0) tmpBucket := Bucket{ InBucket: &inBkt, rootNode: &node{isLeaf: 
p.IsLeafPage()}, FillPercent: DefaultFillPercent, + tx: tx, } if child := tmpBucket.Bucket(elem.Key()); child != nil { - tx.recursivelyCheckBucket(&tmpBucket, reachable, freed, kvStringer, ch) + tx.recursivelyCheckBucket(child, reachable, freed, kvStringer, ch) } } } diff --git a/tx_check_test.go b/tx_check_test.go index 194cec32b..af3610897 100644 --- a/tx_check_test.go +++ b/tx_check_test.go @@ -14,19 +14,21 @@ import ( ) func TestTx_Check_CorruptPage(t *testing.T) { + bucketName := []byte("data") + t.Log("Creating db file.") db := btesting.MustCreateDBWithOption(t, &bbolt.Options{PageSize: 4096}) // Each page can hold roughly 20 key/values pair, so 100 such // key/value pairs will consume about 5 leaf pages. - err := db.Fill([]byte("data"), 1, 100, + err := db.Fill(bucketName, 1, 100, func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) }, func(tx int, k int) []byte { return make([]byte, 100) }, ) require.NoError(t, err) - t.Log("Corrupting random leaf page.") - victimPageId, validPageIds := corruptRandomLeafPage(t, db.DB) + t.Log("Corrupting a random leaf page.") + victimPageId, validPageIds := corruptRandomLeafPageInBucket(t, db.DB, bucketName) t.Log("Running consistency check.") vErr := db.View(func(tx *bbolt.Tx) error { @@ -58,41 +60,69 @@ func TestTx_Check_CorruptPage(t *testing.T) { db.MustClose() } -// corruptRandomLeafPage corrupts one random leaf page. -func corruptRandomLeafPage(t testing.TB, db *bbolt.DB) (victimPageId common.Pgid, validPageIds []common.Pgid) { - victimPageId, validPageIds = pickupRandomLeafPage(t, db) - victimPage, victimBuf, err := guts_cli.ReadPage(db.Path(), uint64(victimPageId)) - require.NoError(t, err) - require.True(t, victimPage.IsLeafPage()) - require.True(t, victimPage.Count() > 1) +func TestTx_Check_WithNestBucket(t *testing.T) { + parentBucketName := []byte("parentBucket") - // intentionally make the second key < the first key. 
- element := victimPage.LeafPageElement(1) - key := element.Key() - key[0] = 0 + t.Log("Creating db file.") + db := btesting.MustCreateDBWithOption(t, &bbolt.Options{PageSize: 4096}) - // Write the corrupt page to db file. - err = guts_cli.WritePage(db.Path(), victimBuf) - require.NoError(t, err) - return victimPageId, validPageIds -} + err := db.Update(func(tx *bbolt.Tx) error { + pb, bErr := tx.CreateBucket(parentBucketName) + if bErr != nil { + return bErr + } -// pickupRandomLeafPage picks up a random leaf page. -func pickupRandomLeafPage(t testing.TB, db *bbolt.DB) (victimPageId common.Pgid, validPageIds []common.Pgid) { - // Read DB's RootPage, which should be a leaf page. - rootPageId, _, err := guts_cli.GetRootPage(db.Path()) - require.NoError(t, err) - rootPage, _, err := guts_cli.ReadPage(db.Path(), uint64(rootPageId)) + t.Log("put some key/values under the parent bucket directly") + for i := 0; i < 10; i++ { + k, v := fmt.Sprintf("%04d", i), fmt.Sprintf("value_%4d", i) + if pErr := pb.Put([]byte(k), []byte(v)); pErr != nil { + return pErr + } + } + + t.Log("create a nested bucket and put some key/values under the nested bucket") + cb, bErr := pb.CreateBucket([]byte("nestedBucket")) + if bErr != nil { + return bErr + } + + for i := 0; i < 2000; i++ { + k, v := fmt.Sprintf("%04d", i), fmt.Sprintf("value_%4d", i) + if pErr := cb.Put([]byte(k), []byte(v)); pErr != nil { + return pErr + } + } + + return nil + }) require.NoError(t, err) - require.True(t, rootPage.IsLeafPage()) - // The leaf page contains only one item, namely the bucket - require.Equal(t, uint16(1), rootPage.Count()) - lpe := rootPage.LeafPageElement(uint16(0)) - require.True(t, lpe.IsBucketEntry()) + // Get the bucket's root page. 
+ bucketRootPageId := mustGetBucketRootPage(t, db.DB, parentBucketName) + + t.Logf("Running consistency check starting from pageId: %d", bucketRootPageId) + vErr := db.View(func(tx *bbolt.Tx) error { + var cErrs []error + + errChan := tx.Check(bbolt.WithPageId(uint(bucketRootPageId))) + for cErr := range errChan { + cErrs = append(cErrs, cErr) + } + require.Equal(t, 0, len(cErrs)) - // The bucket should be pointing to a branch page - bucketRootPageId := lpe.Bucket().RootPage() + return nil + }) + require.NoError(t, vErr) + t.Log("All check passed") + + // Manually close the db, otherwise the PostTestCleanup will + // check the db again and accordingly fail the test. + db.MustClose() +} + +// corruptRandomLeafPage corrupts one random leaf page. +func corruptRandomLeafPageInBucket(t testing.TB, db *bbolt.DB, bucketName []byte) (victimPageId common.Pgid, validPageIds []common.Pgid) { + bucketRootPageId := mustGetBucketRootPage(t, db, bucketName) bucketRootPage, _, err := guts_cli.ReadPage(db.Path(), uint64(bucketRootPageId)) require.NoError(t, err) require.True(t, bucketRootPage.IsBranchPage()) @@ -105,5 +135,32 @@ func pickupRandomLeafPage(t testing.TB, db *bbolt.DB) (victimPageId common.Pgid, randomIdx := rand.Intn(len(bucketPageIds)) victimPageId = bucketPageIds[randomIdx] validPageIds = append(bucketPageIds[:randomIdx], bucketPageIds[randomIdx+1:]...) + + victimPage, victimBuf, err := guts_cli.ReadPage(db.Path(), uint64(victimPageId)) + require.NoError(t, err) + require.True(t, victimPage.IsLeafPage()) + require.True(t, victimPage.Count() > 1) + + // intentionally make the second key < the first key. + element := victimPage.LeafPageElement(1) + key := element.Key() + key[0] = 0 + + // Write the corrupt page to db file. + err = guts_cli.WritePage(db.Path(), victimBuf) + require.NoError(t, err) return victimPageId, validPageIds } + +// mustGetBucketRootPage returns the root page for the provided bucket. 
+func mustGetBucketRootPage(t testing.TB, db *bbolt.DB, bucketName []byte) common.Pgid { + var rootPageId common.Pgid + _ = db.View(func(tx *bbolt.Tx) error { + b := tx.Bucket(bucketName) + require.NotNil(t, b) + rootPageId = b.RootPage() + return nil + }) + + return rootPageId +} From 1c772e6e67fcc35cb904b3267d3d01f8805fbe03 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Mon, 5 Feb 2024 14:35:36 +0000 Subject: [PATCH 209/439] Update ahrtr's email Signed-off-by: Benjamin Wang --- OWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/OWNERS b/OWNERS index 7f8eb86ab..886c3b324 100644 --- a/OWNERS +++ b/OWNERS @@ -1,7 +1,7 @@ # See the OWNERS docs at https://go.k8s.io/owners approvers: - - ahrtr # Benjamin Wang + - ahrtr # Benjamin Wang - mitake # Hitoshi Mitake - serathius # Marek Siarkowicz - ptabor # Piotr Tabor From c0ab8df53baa0e6cadf62f58fcf06af40a085739 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 6 Feb 2024 12:47:09 +0000 Subject: [PATCH 210/439] Remove mitake from OWNERS Signed-off-by: Benjamin Wang --- OWNERS | 1 - 1 file changed, 1 deletion(-) diff --git a/OWNERS b/OWNERS index 886c3b324..ab9a6b81f 100644 --- a/OWNERS +++ b/OWNERS @@ -2,7 +2,6 @@ approvers: - ahrtr # Benjamin Wang - - mitake # Hitoshi Mitake - serathius # Marek Siarkowicz - ptabor # Piotr Tabor - spzala # Sahdev Zala From d17ee55b7a62e1319107e9873a7831613a4901c0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Feb 2024 14:12:44 +0000 Subject: [PATCH 211/439] build(deps): Bump golangci/golangci-lint-action from 3.7.0 to 4.0.0 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 3.7.0 to 4.0.0. 
- [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/3a919529898de77ec3da873e3063ca4b10e7f5cc...3cfe3a4abbb849e10058ce4af15d205b6da42804) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_windows.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 74d6aa1d4..b8e8cd34f 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -52,4 +52,4 @@ jobs: ;; esac - name: golangci-lint - uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 + uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index cf9b73f9d..227235434 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -39,7 +39,7 @@ jobs: esac shell: bash - name: golangci-lint - uses: golangci/golangci-lint-action@3a919529898de77ec3da873e3063ca4b10e7f5cc # v3.7.0 + uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0 coverage: needs: ["test-windows"] From d0ab4cc862047b2f2bfdbbd2e8214ac15d1bd3b7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Feb 2024 14:43:18 +0000 Subject: [PATCH 212/439] build(deps): Bump golang.org/x/sys from 0.16.0 to 0.17.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.16.0 to 0.17.0. 
- [Commits](https://github.com/golang/sys/compare/v0.16.0...v0.17.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c3a0db424..7ae2b4678 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/stretchr/testify v1.8.4 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.6.0 - golang.org/x/sys v0.16.0 + golang.org/x/sys v0.17.0 ) require ( diff --git a/go.sum b/go.sum index 26d0e638d..7c535fdb9 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 7a72c9a241c8a0c68ba36b30019db6721b542c2f Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sat, 24 Feb 2024 15:33:25 +0000 Subject: [PATCH 213/439] Update the release date of v1.3.9 Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.3.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG/CHANGELOG-1.3.md b/CHANGELOG/CHANGELOG-1.3.md index 5dfd7fb4c..bd856b900 
100644 --- a/CHANGELOG/CHANGELOG-1.3.md +++ b/CHANGELOG/CHANGELOG-1.3.md @@ -1,6 +1,8 @@ Note that we start to track changes starting from v1.3.7. -## v1.3.9(TBD) +
+ +## v1.3.9(2024-02-24) ### BoltDB - [Clone the key before operating data in bucket against the key](https://github.com/etcd-io/bbolt/pull/639) From cdac32e57c84354ab0a33593f5dfd0c344d4442b Mon Sep 17 00:00:00 2001 From: Ishan Tyagi Date: Sun, 25 Feb 2024 19:02:02 +0530 Subject: [PATCH 214/439] Change the type of pageId uint -> uint64. Signed-off-by: ishan16696 --- tx_check.go | 6 +++--- tx_check_test.go | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tx_check.go b/tx_check.go index 1a6d8e2dc..4e3c41ae4 100644 --- a/tx_check.go +++ b/tx_check.go @@ -74,7 +74,7 @@ func (tx *Tx) check(cfg checkConfig, ch chan error) { } } else { // Check the db file starting from a specified pageId. - if cfg.pageId < 2 || cfg.pageId >= uint(tx.meta.Pgid()) { + if cfg.pageId < 2 || cfg.pageId >= uint64(tx.meta.Pgid()) { ch <- fmt.Errorf("page ID (%d) out of range [%d, %d)", cfg.pageId, 2, tx.meta.Pgid()) return } @@ -250,7 +250,7 @@ func verifyKeyOrder(pgId common.Pgid, pageType string, index int, key []byte, pr type checkConfig struct { kvStringer KVStringer - pageId uint + pageId uint64 } type CheckOption func(options *checkConfig) @@ -262,7 +262,7 @@ func WithKVStringer(kvStringer KVStringer) CheckOption { } // WithPageId sets a page ID from which the check command starts to check -func WithPageId(pageId uint) CheckOption { +func WithPageId(pageId uint64) CheckOption { return func(c *checkConfig) { c.pageId = pageId } diff --git a/tx_check_test.go b/tx_check_test.go index af3610897..a0ce69a29 100644 --- a/tx_check_test.go +++ b/tx_check_test.go @@ -35,7 +35,7 @@ func TestTx_Check_CorruptPage(t *testing.T) { var cErrs []error t.Log("Check corrupted page.") - errChan := tx.Check(bbolt.WithPageId(uint(victimPageId))) + errChan := tx.Check(bbolt.WithPageId(uint64(victimPageId))) for cErr := range errChan { cErrs = append(cErrs, cErr) } @@ -44,7 +44,7 @@ func TestTx_Check_CorruptPage(t *testing.T) { t.Log("Check valid pages.") cErrs = cErrs[:0] for _, pgId := 
range validPageIds { - errChan = tx.Check(bbolt.WithPageId(uint(pgId))) + errChan = tx.Check(bbolt.WithPageId(uint64(pgId))) for cErr := range errChan { cErrs = append(cErrs, cErr) } @@ -104,7 +104,7 @@ func TestTx_Check_WithNestBucket(t *testing.T) { vErr := db.View(func(tx *bbolt.Tx) error { var cErrs []error - errChan := tx.Check(bbolt.WithPageId(uint(bucketRootPageId))) + errChan := tx.Check(bbolt.WithPageId(uint64(bucketRootPageId))) for cErr := range errChan { cErrs = append(cErrs, cErr) } From 60ae138f23de8f7925a9b46f7b7da4b870387256 Mon Sep 17 00:00:00 2001 From: Thomas Jungblut Date: Tue, 27 Feb 2024 12:57:36 +0100 Subject: [PATCH 215/439] fixing surgery freelist command in info logs Signed-off-by: Thomas Jungblut --- cmd/bbolt/command_surgery.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/bbolt/command_surgery.go b/cmd/bbolt/command_surgery.go index 129ae459d..75f122c7e 100644 --- a/cmd/bbolt/command_surgery.go +++ b/cmd/bbolt/command_surgery.go @@ -160,7 +160,7 @@ func surgeryCopyPageFunc(srcDBPath string, cfg surgeryCopyPageOptions) error { } if meta.IsFreelistPersisted() { fmt.Fprintf(os.Stdout, "WARNING: the free list might have changed.\n") - fmt.Fprintf(os.Stdout, "Please consider executing `./bbolt surgery abandon-freelist ...`\n") + fmt.Fprintf(os.Stdout, "Please consider executing `./bbolt surgery freelist abandon ...`\n") } fmt.Fprintf(os.Stdout, "The page %d was successfully copied to page %d\n", cfg.sourcePageId, cfg.destinationPageId) @@ -229,7 +229,7 @@ func surgeryClearPageFunc(srcDBPath string, cfg surgeryClearPageOptions) error { if needAbandonFreelist { fmt.Fprintf(os.Stdout, "WARNING: The clearing has abandoned some pages that are not yet referenced from free list.\n") - fmt.Fprintf(os.Stdout, "Please consider executing `./bbolt surgery abandon-freelist ...`\n") + fmt.Fprintf(os.Stdout, "Please consider executing `./bbolt surgery freelist abandon ...`\n") } fmt.Fprintf(os.Stdout, "The page (%d) was 
cleared\n", cfg.pageId) @@ -304,7 +304,7 @@ func surgeryClearPageElementFunc(srcDBPath string, cfg surgeryClearPageElementsO if needAbandonFreelist { fmt.Fprintf(os.Stdout, "WARNING: The clearing has abandoned some pages that are not yet referenced from free list.\n") - fmt.Fprintf(os.Stdout, "Please consider executing `./bbolt surgery abandon-freelist ...`\n") + fmt.Fprintf(os.Stdout, "Please consider executing `./bbolt surgery freelist abandon ...`\n") } fmt.Fprintf(os.Stdout, "All elements in [%d, %d) in page %d were cleared\n", cfg.startElementIdx, cfg.endElementIdx, cfg.pageId) From 9f4ff8a54912e8c4ef40b479ddadddd33180089a Mon Sep 17 00:00:00 2001 From: Thomas Jungblut Date: Tue, 27 Feb 2024 15:54:17 +0100 Subject: [PATCH 216/439] remove txid references in meta surgery Signed-off-by: Thomas Jungblut --- cmd/bbolt/command_surgery_meta.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/bbolt/command_surgery_meta.go b/cmd/bbolt/command_surgery_meta.go index 36cf33ce5..f774007bf 100644 --- a/cmd/bbolt/command_surgery_meta.go +++ b/cmd/bbolt/command_surgery_meta.go @@ -91,11 +91,11 @@ var allowedMetaUpdateFields = map[string]struct{}{ } // AddFlags sets the flags for `meta update` command. 
-// Example: --fields root:16,freelist:8 --fields pgid:128 --fields txid:1234 -// Result: []string{"root:16", "freelist:8", "pgid:128", "txid:1234"} +// Example: --fields root:16,freelist:8 --fields pgid:128 +// Result: []string{"root:16", "freelist:8", "pgid:128"} func (o *surgeryMetaUpdateOptions) AddFlags(fs *pflag.FlagSet) { o.surgeryBaseOptions.AddFlags(fs) - fs.StringSliceVarP(&o.fields, "fields", "", o.fields, "comma separated list of fields (supported fields: pageSize, root, freelist, pgid and txid) to be updated, and each item is a colon-separated key-value pair") + fs.StringSliceVarP(&o.fields, "fields", "", o.fields, "comma separated list of fields (supported fields: pageSize, root, freelist and pgid) to be updated, and each item is a colon-separated key-value pair") fs.Uint32VarP(&o.metaPageId, "meta-page", "", o.metaPageId, "the meta page ID to operate on, valid values are 0 and 1") } From 37e9ab571600b0c384608b5454cfa70968613427 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Mar 2024 14:36:28 +0000 Subject: [PATCH 217/439] build(deps): Bump github.com/stretchr/testify from 1.8.4 to 1.9.0 Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.8.4 to 1.9.0. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.8.4...v1.9.0) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7ae2b4678..75e0a21b5 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.21 require ( github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.8.4 + github.com/stretchr/testify v1.9.0 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.6.0 golang.org/x/sys v0.17.0 diff --git a/go.sum b/go.sum index 7c535fdb9..a14bf142a 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,8 @@ github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= From 5d5f587e4eea6c355191efd8d808a356edeb601c Mon Sep 17 00:00:00 2001 From: Allen Ray Date: Wed, 7 Feb 2024 11:28:35 -0500 Subject: [PATCH 218/439] Migrate to go1.22 Signed-off-by: Allen Ray --- .go-version | 2 +- go.mod | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index c262b1f0d..6245beecd 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.21.6 +1.22.1 diff --git a/go.mod b/go.mod index c3a0db424..6dac8fc3d 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,8 @@ module 
go.etcd.io/bbolt -go 1.21 +go 1.22 + +toolchain go1.22.1 require ( github.com/spf13/cobra v1.8.0 From 2e41a6711c7f9f405b32a169c9acfc0b0bbd35b6 Mon Sep 17 00:00:00 2001 From: Park Zhou Date: Sun, 10 Mar 2024 23:01:09 +0800 Subject: [PATCH 219/439] README.md: fix typo Signed-off-by: Park Zhou --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 237aaac6b..f1e5f7686 100644 --- a/README.md +++ b/README.md @@ -906,7 +906,7 @@ The best places to start are the main entry points into Bolt: - `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the arguments, a cursor is used to traverse the B+tree to the page and position - where they key & value will be written. Once the position is found, the bucket + where the key & value will be written. Once the position is found, the bucket materializes the underlying page and the page's parent pages into memory as "nodes". These nodes are where mutations occur during read-write transactions. These changes get flushed to disk during commit. From f75cc0ea31b3befc6f773ea02ed4d4f7b5edfbc7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Mar 2024 14:55:02 +0000 Subject: [PATCH 220/439] build(deps): Bump golang.org/x/sys from 0.17.0 to 0.18.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.17.0 to 0.18.0. - [Commits](https://github.com/golang/sys/compare/v0.17.0...v0.18.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index abf0bc11e..09ada5727 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.9.0 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.6.0 - golang.org/x/sys v0.17.0 + golang.org/x/sys v0.18.0 ) require ( diff --git a/go.sum b/go.sum index a14bf142a..9f586f302 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 47b03dbfb4c9edf53279b78c713c348157f4ab8c Mon Sep 17 00:00:00 2001 From: Park Zhou Date: Mon, 11 Mar 2024 16:31:49 +0800 Subject: [PATCH 221/439] all: fix doc of var ErrChecksum Signed-off-by: Park Zhou --- errors.go | 2 +- errors/errors.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/errors.go b/errors.go index 4d7cd8001..02958c86f 100644 --- a/errors.go +++ b/errors.go @@ -27,7 +27,7 @@ var ( // Deprecated: Use the error variables defined in the bbolt/errors package. 
ErrVersionMismatch = errors.ErrVersionMismatch - // ErrChecksum is returned when either meta page checksum does not match. + // ErrChecksum is returned when a checksum mismatch occurs on either of the two meta pages. // // Deprecated: Use the error variables defined in the bbolt/errors package. ErrChecksum = errors.ErrChecksum diff --git a/errors/errors.go b/errors/errors.go index e5428b9d6..c115289e5 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -21,7 +21,7 @@ var ( // different version of Bolt. ErrVersionMismatch = errors.New("version mismatch") - // ErrChecksum is returned when either meta page checksum does not match. + // ErrChecksum is returned when a checksum mismatch occurs on either of the two meta pages. ErrChecksum = errors.New("checksum error") // ErrTimeout is returned when a database cannot obtain an exclusive lock From c27eedcf803fb5c6990d8846275c5136e06de1ae Mon Sep 17 00:00:00 2001 From: Thomas Jungblut Date: Thu, 7 Mar 2024 10:38:15 +0100 Subject: [PATCH 222/439] Add basic XFS powerfailure tests This also introduces mkfs options, in case we need to accommodate for non-default parameters here in the future.
Signed-off-by: Thomas Jungblut --- .github/workflows/robustness_template.yaml | 2 +- tests/dmflakey/dmflakey.go | 18 ++++-- tests/dmflakey/dmflakey_test.go | 42 +++++++------ tests/robustness/powerfailure_test.go | 72 +++++++++++++++++++--- 4 files changed, 100 insertions(+), 34 deletions(-) diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml index e9b9a38d7..9300e14eb 100644 --- a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -30,6 +30,6 @@ jobs: - name: test-robustness run: | set -euo pipefail - sudo apt-get install -y dmsetup + sudo apt-get install -y dmsetup xfsprogs ROBUSTNESS_TESTFLAGS="--count ${{ inputs.count }} --timeout ${{ inputs.testTimeout }} -failfast" make test-robustness diff --git a/tests/dmflakey/dmflakey.go b/tests/dmflakey/dmflakey.go index 25061a4cb..88c3c2d48 100644 --- a/tests/dmflakey/dmflakey.go +++ b/tests/dmflakey/dmflakey.go @@ -90,9 +90,9 @@ const ( // The device-mapper device will be /dev/mapper/$flakeyDevice. And the filesystem // image will be created at $dataStorePath/$flakeyDevice.img. By default, the // device is available for 2 minutes and size is 10 GiB. -func InitFlakey(flakeyDevice, dataStorePath string, fsType FSType) (_ Flakey, retErr error) { +func InitFlakey(flakeyDevice, dataStorePath string, fsType FSType, mkfsOpt string) (_ Flakey, retErr error) { imgPath := filepath.Join(dataStorePath, fmt.Sprintf("%s.img", flakeyDevice)) - if err := createEmptyFSImage(imgPath, fsType); err != nil { + if err := createEmptyFSImage(imgPath, fsType, mkfsOpt); err != nil { return nil, err } defer func() { @@ -276,7 +276,7 @@ func (f *flakey) Teardown() error { // createEmptyFSImage creates empty filesystem on dataStorePath folder with // default size - 10 GiB. 
-func createEmptyFSImage(imgPath string, fsType FSType) error { +func createEmptyFSImage(imgPath string, fsType FSType, mkfsOpt string) error { if err := validateFSType(fsType); err != nil { return err } @@ -308,10 +308,16 @@ func createEmptyFSImage(imgPath string, fsType FSType) error { imgPath, defaultImgSize, err) } - output, err := exec.Command(mkfs, imgPath).CombinedOutput() + args := []string{imgPath} + if mkfsOpt != "" { + splitArgs := strings.Split(mkfsOpt, " ") + args = append(splitArgs, imgPath) + } + + output, err := exec.Command(mkfs, args...).CombinedOutput() if err != nil { - return fmt.Errorf("failed to mkfs.%s on %s (out: %s): %w", - fsType, imgPath, string(output), err) + return fmt.Errorf("failed to mkfs on %s (%s %v) (out: %s): %w", + imgPath, mkfs, args, string(output), err) } return nil } diff --git a/tests/dmflakey/dmflakey_test.go b/tests/dmflakey/dmflakey_test.go index 41c66db8d..99e2de062 100644 --- a/tests/dmflakey/dmflakey_test.go +++ b/tests/dmflakey/dmflakey_test.go @@ -26,31 +26,35 @@ func TestMain(m *testing.M) { } func TestBasic(t *testing.T) { - tmpDir := t.TempDir() + for _, fsType := range []FSType{FSTypeEXT4, FSTypeXFS} { + t.Run(string(fsType), func(t *testing.T) { + tmpDir := t.TempDir() - flakey, err := InitFlakey("go-dmflakey", tmpDir, FSTypeEXT4) - require.NoError(t, err, "init flakey") - defer func() { - assert.NoError(t, flakey.Teardown()) - }() + flakey, err := InitFlakey("go-dmflakey", tmpDir, fsType, "") + require.NoError(t, err, "init flakey") + defer func() { + assert.NoError(t, flakey.Teardown()) + }() - target := filepath.Join(tmpDir, "root") - require.NoError(t, os.MkdirAll(target, 0600)) + target := filepath.Join(tmpDir, "root") + require.NoError(t, os.MkdirAll(target, 0600)) - require.NoError(t, mount(target, flakey.DevicePath(), "")) - defer func() { - assert.NoError(t, unmount(target)) - }() + require.NoError(t, mount(target, flakey.DevicePath(), "")) + defer func() { + assert.NoError(t, unmount(target)) + }() 
- file := filepath.Join(target, "test") - assert.NoError(t, writeFile(file, []byte("hello, world"), 0600, true)) + file := filepath.Join(target, "test") + assert.NoError(t, writeFile(file, []byte("hello, world"), 0600, true)) - assert.NoError(t, unmount(target)) + assert.NoError(t, unmount(target)) - assert.NoError(t, flakey.Teardown()) + assert.NoError(t, flakey.Teardown()) + }) + } } -func TestDropWrites(t *testing.T) { +func TestDropWritesExt4(t *testing.T) { flakey, root := initFlakey(t, FSTypeEXT4) // commit=1000 is to delay commit triggered by writeback thread @@ -82,7 +86,7 @@ func TestDropWrites(t *testing.T) { assert.True(t, errors.Is(err, os.ErrNotExist)) } -func TestErrorWrites(t *testing.T) { +func TestErrorWritesExt4(t *testing.T) { flakey, root := initFlakey(t, FSTypeEXT4) // commit=1000 is to delay commit triggered by writeback thread @@ -114,7 +118,7 @@ func initFlakey(t *testing.T, fsType FSType) (_ Flakey, root string) { target := filepath.Join(tmpDir, "root") require.NoError(t, os.MkdirAll(target, 0600)) - flakey, err := InitFlakey("go-dmflakey", tmpDir, FSTypeEXT4) + flakey, err := InitFlakey("go-dmflakey", tmpDir, fsType, "") require.NoError(t, err, "init flakey") t.Cleanup(func() { diff --git a/tests/robustness/powerfailure_test.go b/tests/robustness/powerfailure_test.go index 4b150ccdc..d8c497e0a 100644 --- a/tests/robustness/powerfailure_test.go +++ b/tests/robustness/powerfailure_test.go @@ -35,8 +35,8 @@ var panicFailpoints = []string{ "unmapError", } -// TestRestartFromPowerFailure is to test data after unexpected power failure. -func TestRestartFromPowerFailure(t *testing.T) { +// TestRestartFromPowerFailureExt4 is to test data after unexpected power failure on ext4. 
+func TestRestartFromPowerFailureExt4(t *testing.T) { for _, tc := range []struct { name string du time.Duration @@ -78,13 +78,69 @@ func TestRestartFromPowerFailure(t *testing.T) { }, } { t.Run(tc.name, func(t *testing.T) { - doPowerFailure(t, tc.du, tc.fsMountOpt, tc.useFailpoint) + doPowerFailure(t, tc.du, dmflakey.FSTypeEXT4, "", tc.fsMountOpt, tc.useFailpoint) }) } } -func doPowerFailure(t *testing.T, du time.Duration, fsMountOpt string, useFailpoint bool) { - flakey := initFlakeyDevice(t, strings.Replace(t.Name(), "/", "_", -1), dmflakey.FSTypeEXT4, fsMountOpt) +func TestRestartFromPowerFailureXFS(t *testing.T) { + for _, tc := range []struct { + name string + mkfsOpt string + fsMountOpt string + useFailpoint bool + }{ + { + name: "xfs_no_opts", + mkfsOpt: "", + fsMountOpt: "", + useFailpoint: true, + }, + { + name: "lazy-log", + mkfsOpt: "-l lazy-count=1", + fsMountOpt: "", + useFailpoint: true, + }, + { + name: "odd-allocsize", + mkfsOpt: "", + fsMountOpt: "allocsize=" + fmt.Sprintf("%d", 4096*5), + useFailpoint: true, + }, + { + name: "nolargeio", + mkfsOpt: "", + fsMountOpt: "nolargeio", + useFailpoint: true, + }, + { + name: "odd-alignment", + mkfsOpt: "-d sunit=1024,swidth=1024", + fsMountOpt: "noalign", + useFailpoint: true, + }, + { + name: "openshift-sno-options", + mkfsOpt: "-m bigtime=1,finobt=1,rmapbt=0,reflink=1 -i sparse=1 -l lazy-count=1", + // openshift also supplies seclabel,relatime,prjquota on RHEL, but that's not supported on our CI + // prjquota is only unsupported on our ARM runners. + // You can find more information in either the man page with `man xfs` or `man mkfs.xfs`. + // Also refer to https://man7.org/linux/man-pages/man8/mkfs.xfs.8.html. 
+ fsMountOpt: "rw,attr2,inode64,logbufs=8,logbsize=32k", + useFailpoint: true, + }, + } { + t.Run(tc.name, func(t *testing.T) { + t.Logf("mkfs opts: %s", tc.mkfsOpt) + t.Logf("mount opts: %s", tc.fsMountOpt) + doPowerFailure(t, 5*time.Second, dmflakey.FSTypeXFS, tc.mkfsOpt, tc.fsMountOpt, tc.useFailpoint) + }) + } +} + +func doPowerFailure(t *testing.T, du time.Duration, fsType dmflakey.FSType, mkfsOpt string, fsMountOpt string, useFailpoint bool) { + flakey := initFlakeyDevice(t, strings.Replace(t.Name(), "/", "_", -1), fsType, mkfsOpt, fsMountOpt) root := flakey.RootFS() dbPath := filepath.Join(root, "boltdb") @@ -186,10 +242,10 @@ type FlakeyDevice interface { } // initFlakeyDevice returns FlakeyDevice instance with a given filesystem. -func initFlakeyDevice(t *testing.T, name string, fsType dmflakey.FSType, mntOpt string) FlakeyDevice { +func initFlakeyDevice(t *testing.T, name string, fsType dmflakey.FSType, mkfsOpt string, mntOpt string) FlakeyDevice { imgDir := t.TempDir() - flakey, err := dmflakey.InitFlakey(name, imgDir, fsType) + flakey, err := dmflakey.InitFlakey(name, imgDir, fsType, mkfsOpt) require.NoError(t, err, "init flakey %s", name) t.Cleanup(func() { assert.NoError(t, flakey.Teardown()) @@ -240,7 +296,7 @@ func (f *flakeyT) PowerFailure(mntOpt string) error { } if err := unix.Mount(f.DevicePath(), f.rootDir, string(f.Filesystem()), 0, mntOpt); err != nil { - return fmt.Errorf("failed to mount rootfs %s: %w", f.rootDir, err) + return fmt.Errorf("failed to mount rootfs %s (%s): %w", f.rootDir, mntOpt, err) } return nil } From 6c1d16e8c21a664f4179d3c49ee78ef0e496eb8f Mon Sep 17 00:00:00 2001 From: Chun-Hung Tseng Date: Thu, 4 Apr 2024 10:27:26 +0200 Subject: [PATCH 223/439] Bump go toolchain version to address CVE-2023-45288 Changes: - Bump toolchain version to 1.22.2 due to CVE-2023-45288 Reference: - PR #17703 Signed-off-by: Chun-Hung Tseng --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/.go-version b/.go-version index 6245beecd..6fee2fedb 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.22.1 +1.22.2 diff --git a/go.mod b/go.mod index 09ada5727..a81c4d9f6 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.22 -toolchain go1.22.1 +toolchain go1.22.2 require ( github.com/spf13/cobra v1.8.0 From 4352c84fe564e95cac04fddd42c263bf403dc4b9 Mon Sep 17 00:00:00 2001 From: Chun-Hung Tseng Date: Thu, 4 Apr 2024 21:57:18 +0200 Subject: [PATCH 224/439] Remove unused seed generation code Signed-off-by: Chun-Hung Tseng --- cmd/bbolt/main_test.go | 5 ----- 1 file changed, 5 deletions(-) diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index b980e468a..8a5cc94e8 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -557,11 +557,6 @@ func NewMain() *Main { } func TestCompactCommand_Run(t *testing.T) { - var s int64 - if err := binary.Read(crypto.Reader, binary.BigEndian, &s); err != nil { - t.Fatal(err) - } - dstdb := btesting.MustCreateDB(t) dstdb.Close() From ed64923fcae484c54cd19a4b70b8d25aeffd3d37 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Apr 2024 15:00:31 +0000 Subject: [PATCH 225/439] build(deps): Bump golang.org/x/sync from 0.6.0 to 0.7.0 Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.6.0 to 0.7.0. - [Commits](https://github.com/golang/sync/compare/v0.6.0...v0.7.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index a81c4d9f6..0235344f7 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 go.etcd.io/gofail v0.1.0 - golang.org/x/sync v0.6.0 + golang.org/x/sync v0.7.0 golang.org/x/sys v0.18.0 ) diff --git a/go.sum b/go.sum index 9f586f302..6252944a8 100644 --- a/go.sum +++ b/go.sum @@ -14,8 +14,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From 72d285151868af9f5bbb5f0bc92a042812749518 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Apr 2024 19:03:10 +0000 Subject: [PATCH 226/439] build(deps): Bump golang.org/x/sys from 0.18.0 to 0.19.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.18.0 to 0.19.0. - [Commits](https://github.com/golang/sys/compare/v0.18.0...v0.19.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 0235344f7..c5c2121e3 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.9.0 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.7.0 - golang.org/x/sys v0.18.0 + golang.org/x/sys v0.19.0 ) require ( diff --git a/go.sum b/go.sum index 6252944a8..5b6b36db2 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From d3370d3859d36efcddf3b6af93f4aefab4ffa09b Mon Sep 17 00:00:00 2001 From: Adam Baxter Date: Wed, 10 Apr 2024 12:08:50 +0100 Subject: [PATCH 227/439] Add rnd read-mode to bbolt bench command Signed-off-by: Adam Baxter --- cmd/bbolt/main.go | 144 ++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 119 insertions(+), 25 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 3ec567c25..a5a4e9f2e 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -1103,17 +1103,27 @@ func (cmd *benchCommand) Run(args ...string) error { db.NoSync = options.NoSync defer db.Close() + r := 
rand.New(rand.NewSource(time.Now().UnixNano())) + // Write to the database. var writeResults BenchResults + fmt.Fprintf(cmd.Stderr, "starting write benchmark.\n") - if err := cmd.runWrites(db, options, &writeResults); err != nil { + keys, err := cmd.runWrites(db, options, &writeResults, r) + if err != nil { return fmt.Errorf("write: %v", err) } + if keys != nil { + r.Shuffle(len(keys), func(i, j int) { + keys[i], keys[j] = keys[j], keys[i] + }) + } + var readResults BenchResults fmt.Fprintf(cmd.Stderr, "starting read benchmark.\n") // Read from the database. - if err := cmd.runReads(db, options, &readResults); err != nil { + if err := cmd.runReads(db, options, &readResults, keys); err != nil { return fmt.Errorf("bench: read: %s", err) } @@ -1172,7 +1182,7 @@ func (cmd *benchCommand) ParseFlags(args []string) (*BenchOptions, error) { } // Writes to the database. -func (cmd *benchCommand) runWrites(db *bolt.DB, options *BenchOptions, results *BenchResults) error { +func (cmd *benchCommand) runWrites(db *bolt.DB, options *BenchOptions, results *BenchResults, r *rand.Rand) ([]nestedKey, error) { // Start profiling for writes. 
if options.ProfileMode == "rw" || options.ProfileMode == "w" { cmd.startProfiling(options) @@ -1184,18 +1194,19 @@ func (cmd *benchCommand) runWrites(db *bolt.DB, options *BenchOptions, results * t := time.Now() + var keys []nestedKey var err error switch options.WriteMode { case "seq": - err = cmd.runWritesSequential(db, options, results) + keys, err = cmd.runWritesSequential(db, options, results) case "rnd": - err = cmd.runWritesRandom(db, options, results) + keys, err = cmd.runWritesRandom(db, options, results, r) case "seq-nest": - err = cmd.runWritesSequentialNested(db, options, results) + keys, err = cmd.runWritesSequentialNested(db, options, results) case "rnd-nest": - err = cmd.runWritesRandomNested(db, options, results) + keys, err = cmd.runWritesRandomNested(db, options, results, r) default: - return fmt.Errorf("invalid write mode: %s", options.WriteMode) + return nil, fmt.Errorf("invalid write mode: %s", options.WriteMode) } // Save time to write. @@ -1206,30 +1217,33 @@ func (cmd *benchCommand) runWrites(db *bolt.DB, options *BenchOptions, results * cmd.stopProfiling() } - return err + return keys, err } -func (cmd *benchCommand) runWritesSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { +func (cmd *benchCommand) runWritesSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) ([]nestedKey, error) { var i = uint32(0) return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i }) } -func (cmd *benchCommand) runWritesRandom(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - r := rand.New(rand.NewSource(time.Now().UnixNano())) +func (cmd *benchCommand) runWritesRandom(db *bolt.DB, options *BenchOptions, results *BenchResults, r *rand.Rand) ([]nestedKey, error) { return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() }) } -func (cmd *benchCommand) runWritesSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { +func 
(cmd *benchCommand) runWritesSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) ([]nestedKey, error) { var i = uint32(0) return cmd.runWritesNestedWithSource(db, options, results, func() uint32 { i++; return i }) } -func (cmd *benchCommand) runWritesRandomNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - r := rand.New(rand.NewSource(time.Now().UnixNano())) +func (cmd *benchCommand) runWritesRandomNested(db *bolt.DB, options *BenchOptions, results *BenchResults, r *rand.Rand) ([]nestedKey, error) { return cmd.runWritesNestedWithSource(db, options, results, func() uint32 { return r.Uint32() }) } -func (cmd *benchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { +func (cmd *benchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) ([]nestedKey, error) { + var keys []nestedKey + if options.ReadMode == "rnd" { + keys = make([]nestedKey, 0, options.Iterations) + } + for i := int64(0); i < options.Iterations; i += options.BatchSize { if err := db.Update(func(tx *bolt.Tx) error { b, _ := tx.CreateBucketIfNotExists(benchBucketName) @@ -1247,20 +1261,27 @@ func (cmd *benchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, if err := b.Put(key, value); err != nil { return err } - + if keys != nil { + keys = append(keys, nestedKey{nil, key}) + } results.AddCompletedOps(1) } fmt.Fprintf(cmd.Stderr, "Finished write iteration %d\n", i) return nil }); err != nil { - return err + return nil, err } } - return nil + return keys, nil } -func (cmd *benchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) error { +func (cmd *benchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) ([]nestedKey, error) { + var keys []nestedKey + if options.ReadMode == "rnd" { + 
keys = make([]nestedKey, 0, options.Iterations) + } + for i := int64(0); i < options.Iterations; i += options.BatchSize { if err := db.Update(func(tx *bolt.Tx) error { top, err := tx.CreateBucketIfNotExists(benchBucketName) @@ -1292,21 +1313,23 @@ func (cmd *benchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOp if err := b.Put(key, value); err != nil { return err } - + if keys != nil { + keys = append(keys, nestedKey{name, key}) + } results.AddCompletedOps(1) } fmt.Fprintf(cmd.Stderr, "Finished write iteration %d\n", i) return nil }); err != nil { - return err + return nil, err } } - return nil + return keys, nil } // Reads from the database. -func (cmd *benchCommand) runReads(db *bolt.DB, options *BenchOptions, results *BenchResults) error { +func (cmd *benchCommand) runReads(db *bolt.DB, options *BenchOptions, results *BenchResults, keys []nestedKey) error { // Start profiling for reads. if options.ProfileMode == "r" { cmd.startProfiling(options) @@ -1327,6 +1350,13 @@ func (cmd *benchCommand) runReads(db *bolt.DB, options *BenchOptions, results *B default: err = cmd.runReadsSequential(db, options, results) } + case "rnd": + switch options.WriteMode { + case "seq-nest", "rnd-nest": + err = cmd.runReadsRandomNested(db, options, keys, results) + default: + err = cmd.runReadsRandom(db, options, keys, results) + } default: return fmt.Errorf("invalid read mode: %s", options.ReadMode) } @@ -1342,6 +1372,8 @@ func (cmd *benchCommand) runReads(db *bolt.DB, options *BenchOptions, results *B return err } +type nestedKey struct{ bucket, key []byte } + func (cmd *benchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { return db.View(func(tx *bolt.Tx) error { t := time.Now() @@ -1353,7 +1385,37 @@ func (cmd *benchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, numReads++ results.AddCompletedOps(1) if v == nil { - return errors.New("invalid value") + return ErrInvalidValue + } + } + + if 
options.WriteMode == "seq" && numReads != options.Iterations { + return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, numReads) + } + + // Make sure we do this for at least a second. + if time.Since(t) >= time.Second { + break + } + } + + return nil + }) +} + +func (cmd *benchCommand) runReadsRandom(db *bolt.DB, options *BenchOptions, keys []nestedKey, results *BenchResults) error { + return db.View(func(tx *bolt.Tx) error { + t := time.Now() + + for { + numReads := int64(0) + b := tx.Bucket(benchBucketName) + for _, key := range keys { + v := b.Get(key.key) + numReads++ + results.AddCompletedOps(1) + if v == nil { + return ErrInvalidValue } } @@ -1408,6 +1470,38 @@ func (cmd *benchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOpt }) } +func (cmd *benchCommand) runReadsRandomNested(db *bolt.DB, options *BenchOptions, nestedKeys []nestedKey, results *BenchResults) error { + return db.View(func(tx *bolt.Tx) error { + t := time.Now() + + for { + numReads := int64(0) + var top = tx.Bucket(benchBucketName) + for _, nestedKey := range nestedKeys { + if b := top.Bucket(nestedKey.bucket); b != nil { + v := b.Get(nestedKey.key) + numReads++ + results.AddCompletedOps(1) + if v == nil { + return ErrInvalidValue + } + } + } + + if options.WriteMode == "seq-nest" && numReads != options.Iterations { + return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, numReads) + } + + // Make sure we do this for at least a second. 
+ if time.Since(t) >= time.Second { + break + } + } + + return nil + }) +} + func checkProgress(results *BenchResults, finishChan chan interface{}, stderr io.Writer) { ticker := time.Tick(time.Second) lastCompleted, lastTime := int64(0), time.Now() From b21ea719bcce181745f20f0d8bd6e6d9b73dd70c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cenk=20Alt=C4=B1?= Date: Sun, 14 Apr 2024 12:16:09 -0400 Subject: [PATCH 228/439] Add workflow to close stale issues and PRs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Cenk Altı Update .github/workflows/stale.yaml Co-authored-by: Benjamin Wang --- .github/workflows/stale.yaml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 .github/workflows/stale.yaml diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml new file mode 100644 index 000000000..adef90226 --- /dev/null +++ b/.github/workflows/stale.yaml @@ -0,0 +1,19 @@ +name: 'Close stale issues and PRs' +on: + schedule: + - cron: '0 0 * * *' # every day at 00:00 UTC + +permissions: + issues: write + pull-requests: write + +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v9 + with: + days-before-stale: 90 + days-before-close: 21 + stale-issue-label: stale + stale-pr-label: stale From 3a180ad9c91f65b47b575dff12addbaf2796019a Mon Sep 17 00:00:00 2001 From: deferdeter Date: Tue, 16 Apr 2024 14:25:07 +0800 Subject: [PATCH 229/439] chore: fix function name in comment Signed-off-by: deferdeter --- tests/dmflakey/loopback.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/dmflakey/loopback.go b/tests/dmflakey/loopback.go index 35e82cf8f..701336382 100644 --- a/tests/dmflakey/loopback.go +++ b/tests/dmflakey/loopback.go @@ -73,7 +73,7 @@ func detachLoopDevice(loopDevice string) error { return unix.IoctlSetInt(int(loopFd.Fd()), unix.LOOP_CLR_FD, 0) } -// getFreeLoopbackDevice allocates or finds a free loop device for use. 
+// getFreeLoopDevice allocates or finds a free loop device for use. // // REF: https://man7.org/linux/man-pages/man4/loop.4.html func getFreeLoopDevice() (string, error) { From 2fe6b6347f08cc79e623c3538872c4e10a9ceb7a Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Wed, 17 Apr 2024 11:04:56 +0200 Subject: [PATCH 230/439] fix: use cobra exactArgs Signed-off-by: Mustafa Elbehery --- cmd/bbolt/command_inspect.go | 11 +------- cmd/bbolt/command_surgery.go | 40 +++------------------------ cmd/bbolt/command_surgery_freelist.go | 21 ++------------ cmd/bbolt/command_surgery_meta.go | 21 ++------------ 4 files changed, 9 insertions(+), 84 deletions(-) diff --git a/cmd/bbolt/command_inspect.go b/cmd/bbolt/command_inspect.go index 68cbe53f6..ccdb162b2 100644 --- a/cmd/bbolt/command_inspect.go +++ b/cmd/bbolt/command_inspect.go @@ -2,7 +2,6 @@ package main import ( "encoding/json" - "errors" "fmt" "os" @@ -15,15 +14,7 @@ func newInspectCobraCommand() *cobra.Command { inspectCmd := &cobra.Command{ Use: "inspect", Short: "inspect the structure of the database", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return errors.New("db file path not provided") - } - if len(args) > 1 { - return errors.New("too many arguments") - } - return nil - }, + Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { return inspectFunc(args[0]) }, diff --git a/cmd/bbolt/command_surgery.go b/cmd/bbolt/command_surgery.go index 4d1f78077..fba5b2e48 100644 --- a/cmd/bbolt/command_surgery.go +++ b/cmd/bbolt/command_surgery.go @@ -54,15 +54,7 @@ func newSurgeryRevertMetaPageCommand() *cobra.Command { revertMetaPageCmd := &cobra.Command{ Use: "revert-meta-page [options]", Short: "Revert the meta page to revert the changes performed by the latest transaction", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return errors.New("db file path not provided") - } - if len(args) > 1 { - return errors.New("too many 
arguments") - } - return nil - }, + Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { if err := o.Validate(); err != nil { return err @@ -121,15 +113,7 @@ func newSurgeryCopyPageCommand() *cobra.Command { copyPageCmd := &cobra.Command{ Use: "copy-page [options]", Short: "Copy page from the source page Id to the destination page Id", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return errors.New("db file path not provided") - } - if len(args) > 1 { - return errors.New("too many arguments") - } - return nil - }, + Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { if err := o.Validate(); err != nil { return err @@ -193,15 +177,7 @@ func newSurgeryClearPageCommand() *cobra.Command { clearPageCmd := &cobra.Command{ Use: "clear-page [options]", Short: "Clears all elements from the given page, which can be a branch or leaf page", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return errors.New("db file path not provided") - } - if len(args) > 1 { - return errors.New("too many arguments") - } - return nil - }, + Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { if err := o.Validate(); err != nil { return err @@ -268,15 +244,7 @@ func newSurgeryClearPageElementsCommand() *cobra.Command { clearElementCmd := &cobra.Command{ Use: "clear-page-elements [options]", Short: "Clears elements from the given page, which can be a branch or leaf page", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return errors.New("db file path not provided") - } - if len(args) > 1 { - return errors.New("too many arguments") - } - return nil - }, + Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { if err := o.Validate(); err != nil { return err diff --git a/cmd/bbolt/command_surgery_freelist.go b/cmd/bbolt/command_surgery_freelist.go index 81e2ea9a9..b6cd658d1 100644 --- 
a/cmd/bbolt/command_surgery_freelist.go +++ b/cmd/bbolt/command_surgery_freelist.go @@ -1,7 +1,6 @@ package main import ( - "errors" "fmt" "os" @@ -29,15 +28,7 @@ func newSurgeryFreelistAbandonCommand() *cobra.Command { abandonFreelistCmd := &cobra.Command{ Use: "abandon [options]", Short: "Abandon the freelist from both meta pages", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return errors.New("db file path not provided") - } - if len(args) > 1 { - return errors.New("too many arguments") - } - return nil - }, + Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { if err := o.Validate(); err != nil { return err @@ -72,15 +63,7 @@ func newSurgeryFreelistRebuildCommand() *cobra.Command { rebuildFreelistCmd := &cobra.Command{ Use: "rebuild [options]", Short: "Rebuild the freelist", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return errors.New("db file path not provided") - } - if len(args) > 1 { - return errors.New("too many arguments") - } - return nil - }, + Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { if err := o.Validate(); err != nil { return err diff --git a/cmd/bbolt/command_surgery_meta.go b/cmd/bbolt/command_surgery_meta.go index f774007bf..ba15dd002 100644 --- a/cmd/bbolt/command_surgery_meta.go +++ b/cmd/bbolt/command_surgery_meta.go @@ -1,7 +1,6 @@ package main import ( - "errors" "fmt" "io" "os" @@ -37,15 +36,7 @@ func newSurgeryMetaValidateCommand() *cobra.Command { metaValidateCmd := &cobra.Command{ Use: "validate [options]", Short: "Validate both meta pages", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return errors.New("db file path not provided") - } - if len(args) > 1 { - return errors.New("too many arguments") - } - return nil - }, + Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { return surgeryMetaValidateFunc(args[0]) }, @@ -131,15 +122,7 @@ func 
newSurgeryMetaUpdateCommand() *cobra.Command { metaUpdateCmd := &cobra.Command{ Use: "update [options]", Short: "Update fields in meta pages", - Args: func(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return errors.New("db file path not provided") - } - if len(args) > 1 { - return errors.New("too many arguments") - } - return nil - }, + Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { if err := o.Validate(); err != nil { return err From 432a97935b98ca68a4f36c0ee9ef762377bc3a2a Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 17 Apr 2024 17:22:13 +0100 Subject: [PATCH 231/439] Add a known issue on the writing a value with a length of 0 always result in reading back an empty []byte{} value Signed-off-by: Benjamin Wang --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index f1e5f7686..4123b130e 100644 --- a/README.md +++ b/README.md @@ -948,6 +948,8 @@ them via pull request. Please also refer to the discussion in https://github.com/etcd-io/bbolt/issues/562. +- Writing a value with a length of 0 will always result in reading back an empty `[]byte{}` value. + Please refer to [issues/726#issuecomment-2061694802](https://github.com/etcd-io/bbolt/issues/726#issuecomment-2061694802). 
## Other Projects Using Bolt From e3afa4080440ce00b09bc506216447ab11db3dbe Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Mon, 15 Apr 2024 14:46:40 +0200 Subject: [PATCH 232/439] add check cmd cobra Signed-off-by: Mustafa Elbehery --- cmd/bbolt/command_check.go | 58 ++++++++++++++++++++++++ cmd/bbolt/command_check_test.go | 33 ++++++++++++++ cmd/bbolt/command_root.go | 1 + cmd/bbolt/main.go | 78 --------------------------------- cmd/bbolt/main_test.go | 14 ------ 5 files changed, 92 insertions(+), 92 deletions(-) create mode 100644 cmd/bbolt/command_check.go create mode 100644 cmd/bbolt/command_check_test.go diff --git a/cmd/bbolt/command_check.go b/cmd/bbolt/command_check.go new file mode 100644 index 000000000..237e02f91 --- /dev/null +++ b/cmd/bbolt/command_check.go @@ -0,0 +1,58 @@ +package main + +import ( + "fmt" + + "github.com/spf13/cobra" + + bolt "go.etcd.io/bbolt" + "go.etcd.io/bbolt/internal/guts_cli" +) + +func newCheckCommand() *cobra.Command { + checkCmd := &cobra.Command{ + Use: "check ", + Short: "verify integrity of bbolt database data", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return checkFunc(cmd, args[0]) + }, + } + + return checkCmd +} + +func checkFunc(cmd *cobra.Command, dbPath string) error { + if _, err := checkSourceDBPath(dbPath); err != nil { + return err + } + + // Open database. + db, err := bolt.Open(dbPath, 0600, &bolt.Options{ + ReadOnly: true, + PreLoadFreelist: true, + }) + if err != nil { + return err + } + defer db.Close() + + // Perform consistency check. + return db.View(func(tx *bolt.Tx) error { + var count int + for err := range tx.Check(bolt.WithKVStringer(CmdKvStringer())) { + fmt.Fprintln(cmd.OutOrStdout(), err) + count++ + } + + // Print summary of errors. + if count > 0 { + fmt.Fprintf(cmd.OutOrStdout(), "%d errors found\n", count) + return guts_cli.ErrCorrupt + } + + // Notify user that database is valid. 
+ fmt.Fprintln(cmd.OutOrStdout(), "OK") + return nil + }) +} diff --git a/cmd/bbolt/command_check_test.go b/cmd/bbolt/command_check_test.go new file mode 100644 index 000000000..02500745c --- /dev/null +++ b/cmd/bbolt/command_check_test.go @@ -0,0 +1,33 @@ +package main_test + +import ( + "bytes" + "io" + "testing" + + "github.com/stretchr/testify/require" + + main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/internal/btesting" +) + +func TestCheckCommand_Run(t *testing.T) { + db := btesting.MustCreateDB(t) + db.Close() + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + rootCmd := main.NewRootCommand() + // capture output for assertion + outputBuf := bytes.NewBufferString("") + rootCmd.SetOut(outputBuf) + + rootCmd.SetArgs([]string{ + "check", db.Path(), + }) + err := rootCmd.Execute() + require.NoError(t, err) + + output, err := io.ReadAll(outputBuf) + require.NoError(t, err) + require.Equalf(t, "OK\n", string(output), "unexpected stdout:\n\n%s", string(output)) +} diff --git a/cmd/bbolt/command_root.go b/cmd/bbolt/command_root.go index b69a619ed..979f485ab 100644 --- a/cmd/bbolt/command_root.go +++ b/cmd/bbolt/command_root.go @@ -20,6 +20,7 @@ func NewRootCommand() *cobra.Command { newVersionCobraCommand(), newSurgeryCobraCommand(), newInspectCobraCommand(), + newCheckCommand(), ) return rootCmd diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index a5a4e9f2e..2c803f311 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -123,8 +123,6 @@ func (m *Main) Run(args ...string) error { return newBenchCommand(m).Run(args[1:]...) case "buckets": return newBucketsCommand(m).Run(args[1:]...) - case "check": - return newCheckCommand(m).Run(args[1:]...) case "compact": return newCompactCommand(m).Run(args[1:]...) case "dump": @@ -180,82 +178,6 @@ Use "bbolt [command] -h" for more information about a command. `, "\n") } -// checkCommand represents the "check" command execution. 
-type checkCommand struct { - baseCommand -} - -// newCheckCommand returns a checkCommand. -func newCheckCommand(m *Main) *checkCommand { - c := &checkCommand{} - c.baseCommand = m.baseCommand - return c -} - -// Run executes the command. -func (cmd *checkCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open database. - db, err := bolt.Open(path, 0600, &bolt.Options{ - ReadOnly: true, - PreLoadFreelist: true, - }) - if err != nil { - return err - } - defer db.Close() - - // Perform consistency check. - return db.View(func(tx *bolt.Tx) error { - var count int - for err := range tx.Check(bolt.WithKVStringer(CmdKvStringer())) { - fmt.Fprintln(cmd.Stdout, err) - count++ - } - - // Print summary of errors. - if count > 0 { - fmt.Fprintf(cmd.Stdout, "%d errors found\n", count) - return guts_cli.ErrCorrupt - } - - // Notify user that database is valid. - fmt.Fprintln(cmd.Stdout, "OK") - return nil - }) -} - -// Usage returns the help message. -func (cmd *checkCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt check PATH - -Check opens a database at PATH and runs an exhaustive check to verify that -all pages are accessible or are marked as freed. It also verifies that no -pages are double referenced. - -Verification errors will stream out as they are found and the process will -return after all pages have been checked. -`, "\n") -} - // infoCommand represents the "info" command execution. 
type infoCommand struct { baseCommand diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index 8a5cc94e8..2dfb04449 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -79,20 +79,6 @@ func TestStatsCommand_Run_EmptyDatabase(t *testing.T) { } } -func TestCheckCommand_Run(t *testing.T) { - db := btesting.MustCreateDB(t) - db.Close() - - defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) - - m := NewMain() - err := m.Run("check", db.Path()) - require.NoError(t, err) - if m.Stdout.String() != "OK\n" { - t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) - } -} - func TestDumpCommand_Run(t *testing.T) { db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 4096}) db.Close() From 6ff4ce700938e8588dcacecf355a73354bf0ea08 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Thu, 18 Apr 2024 03:12:47 -0600 Subject: [PATCH 233/439] cmd: replace CobraCommand suffix with Command * Rename newInspectCobraCommand to newInspectCommand * Rename newVersionCobraCommand to newVersionCommand Signed-off-by: Ivan Valdes --- cmd/bbolt/command_inspect.go | 2 +- cmd/bbolt/command_root.go | 4 ++-- cmd/bbolt/command_version.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/bbolt/command_inspect.go b/cmd/bbolt/command_inspect.go index ccdb162b2..8d34401e1 100644 --- a/cmd/bbolt/command_inspect.go +++ b/cmd/bbolt/command_inspect.go @@ -10,7 +10,7 @@ import ( bolt "go.etcd.io/bbolt" ) -func newInspectCobraCommand() *cobra.Command { +func newInspectCommand() *cobra.Command { inspectCmd := &cobra.Command{ Use: "inspect", Short: "inspect the structure of the database", diff --git a/cmd/bbolt/command_root.go b/cmd/bbolt/command_root.go index 979f485ab..6308a8cc2 100644 --- a/cmd/bbolt/command_root.go +++ b/cmd/bbolt/command_root.go @@ -17,9 +17,9 @@ func NewRootCommand() *cobra.Command { } rootCmd.AddCommand( - newVersionCobraCommand(), + newVersionCommand(), newSurgeryCobraCommand(), - newInspectCobraCommand(), + 
newInspectCommand(), newCheckCommand(), ) diff --git a/cmd/bbolt/command_version.go b/cmd/bbolt/command_version.go index 4434c515f..73019c798 100644 --- a/cmd/bbolt/command_version.go +++ b/cmd/bbolt/command_version.go @@ -8,7 +8,7 @@ import ( "go.etcd.io/bbolt/version" ) -func newVersionCobraCommand() *cobra.Command { +func newVersionCommand() *cobra.Command { versionCmd := &cobra.Command{ Use: "version", Short: "print the current version of bbolt", From f7de41e900a042f2d29f3e41bde69f8ea2318f6b Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 17 Apr 2024 16:43:02 +0100 Subject: [PATCH 234/439] Simplify the command's description Signed-off-by: Benjamin Wang --- cmd/bbolt/command_inspect.go | 2 +- cmd/bbolt/command_root.go | 2 +- cmd/bbolt/command_surgery.go | 10 +++++----- cmd/bbolt/command_surgery_freelist.go | 4 ++-- cmd/bbolt/command_surgery_meta.go | 4 ++-- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/cmd/bbolt/command_inspect.go b/cmd/bbolt/command_inspect.go index 8d34401e1..7f150835a 100644 --- a/cmd/bbolt/command_inspect.go +++ b/cmd/bbolt/command_inspect.go @@ -12,7 +12,7 @@ import ( func newInspectCommand() *cobra.Command { inspectCmd := &cobra.Command{ - Use: "inspect", + Use: "inspect ", Short: "inspect the structure of the database", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/bbolt/command_root.go b/cmd/bbolt/command_root.go index 6308a8cc2..0336ea36c 100644 --- a/cmd/bbolt/command_root.go +++ b/cmd/bbolt/command_root.go @@ -18,7 +18,7 @@ func NewRootCommand() *cobra.Command { rootCmd.AddCommand( newVersionCommand(), - newSurgeryCobraCommand(), + newSurgeryCommand(), newInspectCommand(), newCheckCommand(), ) diff --git a/cmd/bbolt/command_surgery.go b/cmd/bbolt/command_surgery.go index fba5b2e48..c7abfe1dd 100644 --- a/cmd/bbolt/command_surgery.go +++ b/cmd/bbolt/command_surgery.go @@ -17,7 +17,7 @@ var ( ErrSurgeryFreelistAlreadyExist = errors.New("the file already has 
freelist, please consider to abandon the freelist to forcibly rebuild it") ) -func newSurgeryCobraCommand() *cobra.Command { +func newSurgeryCommand() *cobra.Command { surgeryCmd := &cobra.Command{ Use: "surgery ", Short: "surgery related commands", @@ -52,7 +52,7 @@ func (o *surgeryBaseOptions) Validate() error { func newSurgeryRevertMetaPageCommand() *cobra.Command { var o surgeryBaseOptions revertMetaPageCmd := &cobra.Command{ - Use: "revert-meta-page [options]", + Use: "revert-meta-page ", Short: "Revert the meta page to revert the changes performed by the latest transaction", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { @@ -111,7 +111,7 @@ func (o *surgeryCopyPageOptions) Validate() error { func newSurgeryCopyPageCommand() *cobra.Command { var o surgeryCopyPageOptions copyPageCmd := &cobra.Command{ - Use: "copy-page [options]", + Use: "copy-page ", Short: "Copy page from the source page Id to the destination page Id", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { @@ -175,7 +175,7 @@ func (o *surgeryClearPageOptions) Validate() error { func newSurgeryClearPageCommand() *cobra.Command { var o surgeryClearPageOptions clearPageCmd := &cobra.Command{ - Use: "clear-page [options]", + Use: "clear-page ", Short: "Clears all elements from the given page, which can be a branch or leaf page", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { @@ -242,7 +242,7 @@ func (o *surgeryClearPageElementsOptions) Validate() error { func newSurgeryClearPageElementsCommand() *cobra.Command { var o surgeryClearPageElementsOptions clearElementCmd := &cobra.Command{ - Use: "clear-page-elements [options]", + Use: "clear-page-elements ", Short: "Clears elements from the given page, which can be a branch or leaf page", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/bbolt/command_surgery_freelist.go b/cmd/bbolt/command_surgery_freelist.go index 
b6cd658d1..9b9da0b48 100644 --- a/cmd/bbolt/command_surgery_freelist.go +++ b/cmd/bbolt/command_surgery_freelist.go @@ -26,7 +26,7 @@ func newSurgeryFreelistCommand() *cobra.Command { func newSurgeryFreelistAbandonCommand() *cobra.Command { var o surgeryBaseOptions abandonFreelistCmd := &cobra.Command{ - Use: "abandon [options]", + Use: "abandon ", Short: "Abandon the freelist from both meta pages", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { @@ -61,7 +61,7 @@ func surgeryFreelistAbandonFunc(srcDBPath string, cfg surgeryBaseOptions) error func newSurgeryFreelistRebuildCommand() *cobra.Command { var o surgeryBaseOptions rebuildFreelistCmd := &cobra.Command{ - Use: "rebuild [options]", + Use: "rebuild ", Short: "Rebuild the freelist", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { diff --git a/cmd/bbolt/command_surgery_meta.go b/cmd/bbolt/command_surgery_meta.go index ba15dd002..513c1fb2e 100644 --- a/cmd/bbolt/command_surgery_meta.go +++ b/cmd/bbolt/command_surgery_meta.go @@ -34,7 +34,7 @@ func newSurgeryMetaCommand() *cobra.Command { func newSurgeryMetaValidateCommand() *cobra.Command { metaValidateCmd := &cobra.Command{ - Use: "validate [options]", + Use: "validate ", Short: "Validate both meta pages", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { @@ -120,7 +120,7 @@ func (o *surgeryMetaUpdateOptions) Validate() error { func newSurgeryMetaUpdateCommand() *cobra.Command { var o surgeryMetaUpdateOptions metaUpdateCmd := &cobra.Command{ - Use: "update [options]", + Use: "update ", Short: "Update fields in meta pages", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { From 43c669db88d2fff587482e131b4e672f81d95c50 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Fri, 12 Apr 2024 15:14:56 -0700 Subject: [PATCH 235/439] bench: aggregate adding completed ops for reads Currently, the completed operations are added to the read benchmarks one 
by one, and given that each operation is atomic, it impacts the benchmark's performance. Change to update only once per cycle, with the total number of reads. Signed-off-by: Ivan Valdes --- cmd/bbolt/main.go | 71 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 49 insertions(+), 22 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index a5a4e9f2e..14c88e6b1 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -1380,13 +1380,22 @@ func (cmd *benchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, for { numReads := int64(0) - c := tx.Bucket(benchBucketName).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - numReads++ - results.AddCompletedOps(1) - if v == nil { - return ErrInvalidValue + err := func() error { + defer func() { results.AddCompletedOps(numReads) }() + + c := tx.Bucket(benchBucketName).Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + numReads++ + if v == nil { + return ErrInvalidValue + } } + + return nil + }() + + if err != nil { + return err } if options.WriteMode == "seq" && numReads != options.Iterations { @@ -1409,14 +1418,23 @@ func (cmd *benchCommand) runReadsRandom(db *bolt.DB, options *BenchOptions, keys for { numReads := int64(0) - b := tx.Bucket(benchBucketName) - for _, key := range keys { - v := b.Get(key.key) - numReads++ - results.AddCompletedOps(1) - if v == nil { - return ErrInvalidValue + err := func() error { + defer func() { results.AddCompletedOps(numReads) }() + + b := tx.Bucket(benchBucketName) + for _, key := range keys { + v := b.Get(key.key) + numReads++ + if v == nil { + return ErrInvalidValue + } } + + return nil + }() + + if err != nil { + return err } if options.WriteMode == "seq" && numReads != options.Iterations { @@ -1441,11 +1459,11 @@ func (cmd *benchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOpt numReads := int64(0) var top = tx.Bucket(benchBucketName) if err := top.ForEach(func(name, _ []byte) error { + defer func() { 
results.AddCompletedOps(numReads) }() if b := top.Bucket(name); b != nil { c := b.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { numReads++ - results.AddCompletedOps(1) if v == nil { return ErrInvalidValue } @@ -1476,16 +1494,25 @@ func (cmd *benchCommand) runReadsRandomNested(db *bolt.DB, options *BenchOptions for { numReads := int64(0) - var top = tx.Bucket(benchBucketName) - for _, nestedKey := range nestedKeys { - if b := top.Bucket(nestedKey.bucket); b != nil { - v := b.Get(nestedKey.key) - numReads++ - results.AddCompletedOps(1) - if v == nil { - return ErrInvalidValue + err := func() error { + defer func() { results.AddCompletedOps(numReads) }() + + var top = tx.Bucket(benchBucketName) + for _, nestedKey := range nestedKeys { + if b := top.Bucket(nestedKey.bucket); b != nil { + v := b.Get(nestedKey.key) + numReads++ + if v == nil { + return ErrInvalidValue + } } } + + return nil + }() + + if err != nil { + return err } if options.WriteMode == "seq-nest" && numReads != options.Iterations { From b005c0c435e2e58c4a33b67881363aad6dc4244b Mon Sep 17 00:00:00 2001 From: ChengenH Date: Sun, 21 Apr 2024 21:35:18 +0800 Subject: [PATCH 236/439] chore: use errors.New to replace fmt.Errorf with no parameters will much better Signed-off-by: ChengenH --- README.md | 2 +- cmd/bbolt/command_surgery.go | 2 +- cmd/bbolt/main.go | 4 ++-- db.go | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 4123b130e..5b6cf621e 100644 --- a/README.md +++ b/README.md @@ -302,7 +302,7 @@ You can retrieve an existing bucket using the `Tx.Bucket()` function: db.Update(func(tx *bolt.Tx) error { b := tx.Bucket([]byte("MyBucket")) if b == nil { - return fmt.Errorf("bucket does not exist") + return errors.New("bucket does not exist") } return nil }) diff --git a/cmd/bbolt/command_surgery.go b/cmd/bbolt/command_surgery.go index c7abfe1dd..ca369cddb 100644 --- a/cmd/bbolt/command_surgery.go +++ b/cmd/bbolt/command_surgery.go @@ -44,7 +44,7 @@ 
func (o *surgeryBaseOptions) AddFlags(fs *pflag.FlagSet) { func (o *surgeryBaseOptions) Validate() error { if o.outputDBFilePath == "" { - return fmt.Errorf("output database path wasn't given, specify output database file path with --output option") + return errors.New("output database path wasn't given, specify output database file path with --output option") } return nil } diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index d5d39199b..10bb95b96 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -392,7 +392,7 @@ func (cmd *pageItemCommand) Run(args ...string) error { } if options.keyOnly && options.valueOnly { - return fmt.Errorf("The --key-only or --value-only flag may be set, but not both.") + return errors.New("The --key-only or --value-only flag may be set, but not both.") } // Require database path and page id. @@ -1675,7 +1675,7 @@ func (cmd *compactCommand) Run(args ...string) (err error) { } else if err != nil { return err } else if cmd.DstPath == "" { - return fmt.Errorf("output file required") + return errors.New("output file required") } // Require database paths. diff --git a/db.go b/db.go index db9dbafed..4e1625451 100644 --- a/db.go +++ b/db.go @@ -562,7 +562,7 @@ func (db *DB) mmapSize(size int) (int, error) { // Verify the requested size is not above the maximum allowed. if size > maxMapSize { - return 0, fmt.Errorf("mmap too large") + return 0, errors.New("mmap too large") } // If larger than 1GB then grow by 1GB at a time. 
From acfa0868c56a7eb7c33685d3ab46f35c148d712a Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Mon, 22 Apr 2024 20:43:25 +0200 Subject: [PATCH 237/439] add pageId flag to check cmd Signed-off-by: Mustafa Elbehery --- cmd/bbolt/command_check.go | 21 ++++++++-- cmd/bbolt/command_check_test.go | 69 ++++++++++++++++++++++++--------- 2 files changed, 69 insertions(+), 21 deletions(-) diff --git a/cmd/bbolt/command_check.go b/cmd/bbolt/command_check.go index 237e02f91..cb6e3b47d 100644 --- a/cmd/bbolt/command_check.go +++ b/cmd/bbolt/command_check.go @@ -4,25 +4,36 @@ import ( "fmt" "github.com/spf13/cobra" + "github.com/spf13/pflag" bolt "go.etcd.io/bbolt" "go.etcd.io/bbolt/internal/guts_cli" ) +type checkOptions struct { + fromPageID uint64 +} + +func (o *checkOptions) AddFlags(fs *pflag.FlagSet) { + fs.Uint64VarP(&o.fromPageID, "from-page", "", o.fromPageID, "check db integrity starting from the given page ID") +} + func newCheckCommand() *cobra.Command { + var o checkOptions checkCmd := &cobra.Command{ Use: "check ", Short: "verify integrity of bbolt database data", Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - return checkFunc(cmd, args[0]) + return checkFunc(cmd, args[0], o) }, } + o.AddFlags(checkCmd.Flags()) return checkCmd } -func checkFunc(cmd *cobra.Command, dbPath string) error { +func checkFunc(cmd *cobra.Command, dbPath string, cfg checkOptions) error { if _, err := checkSourceDBPath(dbPath); err != nil { return err } @@ -37,10 +48,14 @@ func checkFunc(cmd *cobra.Command, dbPath string) error { } defer db.Close() + opts := []bolt.CheckOption{bolt.WithKVStringer(CmdKvStringer())} + if cfg.fromPageID != 0 { + opts = append(opts, bolt.WithPageId(cfg.fromPageID)) + } // Perform consistency check. return db.View(func(tx *bolt.Tx) error { var count int - for err := range tx.Check(bolt.WithKVStringer(CmdKvStringer())) { + for err := range tx.Check(opts...) 
{ fmt.Fprintln(cmd.OutOrStdout(), err) count++ } diff --git a/cmd/bbolt/command_check_test.go b/cmd/bbolt/command_check_test.go index 02500745c..a2cdc6716 100644 --- a/cmd/bbolt/command_check_test.go +++ b/cmd/bbolt/command_check_test.go @@ -9,25 +9,58 @@ import ( main "go.etcd.io/bbolt/cmd/bbolt" "go.etcd.io/bbolt/internal/btesting" + "go.etcd.io/bbolt/internal/guts_cli" ) func TestCheckCommand_Run(t *testing.T) { - db := btesting.MustCreateDB(t) - db.Close() - defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) - - rootCmd := main.NewRootCommand() - // capture output for assertion - outputBuf := bytes.NewBufferString("") - rootCmd.SetOut(outputBuf) - - rootCmd.SetArgs([]string{ - "check", db.Path(), - }) - err := rootCmd.Execute() - require.NoError(t, err) - - output, err := io.ReadAll(outputBuf) - require.NoError(t, err) - require.Equalf(t, "OK\n", string(output), "unexpected stdout:\n\n%s", string(output)) + testCases := []struct { + name string + args []string + expErr error + expOutput string + }{ + { + name: "check whole db", + args: []string{"check", "path"}, + expErr: nil, + expOutput: "OK\n", + }, + { + name: "check valid pageId", + args: []string{"check", "path", "--from-page", "3"}, + expErr: nil, + expOutput: "OK\n", + }, + { + name: "check invalid pageId", + args: []string{"check", "path", "--from-page", "1"}, + expErr: guts_cli.ErrCorrupt, + expOutput: "page ID (1) out of range [2, 4)", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + t.Log("Creating sample DB") + db := btesting.MustCreateDB(t) + db.Close() + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + t.Log("Running check cmd") + rootCmd := main.NewRootCommand() + outputBuf := bytes.NewBufferString("") // capture output for assertion + rootCmd.SetOut(outputBuf) + + tc.args[1] = db.Path() // path to be replaced with db.Path() + rootCmd.SetArgs(tc.args) + err := rootCmd.Execute() + require.Equal(t, tc.expErr, err) + + t.Log("Checking 
output") + output, err := io.ReadAll(outputBuf) + require.NoError(t, err) + require.Containsf(t, string(output), tc.expOutput, "unexpected stdout:\n\n%s", string(output)) + }) + } } From 4936519f3a520e9eddab9bdbc6ed2d2eeb01650d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 29 Apr 2024 14:17:11 +0000 Subject: [PATCH 238/439] build(deps): Bump golangci/golangci-lint-action from 4.0.0 to 5.0.0 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 4.0.0 to 5.0.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/3cfe3a4abbb849e10058ce4af15d205b6da42804...82d40c283aeb1f2b6595839195e95c2d6a49081b) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_windows.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index b8e8cd34f..10af27cc7 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -52,4 +52,4 @@ jobs: ;; esac - name: golangci-lint - uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0 + uses: golangci/golangci-lint-action@82d40c283aeb1f2b6595839195e95c2d6a49081b # v5.0.0 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 227235434..14e1b084f 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -39,7 +39,7 @@ jobs: esac shell: bash - name: golangci-lint - uses: golangci/golangci-lint-action@3cfe3a4abbb849e10058ce4af15d205b6da42804 # v4.0.0 + uses: golangci/golangci-lint-action@82d40c283aeb1f2b6595839195e95c2d6a49081b 
# v5.0.0 coverage: needs: ["test-windows"] From fbea5d610d3b9d501cf4e0241f51d76a87f7dcf7 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Mon, 29 Apr 2024 15:13:06 -0700 Subject: [PATCH 239/439] *: skip logging if logger is discardLogger If there is no logger defined (discardLogger), skip logging altogether for highly frequent called methods (Put, Delete, CreateBucket, CreateBucketIfNotExists, DeleteBucket, Begin, Commit, Open, MoveBucket, Sync). Signed-off-by: Ivan Valdes --- bucket.go | 119 +++++++++++++++++++++++++++++------------------------- db.go | 54 ++++++++++++++----------- tx.go | 18 +++++---- 3 files changed, 103 insertions(+), 88 deletions(-) diff --git a/bucket.go b/bucket.go index 2f1d71048..1d1cee0ef 100644 --- a/bucket.go +++ b/bucket.go @@ -146,15 +146,16 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. func (b *Bucket) CreateBucket(key []byte) (rb *Bucket, err error) { - lg := b.tx.db.Logger() - lg.Debugf("Creating bucket %q", string(key)) - defer func() { - if err != nil { - lg.Errorf("Creating bucket %q failed: %v", string(key), err) - } else { - lg.Debugf("Creating bucket %q successfully", string(key)) - } - }() + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Creating bucket %q", key) + defer func() { + if err != nil { + lg.Errorf("Creating bucket %q failed: %v", key, err) + } else { + lg.Debugf("Creating bucket %q successfully", key) + } + }() + } if b.tx.db == nil { return nil, errors.ErrTxClosed } else if !b.tx.writable { @@ -202,15 +203,16 @@ func (b *Bucket) CreateBucket(key []byte) (rb *Bucket, err error) { // Returns an error if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. 
func (b *Bucket) CreateBucketIfNotExists(key []byte) (rb *Bucket, err error) { - lg := b.tx.db.Logger() - lg.Debugf("Creating bucket if not exist %q", string(key)) - defer func() { - if err != nil { - lg.Errorf("Creating bucket if not exist %q failed: %v", string(key), err) - } else { - lg.Debugf("Creating bucket if not exist %q successfully", string(key)) - } - }() + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Creating bucket if not exist %q", key) + defer func() { + if err != nil { + lg.Errorf("Creating bucket if not exist %q failed: %v", key, err) + } else { + lg.Debugf("Creating bucket if not exist %q successfully", key) + } + }() + } if b.tx.db == nil { return nil, errors.ErrTxClosed @@ -269,15 +271,16 @@ func (b *Bucket) CreateBucketIfNotExists(key []byte) (rb *Bucket, err error) { // DeleteBucket deletes a bucket at the given key. // Returns an error if the bucket does not exist, or if the key represents a non-bucket value. func (b *Bucket) DeleteBucket(key []byte) (err error) { - lg := b.tx.db.Logger() - lg.Debugf("Deleting bucket %q", string(key)) - defer func() { - if err != nil { - lg.Errorf("Deleting bucket %q failed: %v", string(key), err) - } else { - lg.Debugf("Deleting bucket %q successfully", string(key)) - } - }() + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Deleting bucket %q", key) + defer func() { + if err != nil { + lg.Errorf("Deleting bucket %q failed: %v", key, err) + } else { + lg.Debugf("Deleting bucket %q successfully", key) + } + }() + } if b.tx.db == nil { return errors.ErrTxClosed @@ -332,14 +335,16 @@ func (b *Bucket) DeleteBucket(key []byte) (err error) { // 4. the source and destination buckets are the same. 
func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) (err error) { lg := b.tx.db.Logger() - lg.Debugf("Moving bucket %q", string(key)) - defer func() { - if err != nil { - lg.Errorf("Moving bucket %q failed: %v", string(key), err) - } else { - lg.Debugf("Moving bucket %q successfully", string(key)) - } - }() + if lg != discardLogger { + lg.Debugf("Moving bucket %q", key) + defer func() { + if err != nil { + lg.Errorf("Moving bucket %q failed: %v", key, err) + } else { + lg.Debugf("Moving bucket %q successfully", key) + } + }() + } if b.tx.db == nil || dstBucket.tx.db == nil { return errors.ErrTxClosed @@ -362,14 +367,14 @@ func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) (err error) { if !bytes.Equal(newKey, k) { return errors.ErrBucketNotFound } else if (flags & common.BucketLeafFlag) == 0 { - lg.Errorf("An incompatible key %s exists in the source bucket", string(newKey)) + lg.Errorf("An incompatible key %s exists in the source bucket", newKey) return errors.ErrIncompatibleValue } // Do nothing (return true directly) if the source bucket and the // destination bucket are actually the same bucket. if b == dstBucket || (b.RootPage() == dstBucket.RootPage() && b.RootPage() != 0) { - lg.Errorf("The source bucket (%s) and the target bucket (%s) are the same bucket", b.String(), dstBucket.String()) + lg.Errorf("The source bucket (%s) and the target bucket (%s) are the same bucket", b, dstBucket) return errors.ErrSameBuckets } @@ -382,7 +387,7 @@ func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) (err error) { if (flags & common.BucketLeafFlag) != 0 { return errors.ErrBucketExists } - lg.Errorf("An incompatible key %s exists in the target bucket", string(newKey)) + lg.Errorf("An incompatible key %s exists in the target bucket", newKey) return errors.ErrIncompatibleValue } @@ -445,15 +450,16 @@ func (b *Bucket) Get(key []byte) []byte { // Supplied value must remain valid for the life of the transaction. 
// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. func (b *Bucket) Put(key []byte, value []byte) (err error) { - lg := b.tx.db.Logger() - lg.Debugf("Putting key %q", string(key)) - defer func() { - if err != nil { - lg.Errorf("Putting key %q failed: %v", string(key), err) - } else { - lg.Debugf("Putting key %q successfully", string(key)) - } - }() + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Putting key %q", key) + defer func() { + if err != nil { + lg.Errorf("Putting key %q failed: %v", key, err) + } else { + lg.Debugf("Putting key %q successfully", key) + } + }() + } if b.tx.db == nil { return errors.ErrTxClosed } else if !b.Writable() { @@ -491,15 +497,16 @@ func (b *Bucket) Put(key []byte, value []byte) (err error) { // If the key does not exist then nothing is done and a nil error is returned. // Returns an error if the bucket was created from a read-only transaction. 
func (b *Bucket) Delete(key []byte) (err error) { - lg := b.tx.db.Logger() - lg.Debugf("Deleting key %q", string(key)) - defer func() { - if err != nil { - lg.Errorf("Deleting key %q failed: %v", string(key), err) - } else { - lg.Debugf("Deleting key %q successfully", string(key)) - } - }() + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Deleting key %q", key) + defer func() { + if err != nil { + lg.Errorf("Deleting key %q failed: %v", key, err) + } else { + lg.Debugf("Deleting key %q successfully", key) + } + }() + } if b.tx.db == nil { return errors.ErrTxClosed diff --git a/db.go b/db.go index 4e1625451..cd3c5b0ba 100644 --- a/db.go +++ b/db.go @@ -204,14 +204,16 @@ func Open(path string, mode os.FileMode, options *Options) (db *DB, err error) { } lg := db.Logger() - lg.Infof("Opening db file (%s) with mode %x and with options: %s", path, mode, options) - defer func() { - if err != nil { - lg.Errorf("Opening bbolt db (%s) failed: %v", path, err) - } else { - lg.Infof("Opening bbolt db (%s) successfully", path) - } - }() + if lg != discardLogger { + lg.Infof("Opening db file (%s) with mode %x and with options: %s", path, mode, options) + defer func() { + if err != nil { + lg.Errorf("Opening bbolt db (%s) failed: %v", path, err) + } else { + lg.Infof("Opening bbolt db (%s) successfully", path) + } + }() + } flag := os.O_RDWR if options.ReadOnly { @@ -739,14 +741,16 @@ func (db *DB) close() error { // IMPORTANT: You must close read-only transactions after you are finished or // else the database will not reclaim old pages. 
func (db *DB) Begin(writable bool) (t *Tx, err error) { - db.Logger().Debugf("Starting a new transaction [writable: %t]", writable) - defer func() { - if err != nil { - db.Logger().Errorf("Starting a new transaction [writable: %t] failed: %v", writable, err) - } else { - db.Logger().Debugf("Starting a new transaction [writable: %t] successfully", writable) - } - }() + if lg := db.Logger(); lg != discardLogger { + lg.Debugf("Starting a new transaction [writable: %t]", writable) + defer func() { + if err != nil { + lg.Errorf("Starting a new transaction [writable: %t] failed: %v", writable, err) + } else { + lg.Debugf("Starting a new transaction [writable: %t] successfully", writable) + } + }() + } if writable { return db.beginRWTx() @@ -1095,14 +1099,16 @@ func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { // This is not necessary under normal operation, however, if you use NoSync // then it allows you to force the database file to sync against the disk. func (db *DB) Sync() (err error) { - db.Logger().Debug("Syncing bbolt db (%s)", db.path) - defer func() { - if err != nil { - db.Logger().Errorf("[GOOS: %s, GOARCH: %s] syncing bbolt db (%s) failed: %v", runtime.GOOS, runtime.GOARCH, db.path, err) - } else { - db.Logger().Debugf("Syncing bbolt db (%s) successfully", db.path) - } - }() + if lg := db.Logger(); lg != discardLogger { + lg.Debug("Syncing bbolt db (%s)", db.path) + defer func() { + if err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] syncing bbolt db (%s) failed: %v", runtime.GOOS, runtime.GOARCH, db.path, err) + } else { + lg.Debugf("Syncing bbolt db (%s) successfully", db.path) + } + }() + } return fdatasync(db) } diff --git a/tx.go b/tx.go index 950d06151..011e2c382 100644 --- a/tx.go +++ b/tx.go @@ -170,14 +170,16 @@ func (tx *Tx) OnCommit(fn func()) { func (tx *Tx) Commit() (err error) { txId := tx.ID() lg := tx.db.Logger() - lg.Debugf("Committing transaction %d", txId) - defer func() { - if err != nil { - lg.Errorf("Committing transaction 
failed: %v", err) - } else { - lg.Debugf("Committing transaction %d successfully", txId) - } - }() + if lg != discardLogger { + lg.Debugf("Committing transaction %d", txId) + defer func() { + if err != nil { + lg.Errorf("Committing transaction failed: %v", err) + } else { + lg.Debugf("Committing transaction %d successfully", txId) + } + }() + } common.Assert(!tx.managed, "managed tx commit not allowed") if tx.db == nil { From 06d4621c8fb7a91ea4d0f9b59d2e922e1eebbf5d Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Tue, 30 Apr 2024 06:22:53 -0700 Subject: [PATCH 240/439] github/workflows: skip robustness tests in forks Robustness tests require a larger instance size, which user forks cannot access. It also tries to trigger an ARM64 build that runs on actuated infrastructure. By not running them on user forks, contributors won't be notified that their builds are failing due to timeouts trying to run the job, while the tests will still run on etcd-io/bbolt pull requests and commits. Signed-off-by: Ivan Valdes --- .github/workflows/robustness_template.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml index 9300e14eb..132a804ed 100644 --- a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -18,6 +18,8 @@ permissions: read-all jobs: test: + # this is to prevent the job to run at forked projects + if: github.repository == 'etcd-io/bbolt' timeout-minutes: 210 runs-on: ${{ fromJson(inputs.runs-on) }} steps: From e457711987e002b55c2192afe6fa645a21b4287e Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sun, 21 Apr 2024 12:42:06 +0100 Subject: [PATCH 241/439] Add test case to reproduce the issue that a cursor can't continue to iterate elements in reverse direction after it has reached to the first element Signed-off-by: Benjamin Wang --- cursor_test.go | 133 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 133 
insertions(+) diff --git a/cursor_test.go b/cursor_test.go index 42e2cd6c0..581700149 100644 --- a/cursor_test.go +++ b/cursor_test.go @@ -11,11 +11,144 @@ import ( "testing" "testing/quick" + "github.com/stretchr/testify/require" + bolt "go.etcd.io/bbolt" "go.etcd.io/bbolt/errors" "go.etcd.io/bbolt/internal/btesting" ) +// TestCursor_RepeatOperations verifies that a cursor can continue to +// iterate over all elements in reverse direction when it has already +// reached to the end or beginning. +// Refer to https://github.com/etcd-io/bbolt/issues/733 +func TestCursor_RepeatOperations(t *testing.T) { + testCases := []struct { + name string + testFunc func(t2 *testing.T, bucket *bolt.Bucket) + }{ + { + name: "Repeat NextPrevNext", + testFunc: testRepeatCursorOperations_NextPrevNext, + }, + { + name: "Repeat PrevNextPrev", + testFunc: testRepeatCursorOperations_PrevNextPrev, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 4096}) + + bucketName := []byte("data") + + _ = db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists(bucketName) + testCursorRepeatOperations_PrepareData(t, b) + return nil + }) + + _ = db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(bucketName) + tc.testFunc(t, b) + return nil + }) + }) + } +} + +func testCursorRepeatOperations_PrepareData(t *testing.T, b *bolt.Bucket) { + // ensure we have at least one branch page. 
+ for i := 0; i < 1000; i++ { + k := []byte(fmt.Sprintf("%05d", i)) + err := b.Put(k, k) + require.NoError(t, err) + } +} + +func testRepeatCursorOperations_NextPrevNext(t *testing.T, b *bolt.Bucket) { + c := b.Cursor() + c.First() + startKey := []byte(fmt.Sprintf("%05d", 2)) + returnedKey, _ := c.Seek(startKey) + require.Equal(t, startKey, returnedKey) + + // Step 1: verify next + for i := 3; i < 1000; i++ { + expectedKey := []byte(fmt.Sprintf("%05d", i)) + actualKey, _ := c.Next() + require.Equal(t, expectedKey, actualKey) + } + + // Once we've reached the end, it should always return nil no matter how many times we call `Next`. + for i := 0; i < 10; i++ { + k, _ := c.Next() + require.Equal(t, []byte(nil), k) + } + + // Step 2: verify prev + for i := 998; i >= 0; i-- { + expectedKey := []byte(fmt.Sprintf("%05d", i)) + actualKey, _ := c.Prev() + require.Equal(t, expectedKey, actualKey) + } + + // Once we've reached the beginning, it should always return nil no matter how many times we call `Prev`. + for i := 0; i < 10; i++ { + k, _ := c.Prev() + require.Equal(t, []byte(nil), k) + } + + // Step 3: verify next again + for i := 1; i < 1000; i++ { + expectedKey := []byte(fmt.Sprintf("%05d", i)) + actualKey, _ := c.Next() + require.Equal(t, expectedKey, actualKey) + } +} + +func testRepeatCursorOperations_PrevNextPrev(t *testing.T, b *bolt.Bucket) { + c := b.Cursor() + + startKey := []byte(fmt.Sprintf("%05d", 998)) + returnedKey, _ := c.Seek(startKey) + require.Equal(t, startKey, returnedKey) + + // Step 1: verify prev + for i := 997; i >= 0; i-- { + expectedKey := []byte(fmt.Sprintf("%05d", i)) + actualKey, _ := c.Prev() + require.Equal(t, expectedKey, actualKey) + } + + // Once we've reached the beginning, it should always return nil no matter how many times we call `Prev`. 
+ for i := 0; i < 10; i++ { + k, _ := c.Prev() + require.Equal(t, []byte(nil), k) + } + + // Step 2: verify next + for i := 1; i < 1000; i++ { + expectedKey := []byte(fmt.Sprintf("%05d", i)) + actualKey, _ := c.Next() + require.Equal(t, expectedKey, actualKey) + } + + // Once we've reached the end, it should always return nil no matter how many times we call `Next`. + for i := 0; i < 10; i++ { + k, _ := c.Next() + require.Equal(t, []byte(nil), k) + } + + // Step 3: verify prev again + for i := 998; i >= 0; i-- { + expectedKey := []byte(fmt.Sprintf("%05d", i)) + actualKey, _ := c.Prev() + require.Equal(t, expectedKey, actualKey) + } +} + // Ensure that a cursor can return a reference to the bucket that created it. func TestCursor_Bucket(t *testing.T) { db := btesting.MustCreateDB(t) From 6967960a726f74f3efc32ff3dec59ce27f048fef Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 26 Apr 2024 15:41:43 +0100 Subject: [PATCH 242/439] Ensure a cursor can continue to iterate elements in reverse direction by call Next when it has already reached the beginning Signed-off-by: Benjamin Wang --- cursor.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/cursor.go b/cursor.go index acd2216e2..0c1e28c10 100644 --- a/cursor.go +++ b/cursor.go @@ -74,7 +74,7 @@ func (c *Cursor) Last() (key []byte, value []byte) { // If this is an empty page (calling Delete may result in empty pages) // we call prev to find the last page that is not empty - for len(c.stack) > 0 && c.stack[len(c.stack)-1].count() == 0 { + for len(c.stack) > 1 && c.stack[len(c.stack)-1].count() == 0 { c.prev() } @@ -257,6 +257,15 @@ func (c *Cursor) prev() (key []byte, value []byte, flags uint32) { elem.index-- break } + // If we've hit the beginning, we should stop moving the cursor, + // and stay at the first element, so that users can continue to + // iterate over the elements in reverse direction by calling `Next`. + // We should return nil in such case. 
+ // Refer to https://github.com/etcd-io/bbolt/issues/733 + if len(c.stack) == 1 { + c.first() + return nil, nil, 0 + } c.stack = c.stack[:i] } From 5159803c625ecb36feb6edf9f3f1a7d8c6a0f052 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 2 May 2024 16:24:36 +0100 Subject: [PATCH 243/439] Update readme to clearly clarify the behaviour of Next and Prev Signed-off-by: Benjamin Wang --- README.md | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 5b6cf621e..92c0083a1 100644 --- a/README.md +++ b/README.md @@ -441,10 +441,14 @@ Prev() Move to the previous key. ``` Each of those functions has a return signature of `(key []byte, value []byte)`. -When you have iterated to the end of the cursor then `Next()` will return a -`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` -before calling `Next()` or `Prev()`. If you do not seek to a position then -these functions will return a `nil` key. +You must seek to a position using `First()`, `Last()`, or `Seek()` before calling +`Next()` or `Prev()`. If you do not seek to a position then these functions will +return a `nil` key. + +When you have iterated to the end of the cursor, then `Next()` will return a +`nil` key and the cursor still points to the last element if present. When you +have iterated to the beginning of the cursor, then `Prev()` will return a `nil` +key and the cursor still points to the first element if present. If you remove key/value pairs during iteration, the cursor may automatically move to the next position if present in current node each time removing a key. 
From 6ba9b2c20bcb97f9d0e06f9a4b0cb8689f71497a Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 3 May 2024 15:18:13 +0100 Subject: [PATCH 244/439] Update changelog for 1.3.10 Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.3.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/CHANGELOG/CHANGELOG-1.3.md b/CHANGELOG/CHANGELOG-1.3.md index bd856b900..da7b339cf 100644 --- a/CHANGELOG/CHANGELOG-1.3.md +++ b/CHANGELOG/CHANGELOG-1.3.md @@ -2,6 +2,17 @@ Note that we start to track changes starting from v1.3.7.
+## v1.3.10(TBD) + +### BoltDB +- [Remove deprecated `UnsafeSlice` and use `unsafe.Slice`](https://github.com/etcd-io/bbolt/pull/717) +- [Stabilize the behaviour of Prev when the cursor already points to the first element](https://github.com/etcd-io/bbolt/pull/744) + +### Other +- [Bump go version to 1.21.9](https://github.com/etcd-io/bbolt/pull/713) + +
+ ## v1.3.9(2024-02-24) ### BoltDB From dc3e157531077734805987f2b0493cd8e0553dc9 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 3 May 2024 15:01:05 +0100 Subject: [PATCH 245/439] Update changelog for 1.4.0-alpha.1 Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.4.md | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/CHANGELOG/CHANGELOG-1.4.md b/CHANGELOG/CHANGELOG-1.4.md index cbec652dd..bd5cbe355 100644 --- a/CHANGELOG/CHANGELOG-1.4.md +++ b/CHANGELOG/CHANGELOG-1.4.md @@ -1,6 +1,35 @@
+## v1.4.0-alpha.1(TBD) + +### BoltDB +- [Enhance check functionality to support checking starting from a pageId](https://github.com/etcd-io/bbolt/pull/659) +- [Optimize the logger performance for frequent called methods](https://github.com/etcd-io/bbolt/pull/741) +- [Stabilize the behaviour of Prev when the cursor already points to the first element](https://github.com/etcd-io/bbolt/pull/734) + +### CMD +- [Fix `bbolt keys` and `bbolt get` to prevent them from panicking when no parameter provided](https://github.com/etcd-io/bbolt/pull/682) +- [Fix surgery freelist command in info logs](https://github.com/etcd-io/bbolt/pull/700) +- [Remove txid references in surgery meta command's comment and description](https://github.com/etcd-io/bbolt/pull/703) +- [Add rnd read capabilities to bbolt bench](https://github.com/etcd-io/bbolt/pull/711) +- [Use `cobra.ExactArgs` to simplify the argument number check](https://github.com/etcd-io/bbolt/pull/728) +- [Migrate `bbolt check` command to cobra style](https://github.com/etcd-io/bbolt/pull/723) +- [Simplify the naming of cobra commands](https://github.com/etcd-io/bbolt/pull/732) +- [Aggregate adding completed ops for read test of the `bbolt bench` command](https://github.com/etcd-io/bbolt/pull/721) +- [Add `--from-page` flag to `bbolt check` command](https://github.com/etcd-io/bbolt/pull/737) + +### Document +- [Add document for a known issue on the writing a value with a length of 0](https://github.com/etcd-io/bbolt/pull/730) + +### Test +- [Enhance robustness test to cover XFS](https://github.com/etcd-io/bbolt/pull/707) + +### Other +- [Bump go toolchain version to 1.22.2](https://github.com/etcd-io/bbolt/pull/712) + +
+ ## v1.4.0-alpha.0(2024-01-12) ### BoltDB From 05bc008d6382397a687c2ab03754441a75ba9266 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Mon, 6 May 2024 10:21:51 +0100 Subject: [PATCH 246/439] Update release date of v1.3.10 Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.3.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG/CHANGELOG-1.3.md b/CHANGELOG/CHANGELOG-1.3.md index da7b339cf..0d6b4a30f 100644 --- a/CHANGELOG/CHANGELOG-1.3.md +++ b/CHANGELOG/CHANGELOG-1.3.md @@ -2,7 +2,7 @@ Note that we start to track changes starting from v1.3.7.
-## v1.3.10(TBD) +## v1.3.10(2024-05-06) ### BoltDB - [Remove deprecated `UnsafeSlice` and use `unsafe.Slice`](https://github.com/etcd-io/bbolt/pull/717) From b0085af327e9093f4d45e171de26e4063803fd3c Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Mon, 6 May 2024 10:31:29 +0100 Subject: [PATCH 247/439] Update release date of v1.4.0-alpha.1 Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.4.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG/CHANGELOG-1.4.md b/CHANGELOG/CHANGELOG-1.4.md index bd5cbe355..317f9befb 100644 --- a/CHANGELOG/CHANGELOG-1.4.md +++ b/CHANGELOG/CHANGELOG-1.4.md @@ -1,7 +1,7 @@
-## v1.4.0-alpha.1(TBD) +## v1.4.0-alpha.1(2024-05-06) ### BoltDB - [Enhance check functionality to support checking starting from a pageId](https://github.com/etcd-io/bbolt/pull/659) From b1245be0270f5ab7ff67b1aecbfb3ff96bd8ab97 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 May 2024 14:14:44 +0000 Subject: [PATCH 248/439] build(deps): Bump golang.org/x/sys from 0.19.0 to 0.20.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.19.0 to 0.20.0. - [Commits](https://github.com/golang/sys/compare/v0.19.0...v0.20.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c5c2121e3..6e0d5fdf0 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.9.0 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.7.0 - golang.org/x/sys v0.19.0 + golang.org/x/sys v0.20.0 ) require ( diff --git a/go.sum b/go.sum index 5b6b36db2..f0de64be4 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From a045cf50c9f381ef127fbf01c5ec540dfae6ad29 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 May 2024 14:26:17 +0000 Subject: [PATCH 249/439] build(deps): Bump golangci/golangci-lint-action from 5.0.0 to 5.3.0 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 5.0.0 to 5.3.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/82d40c283aeb1f2b6595839195e95c2d6a49081b...38e1018663fa5173f3968ea0777460d3de38f256) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_windows.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 10af27cc7..4ad777dcf 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -52,4 +52,4 @@ jobs: ;; esac - name: golangci-lint - uses: golangci/golangci-lint-action@82d40c283aeb1f2b6595839195e95c2d6a49081b # v5.0.0 + uses: golangci/golangci-lint-action@38e1018663fa5173f3968ea0777460d3de38f256 # v5.3.0 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 14e1b084f..c2987b0b0 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -39,7 +39,7 @@ jobs: esac shell: bash - name: golangci-lint - uses: golangci/golangci-lint-action@82d40c283aeb1f2b6595839195e95c2d6a49081b # v5.0.0 + uses: golangci/golangci-lint-action@38e1018663fa5173f3968ea0777460d3de38f256 # v5.3.0 coverage: 
needs: ["test-windows"] From 206432416108657677a1b79c84993cefd574996b Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Wed, 8 May 2024 17:42:06 -0400 Subject: [PATCH 250/439] github/workflows: don't run race tests on user forks Race tests require a larger instance size, which user forks cannot access. By not running them on user forks, contributors won't be notified that their builds are failing due to timeouts trying to run the job, while the tests will still run on etcd-io/bbolt pull requests and commits. Signed-off-by: Ivan Valdes --- .github/workflows/tests-template.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 4ad777dcf..81ce95d23 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -16,7 +16,7 @@ permissions: read-all jobs: test-linux: # this is to prevent arm64 jobs from running at forked projects - if: ${{ github.repository == 'etcd-io/bbolt' || startsWith(inputs.runs-on, 'ubuntu') }} + if: ${{ github.repository == 'etcd-io/bbolt' || inputs.runs-on == 'ubuntu-latest' }} strategy: fail-fast: false matrix: From 6a04aaa322d89d1e618bf4597850685cd3acee46 Mon Sep 17 00:00:00 2001 From: Lavish pal Date: Sat, 11 May 2024 00:49:43 +0530 Subject: [PATCH 251/439] update go version to 1.22.3 Signed-off-by: Lavish pal --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index 6fee2fedb..89144dbc3 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.22.2 +1.22.3 diff --git a/go.mod b/go.mod index 6e0d5fdf0..fba6b5ac4 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.22 -toolchain go1.22.2 +toolchain go1.22.3 require ( github.com/spf13/cobra v1.8.0 From c4c4fbda3c598eed1787f8507e613e396eaa8459 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 May 2024 
14:55:17 +0000 Subject: [PATCH 252/439] build(deps): Bump golangci/golangci-lint-action from 5.3.0 to 6.0.1 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 5.3.0 to 6.0.1. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/38e1018663fa5173f3968ea0777460d3de38f256...a4f60bb28d35aeee14e6880718e0c85ff1882e64) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_windows.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 81ce95d23..59f5b05c3 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -52,4 +52,4 @@ jobs: ;; esac - name: golangci-lint - uses: golangci/golangci-lint-action@38e1018663fa5173f3968ea0777460d3de38f256 # v5.3.0 + uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index c2987b0b0..406a8b11a 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -39,7 +39,7 @@ jobs: esac shell: bash - name: golangci-lint - uses: golangci/golangci-lint-action@38e1018663fa5173f3968ea0777460d3de38f256 # v5.3.0 + uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1 coverage: needs: ["test-windows"] From 7eeb5d89789f54499a25acca8aac581702a3447f Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 14 May 2024 13:07:04 +0100 Subject: [PATCH 253/439] Extend the maintainers of bbolt 1. Add @tjungblu as a reviewer 2. 
Add OWNERS file under cmd/bbolt, and add the existing maintainers/reviewers as approvers, and add @Elbehery and @ivanvc as reviewers. Signed-off-by: Benjamin Wang --- OWNERS | 1 + cmd/bbolt/OWNERS | 12 ++++++++++++ 2 files changed, 13 insertions(+) create mode 100644 cmd/bbolt/OWNERS diff --git a/OWNERS b/OWNERS index ab9a6b81f..91f168a79 100644 --- a/OWNERS +++ b/OWNERS @@ -7,3 +7,4 @@ approvers: - spzala # Sahdev Zala reviewers: - fuweid # Wei Fu + - tjungblu # Thomas Jungblut diff --git a/cmd/bbolt/OWNERS b/cmd/bbolt/OWNERS new file mode 100644 index 000000000..d4d42d4af --- /dev/null +++ b/cmd/bbolt/OWNERS @@ -0,0 +1,12 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - ahrtr # Benjamin Wang + - fuweid # Wei Fu + - serathius # Marek Siarkowicz + - ptabor # Piotr Tabor + - spzala # Sahdev Zala + - tjungblu # Thomas Jungblut +reviewers: + - elbehery # Mustafa Elbehery + - ivanvc # Ivan Valdes From b8f9c332f4cf755f58d5d87d6e1146a6f37cc99b Mon Sep 17 00:00:00 2001 From: Anthony Nandaa Date: Thu, 23 May 2024 19:09:32 +0300 Subject: [PATCH 254/439] docs: add few more projects using bbolt bbolt is extensively used in the container world. I have just added the prominent projects here but I'm sure we have a few more. Signed-off-by: Anthony Nandaa --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 92c0083a1..c4ee84875 100644 --- a/README.md +++ b/README.md @@ -973,10 +973,12 @@ Below is a list of public, open source projects that use Bolt: * [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. * [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining simple tx and key scans. +* [Buildkit](https://github.com/moby/buildkit) - concurrent, cache-efficient, and Dockerfile-agnostic builder toolkit * [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. 
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. * [🌰 Chestnut](https://github.com/jrapoport/chestnut) - Chestnut is encrypted storage for Go. * [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. +* [Containerd](https://github.com/containerd/containerd) - An open and reliable container runtime * [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. * [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. * [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. From bbf2a10609043bb77c3fff6561a3f74a23d26258 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 4 Jun 2024 14:51:19 +0100 Subject: [PATCH 255/439] add verification on the pageID when creating nodes for inline buckets Signed-off-by: Benjamin Wang --- bucket.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/bucket.go b/bucket.go index 1d1cee0ef..785ad9bd5 100644 --- a/bucket.go +++ b/bucket.go @@ -876,6 +876,12 @@ func (b *Bucket) node(pgId common.Pgid, parent *node) *node { var p = b.page if p == nil { p = b.tx.page(pgId) + } else { + // if p isn't nil, then it's an inline bucket. + // The pgId must be 0 in this case. + common.Verify(func() { + common.Assert(pgId == 0, "The page ID (%d) isn't 0 for an inline bucket", pgId) + }) } // Read the page into the node and cache it. 
From 83ef8d691c57bb07eb3e6ed9123060fe24e90ef2 Mon Sep 17 00:00:00 2001 From: ArkaSaha30 Date: Wed, 5 Jun 2024 15:10:45 +0530 Subject: [PATCH 256/439] Bump Go version to 1.22.4: CVE 2024-24790 fix Signed-off-by: ArkaSaha30 --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index 89144dbc3..2a0ba77cc 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.22.3 +1.22.4 diff --git a/go.mod b/go.mod index fba6b5ac4..cc8eb79ea 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.22 -toolchain go1.22.3 +toolchain go1.22.4 require ( github.com/spf13/cobra v1.8.0 From 487b5dd3df17a0407e71a92b27749d6cf411951e Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 5 Jun 2024 17:14:23 +0100 Subject: [PATCH 257/439] Update concurrent test to support multiple operations in each transaction Signed-off-by: Benjamin Wang --- concurrent_test.go | 206 +++++++++++++++++++++++---------------------- 1 file changed, 105 insertions(+), 101 deletions(-) diff --git a/concurrent_test.go b/concurrent_test.go index 3995c0466..10f1a2f8e 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -347,27 +347,45 @@ func (w *worker) name() string { func (w *worker) run() (historyRecords, error) { var rs historyRecords + + ticker := time.NewTicker(1 * time.Second) + defer ticker.Stop() + for { select { case <-w.stopCh: - w.t.Logf("%q finished.", w.name()) return rs, nil default: } - op := w.pickOperation() - bucket, key := w.pickBucket(), w.pickKey() - rec, err := executeOperation(op, w.db, bucket, key, w.conf) - if err != nil { - readErr := fmt.Errorf("[%s: %s]: %w", w.name(), op, err) - w.t.Error(readErr) - w.errCh <- readErr - return rs, readErr - } + err := w.db.Update(func(tx *bolt.Tx) error { + for { + op := w.pickOperation() + bucket, key := w.pickBucket(), w.pickKey() + rec, eerr := executeOperation(op, tx, bucket, key, w.conf) + if eerr != nil { + opErr := fmt.Errorf("[%s: %s]: %w", 
w.name(), op, eerr) + w.t.Error(opErr) + w.errCh <- opErr + return opErr + } + + rs = append(rs, rec) + if w.conf.workInterval != (duration{}) { + time.Sleep(randomDurationInRange(w.conf.workInterval.min, w.conf.workInterval.max)) + } - rs = append(rs, rec) - if w.conf.workInterval != (duration{}) { - time.Sleep(randomDurationInRange(w.conf.workInterval.min, w.conf.workInterval.max)) + select { + case <-ticker.C: + return nil + case <-w.stopCh: + return nil + default: + } + } + }) + if err != nil { + return rs, err } } } @@ -401,111 +419,100 @@ func (w *worker) pickOperation() OperationType { panic("unexpected") } -func executeOperation(op OperationType, db *bolt.DB, bucket []byte, key []byte, conf concurrentConfig) (historyRecord, error) { +func executeOperation(op OperationType, tx *bolt.Tx, bucket []byte, key []byte, conf concurrentConfig) (historyRecord, error) { switch op { case Read: - return executeRead(db, bucket, key, conf.readInterval) + return executeRead(tx, bucket, key, conf.readInterval) case Write: - return executeWrite(db, bucket, key, conf.writeBytes, conf.noopWriteRatio) + return executeWrite(tx, bucket, key, conf.writeBytes, conf.noopWriteRatio) case Delete: - return executeDelete(db, bucket, key) + return executeDelete(tx, bucket, key) default: panic(fmt.Sprintf("unexpected operation type: %s", op)) } } -func executeRead(db *bolt.DB, bucket []byte, key []byte, readInterval duration) (historyRecord, error) { +func executeRead(tx *bolt.Tx, bucket []byte, key []byte, readInterval duration) (historyRecord, error) { var rec historyRecord - err := db.View(func(tx *bolt.Tx) error { - b := tx.Bucket(bucket) - initialVal := b.Get(key) - time.Sleep(randomDurationInRange(readInterval.min, readInterval.max)) - val := b.Get(key) + b := tx.Bucket(bucket) - if !bytes.Equal(initialVal, val) { - return fmt.Errorf("read different values for the same key (%q), value1: %q, value2: %q", - string(key), formatBytes(initialVal), formatBytes(val)) - } + initialVal := 
b.Get(key) + time.Sleep(randomDurationInRange(readInterval.min, readInterval.max)) + val := b.Get(key) - clonedVal := make([]byte, len(val)) - copy(clonedVal, val) + if !bytes.Equal(initialVal, val) { + return rec, fmt.Errorf("read different values for the same key (%q), value1: %q, value2: %q", + string(key), formatBytes(initialVal), formatBytes(val)) + } - rec = historyRecord{ - OperationType: Read, - Bucket: string(bucket), - Key: string(key), - Value: clonedVal, - Txid: tx.ID(), - } + clonedVal := make([]byte, len(val)) + copy(clonedVal, val) - return nil - }) + rec = historyRecord{ + OperationType: Read, + Bucket: string(bucket), + Key: string(key), + Value: clonedVal, + Txid: tx.ID(), + } - return rec, err + return rec, nil } -func executeWrite(db *bolt.DB, bucket []byte, key []byte, writeBytes bytesRange, noopWriteRatio int) (historyRecord, error) { +func executeWrite(tx *bolt.Tx, bucket []byte, key []byte, writeBytes bytesRange, noopWriteRatio int) (historyRecord, error) { var rec historyRecord - err := db.Update(func(tx *bolt.Tx) error { - if mrand.Intn(100) < noopWriteRatio { - // A no-op write transaction has two consequences: - // 1. The txid increases by 1; - // 2. Two meta pages point to the same root page. - rec = historyRecord{ - OperationType: Write, - Bucket: string(bucket), - Key: noopTxKey, - Value: nil, - Txid: tx.ID(), - } - return nil + if mrand.Intn(100) < noopWriteRatio { + // A no-op write transaction has two consequences: + // 1. The txid increases by 1; + // 2. Two meta pages point to the same root page. 
+ rec = historyRecord{ + OperationType: Write, + Bucket: string(bucket), + Key: noopTxKey, + Value: nil, + Txid: tx.ID(), } + return rec, nil + } - b := tx.Bucket(bucket) + b := tx.Bucket(bucket) - valueBytes := randomIntInRange(writeBytes.min, writeBytes.max) - v := make([]byte, valueBytes) - if _, cErr := crand.Read(v); cErr != nil { - return cErr - } + valueBytes := randomIntInRange(writeBytes.min, writeBytes.max) + v := make([]byte, valueBytes) + if _, cErr := crand.Read(v); cErr != nil { + return rec, cErr + } - putErr := b.Put(key, v) - if putErr == nil { - rec = historyRecord{ - OperationType: Write, - Bucket: string(bucket), - Key: string(key), - Value: v, - Txid: tx.ID(), - } + putErr := b.Put(key, v) + if putErr == nil { + rec = historyRecord{ + OperationType: Write, + Bucket: string(bucket), + Key: string(key), + Value: v, + Txid: tx.ID(), } + } - return putErr - }) - - return rec, err + return rec, putErr } -func executeDelete(db *bolt.DB, bucket []byte, key []byte) (historyRecord, error) { +func executeDelete(tx *bolt.Tx, bucket []byte, key []byte) (historyRecord, error) { var rec historyRecord - err := db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket(bucket) + b := tx.Bucket(bucket) - deleteErr := b.Delete(key) - if deleteErr == nil { - rec = historyRecord{ - OperationType: Delete, - Bucket: string(bucket), - Key: string(key), - Txid: tx.ID(), - } + err := b.Delete(key) + if err == nil { + rec = historyRecord{ + OperationType: Delete, + Bucket: string(bucket), + Key: string(key), + Txid: tx.ID(), } - - return deleteErr - }) + } return rec, err } @@ -674,17 +681,7 @@ func (rs historyRecords) Less(i, j int) bool { } // Sorted by txid - if rs[i].Txid != rs[j].Txid { - return rs[i].Txid < rs[j].Txid - } - - // Sorted by operation type: put `Read` after other operation types - // if they operate on the same (bucket, key) and have the same txid. 
- if rs[i].OperationType == Read { - return false - } - - return true + return rs[i].Txid < rs[j].Txid } func (rs historyRecords) Swap(i, j int) { @@ -695,7 +692,7 @@ func validateIncrementalTxid(rs historyRecords) error { lastTxid := rs[0].Txid for i := 1; i < len(rs); i++ { - if (rs[i].OperationType == Read && rs[i].Txid < lastTxid) || (rs[i].OperationType != Read && rs[i].Txid <= lastTxid) { + if rs[i].Txid < lastTxid { return fmt.Errorf("detected non-incremental txid(%d, %d) in %s mode", lastTxid, rs[i].Txid, rs[i].OperationType) } lastTxid = rs[i].Txid @@ -705,7 +702,7 @@ func validateIncrementalTxid(rs historyRecords) error { } func validateSequential(rs historyRecords) error { - sort.Sort(rs) + sort.Stable(rs) type bucketAndKey struct { bucket string @@ -886,7 +883,11 @@ func TestConcurrentRepeatableRead(t *testing.T) { t.Logf("Perform %d write operations after starting a long running read operation", writeOperationCountInBetween) for j := 0; j < writeOperationCountInBetween; j++ { - _, err := executeWrite(db, bucket, key, writeBytes, 0) + err := db.Update(func(tx *bolt.Tx) error { + _, eerr := executeWrite(tx, bucket, key, writeBytes, 0) + return eerr + }) + require.NoError(t, err) } } @@ -902,7 +903,10 @@ func TestConcurrentRepeatableRead(t *testing.T) { return default: } - _, err := executeWrite(db, bucket, key, writeBytes, 0) + err := db.Update(func(tx *bolt.Tx) error { + _, eerr := executeWrite(tx, bucket, key, writeBytes, 0) + return eerr + }) require.NoError(t, err) } }() From 29f165fdc0a418b9509ee16e825bde02e5a095c9 Mon Sep 17 00:00:00 2001 From: Thomas Jungblut Date: Thu, 6 Jun 2024 12:01:28 +0200 Subject: [PATCH 258/439] add gobench output option Signed-off-by: Thomas Jungblut --- cmd/bbolt/main.go | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 10bb95b96..edaa98532 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -16,6 +16,7 @@ import ( "strconv" 
"strings" "sync/atomic" + "testing" "time" "unicode" "unicode/utf8" @@ -1050,12 +1051,29 @@ func (cmd *benchCommand) Run(args ...string) error { } // Print results. - fmt.Fprintf(cmd.Stderr, "# Write\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", writeResults.CompletedOps(), writeResults.Duration(), writeResults.OpDuration(), writeResults.OpsPerSecond()) - fmt.Fprintf(cmd.Stderr, "# Read\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", readResults.CompletedOps(), readResults.Duration(), readResults.OpDuration(), readResults.OpsPerSecond()) + if options.GoBenchOutput { + // below replicates the output of testing.B benchmarks, e.g. for external tooling + benchWriteName := "BenchmarkWrite" + benchReadName := "BenchmarkRead" + maxLen := max(len(benchReadName), len(benchWriteName)) + printGoBenchResult(cmd.Stderr, writeResults, maxLen, benchWriteName) + printGoBenchResult(cmd.Stderr, readResults, maxLen, benchReadName) + } else { + fmt.Fprintf(cmd.Stderr, "# Write\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", writeResults.CompletedOps(), writeResults.Duration(), writeResults.OpDuration(), writeResults.OpsPerSecond()) + fmt.Fprintf(cmd.Stderr, "# Read\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", readResults.CompletedOps(), readResults.Duration(), readResults.OpDuration(), readResults.OpsPerSecond()) + } fmt.Fprintln(cmd.Stderr, "") + return nil } +func printGoBenchResult(w io.Writer, r BenchResults, maxLen int, benchName string) { + gobenchResult := testing.BenchmarkResult{} + gobenchResult.T = r.Duration() + gobenchResult.N = int(r.CompletedOps()) + fmt.Fprintf(w, "%-*s\t%s\n", maxLen, benchName, gobenchResult.String()) +} + // ParseFlags parses the command line flags. 
func (cmd *benchCommand) ParseFlags(args []string) (*BenchOptions, error) { var options BenchOptions @@ -1076,6 +1094,7 @@ func (cmd *benchCommand) ParseFlags(args []string) (*BenchOptions, error) { fs.BoolVar(&options.NoSync, "no-sync", false, "") fs.BoolVar(&options.Work, "work", false, "") fs.StringVar(&options.Path, "path", "", "") + fs.BoolVar(&options.GoBenchOutput, "gobench-output", false, "") fs.SetOutput(cmd.Stderr) if err := fs.Parse(args); err != nil { return nil, err @@ -1555,6 +1574,7 @@ type BenchOptions struct { NoSync bool Work bool Path string + GoBenchOutput bool } // BenchResults represents the performance results of the benchmark and is thread-safe. From 9fbc1e8688fb6cc71cd74d3aa6caf50f6b03cb25 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Jun 2024 14:16:38 +0000 Subject: [PATCH 259/439] build(deps): Bump golang.org/x/sys from 0.20.0 to 0.21.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.20.0 to 0.21.0. - [Commits](https://github.com/golang/sys/compare/v0.20.0...v0.21.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index cc8eb79ea..44533b806 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.9.0 go.etcd.io/gofail v0.1.0 golang.org/x/sync v0.7.0 - golang.org/x/sys v0.20.0 + golang.org/x/sys v0.21.0 ) require ( diff --git a/go.sum b/go.sum index f0de64be4..3a4b802c5 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 8704e5ef3aa52884aac1fd62770be9212f000ce8 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Mon, 10 Jun 2024 16:53:00 -0700 Subject: [PATCH 260/439] cmd/bbolt: write bench results to stdout Signed-off-by: Ivan Valdes --- cmd/bbolt/main.go | 8 ++++---- cmd/bbolt/main_test.go | 5 +++-- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index edaa98532..a9256a699 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -1056,11 +1056,11 @@ func (cmd *benchCommand) Run(args ...string) error { benchWriteName := "BenchmarkWrite" benchReadName := 
"BenchmarkRead" maxLen := max(len(benchReadName), len(benchWriteName)) - printGoBenchResult(cmd.Stderr, writeResults, maxLen, benchWriteName) - printGoBenchResult(cmd.Stderr, readResults, maxLen, benchReadName) + printGoBenchResult(cmd.Stdout, writeResults, maxLen, benchWriteName) + printGoBenchResult(cmd.Stdout, readResults, maxLen, benchReadName) } else { - fmt.Fprintf(cmd.Stderr, "# Write\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", writeResults.CompletedOps(), writeResults.Duration(), writeResults.OpDuration(), writeResults.OpsPerSecond()) - fmt.Fprintf(cmd.Stderr, "# Read\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", readResults.CompletedOps(), readResults.Duration(), readResults.OpDuration(), readResults.OpsPerSecond()) + fmt.Fprintf(cmd.Stdout, "# Write\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", writeResults.CompletedOps(), writeResults.Duration(), writeResults.OpDuration(), writeResults.OpsPerSecond()) + fmt.Fprintf(cmd.Stdout, "# Read\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", readResults.CompletedOps(), readResults.Duration(), readResults.OpDuration(), readResults.OpsPerSecond()) } fmt.Fprintln(cmd.Stderr, "") diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index 2dfb04449..727b38f55 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -484,6 +484,7 @@ func TestBenchCommand_Run(t *testing.T) { } stderr := m.Stderr.String() + stdout := m.Stdout.String() if !strings.Contains(stderr, "starting write benchmark.") || !strings.Contains(stderr, "starting read benchmark.") { t.Fatal(fmt.Errorf("benchmark result does not contain read/write start output:\n%s", stderr)) } @@ -492,8 +493,8 @@ func TestBenchCommand_Run(t *testing.T) { t.Fatal(fmt.Errorf("found iter mismatch in stdout:\n%s", stderr)) } - if !strings.Contains(stderr, "# Write") || !strings.Contains(stderr, "# Read") { - t.Fatal(fmt.Errorf("benchmark result does not contain read/write output:\n%s", stderr)) + if !strings.Contains(stdout, "# Write") || !strings.Contains(stdout, "# Read") { + 
t.Fatal(fmt.Errorf("benchmark result does not contain read/write output:\n%s", stdout)) } }) } From c3662951a6b7dd1eafcd47b922719af8828c3ffc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Jun 2024 14:15:33 +0000 Subject: [PATCH 261/439] build(deps): Bump github.com/spf13/cobra from 1.8.0 to 1.8.1 Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.8.0 to 1.8.1. - [Release notes](https://github.com/spf13/cobra/releases) - [Commits](https://github.com/spf13/cobra/compare/v1.8.0...v1.8.1) --- updated-dependencies: - dependency-name: github.com/spf13/cobra dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 44533b806..553312f7d 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22 toolchain go1.22.4 require ( - github.com/spf13/cobra v1.8.0 + github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 go.etcd.io/gofail v0.1.0 diff --git a/go.sum b/go.sum index 3a4b802c5..2713acd4b 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,4 @@ -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -6,8 +6,8 @@ github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLf github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= From 127f59ee188dc46c17bc8f927ecfe247e567602e Mon Sep 17 00:00:00 2001 From: ArkaSaha30 Date: Wed, 19 Jun 2024 15:06:32 +0530 Subject: [PATCH 262/439] Dependency Bump: gofail to v0.2.0 This commit will bump gofail to v0.2.0 Signed-off-by: ArkaSaha30 --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 553312f7d..3a8735523 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 - go.etcd.io/gofail v0.1.0 + go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.7.0 golang.org/x/sys v0.21.0 ) diff --git a/go.sum b/go.sum index 2713acd4b..8f4f37091 100644 --- a/go.sum +++ b/go.sum @@ -12,8 +12,8 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -go.etcd.io/gofail v0.1.0 h1:XItAMIhOojXFQMgrxjnd2EIIHun/d5qL0Pf7FzVTkFg= -go.etcd.io/gofail v0.1.0/go.mod h1:VZBCXYGZhHAinaBiiqYvuDynvahNsAyLFwB3kEHKz1M= +go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= 
+go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= From 3c0d2eeff7d16cb625f0bf625a396ff126612583 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 20 Jun 2024 22:51:54 +0100 Subject: [PATCH 263/439] Add verification on mergeSpans There shouldn't have any duplicated free page IDs, or overlap between the new free page IDs and the existing free page IDs. Signed-off-by: Benjamin Wang --- freelist.go | 25 +++++++++++++++ freelist_hmap.go | 79 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 104 insertions(+) diff --git a/freelist.go b/freelist.go index 731d75c46..5bbc27445 100644 --- a/freelist.go +++ b/freelist.go @@ -389,5 +389,30 @@ func (f *freelist) reindex() { // arrayMergeSpans try to merge list of pages(represented by pgids) with existing spans but using array func (f *freelist) arrayMergeSpans(ids common.Pgids) { sort.Sort(ids) + common.Verify(func() { + idsIdx := make(map[common.Pgid]struct{}) + for _, id := range f.ids { + // The existing f.ids shouldn't have duplicated free ID. + if _, ok := idsIdx[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in existing f.ids: %v", id, f.ids)) + } + idsIdx[id] = struct{}{} + } + + prev := common.Pgid(0) + for _, id := range ids { + // The ids shouldn't have duplicated free ID. Note page 0 and 1 + // are reserved for meta pages, so they can never be free page IDs. + if prev == id { + panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids)) + } + prev = id + + // The ids shouldn't have any overlap with the existing f.ids. 
+ if _, ok := idsIdx[id]; ok { + panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.ids: %v", id, ids, f.ids)) + } + } + }) f.ids = common.Pgids(f.ids).Merge(ids) } diff --git a/freelist_hmap.go b/freelist_hmap.go index 0d38976a1..c5c09f55e 100644 --- a/freelist_hmap.go +++ b/freelist_hmap.go @@ -1,6 +1,8 @@ package bbolt import ( + "fmt" + "reflect" "sort" "go.etcd.io/bbolt/internal/common" @@ -108,6 +110,33 @@ func (f *freelist) hashmapGetFreePageIDs() []common.Pgid { // hashmapMergeSpans try to merge list of pages(represented by pgids) with existing spans func (f *freelist) hashmapMergeSpans(ids common.Pgids) { + common.Verify(func() { + ids1Freemap := f.idsFromFreemaps() + ids2Forward := f.idsFromForwardMap() + ids3Backward := f.idsFromBackwardMap() + + if !reflect.DeepEqual(ids1Freemap, ids2Forward) { + panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.forwardMap: %v", f.freemaps, f.forwardMap)) + } + if !reflect.DeepEqual(ids1Freemap, ids3Backward) { + panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.backwardMap: %v", f.freemaps, f.backwardMap)) + } + + sort.Sort(ids) + prev := common.Pgid(0) + for _, id := range ids { + // The ids shouldn't have duplicated free ID. + if prev == id { + panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids)) + } + prev = id + + // The ids shouldn't have any overlap with the existing f.freemaps. + if _, ok := ids1Freemap[id]; ok { + panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.freemaps: %v", id, ids, f.freemaps)) + } + } + }) for _, id := range ids { // try to see if we can merge and update f.mergeWithExistingSpan(id) @@ -200,3 +229,53 @@ func (f *freelist) init(pgids []common.Pgid) { f.addSpan(start, size) } } + +// idsFromFreemaps get all free page IDs from f.freemaps. +// used by test only. 
+func (f *freelist) idsFromFreemaps() map[common.Pgid]struct{} { + ids := make(map[common.Pgid]struct{}) + for size, idSet := range f.freemaps { + for start := range idSet { + for i := 0; i < int(size); i++ { + id := start + common.Pgid(i) + if _, ok := ids[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in f.freemaps: %v", id, f.freemaps)) + } + ids[id] = struct{}{} + } + } + } + return ids +} + +// idsFromForwardMap get all free page IDs from f.forwardMap. +// used by test only. +func (f *freelist) idsFromForwardMap() map[common.Pgid]struct{} { + ids := make(map[common.Pgid]struct{}) + for start, size := range f.forwardMap { + for i := 0; i < int(size); i++ { + id := start + common.Pgid(i) + if _, ok := ids[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in f.forwardMap: %v", id, f.forwardMap)) + } + ids[id] = struct{}{} + } + } + return ids +} + +// idsFromBackwardMap get all free page IDs from f.backwardMap. +// used by test only. +func (f *freelist) idsFromBackwardMap() map[common.Pgid]struct{} { + ids := make(map[common.Pgid]struct{}) + for end, size := range f.backwardMap { + for i := 0; i < int(size); i++ { + id := end - common.Pgid(i) + if _, ok := ids[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in f.backwardMap: %v", id, f.backwardMap)) + } + ids[id] = struct{}{} + } + } + return ids +} From 3a7dc774819c8d8c55e2c504c4bf4e729667daf5 Mon Sep 17 00:00:00 2001 From: Andy Xie Date: Tue, 25 Jun 2024 11:35:33 +0800 Subject: [PATCH 264/439] change FileMode var formatter to %s Signed-off-by: Andy Xie --- db.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/db.go b/db.go index cd3c5b0ba..c6b0e7e7e 100644 --- a/db.go +++ b/db.go @@ -205,7 +205,7 @@ func Open(path string, mode os.FileMode, options *Options) (db *DB, err error) { lg := db.Logger() if lg != discardLogger { - lg.Infof("Opening db file (%s) with mode %x and with options: %s", path, mode, options) + lg.Infof("Opening db 
file (%s) with mode %s and with options: %s", path, mode, options) defer func() { if err != nil { lg.Errorf("Opening bbolt db (%s) failed: %v", path, err) From 607abddc74e9ac089abc80d2c0dd866a0e7c986d Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 26 Jun 2024 12:37:08 +0100 Subject: [PATCH 265/439] explicitly set the pagesize as 4096 for the concurrent test Different platforms may have different page size, but the test should be independent to the platforms; so explicitly set the pagesize as 4096. Signed-off-by: Benjamin Wang --- concurrent_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/concurrent_test.go b/concurrent_test.go index 10f1a2f8e..07c0c1f77 100644 --- a/concurrent_test.go +++ b/concurrent_test.go @@ -165,7 +165,9 @@ func concurrentReadAndWrite(t *testing.T, testDuration time.Duration) { t.Log("Preparing db.") - db := mustCreateDB(t, nil) + db := mustCreateDB(t, &bolt.Options{ + PageSize: 4096, + }) defer db.Close() err := db.Update(func(tx *bolt.Tx) error { for i := 0; i < conf.bucketCount; i++ { From a4a52a2d811e75be848f2a70ace9ccef1cbd2438 Mon Sep 17 00:00:00 2001 From: Thomas Jungblut Date: Wed, 26 Jun 2024 17:07:15 +0200 Subject: [PATCH 266/439] move array related freelist functions into own file Signed-off-by: Thomas Jungblut --- freelist.go | 90 ------------------------------------------- freelist_array.go | 98 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 98 insertions(+), 90 deletions(-) create mode 100644 freelist_array.go diff --git a/freelist.go b/freelist.go index 5bbc27445..fdf8a366e 100644 --- a/freelist.go +++ b/freelist.go @@ -82,11 +82,6 @@ func (f *freelist) count() int { return f.free_count() + f.pending_count() } -// arrayFreeCount returns count of free pages(array version) -func (f *freelist) arrayFreeCount() int { - return len(f.ids) -} - // pending_count returns count of pending pages func (f *freelist) pending_count() int { var count int @@ -107,50 +102,6 @@ func (f 
*freelist) copyall(dst []common.Pgid) { common.Mergepgids(dst, f.getFreePageIDs(), m) } -// arrayAllocate returns the starting page id of a contiguous list of pages of a given size. -// If a contiguous block cannot be found then 0 is returned. -func (f *freelist) arrayAllocate(txid common.Txid, n int) common.Pgid { - if len(f.ids) == 0 { - return 0 - } - - var initial, previd common.Pgid - for i, id := range f.ids { - if id <= 1 { - panic(fmt.Sprintf("invalid page allocation: %d", id)) - } - - // Reset initial page if this is not contiguous. - if previd == 0 || id-previd != 1 { - initial = id - } - - // If we found a contiguous block then remove it and return it. - if (id-initial)+1 == common.Pgid(n) { - // If we're allocating off the beginning then take the fast path - // and just adjust the existing slice. This will use extra memory - // temporarily but the append() in free() will realloc the slice - // as is necessary. - if (i + 1) == n { - f.ids = f.ids[i+1:] - } else { - copy(f.ids[i-n+1:], f.ids[i+1:]) - f.ids = f.ids[:len(f.ids)-n] - } - - // Remove from the free cache. - for i := common.Pgid(0); i < common.Pgid(n); i++ { - delete(f.cache, initial+i) - } - f.allocs[initial] = txid - return initial - } - - previd = id - } - return 0 -} - // free releases a page and its overflow for a given transaction id. // If the page is already free then a panic will occur. func (f *freelist) free(txid common.Txid, p *common.Page) { @@ -286,16 +237,6 @@ func (f *freelist) read(p *common.Page) { } } -// arrayReadIDs initializes the freelist from a given list of ids. -func (f *freelist) arrayReadIDs(ids []common.Pgid) { - f.ids = ids - f.reindex() -} - -func (f *freelist) arrayGetFreePageIDs() []common.Pgid { - return f.ids -} - // write writes the page ids onto a freelist page. All free and pending ids are // saved to disk since in the event of a program crash, all pending ids will // become free. 
@@ -385,34 +326,3 @@ func (f *freelist) reindex() { } } } - -// arrayMergeSpans try to merge list of pages(represented by pgids) with existing spans but using array -func (f *freelist) arrayMergeSpans(ids common.Pgids) { - sort.Sort(ids) - common.Verify(func() { - idsIdx := make(map[common.Pgid]struct{}) - for _, id := range f.ids { - // The existing f.ids shouldn't have duplicated free ID. - if _, ok := idsIdx[id]; ok { - panic(fmt.Sprintf("detected duplicated free page ID: %d in existing f.ids: %v", id, f.ids)) - } - idsIdx[id] = struct{}{} - } - - prev := common.Pgid(0) - for _, id := range ids { - // The ids shouldn't have duplicated free ID. Note page 0 and 1 - // are reserved for meta pages, so they can never be free page IDs. - if prev == id { - panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids)) - } - prev = id - - // The ids shouldn't have any overlap with the existing f.ids. - if _, ok := idsIdx[id]; ok { - panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.ids: %v", id, ids, f.ids)) - } - } - }) - f.ids = common.Pgids(f.ids).Merge(ids) -} diff --git a/freelist_array.go b/freelist_array.go new file mode 100644 index 000000000..2f0a7e4aa --- /dev/null +++ b/freelist_array.go @@ -0,0 +1,98 @@ +package bbolt + +import ( + "fmt" + "sort" + + "go.etcd.io/bbolt/internal/common" +) + +// arrayFreeCount returns count of free pages(array version) +func (f *freelist) arrayFreeCount() int { + return len(f.ids) +} + +// arrayAllocate returns the starting page id of a contiguous list of pages of a given size. +// If a contiguous block cannot be found then 0 is returned. +func (f *freelist) arrayAllocate(txid common.Txid, n int) common.Pgid { + if len(f.ids) == 0 { + return 0 + } + + var initial, previd common.Pgid + for i, id := range f.ids { + if id <= 1 { + panic(fmt.Sprintf("invalid page allocation: %d", id)) + } + + // Reset initial page if this is not contiguous. 
+ if previd == 0 || id-previd != 1 { + initial = id + } + + // If we found a contiguous block then remove it and return it. + if (id-initial)+1 == common.Pgid(n) { + // If we're allocating off the beginning then take the fast path + // and just adjust the existing slice. This will use extra memory + // temporarily but the append() in free() will realloc the slice + // as is necessary. + if (i + 1) == n { + f.ids = f.ids[i+1:] + } else { + copy(f.ids[i-n+1:], f.ids[i+1:]) + f.ids = f.ids[:len(f.ids)-n] + } + + // Remove from the free cache. + for i := common.Pgid(0); i < common.Pgid(n); i++ { + delete(f.cache, initial+i) + } + f.allocs[initial] = txid + return initial + } + + previd = id + } + return 0 +} + +// arrayReadIDs initializes the freelist from a given list of ids. +func (f *freelist) arrayReadIDs(ids []common.Pgid) { + f.ids = ids + f.reindex() +} + +func (f *freelist) arrayGetFreePageIDs() []common.Pgid { + return f.ids +} + +// arrayMergeSpans try to merge list of pages(represented by pgids) with existing spans but using array +func (f *freelist) arrayMergeSpans(ids common.Pgids) { + sort.Sort(ids) + common.Verify(func() { + idsIdx := make(map[common.Pgid]struct{}) + for _, id := range f.ids { + // The existing f.ids shouldn't have duplicated free ID. + if _, ok := idsIdx[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in existing f.ids: %v", id, f.ids)) + } + idsIdx[id] = struct{}{} + } + + prev := common.Pgid(0) + for _, id := range ids { + // The ids shouldn't have duplicated free ID. Note page 0 and 1 + // are reserved for meta pages, so they can never be free page IDs. + if prev == id { + panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids)) + } + prev = id + + // The ids shouldn't have any overlap with the existing f.ids. 
+ if _, ok := idsIdx[id]; ok { + panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.ids: %v", id, ids, f.ids)) + } + } + }) + f.ids = common.Pgids(f.ids).Merge(ids) +} From 848f5fb7e4347ca17bef731135019d2e431d5f7d Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 27 Jun 2024 16:48:18 +0100 Subject: [PATCH 267/439] Enhance TestDB_Concurrent_WriteTo to check consistent read Signed-off-by: Benjamin Wang --- db_test.go | 109 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 68 insertions(+), 41 deletions(-) diff --git a/db_test.go b/db_test.go index 3d360857e..757b896e8 100644 --- a/db_test.go +++ b/db_test.go @@ -668,68 +668,95 @@ func TestDB_BeginRW(t *testing.T) { } // TestDB_Concurrent_WriteTo checks that issuing WriteTo operations concurrently -// with commits does not produce corrupted db files. -func TestDB_Concurrent_WriteTo(t *testing.T) { - o := &bolt.Options{NoFreelistSync: false} +// with commits does not produce corrupted db files. It also verifies that all +// readonly transactions, which are created based on the same data view, should +// always read the same data. 
+func TestDB_Concurrent_WriteTo_and_ConsistentRead(t *testing.T) { + o := &bolt.Options{ + NoFreelistSync: false, + PageSize: 4096, + } db := btesting.MustCreateDBWithOption(t, o) + wtxs, rtxs := 50, 5 + bucketName := []byte("data") + + var dataLock sync.Mutex + dataCache := make(map[int][]map[string]string) + var wg sync.WaitGroup - wtxs, rtxs := 5, 5 wg.Add(wtxs * rtxs) - f := func(tx *bolt.Tx) { + f := func(round int, tx *bolt.Tx) { defer wg.Done() - f, err := os.CreateTemp("", "bolt-") - if err != nil { - panic(err) - } - time.Sleep(time.Duration(rand.Intn(20)+1) * time.Millisecond) - _, err = tx.WriteTo(f) - if err != nil { - panic(err) - } + time.Sleep(time.Duration(rand.Intn(200)+10) * time.Millisecond) + f := filepath.Join(t.TempDir(), fmt.Sprintf("%d-bolt-", round)) + err := tx.CopyFile(f, 0600) + require.NoError(t, err) + + // read all the data + b := tx.Bucket(bucketName) + data := make(map[string]string) + err = b.ForEach(func(k, v []byte) error { + data[string(k)] = string(v) + return nil + }) + require.NoError(t, err) + + // cache the data + dataLock.Lock() + dataSlice := dataCache[round] + dataSlice = append(dataSlice, data) + dataCache[round] = dataSlice + dataLock.Unlock() + err = tx.Rollback() - if err != nil { - panic(err) - } - f.Close() + require.NoError(t, err) copyOpt := *o - snap := btesting.MustOpenDBWithOption(t, f.Name(), ©Opt) + snap := btesting.MustOpenDBWithOption(t, f, ©Opt) defer snap.MustClose() snap.MustCheck() } - tx1, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - if _, err := tx1.CreateBucket([]byte("abc")); err != nil { - t.Fatal(err) - } - if err := tx1.Commit(); err != nil { - t.Fatal(err) - } + err := db.Update(func(tx *bolt.Tx) error { + _, err := tx.CreateBucket(bucketName) + return err + }) + require.NoError(t, err) for i := 0; i < wtxs; i++ { tx, err := db.Begin(true) - if err != nil { - t.Fatal(err) - } - if err := tx.Bucket([]byte("abc")).Put([]byte{0}, []byte{0}); err != nil { - t.Fatal(err) - } + 
require.NoError(t, err) + + b := tx.Bucket(bucketName) + for j := 0; j < rtxs; j++ { rtx, rerr := db.Begin(false) - if rerr != nil { - t.Fatal(rerr) + require.NoError(t, rerr) + go f(i, rtx) + + for k := 0; k < 10; k++ { + key, value := fmt.Sprintf("key_%d", rand.Intn(10)), fmt.Sprintf("value_%d", rand.Intn(100)) + perr := b.Put([]byte(key), []byte(value)) + require.NoError(t, perr) } - go f(rtx) - } - if err := tx.Commit(); err != nil { - t.Fatal(err) } + err = tx.Commit() + require.NoError(t, err) } wg.Wait() + + // compare the data. The data generated in the same round + // should be exactly the same. + for round, dataSlice := range dataCache { + data0 := dataSlice[0] + + for i := 1; i < len(dataSlice); i++ { + datai := dataSlice[i] + same := reflect.DeepEqual(data0, datai) + require.True(t, same, fmt.Sprintf("found inconsistent data in round %d, data[0]: %v, data[%d] : %v", round, data0, i, datai)) + } + } } // Ensure that opening a transaction while the DB is closed returns an error. From ac4f75514de9e9ff3f1153a5de7a4c58498de6d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20R=C3=BCger?= Date: Thu, 8 Feb 2024 23:54:15 +0100 Subject: [PATCH 268/439] Add benchmark tooling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds benchmarking using cmd/bbolt's bench, inspired on what it's used in kube-state-matrics. 
Co-authored-by: Manuel Rüger Signed-off-by: Ivan Valdes wip Signed-off-by: Ivan Valdes --- .github/workflows/benchmark-pr.yaml | 42 +++++++++++++++++ Makefile | 11 +++++ scripts/compare_benchmarks.sh | 70 +++++++++++++++++++++++++++++ 3 files changed, 123 insertions(+) create mode 100644 .github/workflows/benchmark-pr.yaml create mode 100755 scripts/compare_benchmarks.sh diff --git a/.github/workflows/benchmark-pr.yaml b/.github/workflows/benchmark-pr.yaml new file mode 100644 index 000000000..fb6728d46 --- /dev/null +++ b/.github/workflows/benchmark-pr.yaml @@ -0,0 +1,42 @@ +--- +name: Benchmarks on AMD64 +permissions: read-all +on: [pull_request] +jobs: + benchmark-pull-request: + runs-on: ubuntu-latest-8-cores + steps: + - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + with: + fetch-depth: 0 + - id: goversion + run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + with: + go-version: ${{ steps.goversion.outputs.goversion }} + - name: Run Benchmarks + run: | + BENCHSTAT_OUTPUT_FILE=result.txt make test-benchmark-compare REF=${{ github.event.pull_request.base.sha }} + - run: | + echo "\`\`\`" >> "$GITHUB_STEP_SUMMARY" + cat result.txt >> "$GITHUB_STEP_SUMMARY" + echo "\`\`\`" >> "$GITHUB_STEP_SUMMARY" + cat <> "$GITHUB_STEP_SUMMARY" +
+ The table shows the median and 90% confidence interval (CI) summaries for each benchmark comparing the HEAD and the BASE of the pull request, and an A/B comparison under "vs base". The last column shows the statistical p-value with ten runs (n=10). + The last row has the Geometric Mean (geomean) for the given rows in the table. + Refer to [benchstat's documentation](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) for more help. + EOL + - name: Validate results under acceptable limit + run: | + export MAX_ACCEPTABLE_DIFFERENCE=5 + while IFS= read -r line; do + # Get fourth value, which is the comparison with the base. + value="$(echo "$line" | awk '{print $4}')" + if [[ "$value" = +* ]] || [[ "$value" = -* ]]; then + if (( $(echo "${value//[^0-9.]/}"'>'"$MAX_ACCEPTABLE_DIFFERENCE" | bc -l) )); then + echo "::error::$value is above the maximum acceptable difference ($MAX_ACCEPTABLE_DIFFERENCE)" + exit 1 + fi + fi + done < <(grep geomean result.txt) diff --git a/Makefile b/Makefile index b0d019802..2e0c09fa8 100644 --- a/Makefile +++ b/Makefile @@ -94,3 +94,14 @@ test-failpoint: test-robustness: gofail-enable build sudo env PATH=$$PATH go test -v ${TESTFLAGS} ./tests/dmflakey -test.root sudo env PATH=$(PWD)/bin:$$PATH go test -v ${TESTFLAGS} ${ROBUSTNESS_TESTFLAGS} ./tests/robustness -test.root + +.PHONY: test-benchmark-compare +# Runs benchmark tests on the current git ref and the given REF, and compares +# the two. 
+test-benchmark-compare: install-benchstat + @git fetch + ./scripts/compare_benchmarks.sh $(REF) + +.PHONY: install-benchstat +install-benchstat: + go install golang.org/x/perf/cmd/benchstat@latest diff --git a/scripts/compare_benchmarks.sh b/scripts/compare_benchmarks.sh new file mode 100755 index 000000000..2b7766940 --- /dev/null +++ b/scripts/compare_benchmarks.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +# https://github.com/kubernetes/kube-state-metrics/blob/main/tests/compare_benchmarks.sh (originally written by mxinden) + +# exit immediately when a command fails +set -e +# only exit with zero if all commands of the pipeline exit successfully +set -o pipefail +# error on unset variables +set -u + +[[ "$#" -eq 1 ]] || echo "One argument required, $# provided." + +REF_CURRENT="$(git rev-parse --abbrev-ref HEAD)" +BASE_TO_COMPARE=$1 + +RESULT_CURRENT="$(mktemp)-${REF_CURRENT}" +RESULT_TO_COMPARE="$(mktemp)-${BASE_TO_COMPARE}" + +BENCH_COUNT=${BENCH_COUNT:-10} +BENCHSTAT_CONFIDENCE_LEVEL=${BENCHSTAT_CONFIDENCE_LEVEL:-0.9} +BENCHSTAT_FORMAT=${BENCHSTAT_FORMAT:-"text"} +BENCH_PARAMETERS=${BENCH_PARAMETERS:-"-count 2000000 -batch-size 10000"} + +if [[ "${BENCHSTAT_FORMAT}" == "csv" ]] && [[ -z "${BENCHSTAT_OUTPUT_FILE}" ]]; then + echo "BENCHSTAT_FORMAT is set to csv, but BENCHSTAT_OUTPUT_FILE is not set." 
+ exit 1 +fi + +function bench() { + local output_file + output_file="$1" + make build + + for _ in $(seq "$BENCH_COUNT"); do + echo ./bin/bbolt bench -gobench-output -profile-mode n ${BENCH_PARAMETERS} + # shellcheck disable=SC2086 + ./bin/bbolt bench -gobench-output -profile-mode n ${BENCH_PARAMETERS} >> "${output_file}" + done +} + +function main() { + echo "### Benchmarking PR ${REF_CURRENT}" + bench "${RESULT_CURRENT}" + echo "" + echo "### Done benchmarking ${REF_CURRENT}" + + echo "### Benchmarking base ${BASE_TO_COMPARE}" + git checkout "${BASE_TO_COMPARE}" + bench "${RESULT_TO_COMPARE}" + echo "" + echo "### Done benchmarking ${BASE_TO_COMPARE}" + + git checkout - + + echo "" + echo "### Result" + echo "BASE=${BASE_TO_COMPARE} HEAD=${REF_CURRENT}" + + if [[ "${BENCHSTAT_FORMAT}" == "csv" ]]; then + benchstat -format=csv -confidence="${BENCHSTAT_CONFIDENCE_LEVEL}" BASE="${RESULT_TO_COMPARE}" HEAD="${RESULT_CURRENT}" 2>/dev/null 1>"${BENCHSTAT_OUTPUT_FILE}" + else + if [[ -z "${BENCHSTAT_OUTPUT_FILE}" ]]; then + benchstat -confidence="${BENCHSTAT_CONFIDENCE_LEVEL}" BASE="${RESULT_TO_COMPARE}" HEAD="${RESULT_CURRENT}" + else + benchstat -confidence="${BENCHSTAT_CONFIDENCE_LEVEL}" BASE="${RESULT_TO_COMPARE}" HEAD="${RESULT_CURRENT}" 1>"${BENCHSTAT_OUTPUT_FILE}" + fi + fi +} + +main From 263e75d0594f5397905c21e17cd34b68d68441d5 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Mon, 1 Jul 2024 14:38:38 +0100 Subject: [PATCH 269/439] move method freePages into freelist.go The motivation is to get all freelist related logic included in freelist.go. We are going to introduce freelist interface in the next step. 
Signed-off-by: Benjamin Wang --- db.go | 35 +++++++---------------------------- freelist.go | 43 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+), 28 deletions(-) diff --git a/db.go b/db.go index c6b0e7e7e..236698212 100644 --- a/db.go +++ b/db.go @@ -6,7 +6,6 @@ import ( "io" "os" "runtime" - "sort" "sync" "time" "unsafe" @@ -797,6 +796,9 @@ func (db *DB) beginTx() (*Tx, error) { // Keep track of transaction until it closes. db.txs = append(db.txs, t) n := len(db.txs) + if db.freelist != nil { + db.freelist.addReadonlyTXID(t.meta.Txid()) + } // Unlock the meta pages. db.metalock.Unlock() @@ -841,36 +843,10 @@ func (db *DB) beginRWTx() (*Tx, error) { t := &Tx{writable: true} t.init(db) db.rwtx = t - db.freePages() + db.freelist.freePages() return t, nil } -// freePages releases any pages associated with closed read-only transactions. -func (db *DB) freePages() { - // Free all pending pages prior to earliest open transaction. - sort.Sort(txsById(db.txs)) - minid := common.Txid(0xFFFFFFFFFFFFFFFF) - if len(db.txs) > 0 { - minid = db.txs[0].meta.Txid() - } - if minid > 0 { - db.freelist.release(minid - 1) - } - // Release unused txid extents. - for _, t := range db.txs { - db.freelist.releaseRange(minid, t.meta.Txid()-1) - minid = t.meta.Txid() + 1 - } - db.freelist.releaseRange(minid, common.Txid(0xFFFFFFFFFFFFFFFF)) - // Any page both allocated and freed in an extent is safe to release. -} - -type txsById []*Tx - -func (t txsById) Len() int { return len(t) } -func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t txsById) Less(i, j int) bool { return t[i].meta.Txid() < t[j].meta.Txid() } - // removeTx removes a transaction from the database. func (db *DB) removeTx(tx *Tx) { // Release the read lock on the mmap. @@ -890,6 +866,9 @@ func (db *DB) removeTx(tx *Tx) { } } n := len(db.txs) + if db.freelist != nil { + db.freelist.removeReadonlyTXID(tx.meta.Txid()) + } // Unlock the meta pages. 
db.metalock.Unlock() diff --git a/freelist.go b/freelist.go index fdf8a366e..49b6c200b 100644 --- a/freelist.go +++ b/freelist.go @@ -2,6 +2,7 @@ package bbolt import ( "fmt" + "math" "sort" "unsafe" @@ -24,6 +25,7 @@ type pidSet map[common.Pgid]struct{} type freelist struct { freelistType FreelistType // freelist type ids []common.Pgid // all free and available free page ids. + readonlyTXIDs []common.Txid // all readonly transaction IDs. allocs map[common.Pgid]common.Txid // mapping of Txid that allocated a pgid. pending map[common.Txid]*txPending // mapping of soon-to-be free page ids by tx. cache map[common.Pgid]struct{} // fast lookup of all free and pending page ids. @@ -326,3 +328,44 @@ func (f *freelist) reindex() { } } } + +func (f *freelist) addReadonlyTXID(tid common.Txid) { + f.readonlyTXIDs = append(f.readonlyTXIDs, tid) +} + +func (f *freelist) removeReadonlyTXID(tid common.Txid) { + for i := range f.readonlyTXIDs { + if f.readonlyTXIDs[i] == tid { + last := len(f.readonlyTXIDs) - 1 + f.readonlyTXIDs[i] = f.readonlyTXIDs[last] + f.readonlyTXIDs = f.readonlyTXIDs[:last] + break + } + } +} + +type txIDx []common.Txid + +func (t txIDx) Len() int { return len(t) } +func (t txIDx) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t txIDx) Less(i, j int) bool { return t[i] < t[j] } + +// freePages releases any pages associated with closed read-only transactions. +func (f *freelist) freePages() { + // Free all pending pages prior to the earliest open transaction. + sort.Sort(txIDx(f.readonlyTXIDs)) + minid := common.Txid(math.MaxUint64) + if len(f.readonlyTXIDs) > 0 { + minid = f.readonlyTXIDs[0] + } + if minid > 0 { + f.release(minid - 1) + } + // Release unused txid extents. + for _, tid := range f.readonlyTXIDs { + f.releaseRange(minid, tid-1) + minid = tid + 1 + } + f.releaseRange(minid, common.Txid(math.MaxUint64)) + // Any page both allocated and freed in an extent is safe to release. 
+} From 9c4649ce627319940d818a9d17d0636d4a92ce26 Mon Sep 17 00:00:00 2001 From: D Tripp <38776199+thedtripp@users.noreply.github.com> Date: Thu, 4 Jul 2024 01:00:19 +0000 Subject: [PATCH 270/439] Bump Go version to 1.22.5: GO-2024-2963 fix Signed-off-by: D Tripp <38776199+thedtripp@users.noreply.github.com> --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index 2a0ba77cc..da9594fd6 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.22.4 +1.22.5 diff --git a/go.mod b/go.mod index 3a8735523..b65ddddd5 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.22 -toolchain go1.22.4 +toolchain go1.22.5 require ( github.com/spf13/cobra v1.8.1 From 12835a3580b9248467dc815a5f776e63fb54092f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Jul 2024 15:00:07 +0000 Subject: [PATCH 271/439] build(deps): Bump golang.org/x/sys from 0.21.0 to 0.22.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.21.0 to 0.22.0. - [Commits](https://github.com/golang/sys/compare/v0.21.0...v0.22.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b65ddddd5..410bd1469 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.9.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.7.0 - golang.org/x/sys v0.21.0 + golang.org/x/sys v0.22.0 ) require ( diff --git a/go.sum b/go.sum index 8f4f37091..a636cca70 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 62e81c036f05bd3c71f7900fd663ee656ee68a92 Mon Sep 17 00:00:00 2001 From: Thomas Jungblut Date: Tue, 2 Jul 2024 12:23:57 +0200 Subject: [PATCH 272/439] introduce a freelist interface This introduces an interface for the freelist, splits it into two concrete implementations. 
fixes etcd-io#773 Signed-off-by: Thomas Jungblut --- allocate_test.go | 44 +- bucket.go | 2 +- bucket_test.go | 3 + cmd/bbolt/command_version.go | 1 + db.go | 24 +- freelist.go | 371 -------------- freelist_test.go | 485 ------------------ .../freelist/array.go | 37 +- internal/freelist/array_test.go | 52 ++ internal/freelist/freelist.go | 82 +++ internal/freelist/freelist_test.go | 282 ++++++++++ .../freelist/hashmap.go | 191 +++---- internal/freelist/hashmap_test.go | 155 ++++++ internal/freelist/shared.go | 321 ++++++++++++ node.go | 4 +- tx.go | 28 +- tx_check.go | 4 +- 17 files changed, 1078 insertions(+), 1008 deletions(-) delete mode 100644 freelist.go delete mode 100644 freelist_test.go rename freelist_array.go => internal/freelist/array.go (74%) create mode 100644 internal/freelist/array_test.go create mode 100644 internal/freelist/freelist.go create mode 100644 internal/freelist/freelist_test.go rename freelist_hmap.go => internal/freelist/hashmap.go (79%) create mode 100644 internal/freelist/hashmap_test.go create mode 100644 internal/freelist/shared.go diff --git a/allocate_test.go b/allocate_test.go index 9f08be1cf..d8dedfb7b 100644 --- a/allocate_test.go +++ b/allocate_test.go @@ -4,32 +4,36 @@ import ( "testing" "go.etcd.io/bbolt/internal/common" + "go.etcd.io/bbolt/internal/freelist" ) func TestTx_allocatePageStats(t *testing.T) { - f := newTestFreelist() - ids := []common.Pgid{2, 3} - f.readIDs(ids) + for n, f := range map[string]freelist.Interface{"hashmap": freelist.NewHashMapFreelist(), "array": freelist.NewArrayFreelist()} { + t.Run(n, func(t *testing.T) { + ids := []common.Pgid{2, 3} + f.Init(ids) - tx := &Tx{ - db: &DB{ - freelist: f, - pageSize: common.DefaultPageSize, - }, - meta: &common.Meta{}, - pages: make(map[common.Pgid]*common.Page), - } + tx := &Tx{ + db: &DB{ + freelist: f, + pageSize: common.DefaultPageSize, + }, + meta: &common.Meta{}, + pages: make(map[common.Pgid]*common.Page), + } - txStats := tx.Stats() - prePageCnt := 
txStats.GetPageCount() - allocateCnt := f.free_count() + txStats := tx.Stats() + prePageCnt := txStats.GetPageCount() + allocateCnt := f.FreeCount() - if _, err := tx.allocate(allocateCnt); err != nil { - t.Fatal(err) - } + if _, err := tx.allocate(allocateCnt); err != nil { + t.Fatal(err) + } - txStats = tx.Stats() - if txStats.GetPageCount() != prePageCnt+int64(allocateCnt) { - t.Errorf("Allocated %d but got %d page in stats", allocateCnt, txStats.GetPageCount()) + txStats = tx.Stats() + if txStats.GetPageCount() != prePageCnt+int64(allocateCnt) { + t.Errorf("Allocated %d but got %d page in stats", allocateCnt, txStats.GetPageCount()) + } + }) } } diff --git a/bucket.go b/bucket.go index 785ad9bd5..6371ace97 100644 --- a/bucket.go +++ b/bucket.go @@ -903,7 +903,7 @@ func (b *Bucket) free() { var tx = b.tx b.forEachPageNode(func(p *common.Page, n *node, _ int) { if p != nil { - tx.db.freelist.free(tx.meta.Txid(), p) + tx.db.freelist.Free(tx.meta.Txid(), p) } else { n.free() } diff --git a/bucket_test.go b/bucket_test.go index 3255e7b89..493d133a7 100644 --- a/bucket_test.go +++ b/bucket_test.go @@ -430,6 +430,9 @@ func TestBucket_Delete_FreelistOverflow(t *testing.T) { if reopenFreePages := db.Stats().FreePageN; freePages != reopenFreePages { t.Fatalf("expected %d free pages, got %+v", freePages, db.Stats()) } + if reopenPendingPages := db.Stats().PendingPageN; reopenPendingPages != 0 { + t.Fatalf("expected no pending pages, got %+v", db.Stats()) + } } // Ensure that deleting of non-existing key is a no-op. 
diff --git a/cmd/bbolt/command_version.go b/cmd/bbolt/command_version.go index 73019c798..39d756bd9 100644 --- a/cmd/bbolt/command_version.go +++ b/cmd/bbolt/command_version.go @@ -5,6 +5,7 @@ import ( "runtime" "github.com/spf13/cobra" + "go.etcd.io/bbolt/version" ) diff --git a/db.go b/db.go index 236698212..349f187ae 100644 --- a/db.go +++ b/db.go @@ -12,6 +12,7 @@ import ( berrors "go.etcd.io/bbolt/errors" "go.etcd.io/bbolt/internal/common" + fl "go.etcd.io/bbolt/internal/freelist" ) // The time elapsed between consecutive file locking attempts. @@ -133,7 +134,7 @@ type DB struct { rwtx *Tx txs []*Tx - freelist *freelist + freelist fl.Interface freelistLoad sync.Once pagePool sync.Pool @@ -418,12 +419,12 @@ func (db *DB) loadFreelist() { db.freelist = newFreelist(db.FreelistType) if !db.hasSyncedFreelist() { // Reconstruct free list by scanning the DB. - db.freelist.readIDs(db.freepages()) + db.freelist.Init(db.freepages()) } else { // Read free list from freelist page. - db.freelist.read(db.page(db.meta().Freelist())) + db.freelist.Read(db.page(db.meta().Freelist())) } - db.stats.FreePageN = db.freelist.free_count() + db.stats.FreePageN = db.freelist.FreeCount() }) } @@ -797,7 +798,7 @@ func (db *DB) beginTx() (*Tx, error) { db.txs = append(db.txs, t) n := len(db.txs) if db.freelist != nil { - db.freelist.addReadonlyTXID(t.meta.Txid()) + db.freelist.AddReadonlyTXID(t.meta.Txid()) } // Unlock the meta pages. @@ -843,7 +844,7 @@ func (db *DB) beginRWTx() (*Tx, error) { t := &Tx{writable: true} t.init(db) db.rwtx = t - db.freelist.freePages() + db.freelist.ReleasePendingPages() return t, nil } @@ -867,7 +868,7 @@ func (db *DB) removeTx(tx *Tx) { } n := len(db.txs) if db.freelist != nil { - db.freelist.removeReadonlyTXID(tx.meta.Txid()) + db.freelist.RemoveReadonlyTXID(tx.meta.Txid()) } // Unlock the meta pages. 
@@ -1155,7 +1156,7 @@ func (db *DB) allocate(txid common.Txid, count int) (*common.Page, error) { p.SetOverflow(uint32(count - 1)) // Use pages from the freelist if they are available. - p.SetId(db.freelist.allocate(txid, count)) + p.SetId(db.freelist.Allocate(txid, count)) if p.Id() != 0 { return p, nil } @@ -1261,6 +1262,13 @@ func (db *DB) freepages() []common.Pgid { return fids } +func newFreelist(freelistType FreelistType) fl.Interface { + if freelistType == FreelistMapType { + return fl.NewHashMapFreelist() + } + return fl.NewArrayFreelist() +} + // Options represents the options that can be set when opening a database. type Options struct { // Timeout is the amount of time to wait to obtain a file lock. diff --git a/freelist.go b/freelist.go deleted file mode 100644 index 49b6c200b..000000000 --- a/freelist.go +++ /dev/null @@ -1,371 +0,0 @@ -package bbolt - -import ( - "fmt" - "math" - "sort" - "unsafe" - - "go.etcd.io/bbolt/internal/common" -) - -// txPending holds a list of pgids and corresponding allocation txns -// that are pending to be freed. -type txPending struct { - ids []common.Pgid - alloctx []common.Txid // txids allocating the ids - lastReleaseBegin common.Txid // beginning txid of last matching releaseRange -} - -// pidSet holds the set of starting pgids which have the same span size -type pidSet map[common.Pgid]struct{} - -// freelist represents a list of all pages that are available for allocation. -// It also tracks pages that have been freed but are still in use by open transactions. -type freelist struct { - freelistType FreelistType // freelist type - ids []common.Pgid // all free and available free page ids. - readonlyTXIDs []common.Txid // all readonly transaction IDs. - allocs map[common.Pgid]common.Txid // mapping of Txid that allocated a pgid. - pending map[common.Txid]*txPending // mapping of soon-to-be free page ids by tx. - cache map[common.Pgid]struct{} // fast lookup of all free and pending page ids. 
- freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size - forwardMap map[common.Pgid]uint64 // key is start pgid, value is its span size - backwardMap map[common.Pgid]uint64 // key is end pgid, value is its span size - freePagesCount uint64 // count of free pages(hashmap version) - allocate func(txid common.Txid, n int) common.Pgid // the freelist allocate func - free_count func() int // the function which gives you free page number - mergeSpans func(ids common.Pgids) // the mergeSpan func - getFreePageIDs func() []common.Pgid // get free pgids func - readIDs func(pgids []common.Pgid) // readIDs func reads list of pages and init the freelist -} - -// newFreelist returns an empty, initialized freelist. -func newFreelist(freelistType FreelistType) *freelist { - f := &freelist{ - freelistType: freelistType, - allocs: make(map[common.Pgid]common.Txid), - pending: make(map[common.Txid]*txPending), - cache: make(map[common.Pgid]struct{}), - freemaps: make(map[uint64]pidSet), - forwardMap: make(map[common.Pgid]uint64), - backwardMap: make(map[common.Pgid]uint64), - } - - if freelistType == FreelistMapType { - f.allocate = f.hashmapAllocate - f.free_count = f.hashmapFreeCount - f.mergeSpans = f.hashmapMergeSpans - f.getFreePageIDs = f.hashmapGetFreePageIDs - f.readIDs = f.hashmapReadIDs - } else { - f.allocate = f.arrayAllocate - f.free_count = f.arrayFreeCount - f.mergeSpans = f.arrayMergeSpans - f.getFreePageIDs = f.arrayGetFreePageIDs - f.readIDs = f.arrayReadIDs - } - - return f -} - -// size returns the size of the page after serialization. -func (f *freelist) size() int { - n := f.count() - if n >= 0xFFFF { - // The first element will be used to store the count. See freelist.write. 
- n++ - } - return int(common.PageHeaderSize) + (int(unsafe.Sizeof(common.Pgid(0))) * n) -} - -// count returns count of pages on the freelist -func (f *freelist) count() int { - return f.free_count() + f.pending_count() -} - -// pending_count returns count of pending pages -func (f *freelist) pending_count() int { - var count int - for _, txp := range f.pending { - count += len(txp.ids) - } - return count -} - -// copyall copies a list of all free ids and all pending ids in one sorted list. -// f.count returns the minimum length required for dst. -func (f *freelist) copyall(dst []common.Pgid) { - m := make(common.Pgids, 0, f.pending_count()) - for _, txp := range f.pending { - m = append(m, txp.ids...) - } - sort.Sort(m) - common.Mergepgids(dst, f.getFreePageIDs(), m) -} - -// free releases a page and its overflow for a given transaction id. -// If the page is already free then a panic will occur. -func (f *freelist) free(txid common.Txid, p *common.Page) { - if p.Id() <= 1 { - panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.Id())) - } - - // Free page and all its overflow pages. - txp := f.pending[txid] - if txp == nil { - txp = &txPending{} - f.pending[txid] = txp - } - allocTxid, ok := f.allocs[p.Id()] - if ok { - delete(f.allocs, p.Id()) - } else if p.IsFreelistPage() { - // Freelist is always allocated by prior tx. - allocTxid = txid - 1 - } - - for id := p.Id(); id <= p.Id()+common.Pgid(p.Overflow()); id++ { - // Verify that page is not already free. - if _, ok := f.cache[id]; ok { - panic(fmt.Sprintf("page %d already freed", id)) - } - // Add to the freelist and cache. - txp.ids = append(txp.ids, id) - txp.alloctx = append(txp.alloctx, allocTxid) - f.cache[id] = struct{}{} - } -} - -// release moves all page ids for a transaction id (or older) to the freelist. -func (f *freelist) release(txid common.Txid) { - m := make(common.Pgids, 0) - for tid, txp := range f.pending { - if tid <= txid { - // Move transaction's pending pages to the available freelist. 
- // Don't remove from the cache since the page is still free. - m = append(m, txp.ids...) - delete(f.pending, tid) - } - } - f.mergeSpans(m) -} - -// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. -func (f *freelist) releaseRange(begin, end common.Txid) { - if begin > end { - return - } - var m common.Pgids - for tid, txp := range f.pending { - if tid < begin || tid > end { - continue - } - // Don't recompute freed pages if ranges haven't updated. - if txp.lastReleaseBegin == begin { - continue - } - for i := 0; i < len(txp.ids); i++ { - if atx := txp.alloctx[i]; atx < begin || atx > end { - continue - } - m = append(m, txp.ids[i]) - txp.ids[i] = txp.ids[len(txp.ids)-1] - txp.ids = txp.ids[:len(txp.ids)-1] - txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] - txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] - i-- - } - txp.lastReleaseBegin = begin - if len(txp.ids) == 0 { - delete(f.pending, tid) - } - } - f.mergeSpans(m) -} - -// rollback removes the pages from a given pending tx. -func (f *freelist) rollback(txid common.Txid) { - // Remove page ids from cache. - txp := f.pending[txid] - if txp == nil { - return - } - var m common.Pgids - for i, pgid := range txp.ids { - delete(f.cache, pgid) - tx := txp.alloctx[i] - if tx == 0 { - continue - } - if tx != txid { - // Pending free aborted; restore page back to alloc list. - f.allocs[pgid] = tx - } else { - // Freed page was allocated by this txn; OK to throw away. - m = append(m, pgid) - } - } - // Remove pages from pending list and mark as free if allocated by txid. - delete(f.pending, txid) - f.mergeSpans(m) -} - -// freed returns whether a given page is in the free list. -func (f *freelist) freed(pgId common.Pgid) bool { - _, ok := f.cache[pgId] - return ok -} - -// read initializes the freelist from a freelist page. 
-func (f *freelist) read(p *common.Page) { - if !p.IsFreelistPage() { - panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.Id(), p.Typ())) - } - - ids := p.FreelistPageIds() - - // Copy the list of page ids from the freelist. - if len(ids) == 0 { - f.ids = nil - } else { - // copy the ids, so we don't modify on the freelist page directly - idsCopy := make([]common.Pgid, len(ids)) - copy(idsCopy, ids) - // Make sure they're sorted. - sort.Sort(common.Pgids(idsCopy)) - - f.readIDs(idsCopy) - } -} - -// write writes the page ids onto a freelist page. All free and pending ids are -// saved to disk since in the event of a program crash, all pending ids will -// become free. -func (f *freelist) write(p *common.Page) error { - // Combine the old free pgids and pgids waiting on an open transaction. - - // Update the header flag. - p.SetFlags(common.FreelistPageFlag) - - // The page.count can only hold up to 64k elements so if we overflow that - // number then we handle it by putting the size in the first element. - l := f.count() - if l == 0 { - p.SetCount(uint16(l)) - } else if l < 0xFFFF { - p.SetCount(uint16(l)) - data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - ids := unsafe.Slice((*common.Pgid)(data), l) - f.copyall(ids) - } else { - p.SetCount(0xFFFF) - data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - ids := unsafe.Slice((*common.Pgid)(data), l+1) - ids[0] = common.Pgid(l) - f.copyall(ids[1:]) - } - - return nil -} - -// reload reads the freelist from a page and filters out pending items. -func (f *freelist) reload(p *common.Page) { - f.read(p) - - // Build a cache of only pending pages. - pcache := make(map[common.Pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. 
- var a []common.Pgid - for _, id := range f.getFreePageIDs() { - if !pcache[id] { - a = append(a, id) - } - } - - f.readIDs(a) -} - -// noSyncReload reads the freelist from Pgids and filters out pending items. -func (f *freelist) noSyncReload(Pgids []common.Pgid) { - // Build a cache of only pending pages. - pcache := make(map[common.Pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []common.Pgid - for _, id := range Pgids { - if !pcache[id] { - a = append(a, id) - } - } - - f.readIDs(a) -} - -// reindex rebuilds the free cache based on available and pending free lists. -func (f *freelist) reindex() { - ids := f.getFreePageIDs() - f.cache = make(map[common.Pgid]struct{}, len(ids)) - for _, id := range ids { - f.cache[id] = struct{}{} - } - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - f.cache[pendingID] = struct{}{} - } - } -} - -func (f *freelist) addReadonlyTXID(tid common.Txid) { - f.readonlyTXIDs = append(f.readonlyTXIDs, tid) -} - -func (f *freelist) removeReadonlyTXID(tid common.Txid) { - for i := range f.readonlyTXIDs { - if f.readonlyTXIDs[i] == tid { - last := len(f.readonlyTXIDs) - 1 - f.readonlyTXIDs[i] = f.readonlyTXIDs[last] - f.readonlyTXIDs = f.readonlyTXIDs[:last] - break - } - } -} - -type txIDx []common.Txid - -func (t txIDx) Len() int { return len(t) } -func (t txIDx) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t txIDx) Less(i, j int) bool { return t[i] < t[j] } - -// freePages releases any pages associated with closed read-only transactions. -func (f *freelist) freePages() { - // Free all pending pages prior to the earliest open transaction. 
- sort.Sort(txIDx(f.readonlyTXIDs)) - minid := common.Txid(math.MaxUint64) - if len(f.readonlyTXIDs) > 0 { - minid = f.readonlyTXIDs[0] - } - if minid > 0 { - f.release(minid - 1) - } - // Release unused txid extents. - for _, tid := range f.readonlyTXIDs { - f.releaseRange(minid, tid-1) - minid = tid + 1 - } - f.releaseRange(minid, common.Txid(math.MaxUint64)) - // Any page both allocated and freed in an extent is safe to release. -} diff --git a/freelist_test.go b/freelist_test.go deleted file mode 100644 index 5cf40bd1c..000000000 --- a/freelist_test.go +++ /dev/null @@ -1,485 +0,0 @@ -package bbolt - -import ( - "math/rand" - "os" - "reflect" - "sort" - "testing" - "unsafe" - - "go.etcd.io/bbolt/internal/common" -) - -// TestFreelistType is used as a env variable for test to indicate the backend type -const TestFreelistType = "TEST_FREELIST_TYPE" - -// Ensure that a page is added to a transaction's freelist. -func TestFreelist_free(t *testing.T) { - f := newTestFreelist() - f.free(100, common.NewPage(12, 0, 0, 0)) - if !reflect.DeepEqual([]common.Pgid{12}, f.pending[100].ids) { - t.Fatalf("exp=%v; got=%v", []common.Pgid{12}, f.pending[100].ids) - } -} - -// Ensure that a page and its overflow is added to a transaction's freelist. -func TestFreelist_free_overflow(t *testing.T) { - f := newTestFreelist() - f.free(100, common.NewPage(12, 0, 0, 3)) - if exp := []common.Pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100].ids) { - t.Fatalf("exp=%v; got=%v", exp, f.pending[100].ids) - } -} - -// Ensure that a transaction's free pages can be released. 
-func TestFreelist_release(t *testing.T) { - f := newTestFreelist() - f.free(100, common.NewPage(12, 0, 0, 1)) - f.free(100, common.NewPage(9, 0, 0, 0)) - f.free(102, common.NewPage(39, 0, 0, 0)) - f.release(100) - f.release(101) - if exp := []common.Pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.getFreePageIDs()) { - t.Fatalf("exp=%v; got=%v", exp, f.getFreePageIDs()) - } - - f.release(102) - if exp := []common.Pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.getFreePageIDs()) { - t.Fatalf("exp=%v; got=%v", exp, f.getFreePageIDs()) - } -} - -// Ensure that releaseRange handles boundary conditions correctly -func TestFreelist_releaseRange(t *testing.T) { - type testRange struct { - begin, end common.Txid - } - - type testPage struct { - id common.Pgid - n int - allocTxn common.Txid - freeTxn common.Txid - } - - var releaseRangeTests = []struct { - title string - pagesIn []testPage - releaseRanges []testRange - wantFree []common.Pgid - }{ - { - title: "Single pending in range", - pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, - releaseRanges: []testRange{{1, 300}}, - wantFree: []common.Pgid{3}, - }, - { - title: "Single pending with minimum end range", - pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, - releaseRanges: []testRange{{1, 200}}, - wantFree: []common.Pgid{3}, - }, - { - title: "Single pending outsize minimum end range", - pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, - releaseRanges: []testRange{{1, 199}}, - wantFree: nil, - }, - { - title: "Single pending with minimum begin range", - pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, - releaseRanges: []testRange{{100, 300}}, - wantFree: []common.Pgid{3}, - }, - { - title: "Single pending outside minimum begin range", - pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, - releaseRanges: []testRange{{101, 300}}, - wantFree: nil, - }, - { - title: "Single pending in minimum range", - pagesIn: []testPage{{id: 3, n: 1, allocTxn: 199, 
freeTxn: 200}}, - releaseRanges: []testRange{{199, 200}}, - wantFree: []common.Pgid{3}, - }, - { - title: "Single pending and read transaction at 199", - pagesIn: []testPage{{id: 3, n: 1, allocTxn: 199, freeTxn: 200}}, - releaseRanges: []testRange{{100, 198}, {200, 300}}, - wantFree: nil, - }, - { - title: "Adjacent pending and read transactions at 199, 200", - pagesIn: []testPage{ - {id: 3, n: 1, allocTxn: 199, freeTxn: 200}, - {id: 4, n: 1, allocTxn: 200, freeTxn: 201}, - }, - releaseRanges: []testRange{ - {100, 198}, - {200, 199}, // Simulate the ranges db.freePages might produce. - {201, 300}, - }, - wantFree: nil, - }, - { - title: "Out of order ranges", - pagesIn: []testPage{ - {id: 3, n: 1, allocTxn: 199, freeTxn: 200}, - {id: 4, n: 1, allocTxn: 200, freeTxn: 201}, - }, - releaseRanges: []testRange{ - {201, 199}, - {201, 200}, - {200, 200}, - }, - wantFree: nil, - }, - { - title: "Multiple pending, read transaction at 150", - pagesIn: []testPage{ - {id: 3, n: 1, allocTxn: 100, freeTxn: 200}, - {id: 4, n: 1, allocTxn: 100, freeTxn: 125}, - {id: 5, n: 1, allocTxn: 125, freeTxn: 150}, - {id: 6, n: 1, allocTxn: 125, freeTxn: 175}, - {id: 7, n: 2, allocTxn: 150, freeTxn: 175}, - {id: 9, n: 2, allocTxn: 175, freeTxn: 200}, - }, - releaseRanges: []testRange{{50, 149}, {151, 300}}, - wantFree: []common.Pgid{4, 9, 10}, - }, - } - - for _, c := range releaseRangeTests { - f := newTestFreelist() - var ids []common.Pgid - for _, p := range c.pagesIn { - for i := uint64(0); i < uint64(p.n); i++ { - ids = append(ids, common.Pgid(uint64(p.id)+i)) - } - } - f.readIDs(ids) - for _, p := range c.pagesIn { - f.allocate(p.allocTxn, p.n) - } - - for _, p := range c.pagesIn { - f.free(p.freeTxn, common.NewPage(p.id, 0, 0, uint32(p.n-1))) - } - - for _, r := range c.releaseRanges { - f.releaseRange(r.begin, r.end) - } - - if exp := c.wantFree; !reflect.DeepEqual(exp, f.getFreePageIDs()) { - t.Errorf("exp=%v; got=%v for %s", exp, f.getFreePageIDs(), c.title) - } - } -} - -func 
TestFreelistHashmap_allocate(t *testing.T) { - f := newTestFreelist() - if f.freelistType != FreelistMapType { - t.Skip() - } - - ids := []common.Pgid{3, 4, 5, 6, 7, 9, 12, 13, 18} - f.readIDs(ids) - - f.allocate(1, 3) - if x := f.free_count(); x != 6 { - t.Fatalf("exp=6; got=%v", x) - } - - f.allocate(1, 2) - if x := f.free_count(); x != 4 { - t.Fatalf("exp=4; got=%v", x) - } - f.allocate(1, 1) - if x := f.free_count(); x != 3 { - t.Fatalf("exp=3; got=%v", x) - } - - f.allocate(1, 0) - if x := f.free_count(); x != 3 { - t.Fatalf("exp=3; got=%v", x) - } -} - -// Ensure that a freelist can find contiguous blocks of pages. -func TestFreelistArray_allocate(t *testing.T) { - f := newTestFreelist() - if f.freelistType != FreelistArrayType { - t.Skip() - } - ids := []common.Pgid{3, 4, 5, 6, 7, 9, 12, 13, 18} - f.readIDs(ids) - if id := int(f.allocate(1, 3)); id != 3 { - t.Fatalf("exp=3; got=%v", id) - } - if id := int(f.allocate(1, 1)); id != 6 { - t.Fatalf("exp=6; got=%v", id) - } - if id := int(f.allocate(1, 3)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if id := int(f.allocate(1, 2)); id != 12 { - t.Fatalf("exp=12; got=%v", id) - } - if id := int(f.allocate(1, 1)); id != 7 { - t.Fatalf("exp=7; got=%v", id) - } - if id := int(f.allocate(1, 0)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if id := int(f.allocate(1, 0)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if exp := []common.Pgid{9, 18}; !reflect.DeepEqual(exp, f.getFreePageIDs()) { - t.Fatalf("exp=%v; got=%v", exp, f.getFreePageIDs()) - } - - if id := int(f.allocate(1, 1)); id != 9 { - t.Fatalf("exp=9; got=%v", id) - } - if id := int(f.allocate(1, 1)); id != 18 { - t.Fatalf("exp=18; got=%v", id) - } - if id := int(f.allocate(1, 1)); id != 0 { - t.Fatalf("exp=0; got=%v", id) - } - if exp := []common.Pgid{}; !reflect.DeepEqual(exp, f.getFreePageIDs()) { - t.Fatalf("exp=%v; got=%v", exp, f.getFreePageIDs()) - } -} - -// Ensure that a freelist can deserialize from a freelist page. 
-func TestFreelist_read(t *testing.T) { - // Create a page. - var buf [4096]byte - page := (*common.Page)(unsafe.Pointer(&buf[0])) - page.SetFlags(common.FreelistPageFlag) - page.SetCount(2) - - // Insert 2 page ids. - ids := (*[3]common.Pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(page)) + unsafe.Sizeof(*page))) - ids[0] = 23 - ids[1] = 50 - - // Deserialize page into a freelist. - f := newTestFreelist() - f.read(page) - - // Ensure that there are two page ids in the freelist. - if exp := []common.Pgid{23, 50}; !reflect.DeepEqual(exp, f.getFreePageIDs()) { - t.Fatalf("exp=%v; got=%v", exp, f.getFreePageIDs()) - } -} - -// Ensure that a freelist can serialize into a freelist page. -func TestFreelist_write(t *testing.T) { - // Create a freelist and write it to a page. - var buf [4096]byte - f := newTestFreelist() - - f.readIDs([]common.Pgid{12, 39}) - f.pending[100] = &txPending{ids: []common.Pgid{28, 11}} - f.pending[101] = &txPending{ids: []common.Pgid{3}} - p := (*common.Page)(unsafe.Pointer(&buf[0])) - if err := f.write(p); err != nil { - t.Fatal(err) - } - - // Read the page back out. - f2 := newTestFreelist() - f2.read(p) - - // Ensure that the freelist is correct. - // All pages should be present and in reverse order. 
- if exp := []common.Pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.getFreePageIDs()) { - t.Fatalf("exp=%v; got=%v", exp, f2.getFreePageIDs()) - } -} - -func Benchmark_FreelistRelease10K(b *testing.B) { benchmark_FreelistRelease(b, 10000) } -func Benchmark_FreelistRelease100K(b *testing.B) { benchmark_FreelistRelease(b, 100000) } -func Benchmark_FreelistRelease1000K(b *testing.B) { benchmark_FreelistRelease(b, 1000000) } -func Benchmark_FreelistRelease10000K(b *testing.B) { benchmark_FreelistRelease(b, 10000000) } - -func benchmark_FreelistRelease(b *testing.B, size int) { - ids := randomPgids(size) - pending := randomPgids(len(ids) / 400) - b.ResetTimer() - for i := 0; i < b.N; i++ { - txp := &txPending{ids: pending} - f := newTestFreelist() - f.pending = map[common.Txid]*txPending{1: txp} - f.readIDs(ids) - f.release(1) - } -} - -func randomPgids(n int) []common.Pgid { - pgids := make(common.Pgids, n) - for i := range pgids { - pgids[i] = common.Pgid(rand.Int63()) - } - sort.Sort(pgids) - return pgids -} - -func Test_freelist_ReadIDs_and_getFreePageIDs(t *testing.T) { - f := newTestFreelist() - exp := []common.Pgid{3, 4, 5, 6, 7, 9, 12, 13, 18} - - f.readIDs(exp) - - if got := f.getFreePageIDs(); !reflect.DeepEqual(exp, got) { - t.Fatalf("exp=%v; got=%v", exp, got) - } - - f2 := newTestFreelist() - var exp2 []common.Pgid - f2.readIDs(exp2) - - if got2 := f2.getFreePageIDs(); !reflect.DeepEqual(got2, exp2) { - t.Fatalf("exp2=%#v; got2=%#v", exp2, got2) - } - -} - -func Test_freelist_mergeWithExist(t *testing.T) { - bm1 := pidSet{1: struct{}{}} - - bm2 := pidSet{5: struct{}{}} - tests := []struct { - name string - ids []common.Pgid - pgid common.Pgid - want []common.Pgid - wantForwardmap map[common.Pgid]uint64 - wantBackwardmap map[common.Pgid]uint64 - wantfreemap map[uint64]pidSet - }{ - { - name: "test1", - ids: []common.Pgid{1, 2, 4, 5, 6}, - pgid: 3, - want: []common.Pgid{1, 2, 3, 4, 5, 6}, - wantForwardmap: map[common.Pgid]uint64{1: 6}, - wantBackwardmap: 
map[common.Pgid]uint64{6: 6}, - wantfreemap: map[uint64]pidSet{6: bm1}, - }, - { - name: "test2", - ids: []common.Pgid{1, 2, 5, 6}, - pgid: 3, - want: []common.Pgid{1, 2, 3, 5, 6}, - wantForwardmap: map[common.Pgid]uint64{1: 3, 5: 2}, - wantBackwardmap: map[common.Pgid]uint64{6: 2, 3: 3}, - wantfreemap: map[uint64]pidSet{3: bm1, 2: bm2}, - }, - { - name: "test3", - ids: []common.Pgid{1, 2}, - pgid: 3, - want: []common.Pgid{1, 2, 3}, - wantForwardmap: map[common.Pgid]uint64{1: 3}, - wantBackwardmap: map[common.Pgid]uint64{3: 3}, - wantfreemap: map[uint64]pidSet{3: bm1}, - }, - { - name: "test4", - ids: []common.Pgid{2, 3}, - pgid: 1, - want: []common.Pgid{1, 2, 3}, - wantForwardmap: map[common.Pgid]uint64{1: 3}, - wantBackwardmap: map[common.Pgid]uint64{3: 3}, - wantfreemap: map[uint64]pidSet{3: bm1}, - }, - } - for _, tt := range tests { - f := newTestFreelist() - if f.freelistType == FreelistArrayType { - t.Skip() - } - f.readIDs(tt.ids) - - f.mergeWithExistingSpan(tt.pgid) - - if got := f.getFreePageIDs(); !reflect.DeepEqual(tt.want, got) { - t.Fatalf("name %s; exp=%v; got=%v", tt.name, tt.want, got) - } - if got := f.forwardMap; !reflect.DeepEqual(tt.wantForwardmap, got) { - t.Fatalf("name %s; exp=%v; got=%v", tt.name, tt.wantForwardmap, got) - } - if got := f.backwardMap; !reflect.DeepEqual(tt.wantBackwardmap, got) { - t.Fatalf("name %s; exp=%v; got=%v", tt.name, tt.wantBackwardmap, got) - } - if got := f.freemaps; !reflect.DeepEqual(tt.wantfreemap, got) { - t.Fatalf("name %s; exp=%v; got=%v", tt.name, tt.wantfreemap, got) - } - } -} - -// newTestFreelist get the freelist type from env and initial the freelist -func newTestFreelist() *freelist { - freelistType := FreelistArrayType - if env := os.Getenv(TestFreelistType); env == string(FreelistMapType) { - freelistType = FreelistMapType - } - - return newFreelist(freelistType) -} - -func Test_freelist_hashmapGetFreePageIDs(t *testing.T) { - f := newTestFreelist() - if f.freelistType == FreelistArrayType { - 
t.Skip() - } - - N := int32(100000) - fm := make(map[common.Pgid]uint64) - i := int32(0) - val := int32(0) - for i = 0; i < N; { - val = rand.Int31n(1000) - fm[common.Pgid(i)] = uint64(val) - i += val - f.freePagesCount += uint64(val) - } - - f.forwardMap = fm - res := f.hashmapGetFreePageIDs() - - if !sort.SliceIsSorted(res, func(i, j int) bool { return res[i] < res[j] }) { - t.Fatalf("pgids not sorted") - } -} - -func Benchmark_freelist_hashmapGetFreePageIDs(b *testing.B) { - f := newTestFreelist() - if f.freelistType == FreelistArrayType { - b.Skip() - } - - N := int32(100000) - fm := make(map[common.Pgid]uint64) - i := int32(0) - val := int32(0) - for i = 0; i < N; { - val = rand.Int31n(1000) - fm[common.Pgid(i)] = uint64(val) - i += val - } - - f.forwardMap = fm - - b.ReportAllocs() - b.ResetTimer() - for n := 0; n < b.N; n++ { - f.hashmapGetFreePageIDs() - } -} diff --git a/freelist_array.go b/internal/freelist/array.go similarity index 74% rename from freelist_array.go rename to internal/freelist/array.go index 2f0a7e4aa..93ccc5edc 100644 --- a/freelist_array.go +++ b/internal/freelist/array.go @@ -1,4 +1,4 @@ -package bbolt +package freelist import ( "fmt" @@ -7,14 +7,18 @@ import ( "go.etcd.io/bbolt/internal/common" ) -// arrayFreeCount returns count of free pages(array version) -func (f *freelist) arrayFreeCount() int { - return len(f.ids) +type array struct { + *shared + + ids []common.Pgid // all free and available free page ids. +} + +func (f *array) Init(ids common.Pgids) { + f.ids = ids + f.reindex() } -// arrayAllocate returns the starting page id of a contiguous list of pages of a given size. -// If a contiguous block cannot be found then 0 is returned. 
-func (f *freelist) arrayAllocate(txid common.Txid, n int) common.Pgid { +func (f *array) Allocate(txid common.Txid, n int) common.Pgid { if len(f.ids) == 0 { return 0 } @@ -56,18 +60,15 @@ func (f *freelist) arrayAllocate(txid common.Txid, n int) common.Pgid { return 0 } -// arrayReadIDs initializes the freelist from a given list of ids. -func (f *freelist) arrayReadIDs(ids []common.Pgid) { - f.ids = ids - f.reindex() +func (f *array) FreeCount() int { + return len(f.ids) } -func (f *freelist) arrayGetFreePageIDs() []common.Pgid { +func (f *array) freePageIds() common.Pgids { return f.ids } -// arrayMergeSpans try to merge list of pages(represented by pgids) with existing spans but using array -func (f *freelist) arrayMergeSpans(ids common.Pgids) { +func (f *array) mergeSpans(ids common.Pgids) { sort.Sort(ids) common.Verify(func() { idsIdx := make(map[common.Pgid]struct{}) @@ -96,3 +97,11 @@ func (f *freelist) arrayMergeSpans(ids common.Pgids) { }) f.ids = common.Pgids(f.ids).Merge(ids) } + +func NewArrayFreelist() Interface { + a := &array{ + shared: newShared(), + } + a.Interface = a + return a +} diff --git a/internal/freelist/array_test.go b/internal/freelist/array_test.go new file mode 100644 index 000000000..31b0702dc --- /dev/null +++ b/internal/freelist/array_test.go @@ -0,0 +1,52 @@ +package freelist + +import ( + "reflect" + "testing" + + "go.etcd.io/bbolt/internal/common" +) + +// Ensure that a freelist can find contiguous blocks of pages. 
+func TestFreelistArray_allocate(t *testing.T) { + f := NewArrayFreelist() + ids := []common.Pgid{3, 4, 5, 6, 7, 9, 12, 13, 18} + f.Init(ids) + if id := int(f.Allocate(1, 3)); id != 3 { + t.Fatalf("exp=3; got=%v", id) + } + if id := int(f.Allocate(1, 1)); id != 6 { + t.Fatalf("exp=6; got=%v", id) + } + if id := int(f.Allocate(1, 3)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if id := int(f.Allocate(1, 2)); id != 12 { + t.Fatalf("exp=12; got=%v", id) + } + if id := int(f.Allocate(1, 1)); id != 7 { + t.Fatalf("exp=7; got=%v", id) + } + if id := int(f.Allocate(1, 0)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if id := int(f.Allocate(1, 0)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if exp := common.Pgids([]common.Pgid{9, 18}); !reflect.DeepEqual(exp, f.freePageIds()) { + t.Fatalf("exp=%v; got=%v", exp, f.freePageIds()) + } + + if id := int(f.Allocate(1, 1)); id != 9 { + t.Fatalf("exp=9; got=%v", id) + } + if id := int(f.Allocate(1, 1)); id != 18 { + t.Fatalf("exp=18; got=%v", id) + } + if id := int(f.Allocate(1, 1)); id != 0 { + t.Fatalf("exp=0; got=%v", id) + } + if exp := common.Pgids([]common.Pgid{}); !reflect.DeepEqual(exp, f.freePageIds()) { + t.Fatalf("exp=%v; got=%v", exp, f.freePageIds()) + } +} diff --git a/internal/freelist/freelist.go b/internal/freelist/freelist.go new file mode 100644 index 000000000..3d77d8f94 --- /dev/null +++ b/internal/freelist/freelist.go @@ -0,0 +1,82 @@ +package freelist + +import ( + "go.etcd.io/bbolt/internal/common" +) + +type ReadWriter interface { + // Read calls Init with the page ids stored in the given page. + Read(page *common.Page) + + // Write writes the freelist into the given page. + Write(page *common.Page) + + // EstimatedWritePageSize returns the size of the freelist after serialization in Write. + // This should never underestimate the size. + EstimatedWritePageSize() int +} + +type Interface interface { + ReadWriter + + // Init initializes this freelist with the given list of pages. 
+ Init(ids common.Pgids) + + // Allocate tries to allocate the given number of contiguous pages + // from the free list pages. It returns the starting page ID if + // available; otherwise, it returns 0. + Allocate(txid common.Txid, numPages int) common.Pgid + + // Count returns the number of free and pending pages. + Count() int + + // FreeCount returns the number of free pages. + FreeCount() int + + // PendingCount returns the number of pending pages. + PendingCount() int + + // AddReadonlyTXID adds a given read-only transaction id for pending page tracking. + AddReadonlyTXID(txid common.Txid) + + // RemoveReadonlyTXID removes a given read-only transaction id for pending page tracking. + RemoveReadonlyTXID(txid common.Txid) + + // ReleasePendingPages releases any pages associated with closed read-only transactions. + ReleasePendingPages() + + // Free releases a page and its overflow for a given transaction id. + // If the page is already free then a panic will occur. + Free(txId common.Txid, p *common.Page) + + // Freed returns whether a given page is in the free list. + Freed(pgId common.Pgid) bool + + // Rollback removes the pages from a given pending tx. + Rollback(txId common.Txid) + + // Copyall copies a list of all free ids and all pending ids in one sorted list. + // f.count returns the minimum length required for dst. + Copyall(dst []common.Pgid) + + // Reload reads the freelist from a page and filters out pending items. + Reload(p *common.Page) + + // NoSyncReload reads the freelist from Pgids and filters out pending items. + NoSyncReload(pgIds common.Pgids) + + // freePageIds returns the IDs of all free pages. + freePageIds() common.Pgids + + // pendingPageIds returns all pending pages by transaction id. + pendingPageIds() map[common.Txid]*txPending + + // release moves all page ids for a transaction id (or older) to the freelist. + release(txId common.Txid) + + // releaseRange moves pending pages allocated within an extent [begin,end] to the free list. 
+ releaseRange(begin, end common.Txid) + + // mergeSpans is merging the given pages into the freelist + mergeSpans(ids common.Pgids) +} diff --git a/internal/freelist/freelist_test.go b/internal/freelist/freelist_test.go new file mode 100644 index 000000000..df7c7697e --- /dev/null +++ b/internal/freelist/freelist_test.go @@ -0,0 +1,282 @@ +package freelist + +import ( + "math/rand" + "os" + "reflect" + "sort" + "testing" + "unsafe" + + "go.etcd.io/bbolt/internal/common" +) + +// TestFreelistType is used as a env variable for test to indicate the backend type +const TestFreelistType = "TEST_FREELIST_TYPE" + +// Ensure that a page is added to a transaction's freelist. +func TestFreelist_free(t *testing.T) { + f := newTestFreelist() + f.Free(100, common.NewPage(12, 0, 0, 0)) + if !reflect.DeepEqual([]common.Pgid{12}, f.pendingPageIds()[100].ids) { + t.Fatalf("exp=%v; got=%v", []common.Pgid{12}, f.pendingPageIds()[100].ids) + } +} + +// Ensure that a page and its overflow is added to a transaction's freelist. +func TestFreelist_free_overflow(t *testing.T) { + f := newTestFreelist() + f.Free(100, common.NewPage(12, 0, 0, 3)) + if exp := []common.Pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pendingPageIds()[100].ids) { + t.Fatalf("exp=%v; got=%v", exp, f.pendingPageIds()[100].ids) + } +} + +// Ensure that a transaction's free pages can be released. 
+func TestFreelist_release(t *testing.T) { + f := newTestFreelist() + f.Free(100, common.NewPage(12, 0, 0, 1)) + f.Free(100, common.NewPage(9, 0, 0, 0)) + f.Free(102, common.NewPage(39, 0, 0, 0)) + f.release(100) + f.release(101) + if exp := common.Pgids([]common.Pgid{9, 12, 13}); !reflect.DeepEqual(exp, f.freePageIds()) { + t.Fatalf("exp=%v; got=%v", exp, f.freePageIds()) + } + + f.release(102) + if exp := common.Pgids([]common.Pgid{9, 12, 13, 39}); !reflect.DeepEqual(exp, f.freePageIds()) { + t.Fatalf("exp=%v; got=%v", exp, f.freePageIds()) + } +} + +// Ensure that releaseRange handles boundary conditions correctly +func TestFreelist_releaseRange(t *testing.T) { + type testRange struct { + begin, end common.Txid + } + + type testPage struct { + id common.Pgid + n int + allocTxn common.Txid + freeTxn common.Txid + } + + var releaseRangeTests = []struct { + title string + pagesIn []testPage + releaseRanges []testRange + wantFree []common.Pgid + }{ + { + title: "Single pending in range", + pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, + releaseRanges: []testRange{{1, 300}}, + wantFree: []common.Pgid{3}, + }, + { + title: "Single pending with minimum end range", + pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, + releaseRanges: []testRange{{1, 200}}, + wantFree: []common.Pgid{3}, + }, + { + title: "Single pending outsize minimum end range", + pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, + releaseRanges: []testRange{{1, 199}}, + wantFree: nil, + }, + { + title: "Single pending with minimum begin range", + pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, + releaseRanges: []testRange{{100, 300}}, + wantFree: []common.Pgid{3}, + }, + { + title: "Single pending outside minimum begin range", + pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, + releaseRanges: []testRange{{101, 300}}, + wantFree: nil, + }, + { + title: "Single pending in minimum range", + pagesIn: []testPage{{id: 3, n: 1, 
allocTxn: 199, freeTxn: 200}}, + releaseRanges: []testRange{{199, 200}}, + wantFree: []common.Pgid{3}, + }, + { + title: "Single pending and read transaction at 199", + pagesIn: []testPage{{id: 3, n: 1, allocTxn: 199, freeTxn: 200}}, + releaseRanges: []testRange{{100, 198}, {200, 300}}, + wantFree: nil, + }, + { + title: "Adjacent pending and read transactions at 199, 200", + pagesIn: []testPage{ + {id: 3, n: 1, allocTxn: 199, freeTxn: 200}, + {id: 4, n: 1, allocTxn: 200, freeTxn: 201}, + }, + releaseRanges: []testRange{ + {100, 198}, + {200, 199}, // Simulate the ranges db.freePages might produce. + {201, 300}, + }, + wantFree: nil, + }, + { + title: "Out of order ranges", + pagesIn: []testPage{ + {id: 3, n: 1, allocTxn: 199, freeTxn: 200}, + {id: 4, n: 1, allocTxn: 200, freeTxn: 201}, + }, + releaseRanges: []testRange{ + {201, 199}, + {201, 200}, + {200, 200}, + }, + wantFree: nil, + }, + { + title: "Multiple pending, read transaction at 150", + pagesIn: []testPage{ + {id: 3, n: 1, allocTxn: 100, freeTxn: 200}, + {id: 4, n: 1, allocTxn: 100, freeTxn: 125}, + {id: 5, n: 1, allocTxn: 125, freeTxn: 150}, + {id: 6, n: 1, allocTxn: 125, freeTxn: 175}, + {id: 7, n: 2, allocTxn: 150, freeTxn: 175}, + {id: 9, n: 2, allocTxn: 175, freeTxn: 200}, + }, + releaseRanges: []testRange{{50, 149}, {151, 300}}, + wantFree: []common.Pgid{4, 9, 10}, + }, + } + + for _, c := range releaseRangeTests { + f := newTestFreelist() + var ids []common.Pgid + for _, p := range c.pagesIn { + for i := uint64(0); i < uint64(p.n); i++ { + ids = append(ids, common.Pgid(uint64(p.id)+i)) + } + } + f.Init(ids) + for _, p := range c.pagesIn { + f.Allocate(p.allocTxn, p.n) + } + + for _, p := range c.pagesIn { + f.Free(p.freeTxn, common.NewPage(p.id, 0, 0, uint32(p.n-1))) + } + + for _, r := range c.releaseRanges { + f.releaseRange(r.begin, r.end) + } + + if exp := common.Pgids(c.wantFree); !reflect.DeepEqual(exp, f.freePageIds()) { + t.Errorf("exp=%v; got=%v for %s", exp, f.freePageIds(), c.title) + } 
+ } +} + +// Ensure that a freelist can deserialize from a freelist page. +func TestFreelist_read(t *testing.T) { + // Create a page. + var buf [4096]byte + page := (*common.Page)(unsafe.Pointer(&buf[0])) + page.SetFlags(common.FreelistPageFlag) + page.SetCount(2) + + // Insert 2 page ids. + ids := (*[3]common.Pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(page)) + unsafe.Sizeof(*page))) + ids[0] = 23 + ids[1] = 50 + + // Deserialize page into a freelist. + f := newTestFreelist() + f.Read(page) + + // Ensure that there are two page ids in the freelist. + if exp := common.Pgids([]common.Pgid{23, 50}); !reflect.DeepEqual(exp, f.freePageIds()) { + t.Fatalf("exp=%v; got=%v", exp, f.freePageIds()) + } +} + +// Ensure that a freelist can serialize into a freelist page. +func TestFreelist_write(t *testing.T) { + // Create a freelist and write it to a page. + var buf [4096]byte + f := newTestFreelist() + + f.Init([]common.Pgid{12, 39}) + f.pendingPageIds()[100] = &txPending{ids: []common.Pgid{28, 11}} + f.pendingPageIds()[101] = &txPending{ids: []common.Pgid{3}} + p := (*common.Page)(unsafe.Pointer(&buf[0])) + f.Write(p) + + // Read the page back out. + f2 := newTestFreelist() + f2.Read(p) + + // Ensure that the freelist is correct. + // All pages should be present and in reverse order. 
+ if exp := common.Pgids([]common.Pgid{3, 11, 12, 28, 39}); !reflect.DeepEqual(exp, f2.freePageIds()) { + t.Fatalf("exp=%v; got=%v", exp, f2.freePageIds()) + } +} + +func Benchmark_FreelistRelease10K(b *testing.B) { benchmark_FreelistRelease(b, 10000) } +func Benchmark_FreelistRelease100K(b *testing.B) { benchmark_FreelistRelease(b, 100000) } +func Benchmark_FreelistRelease1000K(b *testing.B) { benchmark_FreelistRelease(b, 1000000) } +func Benchmark_FreelistRelease10000K(b *testing.B) { benchmark_FreelistRelease(b, 10000000) } + +func benchmark_FreelistRelease(b *testing.B, size int) { + ids := randomPgids(size) + pending := randomPgids(len(ids) / 400) + b.ResetTimer() + for i := 0; i < b.N; i++ { + txp := &txPending{ids: pending} + f := newTestFreelist() + f.pendingPageIds()[1] = txp + f.Init(ids) + f.release(1) + } +} + +func randomPgids(n int) []common.Pgid { + pgids := make(common.Pgids, n) + for i := range pgids { + pgids[i] = common.Pgid(rand.Int63()) + } + sort.Sort(pgids) + return pgids +} + +func Test_freelist_ReadIDs_and_getFreePageIDs(t *testing.T) { + f := newTestFreelist() + exp := common.Pgids([]common.Pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}) + + f.Init(exp) + + if got := f.freePageIds(); !reflect.DeepEqual(exp, got) { + t.Fatalf("exp=%v; got=%v", exp, got) + } + + f2 := newTestFreelist() + var exp2 []common.Pgid + f2.Init(exp2) + + if got2 := f2.freePageIds(); !reflect.DeepEqual(got2, common.Pgids(exp2)) { + t.Fatalf("exp2=%#v; got2=%#v", exp2, got2) + } + +} + +// newTestFreelist get the freelist type from env and initial the freelist +func newTestFreelist() Interface { + if env := os.Getenv(TestFreelistType); env == "hashmap" { + return NewHashMapFreelist() + } + + return NewArrayFreelist() +} diff --git a/freelist_hmap.go b/internal/freelist/hashmap.go similarity index 79% rename from freelist_hmap.go rename to internal/freelist/hashmap.go index c5c09f55e..a6bad8976 100644 --- a/freelist_hmap.go +++ b/internal/freelist/hashmap.go @@ -1,4 +1,4 @@ 
-package bbolt +package freelist import ( "fmt" @@ -8,26 +8,57 @@ import ( "go.etcd.io/bbolt/internal/common" ) -// hashmapFreeCount returns count of free pages(hashmap version) -func (f *freelist) hashmapFreeCount() int { - common.Verify(func() { - expectedFreePageCount := f.hashmapFreeCountSlow() - common.Assert(int(f.freePagesCount) == expectedFreePageCount, - "freePagesCount (%d) is out of sync with free pages map (%d)", f.freePagesCount, expectedFreePageCount) - }) - return int(f.freePagesCount) +// pidSet holds the set of starting pgids which have the same span size +type pidSet map[common.Pgid]struct{} + +type hashMap struct { + *shared + + freePagesCount uint64 // count of free pages(hashmap version) + freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size + forwardMap map[common.Pgid]uint64 // key is start pgid, value is its span size + backwardMap map[common.Pgid]uint64 // key is end pgid, value is its span size } -func (f *freelist) hashmapFreeCountSlow() int { - count := 0 - for _, size := range f.forwardMap { - count += int(size) +func (f *hashMap) Init(pgids common.Pgids) { + if len(pgids) == 0 { + return } - return count + + size := uint64(1) + start := pgids[0] + // reset the counter when freelist init + f.freePagesCount = 0 + + if !sort.SliceIsSorted([]common.Pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { + panic("pgids not sorted") + } + + f.freemaps = make(map[uint64]pidSet) + f.forwardMap = make(map[common.Pgid]uint64) + f.backwardMap = make(map[common.Pgid]uint64) + + for i := 1; i < len(pgids); i++ { + // continuous page + if pgids[i] == pgids[i-1]+1 { + size++ + } else { + f.addSpan(start, size) + + size = 1 + start = pgids[i] + } + } + + // init the tail + if size != 0 && start != 0 { + f.addSpan(start, size) + } + + f.reindex() } -// hashmapAllocate serves the same purpose as arrayAllocate, but use hashmap as backend -func (f *freelist) 
hashmapAllocate(txid common.Txid, n int) common.Pgid { +func (f *hashMap) Allocate(txid common.Txid, n int) common.Pgid { if n == 0 { return 0 } @@ -74,17 +105,17 @@ func (f *freelist) hashmapAllocate(txid common.Txid, n int) common.Pgid { return 0 } -// hashmapReadIDs reads pgids as input an initial the freelist(hashmap version) -func (f *freelist) hashmapReadIDs(pgids []common.Pgid) { - f.init(pgids) - - // Rebuild the page cache. - f.reindex() +func (f *hashMap) FreeCount() int { + common.Verify(func() { + expectedFreePageCount := f.hashmapFreeCountSlow() + common.Assert(int(f.freePagesCount) == expectedFreePageCount, + "freePagesCount (%d) is out of sync with free pages map (%d)", f.freePagesCount, expectedFreePageCount) + }) + return int(f.freePagesCount) } -// hashmapGetFreePageIDs returns the sorted free page ids -func (f *freelist) hashmapGetFreePageIDs() []common.Pgid { - count := f.free_count() +func (f *hashMap) freePageIds() common.Pgids { + count := f.FreeCount() if count == 0 { return nil } @@ -108,8 +139,36 @@ func (f *freelist) hashmapGetFreePageIDs() []common.Pgid { return m } -// hashmapMergeSpans try to merge list of pages(represented by pgids) with existing spans -func (f *freelist) hashmapMergeSpans(ids common.Pgids) { +func (f *hashMap) hashmapFreeCountSlow() int { + count := 0 + for _, size := range f.forwardMap { + count += int(size) + } + return count +} + +func (f *hashMap) addSpan(start common.Pgid, size uint64) { + f.backwardMap[start-1+common.Pgid(size)] = size + f.forwardMap[start] = size + if _, ok := f.freemaps[size]; !ok { + f.freemaps[size] = make(map[common.Pgid]struct{}) + } + + f.freemaps[size][start] = struct{}{} + f.freePagesCount += size +} + +func (f *hashMap) delSpan(start common.Pgid, size uint64) { + delete(f.forwardMap, start) + delete(f.backwardMap, start+common.Pgid(size-1)) + delete(f.freemaps[size], start) + if len(f.freemaps[size]) == 0 { + delete(f.freemaps, size) + } + f.freePagesCount -= size +} + +func (f 
*hashMap) mergeSpans(ids common.Pgids) { common.Verify(func() { ids1Freemap := f.idsFromFreemaps() ids2Forward := f.idsFromForwardMap() @@ -144,7 +203,7 @@ func (f *freelist) hashmapMergeSpans(ids common.Pgids) { } // mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward -func (f *freelist) mergeWithExistingSpan(pid common.Pgid) { +func (f *hashMap) mergeWithExistingSpan(pid common.Pgid) { prev := pid - 1 next := pid + 1 @@ -171,68 +230,9 @@ func (f *freelist) mergeWithExistingSpan(pid common.Pgid) { f.addSpan(newStart, newSize) } -func (f *freelist) addSpan(start common.Pgid, size uint64) { - f.backwardMap[start-1+common.Pgid(size)] = size - f.forwardMap[start] = size - if _, ok := f.freemaps[size]; !ok { - f.freemaps[size] = make(map[common.Pgid]struct{}) - } - - f.freemaps[size][start] = struct{}{} - f.freePagesCount += size -} - -func (f *freelist) delSpan(start common.Pgid, size uint64) { - delete(f.forwardMap, start) - delete(f.backwardMap, start+common.Pgid(size-1)) - delete(f.freemaps[size], start) - if len(f.freemaps[size]) == 0 { - delete(f.freemaps, size) - } - f.freePagesCount -= size -} - -// initial from pgids using when use hashmap version -// pgids must be sorted -func (f *freelist) init(pgids []common.Pgid) { - if len(pgids) == 0 { - return - } - - size := uint64(1) - start := pgids[0] - // reset the counter when freelist init - f.freePagesCount = 0 - - if !sort.SliceIsSorted([]common.Pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { - panic("pgids not sorted") - } - - f.freemaps = make(map[uint64]pidSet) - f.forwardMap = make(map[common.Pgid]uint64) - f.backwardMap = make(map[common.Pgid]uint64) - - for i := 1; i < len(pgids); i++ { - // continuous page - if pgids[i] == pgids[i-1]+1 { - size++ - } else { - f.addSpan(start, size) - - size = 1 - start = pgids[i] - } - } - - // init the tail - if size != 0 && start != 0 { - f.addSpan(start, size) - } -} - // idsFromFreemaps get all free 
page IDs from f.freemaps. // used by test only. -func (f *freelist) idsFromFreemaps() map[common.Pgid]struct{} { +func (f *hashMap) idsFromFreemaps() map[common.Pgid]struct{} { ids := make(map[common.Pgid]struct{}) for size, idSet := range f.freemaps { for start := range idSet { @@ -250,7 +250,7 @@ func (f *freelist) idsFromFreemaps() map[common.Pgid]struct{} { // idsFromForwardMap get all free page IDs from f.forwardMap. // used by test only. -func (f *freelist) idsFromForwardMap() map[common.Pgid]struct{} { +func (f *hashMap) idsFromForwardMap() map[common.Pgid]struct{} { ids := make(map[common.Pgid]struct{}) for start, size := range f.forwardMap { for i := 0; i < int(size); i++ { @@ -266,7 +266,7 @@ func (f *freelist) idsFromForwardMap() map[common.Pgid]struct{} { // idsFromBackwardMap get all free page IDs from f.backwardMap. // used by test only. -func (f *freelist) idsFromBackwardMap() map[common.Pgid]struct{} { +func (f *hashMap) idsFromBackwardMap() map[common.Pgid]struct{} { ids := make(map[common.Pgid]struct{}) for end, size := range f.backwardMap { for i := 0; i < int(size); i++ { @@ -279,3 +279,14 @@ func (f *freelist) idsFromBackwardMap() map[common.Pgid]struct{} { } return ids } + +func NewHashMapFreelist() Interface { + hm := &hashMap{ + shared: newShared(), + freemaps: make(map[uint64]pidSet), + forwardMap: make(map[common.Pgid]uint64), + backwardMap: make(map[common.Pgid]uint64), + } + hm.Interface = hm + return hm +} diff --git a/internal/freelist/hashmap_test.go b/internal/freelist/hashmap_test.go new file mode 100644 index 000000000..32cc5dfa0 --- /dev/null +++ b/internal/freelist/hashmap_test.go @@ -0,0 +1,155 @@ +package freelist + +import ( + "math/rand" + "reflect" + "sort" + "testing" + + "go.etcd.io/bbolt/internal/common" +) + +func TestFreelistHashmap_allocate(t *testing.T) { + f := NewHashMapFreelist() + + ids := []common.Pgid{3, 4, 5, 6, 7, 9, 12, 13, 18} + f.Init(ids) + + f.Allocate(1, 3) + if x := f.FreeCount(); x != 6 { + 
t.Fatalf("exp=6; got=%v", x) + } + + f.Allocate(1, 2) + if x := f.FreeCount(); x != 4 { + t.Fatalf("exp=4; got=%v", x) + } + f.Allocate(1, 1) + if x := f.FreeCount(); x != 3 { + t.Fatalf("exp=3; got=%v", x) + } + + f.Allocate(1, 0) + if x := f.FreeCount(); x != 3 { + t.Fatalf("exp=3; got=%v", x) + } +} + +func TestFreelistHashmap_mergeWithExist(t *testing.T) { + bm1 := pidSet{1: struct{}{}} + + bm2 := pidSet{5: struct{}{}} + tests := []struct { + name string + ids common.Pgids + pgid common.Pgid + want common.Pgids + wantForwardmap map[common.Pgid]uint64 + wantBackwardmap map[common.Pgid]uint64 + wantfreemap map[uint64]pidSet + }{ + { + name: "test1", + ids: []common.Pgid{1, 2, 4, 5, 6}, + pgid: 3, + want: []common.Pgid{1, 2, 3, 4, 5, 6}, + wantForwardmap: map[common.Pgid]uint64{1: 6}, + wantBackwardmap: map[common.Pgid]uint64{6: 6}, + wantfreemap: map[uint64]pidSet{6: bm1}, + }, + { + name: "test2", + ids: []common.Pgid{1, 2, 5, 6}, + pgid: 3, + want: []common.Pgid{1, 2, 3, 5, 6}, + wantForwardmap: map[common.Pgid]uint64{1: 3, 5: 2}, + wantBackwardmap: map[common.Pgid]uint64{6: 2, 3: 3}, + wantfreemap: map[uint64]pidSet{3: bm1, 2: bm2}, + }, + { + name: "test3", + ids: []common.Pgid{1, 2}, + pgid: 3, + want: []common.Pgid{1, 2, 3}, + wantForwardmap: map[common.Pgid]uint64{1: 3}, + wantBackwardmap: map[common.Pgid]uint64{3: 3}, + wantfreemap: map[uint64]pidSet{3: bm1}, + }, + { + name: "test4", + ids: []common.Pgid{2, 3}, + pgid: 1, + want: []common.Pgid{1, 2, 3}, + wantForwardmap: map[common.Pgid]uint64{1: 3}, + wantBackwardmap: map[common.Pgid]uint64{3: 3}, + wantfreemap: map[uint64]pidSet{3: bm1}, + }, + } + for _, tt := range tests { + f := newTestHashMapFreelist() + f.Init(tt.ids) + + f.mergeWithExistingSpan(tt.pgid) + + if got := f.freePageIds(); !reflect.DeepEqual(tt.want, got) { + t.Fatalf("name %s; exp=%v; got=%v", tt.name, tt.want, got) + } + if got := f.forwardMap; !reflect.DeepEqual(tt.wantForwardmap, got) { + t.Fatalf("name %s; exp=%v; got=%v", 
tt.name, tt.wantForwardmap, got) + } + if got := f.backwardMap; !reflect.DeepEqual(tt.wantBackwardmap, got) { + t.Fatalf("name %s; exp=%v; got=%v", tt.name, tt.wantBackwardmap, got) + } + if got := f.freemaps; !reflect.DeepEqual(tt.wantfreemap, got) { + t.Fatalf("name %s; exp=%v; got=%v", tt.name, tt.wantfreemap, got) + } + } +} + +func TestFreelistHashmap_GetFreePageIDs(t *testing.T) { + f := newTestHashMapFreelist() + + N := int32(100000) + fm := make(map[common.Pgid]uint64) + i := int32(0) + val := int32(0) + for i = 0; i < N; { + val = rand.Int31n(1000) + fm[common.Pgid(i)] = uint64(val) + i += val + f.freePagesCount += uint64(val) + } + + f.forwardMap = fm + res := f.freePageIds() + + if !sort.SliceIsSorted(res, func(i, j int) bool { return res[i] < res[j] }) { + t.Fatalf("pgids not sorted") + } +} + +func Benchmark_freelist_hashmapGetFreePageIDs(b *testing.B) { + f := newTestHashMapFreelist() + N := int32(100000) + fm := make(map[common.Pgid]uint64) + i := int32(0) + val := int32(0) + for i = 0; i < N; { + val = rand.Int31n(1000) + fm[common.Pgid(i)] = uint64(val) + i += val + } + + f.forwardMap = fm + + b.ReportAllocs() + b.ResetTimer() + for n := 0; n < b.N; n++ { + f.freePageIds() + } +} + +func newTestHashMapFreelist() *hashMap { + f := NewHashMapFreelist() + return f.(*hashMap) +} diff --git a/internal/freelist/shared.go b/internal/freelist/shared.go new file mode 100644 index 000000000..ac06309df --- /dev/null +++ b/internal/freelist/shared.go @@ -0,0 +1,321 @@ +package freelist + +import ( + "fmt" + "math" + "sort" + "unsafe" + + "go.etcd.io/bbolt/internal/common" +) + +type txPending struct { + ids []common.Pgid + alloctx []common.Txid // txids allocating the ids + lastReleaseBegin common.Txid // beginning txid of last matching releaseRange +} + +type shared struct { + Interface + + readonlyTXIDs []common.Txid // all readonly transaction IDs. + allocs map[common.Pgid]common.Txid // mapping of Txid that allocated a pgid. 
+ cache map[common.Pgid]struct{} // fast lookup of all free and pending page ids. + pending map[common.Txid]*txPending // mapping of soon-to-be free page ids by tx. +} + +func newShared() *shared { + return &shared{ + pending: make(map[common.Txid]*txPending), + allocs: make(map[common.Pgid]common.Txid), + cache: make(map[common.Pgid]struct{}), + } +} + +func (t *shared) pendingPageIds() map[common.Txid]*txPending { + return t.pending +} + +func (t *shared) PendingCount() int { + var count int + for _, txp := range t.pending { + count += len(txp.ids) + } + return count +} + +func (t *shared) Count() int { + return t.FreeCount() + t.PendingCount() +} + +func (t *shared) Freed(pgId common.Pgid) bool { + _, ok := t.cache[pgId] + return ok +} + +func (t *shared) Free(txid common.Txid, p *common.Page) { + if p.Id() <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.Id())) + } + + // Free page and all its overflow pages. + txp := t.pending[txid] + if txp == nil { + txp = &txPending{} + t.pending[txid] = txp + } + allocTxid, ok := t.allocs[p.Id()] + if ok { + delete(t.allocs, p.Id()) + } else if p.IsFreelistPage() { + // Freelist is always allocated by prior tx. + allocTxid = txid - 1 + } + + for id := p.Id(); id <= p.Id()+common.Pgid(p.Overflow()); id++ { + // Verify that page is not already free. + if _, ok := t.cache[id]; ok { + panic(fmt.Sprintf("page %d already freed", id)) + } + // Add to the freelist and cache. + txp.ids = append(txp.ids, id) + txp.alloctx = append(txp.alloctx, allocTxid) + t.cache[id] = struct{}{} + } +} + +func (t *shared) Rollback(txid common.Txid) { + // Remove page ids from cache. + txp := t.pending[txid] + if txp == nil { + return + } + var m common.Pgids + for i, pgid := range txp.ids { + delete(t.cache, pgid) + tx := txp.alloctx[i] + if tx == 0 { + continue + } + if tx != txid { + // Pending free aborted; restore page back to alloc list. + t.allocs[pgid] = tx + } else { + // Freed page was allocated by this txn; OK to throw away. 
+ m = append(m, pgid) + } + } + // Remove pages from pending list and mark as free if allocated by txid. + delete(t.pending, txid) + t.mergeSpans(m) +} + +func (t *shared) AddReadonlyTXID(tid common.Txid) { + t.readonlyTXIDs = append(t.readonlyTXIDs, tid) +} + +func (t *shared) RemoveReadonlyTXID(tid common.Txid) { + for i := range t.readonlyTXIDs { + if t.readonlyTXIDs[i] == tid { + last := len(t.readonlyTXIDs) - 1 + t.readonlyTXIDs[i] = t.readonlyTXIDs[last] + t.readonlyTXIDs = t.readonlyTXIDs[:last] + break + } + } +} + +type txIDx []common.Txid + +func (t txIDx) Len() int { return len(t) } +func (t txIDx) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t txIDx) Less(i, j int) bool { return t[i] < t[j] } + +func (t *shared) ReleasePendingPages() { + // Free all pending pages prior to the earliest open transaction. + sort.Sort(txIDx(t.readonlyTXIDs)) + minid := common.Txid(math.MaxUint64) + if len(t.readonlyTXIDs) > 0 { + minid = t.readonlyTXIDs[0] + } + if minid > 0 { + t.release(minid - 1) + } + // Release unused txid extents. + for _, tid := range t.readonlyTXIDs { + t.releaseRange(minid, tid-1) + minid = tid + 1 + } + t.releaseRange(minid, common.Txid(math.MaxUint64)) + // Any page both allocated and freed in an extent is safe to release. +} + +func (t *shared) release(txid common.Txid) { + m := make(common.Pgids, 0) + for tid, txp := range t.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, txp.ids...) + delete(t.pending, tid) + } + } + t.mergeSpans(m) +} + +func (t *shared) releaseRange(begin, end common.Txid) { + if begin > end { + return + } + var m common.Pgids + for tid, txp := range t.pending { + if tid < begin || tid > end { + continue + } + // Don't recompute freed pages if ranges haven't updated. 
+ if txp.lastReleaseBegin == begin { + continue + } + for i := 0; i < len(txp.ids); i++ { + if atx := txp.alloctx[i]; atx < begin || atx > end { + continue + } + m = append(m, txp.ids[i]) + txp.ids[i] = txp.ids[len(txp.ids)-1] + txp.ids = txp.ids[:len(txp.ids)-1] + txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] + txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] + i-- + } + txp.lastReleaseBegin = begin + if len(txp.ids) == 0 { + delete(t.pending, tid) + } + } + t.mergeSpans(m) +} + +// Copyall copies a list of all free ids and all pending ids in one sorted list. +// f.count returns the minimum length required for dst. +func (t *shared) Copyall(dst []common.Pgid) { + m := make(common.Pgids, 0, t.PendingCount()) + for _, txp := range t.pendingPageIds() { + m = append(m, txp.ids...) + } + sort.Sort(m) + common.Mergepgids(dst, t.freePageIds(), m) +} + +func (t *shared) Reload(p *common.Page) { + t.Read(p) + + // Build a cache of only pending pages. + pcache := make(map[common.Pgid]bool) + for _, txp := range t.pending { + for _, pendingID := range txp.ids { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + var a []common.Pgid + for _, id := range t.freePageIds() { + if !pcache[id] { + a = append(a, id) + } + } + + t.Init(a) +} + +func (t *shared) NoSyncReload(pgIds common.Pgids) { + // Build a cache of only pending pages. + pcache := make(map[common.Pgid]bool) + for _, txp := range t.pending { + for _, pendingID := range txp.ids { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + var a []common.Pgid + for _, id := range pgIds { + if !pcache[id] { + a = append(a, id) + } + } + + t.Init(a) +} + +// reindex rebuilds the free cache based on available and pending free lists. 
+func (t *shared) reindex() { + free := t.freePageIds() + pending := t.pendingPageIds() + t.cache = make(map[common.Pgid]struct{}, len(free)) + for _, id := range free { + t.cache[id] = struct{}{} + } + for _, txp := range pending { + for _, pendingID := range txp.ids { + t.cache[pendingID] = struct{}{} + } + } +} + +func (t *shared) Read(p *common.Page) { + if !p.IsFreelistPage() { + panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.Id(), p.Typ())) + } + + ids := p.FreelistPageIds() + + // Copy the list of page ids from the freelist. + if len(ids) == 0 { + t.Init(nil) + } else { + // copy the ids, so we don't modify on the freelist page directly + idsCopy := make([]common.Pgid, len(ids)) + copy(idsCopy, ids) + // Make sure they're sorted. + sort.Sort(common.Pgids(idsCopy)) + + t.Init(idsCopy) + } +} + +func (t *shared) EstimatedWritePageSize() int { + n := t.Count() + if n >= 0xFFFF { + // The first element will be used to store the count. See freelist.write. + n++ + } + return int(common.PageHeaderSize) + (int(unsafe.Sizeof(common.Pgid(0))) * n) +} + +func (t *shared) Write(p *common.Page) { + // Combine the old free pgids and pgids waiting on an open transaction. + + // Update the header flag. + p.SetFlags(common.FreelistPageFlag) + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. 
+ l := t.Count() + if l == 0 { + p.SetCount(uint16(l)) + } else if l < 0xFFFF { + p.SetCount(uint16(l)) + data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + ids := unsafe.Slice((*common.Pgid)(data), l) + t.Copyall(ids) + } else { + p.SetCount(0xFFFF) + data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + ids := unsafe.Slice((*common.Pgid)(data), l+1) + ids[0] = common.Pgid(l) + t.Copyall(ids[1:]) + } +} diff --git a/node.go b/node.go index fe67c3c89..022b1001e 100644 --- a/node.go +++ b/node.go @@ -316,7 +316,7 @@ func (n *node) spill() error { for _, node := range nodes { // Add node's page to the freelist if it's not new. if node.pgid > 0 { - tx.db.freelist.free(tx.meta.Txid(), tx.page(node.pgid)) + tx.db.freelist.Free(tx.meta.Txid(), tx.page(node.pgid)) node.pgid = 0 } @@ -493,7 +493,7 @@ func (n *node) dereference() { // free adds the node's underlying page to the freelist. func (n *node) free() { if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.meta.Txid(), n.bucket.tx.page(n.pgid)) + n.bucket.tx.db.freelist.Free(n.bucket.tx.meta.Txid(), n.bucket.tx.page(n.pgid)) n.pgid = 0 } } diff --git a/tx.go b/tx.go index 011e2c382..e03db9154 100644 --- a/tx.go +++ b/tx.go @@ -213,7 +213,7 @@ func (tx *Tx) Commit() (err error) { // Free the old freelist because commit writes out a fresh freelist. if tx.meta.Freelist() != common.PgidNoFreelist { - tx.db.freelist.free(tx.meta.Txid(), tx.db.page(tx.meta.Freelist())) + tx.db.freelist.Free(tx.meta.Txid(), tx.db.page(tx.meta.Freelist())) } if !tx.db.NoFreelistSync { @@ -285,15 +285,13 @@ func (tx *Tx) Commit() (err error) { func (tx *Tx) commitFreelist() error { // Allocate new pages for the new free list. This will overestimate // the size of the freelist but not underestimate the size (which would be bad). 
- p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + p, err := tx.allocate((tx.db.freelist.EstimatedWritePageSize() / tx.db.pageSize) + 1) if err != nil { tx.rollback() return err } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } + + tx.db.freelist.Write(p) tx.meta.SetFreelist(p.Id()) return nil @@ -316,7 +314,7 @@ func (tx *Tx) nonPhysicalRollback() { return } if tx.writable { - tx.db.freelist.rollback(tx.meta.Txid()) + tx.db.freelist.Rollback(tx.meta.Txid()) } tx.close() } @@ -327,17 +325,17 @@ func (tx *Tx) rollback() { return } if tx.writable { - tx.db.freelist.rollback(tx.meta.Txid()) + tx.db.freelist.Rollback(tx.meta.Txid()) // When mmap fails, the `data`, `dataref` and `datasz` may be reset to // zero values, and there is no way to reload free page IDs in this case. if tx.db.data != nil { if !tx.db.hasSyncedFreelist() { // Reconstruct free page list by scanning the DB to get the whole free page list. - // Note: scaning the whole db is heavy if your db size is large in NoSyncFreeList mode. - tx.db.freelist.noSyncReload(tx.db.freepages()) + // Note: scanning the whole db is heavy if your db size is large in NoSyncFreeList mode. + tx.db.freelist.NoSyncReload(tx.db.freepages()) } else { // Read free page list from freelist page. - tx.db.freelist.reload(tx.db.page(tx.db.meta().Freelist())) + tx.db.freelist.Reload(tx.db.page(tx.db.meta().Freelist())) } } } @@ -350,9 +348,9 @@ func (tx *Tx) close() { } if tx.writable { // Grab freelist stats. - var freelistFreeN = tx.db.freelist.free_count() - var freelistPendingN = tx.db.freelist.pending_count() - var freelistAlloc = tx.db.freelist.size() + var freelistFreeN = tx.db.freelist.FreeCount() + var freelistPendingN = tx.db.freelist.PendingCount() + var freelistAlloc = tx.db.freelist.EstimatedWritePageSize() // Remove transaction ref & writer lock. 
tx.db.rwtx = nil @@ -639,7 +637,7 @@ func (tx *Tx) Page(id int) (*common.PageInfo, error) { } // Determine the type (or if it's free). - if tx.db.freelist.freed(common.Pgid(id)) { + if tx.db.freelist.Freed(common.Pgid(id)) { info.Type = "free" } else { info.Type = p.Typ() diff --git a/tx_check.go b/tx_check.go index 4e3c41ae4..c3ecbb975 100644 --- a/tx_check.go +++ b/tx_check.go @@ -41,8 +41,8 @@ func (tx *Tx) check(cfg checkConfig, ch chan error) { // Check if any pages are double freed. freed := make(map[common.Pgid]bool) - all := make([]common.Pgid, tx.db.freelist.count()) - tx.db.freelist.copyall(all) + all := make([]common.Pgid, tx.db.freelist.Count()) + tx.db.freelist.Copyall(all) for _, id := range all { if freed[id] { ch <- fmt.Errorf("page %d: already freed", id) From d1cd0deee630863cf950eb0b94f0f42a76f7a70a Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 17 Jul 2024 13:25:50 +0100 Subject: [PATCH 273/439] No need to handle freelist as a specical case when freeing a page Signed-off-by: Benjamin Wang --- internal/freelist/shared.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/internal/freelist/shared.go b/internal/freelist/shared.go index ac06309df..9914cc7af 100644 --- a/internal/freelist/shared.go +++ b/internal/freelist/shared.go @@ -67,9 +67,6 @@ func (t *shared) Free(txid common.Txid, p *common.Page) { allocTxid, ok := t.allocs[p.Id()] if ok { delete(t.allocs, p.Id()) - } else if p.IsFreelistPage() { - // Freelist is always allocated by prior tx. 
- allocTxid = txid - 1 } for id := p.Id(); id <= p.Id()+common.Pgid(p.Overflow()); id++ { From 7b031d53c9f4919675fdf9c0c6f6210be31d3c25 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 17 Jul 2024 19:51:42 +0100 Subject: [PATCH 274/439] add test case to verify freelist in case of TXN rollback Signed-off-by: Benjamin Wang --- internal/guts_cli/guts_cli.go | 17 +++-- tests/failpoint/db_failpoint_test.go | 96 ++++++++++++++++++++++++++++ tx.go | 3 + 3 files changed, 112 insertions(+), 4 deletions(-) diff --git a/internal/guts_cli/guts_cli.go b/internal/guts_cli/guts_cli.go index 20b74b081..3ecfdeeaa 100644 --- a/internal/guts_cli/guts_cli.go +++ b/internal/guts_cli/guts_cli.go @@ -114,19 +114,28 @@ func ReadPageAndHWMSize(path string) (uint64, common.Pgid, error) { // GetRootPage returns the root-page (according to the most recent transaction). func GetRootPage(path string) (root common.Pgid, activeMeta common.Pgid, err error) { + m, id, err := GetActiveMetaPage(path) + if err != nil { + return 0, id, err + } + return m.RootBucket().RootPage(), id, nil +} + +// GetActiveMetaPage returns the active meta page and its page ID (0 or 1). 
+func GetActiveMetaPage(path string) (*common.Meta, common.Pgid, error) { _, buf0, err0 := ReadPage(path, 0) if err0 != nil { - return 0, 0, err0 + return nil, 0, err0 } m0 := common.LoadPageMeta(buf0) _, buf1, err1 := ReadPage(path, 1) if err1 != nil { - return 0, 1, err1 + return nil, 1, err1 } m1 := common.LoadPageMeta(buf1) if m0.Txid() < m1.Txid() { - return m1.RootBucket().RootPage(), 1, nil + return m1, 1, nil } else { - return m0.RootBucket().RootPage(), 0, nil + return m0, 0, nil } } diff --git a/tests/failpoint/db_failpoint_test.go b/tests/failpoint/db_failpoint_test.go index 3255ba21c..e12566d3e 100644 --- a/tests/failpoint/db_failpoint_test.go +++ b/tests/failpoint/db_failpoint_test.go @@ -1,6 +1,7 @@ package failpoint import ( + crand "crypto/rand" "fmt" "path/filepath" "testing" @@ -11,6 +12,8 @@ import ( bolt "go.etcd.io/bbolt" "go.etcd.io/bbolt/errors" "go.etcd.io/bbolt/internal/btesting" + "go.etcd.io/bbolt/internal/common" + "go.etcd.io/bbolt/internal/guts_cli" gofail "go.etcd.io/gofail/runtime" ) @@ -267,6 +270,99 @@ func TestIssue72(t *testing.T) { require.NoError(t, err) } +func TestTx_Rollback_Freelist(t *testing.T) { + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 4096}) + + bucketName := []byte("data") + + t.Log("Populate some data to have at least 5 leaf pages.") + var keys []string + err := db.Update(func(tx *bolt.Tx) error { + b, terr := tx.CreateBucket(bucketName) + if terr != nil { + return terr + } + for i := 0; i <= 10; i++ { + k := fmt.Sprintf("t1_k%02d", i) + keys = append(keys, k) + + v := make([]byte, 1500) + if _, terr := crand.Read(v); terr != nil { + return terr + } + + if terr := b.Put([]byte(k), v); terr != nil { + return terr + } + } + return nil + }) + require.NoError(t, err) + + t.Log("Remove some keys to have at least 3 more free pages.") + err = db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(bucketName) + for i := 0; i < 6; i++ { + if terr := b.Delete([]byte(keys[i])); terr != nil { + return terr 
+ } + } + return nil + }) + require.NoError(t, err) + + t.Log("Close and then reopen the db to release all pending free pages.") + db.MustClose() + db.MustReopen() + + t.Log("Enable the `beforeWriteMetaError` failpoint.") + require.NoError(t, gofail.Enable("beforeWriteMetaError", `return("writeMeta somehow failed")`)) + defer func() { + t.Log("Disable the `beforeWriteMetaError` failpoint.") + require.NoError(t, gofail.Disable("beforeWriteMetaError")) + }() + + beforeFreelistPgids, err := readFreelistPageIds(db.Path()) + require.NoError(t, err) + require.Greater(t, len(beforeFreelistPgids), 0) + + t.Log("Simulate TXN rollback") + err = db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket(bucketName) + for i := 6; i < len(keys); i++ { + v := make([]byte, 1500) + if _, terr := crand.Read(v); terr != nil { + return terr + } + // update the keys + if terr := b.Put([]byte(keys[i]), v); terr != nil { + return terr + } + } + return nil + }) + require.Error(t, err) + + afterFreelistPgids, err := readFreelistPageIds(db.Path()) + require.NoError(t, err) + + require.Equal(t, beforeFreelistPgids, afterFreelistPgids) +} + func idToBytes(id int) []byte { return []byte(fmt.Sprintf("%010d", id)) } + +func readFreelistPageIds(path string) ([]common.Pgid, error) { + m, _, err := guts_cli.GetActiveMetaPage(path) + if err != nil { + return nil, err + } + + p, _, err := guts_cli.ReadPage(path, uint64(m.Freelist())) + if err != nil { + return nil, err + } + + return p.FreelistPageIds(), nil +} diff --git a/tx.go b/tx.go index e03db9154..7b5db7727 100644 --- a/tx.go +++ b/tx.go @@ -551,6 +551,9 @@ func (tx *Tx) write() error { // writeMeta writes the meta to the disk. func (tx *Tx) writeMeta() error { + // gofail: var beforeWriteMetaError string + // return errors.New(beforeWriteMetaError) + // Create a temporary buffer for the meta page. 
lg := tx.db.Logger() buf := make([]byte, tx.db.pageSize) From ce50f55c6ead8cb3ab5443bd826788558259c709 Mon Sep 17 00:00:00 2001 From: Thomas Jungblut Date: Mon, 22 Jul 2024 16:20:01 +0200 Subject: [PATCH 275/439] ensure hashmap init clears maps This reorders some statements in the hashmap initialization to ensure we always start fresh, even when no pageids were passed to it. fixes #791 Signed-off-by: Thomas Jungblut --- internal/freelist/hashmap.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/internal/freelist/hashmap.go b/internal/freelist/hashmap.go index a6bad8976..8d471f4b5 100644 --- a/internal/freelist/hashmap.go +++ b/internal/freelist/hashmap.go @@ -21,22 +21,22 @@ type hashMap struct { } func (f *hashMap) Init(pgids common.Pgids) { + // reset the counter when freelist init + f.freePagesCount = 0 + f.freemaps = make(map[uint64]pidSet) + f.forwardMap = make(map[common.Pgid]uint64) + f.backwardMap = make(map[common.Pgid]uint64) + if len(pgids) == 0 { return } - size := uint64(1) - start := pgids[0] - // reset the counter when freelist init - f.freePagesCount = 0 - if !sort.SliceIsSorted([]common.Pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { panic("pgids not sorted") } - f.freemaps = make(map[uint64]pidSet) - f.forwardMap = make(map[common.Pgid]uint64) - f.backwardMap = make(map[common.Pgid]uint64) + size := uint64(1) + start := pgids[0] for i := 1; i < len(pgids); i++ { // continuous page @@ -117,7 +117,7 @@ func (f *hashMap) FreeCount() int { func (f *hashMap) freePageIds() common.Pgids { count := f.FreeCount() if count == 0 { - return nil + return common.Pgids{} } m := make([]common.Pgid, 0, count) From f4de460a1b974631abb7f33cfc9844dae3ffffe9 Mon Sep 17 00:00:00 2001 From: Thomas Jungblut Date: Mon, 22 Jul 2024 16:20:19 +0200 Subject: [PATCH 276/439] add testcases for hashmap init This also rectifies a bunch of nil/empty differences between the implementation that show up during init and page 
releases. Signed-off-by: Thomas Jungblut --- internal/freelist/array.go | 1 + internal/freelist/freelist_test.go | 91 ++++++++++++++++++++++-------- internal/freelist/shared.go | 2 +- 3 files changed, 68 insertions(+), 26 deletions(-) diff --git a/internal/freelist/array.go b/internal/freelist/array.go index 93ccc5edc..0cc1ba715 100644 --- a/internal/freelist/array.go +++ b/internal/freelist/array.go @@ -101,6 +101,7 @@ func (f *array) mergeSpans(ids common.Pgids) { func NewArrayFreelist() Interface { a := &array{ shared: newShared(), + ids: []common.Pgid{}, } a.Interface = a return a diff --git a/internal/freelist/freelist_test.go b/internal/freelist/freelist_test.go index df7c7697e..181e0932e 100644 --- a/internal/freelist/freelist_test.go +++ b/internal/freelist/freelist_test.go @@ -8,6 +8,8 @@ import ( "testing" "unsafe" + "github.com/stretchr/testify/require" + "go.etcd.io/bbolt/internal/common" ) @@ -85,7 +87,7 @@ func TestFreelist_releaseRange(t *testing.T) { title: "Single pending outsize minimum end range", pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, releaseRanges: []testRange{{1, 199}}, - wantFree: nil, + wantFree: []common.Pgid{}, }, { title: "Single pending with minimum begin range", @@ -97,7 +99,7 @@ func TestFreelist_releaseRange(t *testing.T) { title: "Single pending outside minimum begin range", pagesIn: []testPage{{id: 3, n: 1, allocTxn: 100, freeTxn: 200}}, releaseRanges: []testRange{{101, 300}}, - wantFree: nil, + wantFree: []common.Pgid{}, }, { title: "Single pending in minimum range", @@ -109,7 +111,7 @@ func TestFreelist_releaseRange(t *testing.T) { title: "Single pending and read transaction at 199", pagesIn: []testPage{{id: 3, n: 1, allocTxn: 199, freeTxn: 200}}, releaseRanges: []testRange{{100, 198}, {200, 300}}, - wantFree: nil, + wantFree: []common.Pgid{}, }, { title: "Adjacent pending and read transactions at 199, 200", @@ -122,7 +124,7 @@ func TestFreelist_releaseRange(t *testing.T) { {200, 199}, // Simulate the 
ranges db.freePages might produce. {201, 300}, }, - wantFree: nil, + wantFree: []common.Pgid{}, }, { title: "Out of order ranges", @@ -135,7 +137,7 @@ func TestFreelist_releaseRange(t *testing.T) { {201, 200}, {200, 200}, }, - wantFree: nil, + wantFree: []common.Pgid{}, }, { title: "Multiple pending, read transaction at 150", @@ -153,32 +155,71 @@ func TestFreelist_releaseRange(t *testing.T) { } for _, c := range releaseRangeTests { - f := newTestFreelist() - var ids []common.Pgid - for _, p := range c.pagesIn { - for i := uint64(0); i < uint64(p.n); i++ { - ids = append(ids, common.Pgid(uint64(p.id)+i)) + t.Run(c.title, func(t *testing.T) { + f := newTestFreelist() + var ids []common.Pgid + for _, p := range c.pagesIn { + for i := uint64(0); i < uint64(p.n); i++ { + ids = append(ids, common.Pgid(uint64(p.id)+i)) + } + } + f.Init(ids) + for _, p := range c.pagesIn { + f.Allocate(p.allocTxn, p.n) } - } - f.Init(ids) - for _, p := range c.pagesIn { - f.Allocate(p.allocTxn, p.n) - } - for _, p := range c.pagesIn { - f.Free(p.freeTxn, common.NewPage(p.id, 0, 0, uint32(p.n-1))) - } + for _, p := range c.pagesIn { + f.Free(p.freeTxn, common.NewPage(p.id, 0, 0, uint32(p.n-1))) + } - for _, r := range c.releaseRanges { - f.releaseRange(r.begin, r.end) - } + for _, r := range c.releaseRanges { + f.releaseRange(r.begin, r.end) + } - if exp := common.Pgids(c.wantFree); !reflect.DeepEqual(exp, f.freePageIds()) { - t.Errorf("exp=%v; got=%v for %s", exp, f.freePageIds(), c.title) - } + require.Equal(t, common.Pgids(c.wantFree), f.freePageIds()) + }) } } +func TestFreeList_init(t *testing.T) { + buf := make([]byte, 4096) + f := newTestFreelist() + f.Init(common.Pgids{5, 6, 8}) + + p := common.LoadPage(buf) + f.Write(p) + + f2 := newTestFreelist() + f2.Read(p) + require.Equal(t, common.Pgids{5, 6, 8}, f2.freePageIds()) + + // When initializing the freelist with an empty list of page ID, + // it should reset the freelist page IDs. 
+ f2.Init([]common.Pgid{}) + require.Equal(t, common.Pgids{}, f2.freePageIds()) +} + +func TestFreeList_reload(t *testing.T) { + buf := make([]byte, 4096) + f := newTestFreelist() + f.Init(common.Pgids{5, 6, 8}) + + p := common.LoadPage(buf) + f.Write(p) + + f2 := newTestFreelist() + f2.Read(p) + require.Equal(t, common.Pgids{5, 6, 8}, f2.freePageIds()) + + f2.Free(common.Txid(5), common.NewPage(10, common.LeafPageFlag, 0, 2)) + + // reload shouldn't affect the pending list + f2.Reload(p) + + require.Equal(t, common.Pgids{5, 6, 8}, f2.freePageIds()) + require.Equal(t, []common.Pgid{10, 11, 12}, f2.pendingPageIds()[5].ids) +} + // Ensure that a freelist can deserialize from a freelist page. func TestFreelist_read(t *testing.T) { // Create a page. @@ -263,7 +304,7 @@ func Test_freelist_ReadIDs_and_getFreePageIDs(t *testing.T) { } f2 := newTestFreelist() - var exp2 []common.Pgid + exp2 := []common.Pgid{} f2.Init(exp2) if got2 := f2.freePageIds(); !reflect.DeepEqual(got2, common.Pgids(exp2)) { diff --git a/internal/freelist/shared.go b/internal/freelist/shared.go index ac06309df..ba405239c 100644 --- a/internal/freelist/shared.go +++ b/internal/freelist/shared.go @@ -167,7 +167,7 @@ func (t *shared) releaseRange(begin, end common.Txid) { if begin > end { return } - var m common.Pgids + m := common.Pgids{} for tid, txp := range t.pending { if tid < begin || tid > end { continue From 2f15c08377d0bf1aff88ae65a79c12d932110d05 Mon Sep 17 00:00:00 2001 From: Thomas Jungblut Date: Tue, 9 Jul 2024 16:22:42 +0200 Subject: [PATCH 277/439] add nightly benchmark This should ensure we don't creep little percentages over the course of multiple commits into main, compared to the last release branch. 
Signed-off-by: Thomas Jungblut --- .github/workflows/benchmark-pr.yaml | 43 ++--------------- .github/workflows/benchmark-releases.yaml | 13 ++++++ .github/workflows/benchmark-template.yaml | 57 +++++++++++++++++++++++ 3 files changed, 75 insertions(+), 38 deletions(-) create mode 100644 .github/workflows/benchmark-releases.yaml create mode 100644 .github/workflows/benchmark-template.yaml diff --git a/.github/workflows/benchmark-pr.yaml b/.github/workflows/benchmark-pr.yaml index fb6728d46..95de955fd 100644 --- a/.github/workflows/benchmark-pr.yaml +++ b/.github/workflows/benchmark-pr.yaml @@ -1,42 +1,9 @@ --- -name: Benchmarks on AMD64 +name: Benchmarks on PRs (AMD64) permissions: read-all on: [pull_request] jobs: - benchmark-pull-request: - runs-on: ubuntu-latest-8-cores - steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - with: - fetch-depth: 0 - - id: goversion - run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 - with: - go-version: ${{ steps.goversion.outputs.goversion }} - - name: Run Benchmarks - run: | - BENCHSTAT_OUTPUT_FILE=result.txt make test-benchmark-compare REF=${{ github.event.pull_request.base.sha }} - - run: | - echo "\`\`\`" >> "$GITHUB_STEP_SUMMARY" - cat result.txt >> "$GITHUB_STEP_SUMMARY" - echo "\`\`\`" >> "$GITHUB_STEP_SUMMARY" - cat <> "$GITHUB_STEP_SUMMARY" -
- The table shows the median and 90% confidence interval (CI) summaries for each benchmark comparing the HEAD and the BASE of the pull request, and an A/B comparison under "vs base". The last column shows the statistical p-value with ten runs (n=10). - The last row has the Geometric Mean (geomean) for the given rows in the table. - Refer to [benchstat's documentation](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) for more help. - EOL - - name: Validate results under acceptable limit - run: | - export MAX_ACCEPTABLE_DIFFERENCE=5 - while IFS= read -r line; do - # Get fourth value, which is the comparison with the base. - value="$(echo "$line" | awk '{print $4}')" - if [[ "$value" = +* ]] || [[ "$value" = -* ]]; then - if (( $(echo "${value//[^0-9.]/}"'>'"$MAX_ACCEPTABLE_DIFFERENCE" | bc -l) )); then - echo "::error::$value is above the maximum acceptable difference ($MAX_ACCEPTABLE_DIFFERENCE)" - exit 1 - fi - fi - done < <(grep geomean result.txt) + amd64: + uses: ./.github/workflows/benchmark-template.yaml + with: + benchGitRef: ${{ github.event.pull_request.base.sha }} diff --git a/.github/workflows/benchmark-releases.yaml b/.github/workflows/benchmark-releases.yaml new file mode 100644 index 000000000..6cc1c1f8f --- /dev/null +++ b/.github/workflows/benchmark-releases.yaml @@ -0,0 +1,13 @@ +--- +name: Nightly Benchmarks against last release (AMD64) +permissions: read-all +on: + schedule: + - cron: '10 5 * * *' # runs every day at 05:10 UTC + # workflow_dispatch enables manual testing of this job by maintainers + workflow_dispatch: +jobs: + amd64: + uses: ./.github/workflows/benchmark-template.yaml + with: + benchGitRef: release-1.3 diff --git a/.github/workflows/benchmark-template.yaml b/.github/workflows/benchmark-template.yaml new file mode 100644 index 000000000..08adf44eb --- /dev/null +++ b/.github/workflows/benchmark-template.yaml @@ -0,0 +1,57 @@ +--- +name: Reusable Benchmark Template +on: + workflow_call: + inputs: + # which git reference to 
benchmark against + benchGitRef: + required: true + type: string + maxAcceptableDifferencePercent: + required: false + type: number + default: 5 + runs-on: + required: false + type: string + default: "['ubuntu-latest-8-cores']" +permissions: read-all + +jobs: + benchmark: + runs-on: ${{ fromJson(inputs.runs-on) }} + steps: + - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + with: + fetch-depth: 0 + - id: goversion + run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + with: + go-version: ${{ steps.goversion.outputs.goversion }} + - name: Run Benchmarks + run: | + BENCHSTAT_OUTPUT_FILE=result.txt make test-benchmark-compare REF=${{ inputs.benchGitRef }} + - run: | + echo "\`\`\`" >> "$GITHUB_STEP_SUMMARY" + cat result.txt >> "$GITHUB_STEP_SUMMARY" + echo "\`\`\`" >> "$GITHUB_STEP_SUMMARY" + cat <> "$GITHUB_STEP_SUMMARY" +
+ The table shows the median and 90% confidence interval (CI) summaries for each benchmark comparing the HEAD and the BASE, and an A/B comparison under "vs base". The last column shows the statistical p-value with ten runs (n=10). + The last row has the Geometric Mean (geomean) for the given rows in the table. + Refer to [benchstat's documentation](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) for more help. + EOL + - name: Validate results under acceptable limit + run: | + export MAX_ACCEPTABLE_DIFFERENCE=${{ inputs.maxAcceptableDifferencePercent }} + while IFS= read -r line; do + # Get fourth value, which is the comparison with the base. + value="$(echo "$line" | awk '{print $4}')" + if [[ "$value" = +* ]] || [[ "$value" = -* ]]; then + if (( $(echo "${value//[^0-9.]/}"'>'"$MAX_ACCEPTABLE_DIFFERENCE" | bc -l) )); then + echo "::error::$value is above the maximum acceptable difference ($MAX_ACCEPTABLE_DIFFERENCE)" + exit 1 + fi + fi + done < <(grep geomean result.txt) From da1c83cbeb3e76f337db48daf7b73fac5fdc600d Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Mon, 22 Jul 2024 13:01:23 +0100 Subject: [PATCH 278/439] panicking when a write txn tries to free a page which was allocated by itself Signed-off-by: Benjamin Wang --- internal/freelist/shared.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/internal/freelist/shared.go b/internal/freelist/shared.go index c70faf8bf..008483441 100644 --- a/internal/freelist/shared.go +++ b/internal/freelist/shared.go @@ -65,6 +65,11 @@ func (t *shared) Free(txid common.Txid, p *common.Page) { t.pending[txid] = txp } allocTxid, ok := t.allocs[p.Id()] + common.Verify(func() { + if allocTxid == txid { + panic(fmt.Sprintf("free: freed page (%d) was allocated by the same transaction (%d)", p.Id(), txid)) + } + }) if ok { delete(t.allocs, p.Id()) } @@ -87,7 +92,6 @@ func (t *shared) Rollback(txid common.Txid) { if txp == nil { return } - var m common.Pgids for i, pgid := range txp.ids { 
delete(t.cache, pgid) tx := txp.alloctx[i] @@ -98,13 +102,12 @@ func (t *shared) Rollback(txid common.Txid) { // Pending free aborted; restore page back to alloc list. t.allocs[pgid] = tx } else { - // Freed page was allocated by this txn; OK to throw away. - m = append(m, pgid) + // A writing TXN should never free a page which was allocated by itself. + panic(fmt.Sprintf("rollback: freed page (%d) was allocated by the same transaction (%d)", pgid, txid)) } } // Remove pages from pending list and mark as free if allocated by txid. delete(t.pending, txid) - t.mergeSpans(m) } func (t *shared) AddReadonlyTXID(tid common.Txid) { From 5378ea12fd1a8ce0c844d32835647f107363bf43 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Tue, 30 Jul 2024 08:58:49 -0700 Subject: [PATCH 279/439] github/workflows: pin dependency versions Signed-off-by: Ivan Valdes --- .github/workflows/failpoint_test.yaml | 4 ++-- .github/workflows/robustness_template.yaml | 4 ++-- .github/workflows/stale.yaml | 2 +- .github/workflows/tests-template.yml | 4 ++-- .github/workflows/tests_amd64.yaml | 4 ++-- .github/workflows/tests_windows.yml | 8 ++++---- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index 4de9c5008..685e40ae5 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -9,10 +9,10 @@ jobs: os: [ubuntu-latest] runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@v5 + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: | diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml index 132a804ed..baa5794fb 100644 --- 
a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -23,10 +23,10 @@ jobs: timeout-minutes: 210 runs-on: ${{ fromJson(inputs.runs-on) }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@v5 + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: test-robustness diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml index adef90226..f00b33dfb 100644 --- a/.github/workflows/stale.yaml +++ b/.github/workflows/stale.yaml @@ -11,7 +11,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v9 + - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0 with: days-before-stale: 90 days-before-close: 21 diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 59f5b05c3..d4ce77355 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -23,10 +23,10 @@ jobs: target: ${{ fromJSON(inputs.targets) }} runs-on: ${{ inputs.runs-on }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@v5 + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml index c174565ca..744530218 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -17,10 +17,10 @@ jobs: - test-linux-amd64-race runs-on: ubuntu-latest-8-cores steps: - - uses: actions/checkout@v4 + - uses: 
actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@v5 + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 406a8b11a..6c5f6fc11 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -18,10 +18,10 @@ jobs: # - windows-amd64-unit-test-4-cpu-race runs-on: windows-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@v5 + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt @@ -45,10 +45,10 @@ jobs: needs: ["test-windows"] runs-on: windows-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@v5 + - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage From 83aba89820b4890c39d8083419d78b622912b819 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Fri, 2 Aug 2024 10:37:02 -0700 Subject: [PATCH 280/439] github/workflows: set top-level file permissions The gh-workflow-approve and tests_windows actions didnt't specify top-level permissions. This is an improvement towards having a better OpenSSF Scorecard Report score. 
Signed-off-by: Ivan Valdes --- .github/workflows/gh-workflow-approve.yaml | 2 +- .github/workflows/tests_windows.yml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/gh-workflow-approve.yaml b/.github/workflows/gh-workflow-approve.yaml index fa1fdd12d..4da2e4f79 100644 --- a/.github/workflows/gh-workflow-approve.yaml +++ b/.github/workflows/gh-workflow-approve.yaml @@ -1,6 +1,6 @@ --- name: Approve GitHub Workflows - +permissions: read-all on: pull_request_target: types: diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 6c5f6fc11..d354fd49c 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -1,6 +1,7 @@ --- name: Tests on: [push, pull_request] +permissions: read-all jobs: test-windows: strategy: From 6f4e0e5c5cec8ce16fc9dd97c86b4980dc995f6a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Aug 2024 14:44:44 +0000 Subject: [PATCH 281/439] build(deps): Bump golangci/golangci-lint-action from 6.0.1 to 6.1.0 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.0.1 to 6.1.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/a4f60bb28d35aeee14e6880718e0c85ff1882e64...aaa42aa0628b4ae2578232a66b541047968fac86) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_windows.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index d4ce77355..14f691e58 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -52,4 +52,4 @@ jobs: ;; esac - name: golangci-lint - uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1 + uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index d354fd49c..e099b13f5 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -40,7 +40,7 @@ jobs: esac shell: bash - name: golangci-lint - uses: golangci/golangci-lint-action@a4f60bb28d35aeee14e6880718e0c85ff1882e64 # v6.0.1 + uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0 coverage: needs: ["test-windows"] From 8a64275c926d84bfbd07cfabe278b9f66b6fb39f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Aug 2024 14:44:48 +0000 Subject: [PATCH 282/439] build(deps): Bump actions/checkout from 4.1.1 to 4.1.7 Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.1 to 4.1.7. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v4.1.1...692973e3d937129bcbf40652eb9f2f61becf3332) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/benchmark-template.yaml | 2 +- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/robustness_template.yaml | 2 +- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_amd64.yaml | 2 +- .github/workflows/tests_windows.yml | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmark-template.yaml b/.github/workflows/benchmark-template.yaml index 08adf44eb..cbea4b1e3 100644 --- a/.github/workflows/benchmark-template.yaml +++ b/.github/workflows/benchmark-template.yaml @@ -21,7 +21,7 @@ jobs: benchmark: runs-on: ${{ fromJson(inputs.runs-on) }} steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: fetch-depth: 0 - id: goversion diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index 685e40ae5..3e7eeb9e1 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -9,7 +9,7 @@ jobs: os: [ubuntu-latest] runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml index baa5794fb..298edf5f7 100644 --- a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -23,7 +23,7 @@ jobs: timeout-minutes: 210 runs-on: ${{ fromJson(inputs.runs-on) }} steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - id: goversion run: echo "goversion=$(cat .go-version)" >> 
"$GITHUB_OUTPUT" - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index d4ce77355..f9e9a4528 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -23,7 +23,7 @@ jobs: target: ${{ fromJSON(inputs.targets) }} runs-on: ${{ inputs.runs-on }} steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml index 744530218..59d2d3be8 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -17,7 +17,7 @@ jobs: - test-linux-amd64-race runs-on: ubuntu-latest-8-cores steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index d354fd49c..2b3ce02ef 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -19,7 +19,7 @@ jobs: # - windows-amd64-unit-test-4-cpu-race runs-on: windows-latest steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 @@ -46,7 +46,7 @@ jobs: needs: ["test-windows"] runs-on: windows-latest steps: - - uses: 
actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 From 88233131eb90b8fa4df2c8876e299abf6d9957f3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Aug 2024 14:54:02 +0000 Subject: [PATCH 283/439] build(deps): Bump golang.org/x/sync from 0.7.0 to 0.8.0 Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.7.0 to 0.8.0. - [Commits](https://github.com/golang/sync/compare/v0.7.0...v0.8.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 410bd1469..ff0c9c72d 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 go.etcd.io/gofail v0.2.0 - golang.org/x/sync v0.7.0 + golang.org/x/sync v0.8.0 golang.org/x/sys v0.22.0 ) diff --git a/go.sum b/go.sum index a636cca70..876a49bc6 100644 --- a/go.sum +++ b/go.sum @@ -14,8 +14,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 
golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From 6f8410bbb0c5d4622968da0376b14df9125d8b89 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Aug 2024 20:38:04 +0000 Subject: [PATCH 284/439] build(deps): Bump actions/setup-go from 5.0.0 to 5.0.2 Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.0.0 to 5.0.2. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/0c52d547c9bc32b1aa3301fd7a9cb496313a4491...0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/benchmark-template.yaml | 2 +- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/robustness_template.yaml | 2 +- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_amd64.yaml | 2 +- .github/workflows/tests_windows.yml | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmark-template.yaml b/.github/workflows/benchmark-template.yaml index cbea4b1e3..83283ef38 100644 --- a/.github/workflows/benchmark-template.yaml +++ b/.github/workflows/benchmark-template.yaml @@ -26,7 +26,7 @@ jobs: fetch-depth: 0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: Run Benchmarks diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index 
3e7eeb9e1..0bcac6152 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -12,7 +12,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: | diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml index 298edf5f7..5ce49737b 100644 --- a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: test-robustness diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index f44891d75..3d34476d5 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml index 59d2d3be8..0dd355eef 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -20,7 +20,7 @@ jobs: 
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index fcc52f979..d03b6cc0b 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -22,7 +22,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt @@ -49,7 +49,7 @@ jobs: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage From f950a8814856b01d192eeb2e53cd87037ab36cb0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 Aug 2024 20:49:58 +0000 Subject: [PATCH 285/439] build(deps): Bump golang.org/x/sys from 0.22.0 to 0.23.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.22.0 to 0.23.0. - [Commits](https://github.com/golang/sys/compare/v0.22.0...v0.23.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ff0c9c72d..b29c4abf8 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.9.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.22.0 + golang.org/x/sys v0.23.0 ) require ( diff --git a/go.sum b/go.sum index 876a49bc6..974bb4e29 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= +golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 33b71a50af6bdb9dfcf0b79fa2b4ed378e5b679d Mon Sep 17 00:00:00 2001 From: Thomas Jungblut Date: Mon, 5 Aug 2024 10:49:05 +0200 Subject: [PATCH 286/439] Dedupe Reload/NoSyncReload, prefer empty instead of nil init Reload and NoSyncReload have duplicated code, this unifies both for later refactoring. This PR is split from #786, where the tests found differences on reloading and nil/empty initializations. Added some more clarifications in godocs for certain panic behavior and expected returns on the interface. 
Signed-off-by: Thomas Jungblut --- internal/freelist/freelist.go | 6 +++--- internal/freelist/shared.go | 24 +++--------------------- 2 files changed, 6 insertions(+), 24 deletions(-) diff --git a/internal/freelist/freelist.go b/internal/freelist/freelist.go index 3d77d8f94..2b819506b 100644 --- a/internal/freelist/freelist.go +++ b/internal/freelist/freelist.go @@ -11,7 +11,7 @@ type ReadWriter interface { // Write writes the freelist into the given page. Write(page *common.Page) - // EstimatedWritePageSize returns the size of the freelist after serialization in Write. + // EstimatedWritePageSize returns the size in bytes of the freelist after serialization in Write. // This should never underestimate the size. EstimatedWritePageSize() int } @@ -46,7 +46,7 @@ type Interface interface { ReleasePendingPages() // Free releases a page and its overflow for a given transaction id. - // If the page is already free then a panic will occur. + // If the page is already free or is one of the meta pages, then a panic will occur. Free(txId common.Txid, p *common.Page) // Freed returns whether a given page is in the free list. @@ -65,7 +65,7 @@ type Interface interface { // NoSyncReload reads the freelist from Pgids and filters out pending items. NoSyncReload(pgIds common.Pgids) - // freePageIds returns the IDs of all free pages. + // freePageIds returns the IDs of all free pages. Returns an empty slice if no free pages are available. freePageIds() common.Pgids // pendingPageIds returns all pending pages by transaction id. diff --git a/internal/freelist/shared.go b/internal/freelist/shared.go index 008483441..16a5b3286 100644 --- a/internal/freelist/shared.go +++ b/internal/freelist/shared.go @@ -208,25 +208,7 @@ func (t *shared) Copyall(dst []common.Pgid) { func (t *shared) Reload(p *common.Page) { t.Read(p) - - // Build a cache of only pending pages. 
- pcache := make(map[common.Pgid]bool) - for _, txp := range t.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []common.Pgid - for _, id := range t.freePageIds() { - if !pcache[id] { - a = append(a, id) - } - } - - t.Init(a) + t.NoSyncReload(t.freePageIds()) } func (t *shared) NoSyncReload(pgIds common.Pgids) { @@ -240,7 +222,7 @@ func (t *shared) NoSyncReload(pgIds common.Pgids) { // Check each page in the freelist and build a new available freelist // with any pages not in the pending lists. - var a []common.Pgid + a := []common.Pgid{} for _, id := range pgIds { if !pcache[id] { a = append(a, id) @@ -274,7 +256,7 @@ func (t *shared) Read(p *common.Page) { // Copy the list of page ids from the freelist. if len(ids) == 0 { - t.Init(nil) + t.Init([]common.Pgid{}) } else { // copy the ids, so we don't modify on the freelist page directly idsCopy := make([]common.Pgid, len(ids)) From 4272a9c897d3162c26f6efc4930438b76fc9663c Mon Sep 17 00:00:00 2001 From: Jeffrey Sica Date: Wed, 7 Aug 2024 08:53:55 -0500 Subject: [PATCH 287/439] Update GitHub runners to use ubuntu-latest since they have nested virt (#811) * update github runners to use ubuntu-latest since they are 4c and have nested virt Signed-off-by: Jeffrey Sica --- .github/workflows/benchmark-template.yaml | 2 +- .github/workflows/robustness_nightly.yaml | 2 +- .github/workflows/robustness_test.yaml | 2 +- .github/workflows/tests_amd64.yaml | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/benchmark-template.yaml b/.github/workflows/benchmark-template.yaml index 83283ef38..caca4d4b1 100644 --- a/.github/workflows/benchmark-template.yaml +++ b/.github/workflows/benchmark-template.yaml @@ -14,7 +14,7 @@ on: runs-on: required: false type: string - default: "['ubuntu-latest-8-cores']" + default: "['ubuntu-latest']" 
permissions: read-all jobs: diff --git a/.github/workflows/robustness_nightly.yaml b/.github/workflows/robustness_nightly.yaml index 96a519afa..0812f6230 100644 --- a/.github/workflows/robustness_nightly.yaml +++ b/.github/workflows/robustness_nightly.yaml @@ -14,7 +14,7 @@ jobs: with: count: 100 testTimeout: 200m - runs-on: "['ubuntu-latest-8-cores']" + runs-on: "['ubuntu-latest']" arm64: # GHA has a maximum amount of 6h execution time, we try to get done within 3h uses: ./.github/workflows/robustness_template.yaml diff --git a/.github/workflows/robustness_test.yaml b/.github/workflows/robustness_test.yaml index 4d6afd9e8..1e0aa5007 100644 --- a/.github/workflows/robustness_test.yaml +++ b/.github/workflows/robustness_test.yaml @@ -7,7 +7,7 @@ jobs: with: count: 10 testTimeout: 30m - runs-on: "['ubuntu-latest-8-cores']" + runs-on: "['ubuntu-latest']" arm64: uses: ./.github/workflows/robustness_template.yaml with: diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml index 0dd355eef..08e442cd4 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -8,14 +8,14 @@ jobs: test-linux-amd64-race: uses: ./.github/workflows/tests-template.yml with: - runs-on: ubuntu-latest-8-cores + runs-on: ubuntu-latest targets: "['linux-unit-test-4-cpu-race']" coverage: needs: - test-linux-amd64 - test-linux-amd64-race - runs-on: ubuntu-latest-8-cores + runs-on: ubuntu-latest steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - id: goversion From f2559dcecb01fc98e09c30d03f7b814e98ad5022 Mon Sep 17 00:00:00 2001 From: Chun-Hung Tseng Date: Thu, 8 Aug 2024 21:54:03 +0200 Subject: [PATCH 288/439] go version bump from 1.22.5 to 1.22.6 Reference: https://github.com/etcd-io/etcd/issues/18419 Signed-off-by: Chun-Hung Tseng --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index da9594fd6..013173af5 100644 --- a/.go-version 
+++ b/.go-version @@ -1 +1 @@ -1.22.5 +1.22.6 diff --git a/go.mod b/go.mod index b29c4abf8..ea35d8ace 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.22 -toolchain go1.22.5 +toolchain go1.22.6 require ( github.com/spf13/cobra v1.8.1 From 5831e3eb622a202d24572ebadac1cad47ebf359d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 14:18:53 +0000 Subject: [PATCH 289/439] build(deps): Bump golang.org/x/sys from 0.23.0 to 0.24.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.23.0 to 0.24.0. - [Commits](https://github.com/golang/sys/compare/v0.23.0...v0.24.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ea35d8ace..d193e4a6e 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.9.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.23.0 + golang.org/x/sys v0.24.0 ) require ( diff --git a/go.sum b/go.sum index 974bb4e29..36dbfae10 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= -golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 
h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 257eadc0543164d1a524ffa10469cd6dcc3dfda8 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Mon, 12 Aug 2024 15:57:07 +0100 Subject: [PATCH 290/439] rollback alloc map: remove all page ids which are allocated by the txid Signed-off-by: Benjamin Wang --- internal/freelist/array_test.go | 29 +++++++++++++++++++++++++++++ internal/freelist/hashmap_test.go | 24 ++++++++++++++++++++++++ internal/freelist/shared.go | 7 +++++++ 3 files changed, 60 insertions(+) diff --git a/internal/freelist/array_test.go b/internal/freelist/array_test.go index 31b0702dc..4d1306102 100644 --- a/internal/freelist/array_test.go +++ b/internal/freelist/array_test.go @@ -4,6 +4,8 @@ import ( "reflect" "testing" + "github.com/stretchr/testify/require" + "go.etcd.io/bbolt/internal/common" ) @@ -50,3 +52,30 @@ func TestFreelistArray_allocate(t *testing.T) { t.Fatalf("exp=%v; got=%v", exp, f.freePageIds()) } } + +func Test_Freelist_Array_Rollback(t *testing.T) { + f := newTestArrayFreelist() + + f.Init([]common.Pgid{3, 5, 6, 7, 12, 13}) + + f.Free(100, common.NewPage(20, 0, 0, 1)) + f.Allocate(100, 3) + f.Free(100, common.NewPage(25, 0, 0, 0)) + f.Allocate(100, 2) + + require.Equal(t, map[common.Pgid]common.Txid{5: 100, 12: 100}, f.allocs) + require.Equal(t, map[common.Txid]*txPending{100: { + ids: []common.Pgid{20, 21, 25}, + alloctx: []common.Txid{0, 0, 0}, + }}, f.pending) + + f.Rollback(100) + + require.Equal(t, map[common.Pgid]common.Txid{}, f.allocs) + require.Equal(t, map[common.Txid]*txPending{}, f.pending) +} + +func newTestArrayFreelist() *array { + f := NewArrayFreelist() + return f.(*array) +} diff --git a/internal/freelist/hashmap_test.go b/internal/freelist/hashmap_test.go index 32cc5dfa0..c77a05800 100644 --- a/internal/freelist/hashmap_test.go +++ 
b/internal/freelist/hashmap_test.go @@ -6,6 +6,8 @@ import ( "sort" "testing" + "github.com/stretchr/testify/require" + "go.etcd.io/bbolt/internal/common" ) @@ -128,6 +130,28 @@ func TestFreelistHashmap_GetFreePageIDs(t *testing.T) { } } +func Test_Freelist_Hashmap_Rollback(t *testing.T) { + f := newTestHashMapFreelist() + + f.Init([]common.Pgid{3, 5, 6, 7, 12, 13}) + + f.Free(100, common.NewPage(20, 0, 0, 1)) + f.Allocate(100, 3) + f.Free(100, common.NewPage(25, 0, 0, 0)) + f.Allocate(100, 2) + + require.Equal(t, map[common.Pgid]common.Txid{5: 100, 12: 100}, f.allocs) + require.Equal(t, map[common.Txid]*txPending{100: { + ids: []common.Pgid{20, 21, 25}, + alloctx: []common.Txid{0, 0, 0}, + }}, f.pending) + + f.Rollback(100) + + require.Equal(t, map[common.Pgid]common.Txid{}, f.allocs) + require.Equal(t, map[common.Txid]*txPending{}, f.pending) +} + func Benchmark_freelist_hashmapGetFreePageIDs(b *testing.B) { f := newTestHashMapFreelist() N := int32(100000) diff --git a/internal/freelist/shared.go b/internal/freelist/shared.go index 16a5b3286..f2d113008 100644 --- a/internal/freelist/shared.go +++ b/internal/freelist/shared.go @@ -108,6 +108,13 @@ func (t *shared) Rollback(txid common.Txid) { } // Remove pages from pending list and mark as free if allocated by txid. 
delete(t.pending, txid) + + // Remove pgids which are allocated by this txid + for pgid, tid := range t.allocs { + if tid == txid { + delete(t.allocs, pgid) + } + } } func (t *shared) AddReadonlyTXID(tid common.Txid) { From e52dec6c83b696424a0a23b8ae327686928104c9 Mon Sep 17 00:00:00 2001 From: Chun-Hung Tseng Date: Thu, 15 Aug 2024 00:00:45 +0200 Subject: [PATCH 291/439] Bump go toolchain to 1.23.0 Reference: - https://github.com/etcd-io/etcd/issues/18443 Signed-off-by: Chun-Hung Tseng --- .go-version | 2 +- go.mod | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.go-version b/.go-version index 013173af5..a6c2798a4 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.22.6 +1.23.0 diff --git a/go.mod b/go.mod index d193e4a6e..347d9fc48 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,8 @@ module go.etcd.io/bbolt -go 1.22 +go 1.23 -toolchain go1.22.6 +toolchain go1.23.0 require ( github.com/spf13/cobra v1.8.1 From 406670b70eda5f7440367d99732d13980955017c Mon Sep 17 00:00:00 2001 From: Chun-Hung Tseng Date: Thu, 15 Aug 2024 00:28:10 +0200 Subject: [PATCH 292/439] Update golangci-lint to v1.60.1 Reference: - https://github.com/golangci/golangci-lint/releases/tag/v1.60.1 Signed-off-by: Chun-Hung Tseng --- .github/workflows/tests-template.yml | 2 ++ .github/workflows/tests_windows.yml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 3d34476d5..39fc4082d 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -53,3 +53,5 @@ jobs: esac - name: golangci-lint uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0 + with: + version: v1.60.1 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index d03b6cc0b..ed96d9d16 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -41,6 +41,8 @@ jobs: shell: bash - name: golangci-lint 
uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0 + with: + version: v1.60.1 coverage: needs: ["test-windows"] From a4adf65d6edcaf2f8a9fc4bab4a305b52465daf3 Mon Sep 17 00:00:00 2001 From: Chun-Hung Tseng Date: Fri, 16 Aug 2024 17:12:29 +0200 Subject: [PATCH 293/439] Fix linter reported issues Signed-off-by: Chun-Hung Tseng --- db.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/db.go b/db.go index 349f187ae..5c1947e99 100644 --- a/db.go +++ b/db.go @@ -545,7 +545,7 @@ func (db *DB) munmap() error { // return errors.New(unmapError) if err := munmap(db); err != nil { db.Logger().Errorf("[GOOS: %s, GOARCH: %s] munmap failed, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, db.datasz, err) - return fmt.Errorf("unmap error: " + err.Error()) + return fmt.Errorf("unmap error: %v", err.Error()) } return nil @@ -593,7 +593,7 @@ func (db *DB) munlock(fileSize int) error { // return errors.New(munlockError) if err := munlock(db, fileSize); err != nil { db.Logger().Errorf("[GOOS: %s, GOARCH: %s] munlock failed, fileSize: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, fileSize, db.datasz, err) - return fmt.Errorf("munlock error: " + err.Error()) + return fmt.Errorf("munlock error: %v", err.Error()) } return nil } @@ -603,7 +603,7 @@ func (db *DB) mlock(fileSize int) error { // return errors.New(mlockError) if err := mlock(db, fileSize); err != nil { db.Logger().Errorf("[GOOS: %s, GOARCH: %s] mlock failed, fileSize: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, fileSize, db.datasz, err) - return fmt.Errorf("mlock error: " + err.Error()) + return fmt.Errorf("mlock error: %v", err.Error()) } return nil } From 3ce0fd0ad500baa32032f8d7a36f31adee31855a Mon Sep 17 00:00:00 2001 From: Thomas Jungblut Date: Tue, 20 Aug 2024 10:44:09 +0200 Subject: [PATCH 294/439] add freelist interface unit tests adding more unit tests for better coverage of the interface. 
Signed-off-by: Thomas Jungblut --- internal/freelist/array_test.go | 10 + internal/freelist/freelist_test.go | 299 +++++++++++++++++++++++++++++ internal/freelist/hashmap_test.go | 8 + 3 files changed, 317 insertions(+) diff --git a/internal/freelist/array_test.go b/internal/freelist/array_test.go index 4d1306102..904123042 100644 --- a/internal/freelist/array_test.go +++ b/internal/freelist/array_test.go @@ -53,6 +53,16 @@ func TestFreelistArray_allocate(t *testing.T) { } } +func TestInvalidArrayAllocation(t *testing.T) { + f := NewArrayFreelist() + // page 0 and 1 are reserved for meta pages, so they should never be free pages. + ids := []common.Pgid{1} + f.Init(ids) + require.Panics(t, func() { + f.Allocate(common.Txid(1), 1) + }) +} + func Test_Freelist_Array_Rollback(t *testing.T) { f := newTestArrayFreelist() diff --git a/internal/freelist/freelist_test.go b/internal/freelist/freelist_test.go index 181e0932e..12ac4b6b2 100644 --- a/internal/freelist/freelist_test.go +++ b/internal/freelist/freelist_test.go @@ -1,11 +1,15 @@ package freelist import ( + "fmt" + "math" "math/rand" "os" "reflect" + "slices" "sort" "testing" + "testing/quick" "unsafe" "github.com/stretchr/testify/require" @@ -34,6 +38,55 @@ func TestFreelist_free_overflow(t *testing.T) { } } +// Ensure that double freeing a page is causing a panic +func TestFreelist_free_double_free_panics(t *testing.T) { + f := newTestFreelist() + f.Free(100, common.NewPage(12, 0, 0, 3)) + require.Panics(t, func() { + f.Free(100, common.NewPage(12, 0, 0, 3)) + }) +} + +// Ensure that attempting to free the meta page panics +func TestFreelist_free_meta_panics(t *testing.T) { + f := newTestFreelist() + require.Panics(t, func() { + f.Free(100, common.NewPage(0, 0, 0, 0)) + }) + require.Panics(t, func() { + f.Free(100, common.NewPage(1, 0, 0, 0)) + }) +} + +func TestFreelist_free_freelist(t *testing.T) { + f := newTestFreelist() + f.Free(100, common.NewPage(12, common.FreelistPageFlag, 0, 0)) + pp := 
f.pendingPageIds()[100] + require.Equal(t, []common.Pgid{12}, pp.ids) + require.Equal(t, []common.Txid{0}, pp.alloctx) +} + +func TestFreelist_free_freelist_alloctx(t *testing.T) { + f := newTestFreelist() + f.Free(100, common.NewPage(12, common.FreelistPageFlag, 0, 0)) + f.Rollback(100) + require.Empty(t, f.freePageIds()) + require.Empty(t, f.pendingPageIds()) + require.False(t, f.Freed(12)) + + f.Free(101, common.NewPage(12, common.FreelistPageFlag, 0, 0)) + require.True(t, f.Freed(12)) + if exp := []common.Pgid{12}; !reflect.DeepEqual(exp, f.pendingPageIds()[101].ids) { + t.Fatalf("exp=%v; got=%v", exp, f.pendingPageIds()[101].ids) + } + f.ReleasePendingPages() + require.True(t, f.Freed(12)) + require.Empty(t, f.pendingPageIds()) + if exp := common.Pgids([]common.Pgid{12}); !reflect.DeepEqual(exp, f.freePageIds()) { + t.Fatalf("exp=%v; got=%v", exp, f.freePageIds()) + } +} + // Ensure that a transaction's free pages can be released. func TestFreelist_release(t *testing.T) { f := newTestFreelist() @@ -220,6 +273,30 @@ func TestFreeList_reload(t *testing.T) { require.Equal(t, []common.Pgid{10, 11, 12}, f2.pendingPageIds()[5].ids) } +// Ensure that the txIDx swap, less and len are properly implemented +func TestTxidSorting(t *testing.T) { + require.NoError(t, quick.Check(func(a []uint64) bool { + var txids []common.Txid + for _, txid := range a { + txids = append(txids, common.Txid(txid)) + } + + sort.Sort(txIDx(txids)) + + var r []uint64 + for _, txid := range txids { + r = append(r, uint64(txid)) + } + + if !slices.IsSorted(r) { + t.Errorf("txids were not sorted correctly=%v", txids) + return false + } + + return true + }, nil)) +} + // Ensure that a freelist can deserialize from a freelist page. func TestFreelist_read(t *testing.T) { // Create a page. 
@@ -243,6 +320,18 @@ func TestFreelist_read(t *testing.T) { } } +// Ensure that we never read a non-freelist page +func TestFreelist_read_panics(t *testing.T) { + buf := make([]byte, 4096) + page := common.LoadPage(buf) + page.SetFlags(common.BranchPageFlag) + page.SetCount(2) + f := newTestFreelist() + require.Panics(t, func() { + f.Read(page) + }) +} + // Ensure that a freelist can serialize into a freelist page. func TestFreelist_write(t *testing.T) { // Create a freelist and write it to a page. @@ -266,6 +355,216 @@ func TestFreelist_write(t *testing.T) { } } +func TestFreelist_E2E_HappyPath(t *testing.T) { + f := newTestFreelist() + f.Init([]common.Pgid{}) + requirePages(t, f, common.Pgids{}, common.Pgids{}) + + allocated := f.Allocate(common.Txid(1), 5) + require.Equal(t, common.Pgid(0), allocated) + // tx.go may now allocate more space, and eventually we need to delete a page again + f.Free(common.Txid(2), common.NewPage(5, common.LeafPageFlag, 0, 0)) + f.Free(common.Txid(2), common.NewPage(3, common.LeafPageFlag, 0, 0)) + f.Free(common.Txid(2), common.NewPage(8, common.LeafPageFlag, 0, 0)) + // the above will only mark the pages as pending, so free pages should not return anything + requirePages(t, f, common.Pgids{}, common.Pgids{3, 5, 8}) + + // someone wants to do a read on top of the next tx id + f.AddReadonlyTXID(common.Txid(3)) + // this should free the above pages for tx 2 entirely + f.ReleasePendingPages() + requirePages(t, f, common.Pgids{3, 5, 8}, common.Pgids{}) + + // no span of two pages available should yield a zero-page result + require.Equal(t, common.Pgid(0), f.Allocate(common.Txid(4), 2)) + // we should be able to allocate those pages independently however, + // map and array differ in the order they return the pages + expectedPgids := map[common.Pgid]struct{}{3: {}, 5: {}, 8: {}} + for i := 0; i < 3; i++ { + allocated = f.Allocate(common.Txid(4), 1) + require.Contains(t, expectedPgids, allocated, "expected to find pgid %d", allocated) + 
require.False(t, f.Freed(allocated)) + delete(expectedPgids, allocated) + } + require.Emptyf(t, expectedPgids, "unexpectedly more than one page was still found") + // no more free pages to allocate + require.Equal(t, common.Pgid(0), f.Allocate(common.Txid(4), 1)) +} + +func TestFreelist_E2E_MultiSpanOverflows(t *testing.T) { + f := newTestFreelist() + f.Init([]common.Pgid{}) + f.Free(common.Txid(10), common.NewPage(20, common.LeafPageFlag, 0, 1)) + f.Free(common.Txid(10), common.NewPage(25, common.LeafPageFlag, 0, 2)) + f.Free(common.Txid(10), common.NewPage(35, common.LeafPageFlag, 0, 3)) + f.Free(common.Txid(10), common.NewPage(39, common.LeafPageFlag, 0, 2)) + f.Free(common.Txid(10), common.NewPage(45, common.LeafPageFlag, 0, 4)) + requirePages(t, f, common.Pgids{}, common.Pgids{20, 21, 25, 26, 27, 35, 36, 37, 38, 39, 40, 41, 45, 46, 47, 48, 49}) + f.ReleasePendingPages() + requirePages(t, f, common.Pgids{20, 21, 25, 26, 27, 35, 36, 37, 38, 39, 40, 41, 45, 46, 47, 48, 49}, common.Pgids{}) + + // that sequence, regardless of implementation, should always yield the same blocks of pages + allocSequence := []int{7, 5, 3, 2} + expectedSpanStarts := []common.Pgid{35, 45, 25, 20} + for i, pageNums := range allocSequence { + allocated := f.Allocate(common.Txid(11), pageNums) + require.Equal(t, expectedSpanStarts[i], allocated) + // ensure all pages in that span are not considered free anymore + for i := 0; i < pageNums; i++ { + require.False(t, f.Freed(allocated+common.Pgid(i))) + } + } +} + +func TestFreelist_E2E_Rollbacks(t *testing.T) { + freelist := newTestFreelist() + freelist.Init([]common.Pgid{}) + freelist.Free(common.Txid(2), common.NewPage(5, common.LeafPageFlag, 0, 1)) + freelist.Free(common.Txid(2), common.NewPage(8, common.LeafPageFlag, 0, 0)) + requirePages(t, freelist, common.Pgids{}, common.Pgids{5, 6, 8}) + freelist.Rollback(common.Txid(2)) + requirePages(t, freelist, common.Pgids{}, common.Pgids{}) + + // unknown transaction should not trigger anything 
+ freelist.Free(common.Txid(4), common.NewPage(13, common.LeafPageFlag, 0, 3)) + requirePages(t, freelist, common.Pgids{}, common.Pgids{13, 14, 15, 16}) + freelist.ReleasePendingPages() + requirePages(t, freelist, common.Pgids{13, 14, 15, 16}, common.Pgids{}) + freelist.Rollback(common.Txid(1337)) + requirePages(t, freelist, common.Pgids{13, 14, 15, 16}, common.Pgids{}) +} + +func TestFreelist_E2E_RollbackPanics(t *testing.T) { + freelist := newTestFreelist() + freelist.Init([]common.Pgid{5}) + requirePages(t, freelist, common.Pgids{5}, common.Pgids{}) + + _ = freelist.Allocate(common.Txid(5), 1) + require.Panics(t, func() { + // depending on the verification level, either should panic + freelist.Free(common.Txid(5), common.NewPage(5, common.LeafPageFlag, 0, 0)) + freelist.Rollback(5) + }) +} + +// tests the reloading from another physical page +func TestFreelist_E2E_Reload(t *testing.T) { + freelist := newTestFreelist() + freelist.Init([]common.Pgid{}) + freelist.Free(common.Txid(2), common.NewPage(5, common.LeafPageFlag, 0, 1)) + freelist.Free(common.Txid(2), common.NewPage(8, common.LeafPageFlag, 0, 0)) + freelist.ReleasePendingPages() + requirePages(t, freelist, common.Pgids{5, 6, 8}, common.Pgids{}) + buf := make([]byte, 4096) + p := common.LoadPage(buf) + freelist.Write(p) + + freelist.Free(common.Txid(3), common.NewPage(3, common.LeafPageFlag, 0, 1)) + freelist.Free(common.Txid(3), common.NewPage(10, common.LeafPageFlag, 0, 2)) + requirePages(t, freelist, common.Pgids{5, 6, 8}, common.Pgids{3, 4, 10, 11, 12}) + + otherBuf := make([]byte, 4096) + px := common.LoadPage(otherBuf) + freelist.Write(px) + + loadFreeList := newTestFreelist() + loadFreeList.Init([]common.Pgid{}) + loadFreeList.Read(px) + requirePages(t, loadFreeList, common.Pgids{3, 4, 5, 6, 8, 10, 11, 12}, common.Pgids{}) + // restore the original freelist again + loadFreeList.Reload(p) + requirePages(t, loadFreeList, common.Pgids{5, 6, 8}, common.Pgids{}) + + // reload another page with different 
free pages to test we are deduplicating the free pages with the pending ones correctly + freelist = newTestFreelist() + freelist.Init([]common.Pgid{}) + freelist.Free(common.Txid(5), common.NewPage(5, common.LeafPageFlag, 0, 4)) + freelist.Reload(p) + requirePages(t, freelist, common.Pgids{}, common.Pgids{5, 6, 7, 8, 9}) +} + +// tests the loading and reloading from physical pages +func TestFreelist_E2E_SerDe_HappyPath(t *testing.T) { + freelist := newTestFreelist() + freelist.Init([]common.Pgid{}) + freelist.Free(common.Txid(2), common.NewPage(5, common.LeafPageFlag, 0, 1)) + freelist.Free(common.Txid(2), common.NewPage(8, common.LeafPageFlag, 0, 0)) + freelist.ReleasePendingPages() + requirePages(t, freelist, common.Pgids{5, 6, 8}, common.Pgids{}) + + freelist.Free(common.Txid(3), common.NewPage(3, common.LeafPageFlag, 0, 1)) + freelist.Free(common.Txid(3), common.NewPage(10, common.LeafPageFlag, 0, 2)) + requirePages(t, freelist, common.Pgids{5, 6, 8}, common.Pgids{3, 4, 10, 11, 12}) + + buf := make([]byte, 4096) + p := common.LoadPage(buf) + require.Equal(t, 80, freelist.EstimatedWritePageSize()) + freelist.Write(p) + + loadFreeList := newTestFreelist() + loadFreeList.Init([]common.Pgid{}) + loadFreeList.Read(p) + requirePages(t, loadFreeList, common.Pgids{3, 4, 5, 6, 8, 10, 11, 12}, common.Pgids{}) +} + +// tests the loading of a freelist against other implementations with various sizes +func TestFreelist_E2E_SerDe_AcrossImplementations(t *testing.T) { + testSizes := []int{0, 1, 10, 100, 1000, math.MaxUint16, math.MaxUint16 + 1, math.MaxUint16 * 2} + for _, size := range testSizes { + t.Run(fmt.Sprintf("n=%d", size), func(t *testing.T) { + freelist := newTestFreelist() + expectedFreePgids := common.Pgids{} + for i := 0; i < size; i++ { + pgid := common.Pgid(i + 2) + freelist.Free(common.Txid(1), common.NewPage(pgid, common.LeafPageFlag, 0, 0)) + expectedFreePgids = append(expectedFreePgids, pgid) + } + freelist.ReleasePendingPages() + requirePages(t, freelist, 
expectedFreePgids, common.Pgids{}) + buf := make([]byte, freelist.EstimatedWritePageSize()) + p := common.LoadPage(buf) + freelist.Write(p) + + for n, loadFreeList := range map[string]Interface{ + "hashmap": NewHashMapFreelist(), + "array": NewArrayFreelist(), + } { + t.Run(n, func(t *testing.T) { + loadFreeList.Read(p) + requirePages(t, loadFreeList, expectedFreePgids, common.Pgids{}) + }) + } + }) + } +} + +func requirePages(t *testing.T, f Interface, freePageIds common.Pgids, pendingPageIds common.Pgids) { + require.Equal(t, f.FreeCount()+f.PendingCount(), f.Count()) + require.Equalf(t, freePageIds, f.freePageIds(), "unexpected free pages") + require.Equal(t, len(freePageIds), f.FreeCount()) + + pp := allPendingPages(f.pendingPageIds()) + require.Equalf(t, pendingPageIds, pp, "unexpected pending pages") + require.Equal(t, len(pp), f.PendingCount()) + + for _, pgid := range f.freePageIds() { + require.Truef(t, f.Freed(pgid), "expected free page to return true on Freed") + } + + for _, pgid := range pp { + require.Truef(t, f.Freed(pgid), "expected pending page to return true on Freed") + } +} + +func allPendingPages(p map[common.Txid]*txPending) common.Pgids { + pgids := common.Pgids{} + for _, pending := range p { + pgids = append(pgids, pending.ids...) 
+ } + sort.Sort(pgids) + return pgids +} + func Benchmark_FreelistRelease10K(b *testing.B) { benchmark_FreelistRelease(b, 10000) } func Benchmark_FreelistRelease100K(b *testing.B) { benchmark_FreelistRelease(b, 100000) } func Benchmark_FreelistRelease1000K(b *testing.B) { benchmark_FreelistRelease(b, 1000000) } diff --git a/internal/freelist/hashmap_test.go b/internal/freelist/hashmap_test.go index c77a05800..de1954abb 100644 --- a/internal/freelist/hashmap_test.go +++ b/internal/freelist/hashmap_test.go @@ -11,6 +11,14 @@ import ( "go.etcd.io/bbolt/internal/common" ) +func TestFreelistHashmap_init_panics(t *testing.T) { + f := NewHashMapFreelist() + require.Panics(t, func() { + // init expects sorted input + f.Init([]common.Pgid{25, 5}) + }) +} + func TestFreelistHashmap_allocate(t *testing.T) { f := NewHashMapFreelist() From 7d5cd63a8f844e744a27105d6160a61cdca778f1 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 20 Aug 2024 10:31:40 +0100 Subject: [PATCH 295/439] add changelog for 1.3.11 Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.3.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/CHANGELOG/CHANGELOG-1.3.md b/CHANGELOG/CHANGELOG-1.3.md index 0d6b4a30f..180485556 100644 --- a/CHANGELOG/CHANGELOG-1.3.md +++ b/CHANGELOG/CHANGELOG-1.3.md @@ -2,6 +2,20 @@ Note that we start to track changes starting from v1.3.7.
+## v1.3.11(TBD) + +### BoltDB +- Fix [the `freelist.allocs` isn't rollbacked when a tx is rollbacked](https://github.com/etcd-io/bbolt/pull/823). + +### CMD +- Add [`-gobench-output` option for bench command to adapt to benchstat](https://github.com/etcd-io/bbolt/pull/802). + +### Other +- [Bump go version to 1.22.x](https://github.com/etcd-io/bbolt/pull/822). +- This patch also added `dmflakey` package, which can be reused by other projects. See https://github.com/etcd-io/bbolt/pull/812. + +
+ ## v1.3.10(2024-05-06) ### BoltDB From 5baf4d2c19cc31d1400bf9e3f2b16766700ab9f9 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 21 Aug 2024 09:40:58 +0100 Subject: [PATCH 296/439] Update release date of 1.3.11 Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.3.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG/CHANGELOG-1.3.md b/CHANGELOG/CHANGELOG-1.3.md index 180485556..23009eb95 100644 --- a/CHANGELOG/CHANGELOG-1.3.md +++ b/CHANGELOG/CHANGELOG-1.3.md @@ -2,7 +2,7 @@ Note that we start to track changes starting from v1.3.7.
-## v1.3.11(TBD) +## v1.3.11(2024-08-21) ### BoltDB - Fix [the `freelist.allocs` isn't rollbacked when a tx is rollbacked](https://github.com/etcd-io/bbolt/pull/823). From 0cecda66e016f212c9d13aa74cfd93cb07948ff6 Mon Sep 17 00:00:00 2001 From: Erik Kalkoken Date: Thu, 29 Aug 2024 14:47:38 +0200 Subject: [PATCH 297/439] Add info on how to iterate over existing buckets (#828) * Add info on how to list existing top-level buckets Signed-off-by: ErikKalkoken --- README.md | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index c4ee84875..d27d3a783 100644 --- a/README.md +++ b/README.md @@ -315,6 +315,17 @@ guarantee that they exist for future transactions. To delete a bucket, simply call the `Tx.DeleteBucket()` function. +You can also iterate over all existing top-level buckets with `Tx.ForEach()`: + +```go +db.View(func(tx *bolt.Tx) error { + tx.ForEach(func(name []byte, b *bolt.Bucket) error { + fmt.Println(string(name)) + return nil + }) + return nil +}) +``` ### Using key/value pairs @@ -452,7 +463,7 @@ key and the cursor still points to the first element if present. If you remove key/value pairs during iteration, the cursor may automatically move to the next position if present in current node each time removing a key. -When you call `c.Next()` after removing a key, it may skip one key/value pair. +When you call `c.Next()` after removing a key, it may skip one key/value pair. Refer to [pull/611](https://github.com/etcd-io/bbolt/pull/611) to get more detailed info. During iteration, if the key is non-`nil` but the value is `nil`, that means From d70d6d1082a6457974233d065513cad11b5db801 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Sep 2024 14:49:05 +0000 Subject: [PATCH 298/439] build(deps): Bump golang.org/x/sys from 0.24.0 to 0.25.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.24.0 to 0.25.0. 
- [Commits](https://github.com/golang/sys/compare/v0.24.0...v0.25.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 347d9fc48..3ddffe1a2 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.9.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.24.0 + golang.org/x/sys v0.25.0 ) require ( diff --git a/go.sum b/go.sum index 36dbfae10..4fa2e6c24 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= +golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From e9f1b0d0d7e564044fcd62dc943a68b04750fd1d Mon Sep 17 00:00:00 2001 From: ArkaSaha30 Date: Tue, 10 Sep 2024 22:07:54 +0530 Subject: [PATCH 299/439] [main] Bump go toolchain to 1.23.1 This commit will bump go toolchain to 1.23.1 Signed-off-by: ArkaSaha30 --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index 
a6c2798a4..49e0a31d4 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.23.0 +1.23.1 diff --git a/go.mod b/go.mod index 347d9fc48..b44b9be54 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.23 -toolchain go1.23.0 +toolchain go1.23.1 require ( github.com/spf13/cobra v1.8.1 From 7b2154f4665ad18493b6743bc44f09e927029a54 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Sep 2024 14:09:43 +0000 Subject: [PATCH 300/439] build(deps): Bump actions/checkout from 4.1.7 to 4.2.0 Bumps [actions/checkout](https://github.com/actions/checkout) from 4.1.7 to 4.2.0. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/692973e3d937129bcbf40652eb9f2f61becf3332...d632683dd7b4114ad314bca15554477dd762a938) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/benchmark-template.yaml | 2 +- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/robustness_template.yaml | 2 +- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_amd64.yaml | 2 +- .github/workflows/tests_windows.yml | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmark-template.yaml b/.github/workflows/benchmark-template.yaml index caca4d4b1..8cb7869a3 100644 --- a/.github/workflows/benchmark-template.yaml +++ b/.github/workflows/benchmark-template.yaml @@ -21,7 +21,7 @@ jobs: benchmark: runs-on: ${{ fromJson(inputs.runs-on) }} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 with: fetch-depth: 0 - id: goversion diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index 0bcac6152..b0c7033c0 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -9,7 +9,7 @@ jobs: os: [ubuntu-latest] runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml index 5ce49737b..09467d3c5 100644 --- a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -23,7 +23,7 @@ jobs: timeout-minutes: 210 runs-on: ${{ fromJson(inputs.runs-on) }} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> 
"$GITHUB_OUTPUT" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 39fc4082d..2d1cdd7ca 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -23,7 +23,7 @@ jobs: target: ${{ fromJSON(inputs.targets) }} runs-on: ${{ inputs.runs-on }} steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml index 08e442cd4..592c11b2a 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -17,7 +17,7 @@ jobs: - test-linux-amd64-race runs-on: ubuntu-latest steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index ed96d9d16..eed55a8da 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -19,7 +19,7 @@ jobs: # - windows-amd64-unit-test-4-cpu-race runs-on: windows-latest steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 @@ -48,7 +48,7 @@ jobs: needs: ["test-windows"] runs-on: windows-latest steps: - - uses: 
actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 From bb993663a7aece02871967eaf5051ee71901e1aa Mon Sep 17 00:00:00 2001 From: Agni Date: Wed, 2 Oct 2024 23:57:59 +0530 Subject: [PATCH 301/439] Bump go version to 1.23.2 Signed-off-by: Agni --- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_windows.yml | 2 +- .go-version | 2 +- go.mod | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 2d1cdd7ca..3c22fd221 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -54,4 +54,4 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0 with: - version: v1.60.1 + version: v1.61.0 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index eed55a8da..31c637712 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -42,7 +42,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0 with: - version: v1.60.1 + version: v1.61.0 coverage: needs: ["test-windows"] diff --git a/.go-version b/.go-version index 49e0a31d4..14bee92c9 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.23.1 +1.23.2 diff --git a/go.mod b/go.mod index 24fadec27..7a2b68d16 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.23 -toolchain go1.23.1 +toolchain go1.23.2 require ( github.com/spf13/cobra v1.8.1 From 1eb67ec10ec97264858fa280073330dc70ef0a7a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 14:26:07 +0000 
Subject: [PATCH 302/439] build(deps): Bump golang.org/x/sys from 0.25.0 to 0.26.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.25.0 to 0.26.0. - [Commits](https://github.com/golang/sys/compare/v0.25.0...v0.26.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7a2b68d16..842d8d2ea 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.9.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.8.0 - golang.org/x/sys v0.25.0 + golang.org/x/sys v0.26.0 ) require ( diff --git a/go.sum b/go.sum index 4fa2e6c24..376cec914 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From a00ee9779755f098500ebfbfb3a447ce2faee107 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Oct 2024 14:28:33 +0000 Subject: [PATCH 303/439] build(deps): Bump 
golangci/golangci-lint-action from 6.1.0 to 6.1.1 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.1.0 to 6.1.1. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/aaa42aa0628b4ae2578232a66b541047968fac86...971e284b6050e8a5849b72094c50ab08da042db8) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_windows.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 3c22fd221..2223186ad 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -52,6 +52,6 @@ jobs: ;; esac - name: golangci-lint - uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0 + uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1 with: version: v1.61.0 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 31c637712..a942e4b05 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -40,7 +40,7 @@ jobs: esac shell: bash - name: golangci-lint - uses: golangci/golangci-lint-action@aaa42aa0628b4ae2578232a66b541047968fac86 # v6.1.0 + uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1 with: version: v1.61.0 From ed9a15fb47f2b18dbac96d0acaafcae8b437a03f Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Tue, 8 Oct 2024 10:58:12 -0700 Subject: [PATCH 304/439] github: enable ok-to-test for release-1.3 PRs Signed-off-by: Ivan Valdes --- .github/workflows/gh-workflow-approve.yaml | 1 + 1 file changed, 1 insertion(+) diff --git 
a/.github/workflows/gh-workflow-approve.yaml b/.github/workflows/gh-workflow-approve.yaml index 4da2e4f79..4a51970b8 100644 --- a/.github/workflows/gh-workflow-approve.yaml +++ b/.github/workflows/gh-workflow-approve.yaml @@ -8,6 +8,7 @@ on: - synchronize branches: - main + - release-1.3 jobs: approve: From 811e7a1826d844799ae3ce553fac4f321e95ef02 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2024 14:59:24 +0000 Subject: [PATCH 305/439] build(deps): Bump actions/checkout from 4.2.0 to 4.2.1 Bumps [actions/checkout](https://github.com/actions/checkout) from 4.2.0 to 4.2.1. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/d632683dd7b4114ad314bca15554477dd762a938...eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/benchmark-template.yaml | 2 +- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/robustness_template.yaml | 2 +- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_amd64.yaml | 2 +- .github/workflows/tests_windows.yml | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmark-template.yaml b/.github/workflows/benchmark-template.yaml index 8cb7869a3..487b72f50 100644 --- a/.github/workflows/benchmark-template.yaml +++ b/.github/workflows/benchmark-template.yaml @@ -21,7 +21,7 @@ jobs: benchmark: runs-on: ${{ fromJson(inputs.runs-on) }} steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 with: fetch-depth: 0 - id: goversion diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index b0c7033c0..fe1db413e 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -9,7 +9,7 @@ jobs: os: [ubuntu-latest] runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml index 09467d3c5..9e8e3ec26 100644 --- a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -23,7 +23,7 @@ jobs: timeout-minutes: 210 runs-on: ${{ fromJson(inputs.runs-on) }} steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 - id: goversion run: echo "goversion=$(cat .go-version)" >> 
"$GITHUB_OUTPUT" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 2223186ad..eed382f74 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -23,7 +23,7 @@ jobs: target: ${{ fromJSON(inputs.targets) }} runs-on: ${{ inputs.runs-on }} steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml index 592c11b2a..cd6e9212f 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -17,7 +17,7 @@ jobs: - test-linux-amd64-race runs-on: ubuntu-latest steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index a942e4b05..da3a55922 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -19,7 +19,7 @@ jobs: # - windows-amd64-unit-test-4-cpu-race runs-on: windows-latest steps: - - uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 @@ -48,7 +48,7 @@ jobs: needs: ["test-windows"] runs-on: windows-latest steps: - - uses: 
actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 From 67826a9c2cee8468aa4ac323f04da912ab3e2224 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Thu, 24 Oct 2024 10:34:28 -0700 Subject: [PATCH 306/439] github/workflows: remove arm64 jobs Signed-off-by: Ivan Valdes --- .github/workflows/tests_arm64.yaml | 14 -------------- 1 file changed, 14 deletions(-) delete mode 100644 .github/workflows/tests_arm64.yaml diff --git a/.github/workflows/tests_arm64.yaml b/.github/workflows/tests_arm64.yaml deleted file mode 100644 index 6498c0c0f..000000000 --- a/.github/workflows/tests_arm64.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -name: Tests ARM64 -permissions: read-all -on: [push, pull_request] -jobs: - test-linux-arm64: - uses: ./.github/workflows/tests-template.yml - with: - runs-on: actuated-arm64-4cpu-8gb - test-linux-arm64-race: - uses: ./.github/workflows/tests-template.yml - with: - runs-on: actuated-arm64-8cpu-8gb - targets: "['linux-unit-test-4-cpu-race']" From 6664240e05a14333b4a6cd7434b2977dc0a1f82e Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Thu, 24 Oct 2024 15:31:01 -0700 Subject: [PATCH 307/439] Add comment regarding sudo in Makefile robustness target Signed-off-by: Ivan Valdes --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 2e0c09fa8..f5a6703a0 100644 --- a/Makefile +++ b/Makefile @@ -90,7 +90,8 @@ test-failpoint: @echo "[failpoint] array freelist test" BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint -.PHONY: test-robustness # Running robustness tests requires root permission +.PHONY: test-robustness # Running robustness tests requires root permission for now +# TODO: Remove sudo once we fully migrate to the 
prow infrastructure test-robustness: gofail-enable build sudo env PATH=$$PATH go test -v ${TESTFLAGS} ./tests/dmflakey -test.root sudo env PATH=$(PWD)/bin:$$PATH go test -v ${TESTFLAGS} ${ROBUSTNESS_TESTFLAGS} ./tests/robustness -test.root From a1d8a84c2478e4671f47cf53d937bca1e49d9569 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Tue, 29 Oct 2024 09:39:49 -0700 Subject: [PATCH 308/439] github/workflows: remove arm64 robustness tests jobs Signed-off-by: Ivan Valdes --- .github/workflows/robustness_nightly.yaml | 7 ------- .github/workflows/robustness_test.yaml | 6 ------ 2 files changed, 13 deletions(-) diff --git a/.github/workflows/robustness_nightly.yaml b/.github/workflows/robustness_nightly.yaml index 0812f6230..df04e7842 100644 --- a/.github/workflows/robustness_nightly.yaml +++ b/.github/workflows/robustness_nightly.yaml @@ -15,10 +15,3 @@ jobs: count: 100 testTimeout: 200m runs-on: "['ubuntu-latest']" - arm64: - # GHA has a maximum amount of 6h execution time, we try to get done within 3h - uses: ./.github/workflows/robustness_template.yaml - with: - count: 100 - testTimeout: 200m - runs-on: "['actuated-arm64-4cpu-8gb']" diff --git a/.github/workflows/robustness_test.yaml b/.github/workflows/robustness_test.yaml index 1e0aa5007..03392859d 100644 --- a/.github/workflows/robustness_test.yaml +++ b/.github/workflows/robustness_test.yaml @@ -8,9 +8,3 @@ jobs: count: 10 testTimeout: 30m runs-on: "['ubuntu-latest']" - arm64: - uses: ./.github/workflows/robustness_template.yaml - with: - count: 10 - testTimeout: 30m - runs-on: "['actuated-arm64-4cpu-8gb']" From 1a4cb44ba35754f909623ab3c59466ded7492e07 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 29 Oct 2024 14:56:18 +0000 Subject: [PATCH 309/439] Add changelog for 1.4.0-beta.0 Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.4.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/CHANGELOG/CHANGELOG-1.4.md b/CHANGELOG/CHANGELOG-1.4.md index 317f9befb..01684e3c5 100644 --- 
a/CHANGELOG/CHANGELOG-1.4.md +++ b/CHANGELOG/CHANGELOG-1.4.md @@ -1,6 +1,26 @@
+## v1.4.0-beta.0(TBD) + +### BoltDB +- Reorganized the directory structure of freelist source code + - [Move array related freelist source code into a separate file](https://github.com/etcd-io/bbolt/pull/777) + - [Move method `freePages` into freelist.go](https://github.com/etcd-io/bbolt/pull/783) + - [Add an interface for freelist](https://github.com/etcd-io/bbolt/pull/775) +- [Rollback alloc map when a transaction is rollbacked](https://github.com/etcd-io/bbolt/pull/819) +- [No handling freelist as a special case when freeing a page](https://github.com/etcd-io/bbolt/pull/788) +- [Ensure hashmap init method clears the data structures](https://github.com/etcd-io/bbolt/pull/794) +- [Panicking when a write transaction tries to free a page allocated by itself](https://github.com/etcd-io/bbolt/pull/792) + +### CMD +- [Add `-gobench-output` flag for `bbolt bench` command](https://github.com/etcd-io/bbolt/pull/765) + +### Other +- [Bump go version to 1.23.x](https://github.com/etcd-io/bbolt/pull/821) + +
+ ## v1.4.0-alpha.1(2024-05-06) ### BoltDB From 62dd113a384da922042b5dcfd8c871f962e51d10 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Oct 2024 14:33:03 +0000 Subject: [PATCH 310/439] build(deps): Bump actions/checkout from 4.2.1 to 4.2.2 Bumps [actions/checkout](https://github.com/actions/checkout) from 4.2.1 to 4.2.2. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871...11bd71901bbe5b1630ceea73d27597364c9af683) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/benchmark-template.yaml | 2 +- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/robustness_template.yaml | 2 +- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_amd64.yaml | 2 +- .github/workflows/tests_windows.yml | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmark-template.yaml b/.github/workflows/benchmark-template.yaml index 487b72f50..b0df06be4 100644 --- a/.github/workflows/benchmark-template.yaml +++ b/.github/workflows/benchmark-template.yaml @@ -21,7 +21,7 @@ jobs: benchmark: runs-on: ${{ fromJson(inputs.runs-on) }} steps: - - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: fetch-depth: 0 - id: goversion diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index fe1db413e..a321a3f93 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -9,7 +9,7 @@ jobs: os: [ubuntu-latest] runs-on: ${{ matrix.os }} steps: - - uses: 
actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml index 9e8e3ec26..67a835812 100644 --- a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -23,7 +23,7 @@ jobs: timeout-minutes: 210 runs-on: ${{ fromJson(inputs.runs-on) }} steps: - - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index eed382f74..0a0966d5c 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -23,7 +23,7 @@ jobs: target: ${{ fromJSON(inputs.targets) }} runs-on: ${{ inputs.runs-on }} steps: - - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml index cd6e9212f..f8c504c7a 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -17,7 +17,7 @@ jobs: - test-linux-amd64-race runs-on: ubuntu-latest steps: - - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: 
echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index da3a55922..fedf213b0 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -19,7 +19,7 @@ jobs: # - windows-amd64-unit-test-4-cpu-race runs-on: windows-latest steps: - - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 @@ -48,7 +48,7 @@ jobs: needs: ["test-windows"] runs-on: windows-latest steps: - - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 From 3ab9912297cebdbbafeef8995cf7ac36d0af1276 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Oct 2024 14:33:06 +0000 Subject: [PATCH 311/439] build(deps): Bump actions/setup-go from 5.0.2 to 5.1.0 Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.0.2 to 5.1.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32...41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/benchmark-template.yaml | 2 +- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/robustness_template.yaml | 2 +- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_amd64.yaml | 2 +- .github/workflows/tests_windows.yml | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmark-template.yaml b/.github/workflows/benchmark-template.yaml index 487b72f50..312650561 100644 --- a/.github/workflows/benchmark-template.yaml +++ b/.github/workflows/benchmark-template.yaml @@ -26,7 +26,7 @@ jobs: fetch-depth: 0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: Run Benchmarks diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index fe1db413e..7d9c6c6a7 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -12,7 +12,7 @@ jobs: - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: | diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml index 9e8e3ec26..a3a945746 100644 --- a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: 
actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: test-robustness diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index eed382f74..b41a2eba1 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml index cd6e9212f..8b0e08a47 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index da3a55922..b4d0cf615 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -22,7 +22,7 @@ jobs: - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 with: 
go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt @@ -51,7 +51,7 @@ jobs: - uses: actions/checkout@eef61447b9ff4aafe5dcd4e0bbf5d482be7e7871 # v4.2.1 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage From ef63d9772889189a692cccdfb94771d28906bf55 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Mon, 4 Nov 2024 13:51:00 +0000 Subject: [PATCH 312/439] Update release date for v1.4.0-beta.0 Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.4.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG/CHANGELOG-1.4.md b/CHANGELOG/CHANGELOG-1.4.md index 01684e3c5..f44371adb 100644 --- a/CHANGELOG/CHANGELOG-1.4.md +++ b/CHANGELOG/CHANGELOG-1.4.md @@ -1,7 +1,7 @@
-## v1.4.0-beta.0(TBD) +## v1.4.0-beta.0(2024-11-04) ### BoltDB - Reorganized the directory structure of freelist source code From a9ef55e7777f6a0222b2fcae5a7c5682ef324f6e Mon Sep 17 00:00:00 2001 From: Cancai Cai <77189278+caicancai@users.noreply.github.com> Date: Wed, 6 Nov 2024 23:39:46 +0800 Subject: [PATCH 313/439] chore/docs: installing document description should use bbolt Signed-off-by: cancaicai <2356672992@qq.com> --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d27d3a783..f365e51e3 100644 --- a/README.md +++ b/README.md @@ -76,7 +76,7 @@ New minor versions may add additional features to the API. ### Installing -To start using Bolt, install Go and run `go get`: +To start using `bbolt`, install Go and run `go get`: ```sh $ go get go.etcd.io/bbolt@latest ``` From 0abe63fc11410a7a55bd2737a7491ec49862ac57 Mon Sep 17 00:00:00 2001 From: samuelbartels20 Date: Sun, 10 Nov 2024 15:45:13 +0000 Subject: [PATCH 314/439] Bump go toolchain to 1.23.3 Signed-off-by: Samuel Bartels Signed-off-by: samuelbartels20 --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index 14bee92c9..ac1df3fce 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.23.2 +1.23.3 diff --git a/go.mod b/go.mod index 842d8d2ea..c22bdc55a 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.23 -toolchain go1.23.2 +toolchain go1.23.3 require ( github.com/spf13/cobra v1.8.1 From 879d9c3eb84e61a40ef5929c583f4229e576cd17 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 15:01:41 +0000 Subject: [PATCH 315/439] build(deps): Bump golang.org/x/sync from 0.8.0 to 0.9.0 Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.8.0 to 0.9.0. 
- [Commits](https://github.com/golang/sync/compare/v0.8.0...v0.9.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index c22bdc55a..d57c4c3c9 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 go.etcd.io/gofail v0.2.0 - golang.org/x/sync v0.8.0 + golang.org/x/sync v0.9.0 golang.org/x/sys v0.26.0 ) diff --git a/go.sum b/go.sum index 376cec914..6bc668ed9 100644 --- a/go.sum +++ b/go.sum @@ -14,8 +14,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From ea2ba20a44df2c28bce27ea6bc4f3adada4edee3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 08:00:52 +0000 Subject: [PATCH 316/439] build(deps): Bump golang.org/x/sys from 0.26.0 to 0.27.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.26.0 to 0.27.0. 
- [Commits](https://github.com/golang/sys/compare/v0.26.0...v0.27.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d57c4c3c9..e78b7939f 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.9.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.9.0 - golang.org/x/sys v0.26.0 + golang.org/x/sys v0.27.0 ) require ( diff --git a/go.sum b/go.sum index 6bc668ed9..faa87670b 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 98cc22d9b9ef20c13d1a75921b70f6363be26f68 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 15:39:09 +0000 Subject: [PATCH 317/439] build(deps): Bump github.com/stretchr/testify from 1.9.0 to 1.10.0 Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.9.0 to 1.10.0. 
- [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.9.0...v1.10.0) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e78b7939f..52cd83f31 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ toolchain go1.23.3 require ( github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.9.0 golang.org/x/sys v0.27.0 diff --git a/go.sum b/go.sum index faa87670b..4805c1e5d 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,8 @@ github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= From ca0d74cde48b110d011bf80b71a8e5eb5644ec46 Mon Sep 17 00:00:00 2001 From: Anurag De Date: Wed, 4 Dec 2024 14:29:57 +0530 Subject: [PATCH 318/439] Updated Go toolchain to 1.23.4 Signed-off-by: Anurag De --- .go-version | 2 +- go.mod | 2 
+- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index ac1df3fce..27ddcc14d 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.23.3 +1.23.4 diff --git a/go.mod b/go.mod index 52cd83f31..d3474520e 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.23 -toolchain go1.23.3 +toolchain go1.23.4 require ( github.com/spf13/cobra v1.8.1 From 03867337686ad035731e724a424d44f46c2dc78b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Dec 2024 14:12:51 +0000 Subject: [PATCH 319/439] build(deps): Bump golang.org/x/sync from 0.9.0 to 0.10.0 Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.9.0 to 0.10.0. - [Commits](https://github.com/golang/sync/compare/v0.9.0...v0.10.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d3474520e..4a306035c 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 - golang.org/x/sync v0.9.0 + golang.org/x/sync v0.10.0 golang.org/x/sys v0.27.0 ) diff --git a/go.sum b/go.sum index 4805c1e5d..b3ba81e12 100644 --- a/go.sum +++ b/go.sum @@ -14,8 +14,8 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= -golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= -golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.10.0 
h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From 27e20b25fd39ec8344f156439bb5466d3b5de8df Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Dec 2024 15:37:35 +0000 Subject: [PATCH 320/439] build(deps): Bump golang.org/x/sys from 0.27.0 to 0.28.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.27.0 to 0.28.0. - [Commits](https://github.com/golang/sys/compare/v0.27.0...v0.28.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 4a306035c..5edd207cc 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.10.0 - golang.org/x/sys v0.27.0 + golang.org/x/sys v0.28.0 ) require ( diff --git a/go.sum b/go.sum index b3ba81e12..df9f353ee 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= -golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys 
v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 2e4a831e0717fd121214c35308fa7e0276324983 Mon Sep 17 00:00:00 2001 From: RiceChuan Date: Thu, 12 Dec 2024 10:55:37 +0800 Subject: [PATCH 321/439] docs: remove repetitive words Signed-off-by: RiceChuan --- cursor_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cursor_test.go b/cursor_test.go index 581700149..73e8492f0 100644 --- a/cursor_test.go +++ b/cursor_test.go @@ -332,7 +332,7 @@ func TestCursor_Seek_Large(t *testing.T) { k, _ := c.Seek(seek) - // The last seek is beyond the end of the the range so + // The last seek is beyond the end of the range so // it should return nil. if i == count-1 { if k != nil { From 52699ce8fa2ebec5036048e25432e6b55ec5f9ae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Dec 2024 14:48:37 +0000 Subject: [PATCH 322/439] build(deps): Bump actions/setup-go from 5.1.0 to 5.2.0 Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.1.0 to 5.2.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed...3041bf56c941b39c61721a86cd11f3bb1338122a) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/benchmark-template.yaml | 2 +- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/robustness_template.yaml | 2 +- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_amd64.yaml | 2 +- .github/workflows/tests_windows.yml | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmark-template.yaml b/.github/workflows/benchmark-template.yaml index 6a8473b6a..598f331cf 100644 --- a/.github/workflows/benchmark-template.yaml +++ b/.github/workflows/benchmark-template.yaml @@ -26,7 +26,7 @@ jobs: fetch-depth: 0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 + - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: Run Benchmarks diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index 692bb3a32..8b4b611e1 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -12,7 +12,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 + - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: | diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml index 8d065e615..99f241792 100644 --- a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: 
actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 + - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: test-robustness diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 73468a78b..114003858 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 + - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml index 2c7e9dcdb..456c3af71 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 + - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 96a8093ed..6870d089d 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -22,7 +22,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 + - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 with: 
go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt @@ -51,7 +51,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@41dfa10bad2bb2ae585af6ee5bb4d7d973ad74ed # v5.1.0 + - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage From 248fddc1ec48a98df9516320c663532682a1c8c1 Mon Sep 17 00:00:00 2001 From: Mark Ayers Date: Sat, 28 Dec 2024 17:29:07 -0500 Subject: [PATCH 323/439] Update buildtags Signed-off-by: Mark Ayers --- bolt_aix.go | 1 - bolt_arm64.go | 1 - bolt_loong64.go | 1 - bolt_mips64x.go | 1 - bolt_mipsx.go | 1 - bolt_ppc.go | 1 - bolt_ppc64.go | 1 - bolt_ppc64le.go | 1 - bolt_riscv64.go | 1 - bolt_s390x.go | 1 - bolt_unix.go | 1 - boltsync_unix.go | 1 - mlock_unix.go | 1 - unix_test.go | 1 - 14 files changed, 14 deletions(-) diff --git a/bolt_aix.go b/bolt_aix.go index 6dea4294d..4b424ed4c 100644 --- a/bolt_aix.go +++ b/bolt_aix.go @@ -1,5 +1,4 @@ //go:build aix -// +build aix package bbolt diff --git a/bolt_arm64.go b/bolt_arm64.go index 447bc1973..2c67ab10c 100644 --- a/bolt_arm64.go +++ b/bolt_arm64.go @@ -1,5 +1,4 @@ //go:build arm64 -// +build arm64 package bbolt diff --git a/bolt_loong64.go b/bolt_loong64.go index 31c17c1d0..1ef2145c6 100644 --- a/bolt_loong64.go +++ b/bolt_loong64.go @@ -1,5 +1,4 @@ //go:build loong64 -// +build loong64 package bbolt diff --git a/bolt_mips64x.go b/bolt_mips64x.go index a9385beb6..f28a0512a 100644 --- a/bolt_mips64x.go +++ b/bolt_mips64x.go @@ -1,5 +1,4 @@ //go:build mips64 || mips64le -// +build mips64 mips64le package bbolt diff --git a/bolt_mipsx.go b/bolt_mipsx.go index ed734ff7f..708fccdc0 100644 --- a/bolt_mipsx.go +++ b/bolt_mipsx.go @@ -1,5 +1,4 @@ //go:build mips || mipsle -// +build mips mipsle package bbolt diff --git a/bolt_ppc.go b/bolt_ppc.go index 
e403f57d8..6a21cf33c 100644 --- a/bolt_ppc.go +++ b/bolt_ppc.go @@ -1,5 +1,4 @@ //go:build ppc -// +build ppc package bbolt diff --git a/bolt_ppc64.go b/bolt_ppc64.go index fcd86529f..a32f24622 100644 --- a/bolt_ppc64.go +++ b/bolt_ppc64.go @@ -1,5 +1,4 @@ //go:build ppc64 -// +build ppc64 package bbolt diff --git a/bolt_ppc64le.go b/bolt_ppc64le.go index 20234aca4..8fb60dddc 100644 --- a/bolt_ppc64le.go +++ b/bolt_ppc64le.go @@ -1,5 +1,4 @@ //go:build ppc64le -// +build ppc64le package bbolt diff --git a/bolt_riscv64.go b/bolt_riscv64.go index 060f30c73..a63d26ab2 100644 --- a/bolt_riscv64.go +++ b/bolt_riscv64.go @@ -1,5 +1,4 @@ //go:build riscv64 -// +build riscv64 package bbolt diff --git a/bolt_s390x.go b/bolt_s390x.go index 92d2755ad..749ea97e3 100644 --- a/bolt_s390x.go +++ b/bolt_s390x.go @@ -1,5 +1,4 @@ //go:build s390x -// +build s390x package bbolt diff --git a/bolt_unix.go b/bolt_unix.go index aaa48d241..d1922c2d9 100644 --- a/bolt_unix.go +++ b/bolt_unix.go @@ -1,5 +1,4 @@ //go:build !windows && !plan9 && !solaris && !aix && !android -// +build !windows,!plan9,!solaris,!aix,!android package bbolt diff --git a/boltsync_unix.go b/boltsync_unix.go index 81e09a531..27face752 100644 --- a/boltsync_unix.go +++ b/boltsync_unix.go @@ -1,5 +1,4 @@ //go:build !windows && !plan9 && !linux && !openbsd -// +build !windows,!plan9,!linux,!openbsd package bbolt diff --git a/mlock_unix.go b/mlock_unix.go index 744a972f5..9a0fd332c 100644 --- a/mlock_unix.go +++ b/mlock_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package bbolt diff --git a/unix_test.go b/unix_test.go index 8924abf79..ac53ad559 100644 --- a/unix_test.go +++ b/unix_test.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package bbolt_test From dcfec949542ce494e1028b51569a809d59ff6a9f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Jan 2025 14:45:57 +0000 Subject: [PATCH 324/439] build(deps): Bump golang.org/x/sys 
from 0.28.0 to 0.29.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.28.0 to 0.29.0. - [Commits](https://github.com/golang/sys/compare/v0.28.0...v0.29.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 5edd207cc..8932fd740 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.10.0 - golang.org/x/sys v0.28.0 + golang.org/x/sys v0.29.0 ) require ( diff --git a/go.sum b/go.sum index df9f353ee..f4f4e194e 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 6bc997cd9c5d7f22c3cd5b8f7893cb9e12020cd8 Mon Sep 17 00:00:00 2001 From: "ajaysundar.k" Date: Sat, 18 Jan 2025 01:27:48 +0000 Subject: [PATCH 325/439] bbolt: update golang toolchain to 1.23.5 Signed-off-by: ajaysundar.k --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 
deletions(-) diff --git a/.go-version b/.go-version index 27ddcc14d..ca8ec414e 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.23.4 +1.23.5 diff --git a/go.mod b/go.mod index 8932fd740..a7ba0ca80 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.23 -toolchain go1.23.4 +toolchain go1.23.5 require ( github.com/spf13/cobra v1.8.1 From 41a1050ba34f039d272d107420e24d81a8d06fca Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Jan 2025 14:30:54 +0000 Subject: [PATCH 326/439] build(deps): Bump golangci/golangci-lint-action from 6.1.1 to 6.2.0 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.1.1 to 6.2.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/971e284b6050e8a5849b72094c50ab08da042db8...ec5d18412c0aeab7936cb16880d708ba2a64e1ae) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_windows.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 114003858..31a4e438f 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -52,6 +52,6 @@ jobs: ;; esac - name: golangci-lint - uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1 + uses: golangci/golangci-lint-action@ec5d18412c0aeab7936cb16880d708ba2a64e1ae # v6.2.0 with: version: v1.61.0 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 6870d089d..e5a3b66de 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -40,7 +40,7 @@ jobs: esac shell: bash - name: golangci-lint - uses: golangci/golangci-lint-action@971e284b6050e8a5849b72094c50ab08da042db8 # v6.1.1 + uses: golangci/golangci-lint-action@ec5d18412c0aeab7936cb16880d708ba2a64e1ae # v6.2.0 with: version: v1.61.0 From b61058275cac6bc58883c788ffaf4f718a138624 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Jan 2025 14:39:07 +0000 Subject: [PATCH 327/439] build(deps): Bump actions/stale from 9.0.0 to 9.1.0 Bumps [actions/stale](https://github.com/actions/stale) from 9.0.0 to 9.1.0. - [Release notes](https://github.com/actions/stale/releases) - [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/stale/compare/28ca1036281a5e5922ead5184a1bbf96e5fc984e...5bef64f19d7facfb25b37b414482c7164d639639) --- updated-dependencies: - dependency-name: actions/stale dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/stale.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml index f00b33dfb..1abb63ab8 100644 --- a/.github/workflows/stale.yaml +++ b/.github/workflows/stale.yaml @@ -11,7 +11,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@28ca1036281a5e5922ead5184a1bbf96e5fc984e # v9.0.0 + - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0 with: days-before-stale: 90 days-before-close: 21 From 539c45600b359d81d5e4a907bfe9681f668bba30 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Jan 2025 14:39:11 +0000 Subject: [PATCH 328/439] build(deps): Bump actions/setup-go from 5.2.0 to 5.3.0 Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.2.0 to 5.3.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/3041bf56c941b39c61721a86cd11f3bb1338122a...f111f3307d8850f501ac008e886eec1fd1932a34) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/benchmark-template.yaml | 2 +- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/robustness_template.yaml | 2 +- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_amd64.yaml | 2 +- .github/workflows/tests_windows.yml | 4 ++-- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/benchmark-template.yaml b/.github/workflows/benchmark-template.yaml index 598f331cf..460b6d5b7 100644 --- a/.github/workflows/benchmark-template.yaml +++ b/.github/workflows/benchmark-template.yaml @@ -26,7 +26,7 @@ jobs: fetch-depth: 0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: Run Benchmarks diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index 8b4b611e1..994c76252 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -12,7 +12,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: | diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml index 99f241792..ba275fda0 100644 --- a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: 
actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: test-robustness diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 31a4e438f..29b46bbc4 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml index 456c3af71..f0201fc7e 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index e5a3b66de..f2f48dbe2 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -22,7 +22,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: 
go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt @@ -51,7 +51,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 + - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage From 33910ccbb81bef5e2e4e4a04861f0cef513557c5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 3 Feb 2025 14:50:17 +0000 Subject: [PATCH 329/439] build(deps): Bump github.com/spf13/pflag from 1.0.5 to 1.0.6 Bumps [github.com/spf13/pflag](https://github.com/spf13/pflag) from 1.0.5 to 1.0.6. - [Release notes](https://github.com/spf13/pflag/releases) - [Commits](https://github.com/spf13/pflag/compare/v1.0.5...v1.0.6) --- updated-dependencies: - dependency-name: github.com/spf13/pflag dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index a7ba0ca80..7924975a9 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ toolchain go1.23.5 require ( github.com/spf13/cobra v1.8.1 - github.com/spf13/pflag v1.0.5 + github.com/spf13/pflag v1.0.6 github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.10.0 diff --git a/go.sum b/go.sum index f4f4e194e..5180c5558 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,9 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= From 8c64d067f68cf1c41c0c479babe5a3c14dfc063b Mon Sep 17 00:00:00 2001 From: Marcel Franca Date: Tue, 4 Feb 2025 21:28:44 -0300 Subject: [PATCH 330/439] update goland toolchain to 1.23.6 Signed-off-by: Marcel Franca --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index ca8ec414e..d8c40e539 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.23.5 +1.23.6 diff --git a/go.mod b/go.mod index 7924975a9..1425ff228 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ 
module go.etcd.io/bbolt go 1.23 -toolchain go1.23.5 +toolchain go1.23.6 require ( github.com/spf13/cobra v1.8.1 From a106ab74390b40f83fb8b17e0b4021daa5f01522 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 5 Feb 2025 13:22:54 +0000 Subject: [PATCH 331/439] Update changelog for v1.4.0 Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.4.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGELOG/CHANGELOG-1.4.md b/CHANGELOG/CHANGELOG-1.4.md index f44371adb..f5cb940d7 100644 --- a/CHANGELOG/CHANGELOG-1.4.md +++ b/CHANGELOG/CHANGELOG-1.4.md @@ -1,6 +1,13 @@
+## v1.4.0(2025-02-05) +There isn't any production code change since v1.4.0-beta.0. Only some dependencies +are bumped, also updated some typos in comment and readme, and removed the legacy +build tag `// +build` in https://github.com/etcd-io/bbolt/pull/879. + +
+ ## v1.4.0-beta.0(2024-11-04) ### BoltDB From 7f0904bd3866079475bb5c1aa85b0c3c6d4fbe0d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Feb 2025 14:12:46 +0000 Subject: [PATCH 332/439] build(deps): Bump golang.org/x/sys from 0.29.0 to 0.30.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.29.0 to 0.30.0. - [Commits](https://github.com/golang/sys/compare/v0.29.0...v0.30.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1425ff228..d94b1afe5 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.10.0 - golang.org/x/sys v0.29.0 + golang.org/x/sys v0.30.0 ) require ( diff --git a/go.sum b/go.sum index 5180c5558..ec899d387 100644 --- a/go.sum +++ b/go.sum @@ -17,8 +17,8 @@ go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= -golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 
8653d6582c1cbe47fa5a3eca87de7ae9dd930865 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Feb 2025 14:59:11 +0000 Subject: [PATCH 333/439] build(deps): Bump golangci/golangci-lint-action from 6.2.0 to 6.3.2 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.2.0 to 6.3.2. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/ec5d18412c0aeab7936cb16880d708ba2a64e1ae...051d91933864810ecd5e2ea2cfd98f6a5bca5347) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_windows.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 29b46bbc4..d47049170 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -52,6 +52,6 @@ jobs: ;; esac - name: golangci-lint - uses: golangci/golangci-lint-action@ec5d18412c0aeab7936cb16880d708ba2a64e1ae # v6.2.0 + uses: golangci/golangci-lint-action@051d91933864810ecd5e2ea2cfd98f6a5bca5347 # v6.3.2 with: version: v1.61.0 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index f2f48dbe2..2f8df96a9 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -40,7 +40,7 @@ jobs: esac shell: bash - name: golangci-lint - uses: golangci/golangci-lint-action@ec5d18412c0aeab7936cb16880d708ba2a64e1ae # v6.2.0 + uses: golangci/golangci-lint-action@051d91933864810ecd5e2ea2cfd98f6a5bca5347 # v6.3.2 with: version: v1.61.0 From bef55a5a1a7e252bd99c40e93983914038a71498 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 10 Feb 2025 16:11:15 +0000 Subject: [PATCH 334/439] build(deps): Bump golang.org/x/sync from 0.10.0 to 0.11.0 Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.10.0 to 0.11.0. - [Commits](https://github.com/golang/sync/compare/v0.10.0...v0.11.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index d94b1afe5..a7c4e0373 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/spf13/pflag v1.0.6 github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 - golang.org/x/sync v0.10.0 + golang.org/x/sync v0.11.0 golang.org/x/sys v0.30.0 ) diff --git a/go.sum b/go.sum index ec899d387..26b0d07e3 100644 --- a/go.sum +++ b/go.sum @@ -15,8 +15,8 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From 7eb11b0d67544e22cba2b8017a2a8aa071a164b1 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Mon, 10 Feb 2025 14:32:07 -0800 
Subject: [PATCH 335/439] Fix debug sync log line Signed-off-by: Ivan Valdes --- db.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/db.go b/db.go index 5c1947e99..cf6d7bdcc 100644 --- a/db.go +++ b/db.go @@ -1080,7 +1080,7 @@ func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { // then it allows you to force the database file to sync against the disk. func (db *DB) Sync() (err error) { if lg := db.Logger(); lg != discardLogger { - lg.Debug("Syncing bbolt db (%s)", db.path) + lg.Debugf("Syncing bbolt db (%s)", db.path) defer func() { if err != nil { lg.Errorf("[GOOS: %s, GOARCH: %s] syncing bbolt db (%s) failed: %v", runtime.GOOS, runtime.GOARCH, db.path, err) From a7dd9f3e35e6e15aeb15e7080a75db61716cd75e Mon Sep 17 00:00:00 2001 From: Sahdev Zala Date: Tue, 11 Feb 2025 17:42:56 -0500 Subject: [PATCH 336/439] Add code of conduct Add etcd community code of conduct. Modeled after Kubernetes repos. Signed-off-by: Sahdev Zala --- code-of-conduct.md | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 code-of-conduct.md diff --git a/code-of-conduct.md b/code-of-conduct.md new file mode 100644 index 000000000..f78dd84bc --- /dev/null +++ b/code-of-conduct.md @@ -0,0 +1,3 @@ +# etcd Community Code of Conduct + +Please refer to [etcd Community Code of Conduct](https://github.com/etcd-io/etcd/blob/main/code-of-conduct.md). 
From 4254016893c5bd532515e58a06714a2b781b70e6 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Sun, 16 Feb 2025 16:53:33 +0100 Subject: [PATCH 337/439] Bump go linter 1.24 Signed-off-by: Mustafa Elbehery --- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_windows.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index d47049170..75adb91b4 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -54,4 +54,4 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@051d91933864810ecd5e2ea2cfd98f6a5bca5347 # v6.3.2 with: - version: v1.61.0 + version: v1.63.4 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 2f8df96a9..8f7a4775e 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -42,7 +42,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@051d91933864810ecd5e2ea2cfd98f6a5bca5347 # v6.3.2 with: - version: v1.61.0 + version: v1.63.4 coverage: needs: ["test-windows"] From 9929d7660315954eb832950fc16ba4f8bd670313 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Feb 2025 15:17:38 +0000 Subject: [PATCH 338/439] build(deps): Bump github.com/spf13/cobra from 1.8.1 to 1.9.1 Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.8.1 to 1.9.1. - [Release notes](https://github.com/spf13/cobra/releases) - [Commits](https://github.com/spf13/cobra/compare/v1.8.1...v1.9.1) --- updated-dependencies: - dependency-name: github.com/spf13/cobra dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index a7c4e0373..0e439ce68 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.23 toolchain go1.23.6 require ( - github.com/spf13/cobra v1.8.1 + github.com/spf13/cobra v1.9.1 github.com/spf13/pflag v1.0.6 github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 diff --git a/go.sum b/go.sum index 26b0d07e3..d73f79fee 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,4 @@ -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -6,9 +6,8 @@ github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLf github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/testify v1.10.0 
h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= From 1698251cb7cd025c91816e4bda498877effd0461 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Feb 2025 15:21:13 +0000 Subject: [PATCH 339/439] build(deps): Bump golangci/golangci-lint-action from 6.3.2 to 6.5.0 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.3.2 to 6.5.0. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/051d91933864810ecd5e2ea2cfd98f6a5bca5347...2226d7cb06a077cd73e56eedd38eecad18e5d837) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_windows.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 75adb91b4..9e06165fc 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -52,6 +52,6 @@ jobs: ;; esac - name: golangci-lint - uses: golangci/golangci-lint-action@051d91933864810ecd5e2ea2cfd98f6a5bca5347 # v6.3.2 + uses: golangci/golangci-lint-action@2226d7cb06a077cd73e56eedd38eecad18e5d837 # v6.5.0 with: version: v1.63.4 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 8f7a4775e..6d36446e2 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -40,7 +40,7 @@ jobs: esac shell: bash - name: golangci-lint - uses: golangci/golangci-lint-action@051d91933864810ecd5e2ea2cfd98f6a5bca5347 # v6.3.2 + uses: golangci/golangci-lint-action@2226d7cb06a077cd73e56eedd38eecad18e5d837 # v6.5.0 with: version: v1.63.4 From 47910f18f8facf3b11db769f5bba89bc2c2969e1 Mon Sep 17 00:00:00 2001 From: 
Matthieu MOREL Date: Wed, 19 Feb 2025 20:44:44 +0100 Subject: [PATCH 340/439] chore: setup golangci-lint config file Signed-off-by: Matthieu MOREL --- .gitattributes | 4 ++++ .golangci.yaml | 16 ++++++++++++++++ 2 files changed, 20 insertions(+) create mode 100644 .gitattributes create mode 100644 .golangci.yaml diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 000000000..a681ce365 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,4 @@ +# ensure that line endings for Windows builds are properly formatted +# see https://github.com/golangci/golangci-lint-action?tab=readme-ov-file#how-to-use +# at "Multiple OS Example" section +*.go text eol=lf diff --git a/.golangci.yaml b/.golangci.yaml new file mode 100644 index 000000000..822693aca --- /dev/null +++ b/.golangci.yaml @@ -0,0 +1,16 @@ +issues: + max-same-issues: 0 +linters: + disable-all: true + enable: # please keep this alphabetized + - errcheck + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - staticcheck + - unused +linters-settings: # please keep this alphabetized + goimports: + local-prefixes: go.etcd.io # Put imports beginning with prefix after 3rd-party packages. 
From a8be651e3b20cdc19346e7e9aba750c7fff1dfc3 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Fri, 21 Feb 2025 10:09:51 +0100 Subject: [PATCH 341/439] bump golangci-lint v1.64.5 Signed-off-by: Mustafa Elbehery --- .github/workflows/tests-template.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 9e06165fc..191b4fbf6 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -54,4 +54,4 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@2226d7cb06a077cd73e56eedd38eecad18e5d837 # v6.5.0 with: - version: v1.63.4 + version: v1.64.5 From 09ce777911e107153d1ae365edb06c6d482daa65 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Fri, 21 Feb 2025 18:56:01 +0100 Subject: [PATCH 342/439] bump golangci-lint v1.64.5-win Signed-off-by: Mustafa Elbehery --- .github/workflows/tests_windows.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 6d36446e2..85a0b0c10 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -42,7 +42,7 @@ jobs: - name: golangci-lint uses: golangci/golangci-lint-action@2226d7cb06a077cd73e56eedd38eecad18e5d837 # v6.5.0 with: - version: v1.63.4 + version: v1.64.5 coverage: needs: ["test-windows"] From d08e835e29d61610957281e9b7a9a8f22c2f63f2 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Mon, 10 Mar 2025 16:25:04 -0700 Subject: [PATCH 343/439] build(deps): Bump golang.org/x/sync from 0.11.0 to 0.12.0 Dependency update from https://github.com/etcd-io/bbolt/pull/923 Signed-off-by: Ivan Valdes --- go.mod | 4 ++-- go.sum | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 0e439ce68..dd32350c8 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module go.etcd.io/bbolt -go 1.23 +go 1.23.0 toolchain go1.23.6 @@ -9,7 +9,7 @@ 
require ( github.com/spf13/pflag v1.0.6 github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 - golang.org/x/sync v0.11.0 + golang.org/x/sync v0.12.0 golang.org/x/sys v0.30.0 ) diff --git a/go.sum b/go.sum index d73f79fee..e1f540035 100644 --- a/go.sum +++ b/go.sum @@ -14,8 +14,8 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= -golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From 55ed00504eeb1327c5bd4ccc219ffd20d08520f0 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Mon, 10 Mar 2025 16:46:55 -0700 Subject: [PATCH 344/439] build(deps): Bump golang.org/x/sys from 0.30.0 to 0.31.0 Dependency update from: https://github.com/etcd-io/bbolt/pull/922 Signed-off-by: Ivan Valdes --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index dd32350c8..630843ea8 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.12.0 - golang.org/x/sys v0.30.0 + golang.org/x/sys v0.31.0 ) require ( diff --git a/go.sum b/go.sum index e1f540035..6a883ee92 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.2.0 
h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 24c5a2be462a6ee4efeb3d1554b7abc8f87c835d Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Sun, 16 Feb 2025 15:59:28 +0100 Subject: [PATCH 345/439] Bump go 1.24.0 Signed-off-by: Mustafa Elbehery --- .go-version | 2 +- go.mod | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.go-version b/.go-version index d8c40e539..53cc1a6f9 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.23.6 +1.24.0 diff --git a/go.mod b/go.mod index 630843ea8..218cdfb48 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,8 @@ module go.etcd.io/bbolt -go 1.23.0 +go 1.24 -toolchain go1.23.6 +toolchain go1.24.0 require ( github.com/spf13/cobra v1.9.1 From dc3e34b3188ad8a0cf2321317d719248d46ab7ad Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 11 Mar 2025 21:23:04 +0100 Subject: [PATCH 346/439] Bump go 1.24.1 Signed-off-by: Mustafa Elbehery --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index 53cc1a6f9..f9e8384bb 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.24.0 +1.24.1 diff --git a/go.mod b/go.mod index 
218cdfb48..71a8fbb2b 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.24 -toolchain go1.24.0 +toolchain go1.24.1 require ( github.com/spf13/cobra v1.9.1 From 67b9853f124290b3d57bffb4831b2e94ba8ec3f6 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Wed, 5 Mar 2025 23:29:18 -0800 Subject: [PATCH 347/439] github/workflows: use ARM64 runners for robustness tests Signed-off-by: Ivan Valdes --- .github/workflows/robustness_test.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/robustness_test.yaml b/.github/workflows/robustness_test.yaml index 03392859d..635d4e825 100644 --- a/.github/workflows/robustness_test.yaml +++ b/.github/workflows/robustness_test.yaml @@ -8,3 +8,9 @@ jobs: count: 10 testTimeout: 30m runs-on: "['ubuntu-latest']" + arm64: + uses: ./.github/workflows/robustness_template.yaml + with: + count: 10 + testTimeout: 30m + runs-on: "['ubuntu-24.04-arm']" From 36a2a0eda48b52f52ff4ab37d7ff7de3e8b16bd7 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Thu, 6 Mar 2025 22:01:14 -0800 Subject: [PATCH 348/439] github/workflows: add ARM64 tests Restore ARM64 test workflows. Remove the conditional to run only ARM tests in the upstream repository, as GitHub ARM runners are now publicly available. 
Signed-off-by: Ivan Valdes --- .github/workflows/tests-template.yml | 2 -- .github/workflows/tests_arm64.yaml | 26 ++++++++++++++++++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/tests_arm64.yaml diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 191b4fbf6..c0b031120 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -15,8 +15,6 @@ permissions: read-all jobs: test-linux: - # this is to prevent arm64 jobs from running at forked projects - if: ${{ github.repository == 'etcd-io/bbolt' || inputs.runs-on == 'ubuntu-latest' }} strategy: fail-fast: false matrix: diff --git a/.github/workflows/tests_arm64.yaml b/.github/workflows/tests_arm64.yaml new file mode 100644 index 000000000..aa6adc5bc --- /dev/null +++ b/.github/workflows/tests_arm64.yaml @@ -0,0 +1,26 @@ +--- +name: Tests ARM64 +permissions: read-all +on: [push, pull_request] +jobs: + test-linux-arm64: + uses: ./.github/workflows/tests-template.yml + test-linux-arm64-race: + uses: ./.github/workflows/tests-template.yml + with: + runs-on: ubuntu-24.04-arm + targets: "['linux-unit-test-4-cpu-race']" + + coverage: + needs: + - test-linux-arm64 + - test-linux-arm64-race + runs-on: ubuntu-24.04-arm + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - id: goversion + run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" + - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 + with: + go-version: ${{ steps.goversion.outputs.goversion }} + - run: make coverage From 99c51dc98c1394ccaefa13527493c377818eb7b1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 17 Mar 2025 14:51:54 +0000 Subject: [PATCH 349/439] build(deps): Bump golangci/golangci-lint-action from 6.5.0 to 6.5.1 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) 
from 6.5.0 to 6.5.1. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/2226d7cb06a077cd73e56eedd38eecad18e5d837...4696ba8babb6127d732c3c6dde519db15edab9ea) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_windows.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index c0b031120..1d35f3ad8 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -50,6 +50,6 @@ jobs: ;; esac - name: golangci-lint - uses: golangci/golangci-lint-action@2226d7cb06a077cd73e56eedd38eecad18e5d837 # v6.5.0 + uses: golangci/golangci-lint-action@4696ba8babb6127d732c3c6dde519db15edab9ea # v6.5.1 with: version: v1.64.5 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 85a0b0c10..0aba8ad7f 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -40,7 +40,7 @@ jobs: esac shell: bash - name: golangci-lint - uses: golangci/golangci-lint-action@2226d7cb06a077cd73e56eedd38eecad18e5d837 # v6.5.0 + uses: golangci/golangci-lint-action@4696ba8babb6127d732c3c6dde519db15edab9ea # v6.5.1 with: version: v1.64.5 From c25038e34c8a1d1139cfa25919ae10e3d5c4a542 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 31 Mar 2025 15:02:32 +0000 Subject: [PATCH 350/439] build(deps): Bump actions/setup-go from 5.3.0 to 5.4.0 Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.3.0 to 5.4.0. 
- [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/f111f3307d8850f501ac008e886eec1fd1932a34...0aaccfd150d50ccaeb58ebd88d36e91967a5f35b) --- updated-dependencies: - dependency-name: actions/setup-go dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/benchmark-template.yaml | 2 +- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/robustness_template.yaml | 2 +- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_amd64.yaml | 2 +- .github/workflows/tests_arm64.yaml | 2 +- .github/workflows/tests_windows.yml | 4 ++-- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/benchmark-template.yaml b/.github/workflows/benchmark-template.yaml index 460b6d5b7..6675fbd99 100644 --- a/.github/workflows/benchmark-template.yaml +++ b/.github/workflows/benchmark-template.yaml @@ -26,7 +26,7 @@ jobs: fetch-depth: 0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 + - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: Run Benchmarks diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index 994c76252..34fce4a8c 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -12,7 +12,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 + - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: | diff --git a/.github/workflows/robustness_template.yaml 
b/.github/workflows/robustness_template.yaml index ba275fda0..c0e6841ef 100644 --- a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 + - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: test-robustness diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index 1d35f3ad8..aaa7134ea 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -24,7 +24,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 + - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml index f0201fc7e..b0c21e7d7 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 + - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage diff --git a/.github/workflows/tests_arm64.yaml b/.github/workflows/tests_arm64.yaml index aa6adc5bc..15f85f7e6 100644 --- a/.github/workflows/tests_arm64.yaml +++ 
b/.github/workflows/tests_arm64.yaml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 + - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 0aba8ad7f..8cccdce6e 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -22,7 +22,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 + - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt @@ -51,7 +51,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@f111f3307d8850f501ac008e886eec1fd1932a34 # v5.3.0 + - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage From b4a445301930bac1397bc9d61e10adeb77dade60 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 7 Apr 2025 14:59:38 +0000 Subject: [PATCH 351/439] build(deps): Bump golang.org/x/sys from 0.31.0 to 0.32.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.31.0 to 0.32.0. 
- [Commits](https://github.com/golang/sys/compare/v0.31.0...v0.32.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-version: 0.32.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 71a8fbb2b..af9e359c6 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.12.0 - golang.org/x/sys v0.31.0 + golang.org/x/sys v0.32.0 ) require ( diff --git a/go.sum b/go.sum index 6a883ee92..cf744161e 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= +golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 65cc569500f5cc354fbeb13608fcd4423eede95a Mon Sep 17 00:00:00 2001 From: joshjms Date: Tue, 8 Apr 2025 14:37:28 +0800 Subject: [PATCH 352/439] bump go 1.24.2 Signed-off-by: joshjms --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index f9e8384bb..e4a973f91 100644 --- a/.go-version +++ 
b/.go-version @@ -1 +1 @@ -1.24.1 +1.24.2 diff --git a/go.mod b/go.mod index 71a8fbb2b..303f82df2 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.24 -toolchain go1.24.1 +toolchain go1.24.2 require ( github.com/spf13/cobra v1.9.1 From 0431f4f7448c9f524a089b463eac583a77847cb4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Apr 2025 07:22:31 +0000 Subject: [PATCH 353/439] build(deps): Bump golang.org/x/sync from 0.12.0 to 0.13.0 Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.12.0 to 0.13.0. - [Commits](https://github.com/golang/sync/compare/v0.12.0...v0.13.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-version: 0.13.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index af9e359c6..62fabb652 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/spf13/pflag v1.0.6 github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 - golang.org/x/sync v0.12.0 + golang.org/x/sync v0.13.0 golang.org/x/sys v0.32.0 ) diff --git a/go.sum b/go.sum index cf744161e..43aea69db 100644 --- a/go.sum +++ b/go.sum @@ -14,8 +14,8 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= +golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= 
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From 8a03cee17b3d33f5730d6281db5b58ff0b3b93d1 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Wed, 9 Apr 2025 10:13:46 +0100 Subject: [PATCH 354/439] Clarify the effect of InitialMmapSize on Windows platform Signed-off-by: Benjamin Wang --- db.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/db.go b/db.go index cf6d7bdcc..4171983bc 100644 --- a/db.go +++ b/db.go @@ -1309,6 +1309,12 @@ type Options struct { // If <=0, the initial map size is 0. // If initialMmapSize is smaller than the previous database size, // it takes no effect. + // + // Note: On Windows, due to platform limitations, the database file size + // will be immediately resized to match `InitialMmapSize` (aligned to page size) + // when the DB is opened. On non-Windows platforms, the file size will grow + // dynamically based on the actual amount of written data, regardless of `InitialMmapSize`. + // Refer to https://github.com/etcd-io/bbolt/issues/378#issuecomment-1378121966. InitialMmapSize int // PageSize overrides the default OS page size. 
From 156953cb2df852280c454fd476fffd4b07302a58 Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Wed, 23 Apr 2025 10:18:33 -0400 Subject: [PATCH 355/439] .github: dump dmesg if failure Signed-off-by: Wei Fu --- .github/workflows/robustness_template.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml index c0e6841ef..0f3ffec48 100644 --- a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -35,3 +35,16 @@ jobs: sudo apt-get install -y dmsetup xfsprogs ROBUSTNESS_TESTFLAGS="--count ${{ inputs.count }} --timeout ${{ inputs.testTimeout }} -failfast" make test-robustness + + - name: Host Status + if: always() + run: | + set -x + mount + df + losetup -l + - name: Kernel Message + if: failure() + run: | + sudo lsmod + sudo dmesg -T -f kern From 50a6d91afbb80b876a14931fbae1c60cc884bd02 Mon Sep 17 00:00:00 2001 From: Wei Fu Date: Wed, 23 Apr 2025 22:12:33 -0400 Subject: [PATCH 356/439] tests: deflaky dmflakey.TearDown issue REF: #947 Signed-off-by: Wei Fu --- tests/dmflakey/dmflakey.go | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/tests/dmflakey/dmflakey.go b/tests/dmflakey/dmflakey.go index 88c3c2d48..593b6ff8b 100644 --- a/tests/dmflakey/dmflakey.go +++ b/tests/dmflakey/dmflakey.go @@ -261,11 +261,28 @@ func (f *flakey) ErrorWrites(opts ...FeatOpt) error { // Teardown releases the flakey device. func (f *flakey) Teardown() error { - if err := deleteFlakeyDevice(f.flakeyDevice); err != nil { - if !strings.Contains(err.Error(), "No such device or address") { - return err + // FIXME(XXX): Even though we umount device successfully, it's still + // possible to run into `Device or resource busy` issue. It's easy to + // reproduce it in slow storage or 2-4 cores ARM64 host with xfs. We + // should retry it to fix transisent issue. 
+ var derr error + for i := 0; i < 10; i++ { + derr = deleteFlakeyDevice(f.flakeyDevice) + if derr != nil { + if strings.Contains(derr.Error(), "Device or resource busy") { + time.Sleep(1 * time.Second) + continue + } + if strings.Contains(derr.Error(), "No such device or address") { + derr = nil + } } + break + } + if derr != nil { + return derr } + if err := detachLoopDevice(f.loopDevice); err != nil { if !errors.Is(err, unix.ENXIO) { return err From 88d2b54695213e3788d66f0a9afde3111a432a19 Mon Sep 17 00:00:00 2001 From: Matthew Sainsbury Date: Fri, 25 Apr 2025 10:46:51 -0700 Subject: [PATCH 357/439] add support for data file size limit (#929) * add support for data file size limit closes #928 Signed-off-by: Matthew Sainsbury * respond to PR feedback Signed-off-by: Matthew Sainsbury --------- Signed-off-by: Matthew Sainsbury --- bolt_windows.go | 13 +++ db.go | 25 ++++- db_test.go | 174 ++++++++++++++++++++++++++++++++++ errors/errors.go | 3 + internal/btesting/btesting.go | 13 ++- 5 files changed, 223 insertions(+), 5 deletions(-) diff --git a/bolt_windows.go b/bolt_windows.go index ec21ecb85..bba0f8809 100644 --- a/bolt_windows.go +++ b/bolt_windows.go @@ -67,6 +67,19 @@ func mmap(db *DB, sz int) error { var sizelo, sizehi uint32 if !db.readOnly { + if db.MaxSize > 0 && sz > db.MaxSize { + // The max size only limits future writes; however, we don’t block opening + // and mapping the database if it already exceeds the limit. + fileSize, err := db.fileSize() + if err != nil { + return fmt.Errorf("could not check existing db file size: %s", err) + } + + if sz > fileSize { + return errors.ErrMaxSizeReached + } + } + // Truncate the database to the size of the mmap. if err := db.file.Truncate(int64(sz)); err != nil { return fmt.Errorf("truncate: %s", err) diff --git a/db.go b/db.go index 4171983bc..9e379ac34 100644 --- a/db.go +++ b/db.go @@ -110,6 +110,12 @@ type DB struct { // of truncate() and fsync() when growing the data file. 
AllocSize int + // MaxSize is the maximum size (in bytes) allowed for the data file. + // If a caller's attempt to add data results in the need to grow + // the data file, an error will be returned and the data file will not grow. + // <=0 means no limit. + MaxSize int + // Mlock locks database file in memory when set to true. // It prevents major page faults, however used memory can't be reclaimed. // @@ -191,6 +197,7 @@ func Open(path string, mode os.FileMode, options *Options) (db *DB, err error) { db.PreLoadFreelist = options.PreLoadFreelist db.FreelistType = options.FreelistType db.Mlock = options.Mlock + db.MaxSize = options.MaxSize // Set default values for later DB operations. db.MaxBatchSize = common.DefaultMaxBatchSize @@ -1166,7 +1173,11 @@ func (db *DB) allocate(txid common.Txid, count int) (*common.Page, error) { var minsz = int((p.Id()+common.Pgid(count))+1) * db.pageSize if minsz >= db.datasz { if err := db.mmap(minsz); err != nil { - return nil, fmt.Errorf("mmap allocate error: %s", err) + if err == berrors.ErrMaxSizeReached { + return nil, err + } else { + return nil, fmt.Errorf("mmap allocate error: %s", err) + } } } @@ -1198,6 +1209,11 @@ func (db *DB) grow(sz int) error { sz += db.AllocSize } + if !db.readOnly && db.MaxSize > 0 && sz > db.MaxSize { + lg.Errorf("[GOOS: %s, GOARCH: %s] maximum db size reached, size: %d, db.MaxSize: %d", runtime.GOOS, runtime.GOARCH, sz, db.MaxSize) + return berrors.ErrMaxSizeReached + } + // Truncate and fsync to ensure file size metadata is flushed. // https://github.com/boltdb/bolt/issues/284 if !db.NoGrowSync && !db.readOnly { @@ -1320,6 +1336,9 @@ type Options struct { // PageSize overrides the default OS page size. PageSize int + // MaxSize sets the maximum size of the data file. <=0 means no maximum. + MaxSize int + // NoSync sets the initial value of DB.NoSync. 
Normally this can just be // set directly on the DB itself when returned from Open(), but this option // is useful in APIs which expose Options but not the underlying DB. @@ -1343,8 +1362,8 @@ func (o *Options) String() string { return "{}" } - return fmt.Sprintf("{Timeout: %s, NoGrowSync: %t, NoFreelistSync: %t, PreLoadFreelist: %t, FreelistType: %s, ReadOnly: %t, MmapFlags: %x, InitialMmapSize: %d, PageSize: %d, NoSync: %t, OpenFile: %p, Mlock: %t, Logger: %p}", - o.Timeout, o.NoGrowSync, o.NoFreelistSync, o.PreLoadFreelist, o.FreelistType, o.ReadOnly, o.MmapFlags, o.InitialMmapSize, o.PageSize, o.NoSync, o.OpenFile, o.Mlock, o.Logger) + return fmt.Sprintf("{Timeout: %s, NoGrowSync: %t, NoFreelistSync: %t, PreLoadFreelist: %t, FreelistType: %s, ReadOnly: %t, MmapFlags: %x, InitialMmapSize: %d, PageSize: %d, MaxSize: %d, NoSync: %t, OpenFile: %p, Mlock: %t, Logger: %p}", + o.Timeout, o.NoGrowSync, o.NoFreelistSync, o.PreLoadFreelist, o.FreelistType, o.ReadOnly, o.MmapFlags, o.InitialMmapSize, o.PageSize, o.MaxSize, o.NoSync, o.OpenFile, o.Mlock, o.Logger) } diff --git a/db_test.go b/db_test.go index 757b896e8..53d877e11 100644 --- a/db_test.go +++ b/db_test.go @@ -11,6 +11,7 @@ import ( "os" "path/filepath" "reflect" + "runtime" "strings" "sync" "testing" @@ -1373,6 +1374,179 @@ func TestDBUnmap(t *testing.T) { db.DB = nil } +// Convenience function for inserting a bunch of keys with 1000 byte values +func fillDBWithKeys(db *btesting.DB, numKeys int) error { + return db.Fill([]byte("data"), 1, numKeys, + func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) }, + func(tx int, k int) []byte { return make([]byte, 1000) }, + ) +} + +// Creates a new database size, forces a specific allocation size jump, and fills it with the number of keys specified +func createFilledDB(t testing.TB, o *bolt.Options, allocSize int, numKeys int) *btesting.DB { + // Open a data file. 
+ db := btesting.MustCreateDBWithOption(t, o) + db.AllocSize = allocSize + + // Insert a reasonable amount of data below the max size. + err := db.Fill([]byte("data"), 1, numKeys, + func(tx int, k int) []byte { return []byte(fmt.Sprintf("%04d", k)) }, + func(tx int, k int) []byte { return make([]byte, 1000) }, + ) + if err != nil { + t.Fatal(err) + } + return db +} + +// Ensure that a database cannot exceed its maximum size +// https://github.com/etcd-io/bbolt/issues/928 +func TestDB_MaxSizeNotExceeded(t *testing.T) { + testCases := []struct { + name string + options bolt.Options + }{ + { + name: "Standard case", + options: bolt.Options{ + MaxSize: 5 * 1024 * 1024, // 5 MiB + PageSize: 4096, + }, + }, + { + name: "NoGrowSync", + options: bolt.Options{ + MaxSize: 5 * 1024 * 1024, // 5 MiB + PageSize: 4096, + NoGrowSync: true, + }, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + db := createFilledDB(t, + &testCase.options, + 4*1024*1024, // adjust allocation jumps to 4 MiB + 2000, + ) + + path := db.Path() + + // The data file should be 4 MiB now (expanded once from zero). + // It should have space for roughly 16 more entries before trying to grow + // Keep inserting until grow is required + err := fillDBWithKeys(db, 100) + assert.ErrorIs(t, err, berrors.ErrMaxSizeReached) + + newSz := fileSize(path) + require.Greater(t, newSz, int64(0), "unexpected new file size: %d", newSz) + assert.LessOrEqual(t, newSz, int64(db.MaxSize), "The size of the data file should not exceed db.MaxSize") + + err = db.Close() + require.NoError(t, err, "Closing the re-opened database should succeed") + }) + } +} + +// Ensure that opening a database that is beyond the maximum size succeeds +// The maximum size should only apply to growing the data file +// https://github.com/etcd-io/bbolt/issues/928 +func TestDB_MaxSizeExceededCanOpen(t *testing.T) { + // Open a data file. 
+ db := createFilledDB(t, nil, 4*1024*1024, 2000) // adjust allocation jumps to 4 MiB, fill with 2000, 1KB keys + path := db.Path() + + // Insert a reasonable amount of data below the max size. + err := fillDBWithKeys(db, 2000) + require.NoError(t, err, "fillDbWithKeys should succeed") + + err = db.Close() + require.NoError(t, err, "Close should succeed") + + // The data file should be 4 MiB now (expanded once from zero). + minimumSizeForTest := int64(1024 * 1024) + newSz := fileSize(path) + require.GreaterOrEqual(t, newSz, minimumSizeForTest, "unexpected new file size: %d. Expected at least %d", newSz, minimumSizeForTest) + + // Now try to re-open the database with an extremely small max size + t.Logf("Reopening bbolt DB at: %s", path) + db, err = btesting.OpenDBWithOption(t, path, &bolt.Options{ + MaxSize: 1, + }) + assert.NoError(t, err, "Should be able to open database bigger than MaxSize") + + err = db.Close() + require.NoError(t, err, "Closing the re-opened database should succeed") +} + +// Ensure that opening a database that is beyond the maximum size succeeds, +// even when InitialMmapSize is above the limit (mmaps should not affect file size) +// This test exists for platforms where Truncate should not be called during mmap +// https://github.com/etcd-io/bbolt/issues/928 +func TestDB_MaxSizeExceededCanOpenWithHighMmap(t *testing.T) { + if runtime.GOOS == "windows" { + // In Windows, the file must be expanded to the mmap initial size, + // so this test doesn't run in Windows. + t.SkipNow() + } + + // Open a data file. + db := createFilledDB(t, nil, 4*1024*1024, 2000) // adjust allocation jumps to 4 MiB, fill with 2000 1KB entries + path := db.Path() + + err := db.Close() + require.NoError(t, err, "Close should succeed") + + // The data file should be 4 MiB now (expanded once from zero). + minimumSizeForTest := int64(1024 * 1024) + newSz := fileSize(path) + require.GreaterOrEqual(t, newSz, minimumSizeForTest, "unexpected new file size: %d. 
Expected at least %d", newSz, minimumSizeForTest) + + // Now try to re-open the database with an extremely small max size + t.Logf("Reopening bbolt DB at: %s", path) + db, err = btesting.OpenDBWithOption(t, path, &bolt.Options{ + MaxSize: 1, + InitialMmapSize: int(minimumSizeForTest) * 2, + }) + assert.NoError(t, err, "Should be able to open database bigger than MaxSize when InitialMmapSize set high") + + err = db.Close() + require.NoError(t, err, "Closing the re-opened database should succeed") +} + +// Ensure that when InitialMmapSize is above the limit, opening a database +// that is beyond the maximum size fails in Windows. +// In Windows, the file must be expanded to the mmap initial size. +// https://github.com/etcd-io/bbolt/issues/928 +func TestDB_MaxSizeExceededDoesNotGrow(t *testing.T) { + if runtime.GOOS != "windows" { + // This test is only relevant on Windows + t.SkipNow() + } + + // Open a data file. + db := createFilledDB(t, nil, 4*1024*1024, 2000) // adjust allocation jumps to 4 MiB, fill with 2000 1KB entries + path := db.Path() + + err := db.Close() + require.NoError(t, err, "Close should succeed") + + // The data file should be 4 MiB now (expanded once from zero). + minimumSizeForTest := int64(1024 * 1024) + newSz := fileSize(path) + assert.GreaterOrEqual(t, newSz, minimumSizeForTest, "unexpected new file size: %d. Expected at least %d", newSz, minimumSizeForTest) + + // Now try to re-open the database with an extremely small max size and + // an initial mmap size to be greater than the actual file size, forcing an illegal grow on open + t.Logf("Reopening bbolt DB at: %s", path) + _, err = btesting.OpenDBWithOption(t, path, &bolt.Options{ + MaxSize: 1, + InitialMmapSize: int(newSz) * 2, + }) + assert.Error(t, err, "Opening the DB with InitialMmapSize > MaxSize should cause an error on Windows") +} + func ExampleDB_Update() { // Open the database. 
db, err := bolt.Open(tempfile(), 0600, nil) diff --git a/errors/errors.go b/errors/errors.go index c115289e5..dbebd6330 100644 --- a/errors/errors.go +++ b/errors/errors.go @@ -69,6 +69,9 @@ var ( // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. ErrValueTooLarge = errors.New("value too large") + // ErrMaxSizeReached is returned when the configured maximum size of the data file is reached. + ErrMaxSizeReached = errors.New("database reached maximum size") + // ErrIncompatibleValue is returned when trying to create or delete a bucket // on an existing non-bucket key or when trying to create or delete a // non-bucket key on an existing bucket key. diff --git a/internal/btesting/btesting.go b/internal/btesting/btesting.go index c83369f09..3b3d23660 100644 --- a/internal/btesting/btesting.go +++ b/internal/btesting/btesting.go @@ -44,6 +44,13 @@ func MustCreateDBWithOption(t testing.TB, o *bolt.Options) *DB { } func MustOpenDBWithOption(t testing.TB, f string, o *bolt.Options) *DB { + db, err := OpenDBWithOption(t, f, o) + require.NoError(t, err) + require.NotNil(t, db) + return db +} + +func OpenDBWithOption(t testing.TB, f string, o *bolt.Options) (*DB, error) { t.Logf("Opening bbolt DB at: %s", f) if o == nil { o = bolt.DefaultOptions @@ -57,7 +64,9 @@ func MustOpenDBWithOption(t testing.TB, f string, o *bolt.Options) *DB { o.FreelistType = freelistType db, err := bolt.Open(f, 0600, o) - require.NoError(t, err) + if err != nil { + return nil, err + } resDB := &DB{ DB: db, f: f, @@ -66,7 +75,7 @@ func MustOpenDBWithOption(t testing.TB, f string, o *bolt.Options) *DB { } resDB.strictModeEnabledDefault() t.Cleanup(resDB.PostTestCleanup) - return resDB + return resDB, nil } func (db *DB) PostTestCleanup() { From 78840345814346f1fae72c63e2d49ad27d132022 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 May 2025 15:07:46 +0000 Subject: [PATCH 358/439] build(deps): Bump 
golang.org/x/sync from 0.13.0 to 0.14.0 Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.13.0 to 0.14.0. - [Commits](https://github.com/golang/sync/compare/v0.13.0...v0.14.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-version: 0.14.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8c64f438a..3142229e5 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/spf13/pflag v1.0.6 github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 - golang.org/x/sync v0.13.0 + golang.org/x/sync v0.14.0 golang.org/x/sys v0.32.0 ) diff --git a/go.sum b/go.sum index 43aea69db..8b43a1484 100644 --- a/go.sum +++ b/go.sum @@ -14,8 +14,8 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= -golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610= -golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From 675a3be11525d7faba36ef81ab297a70a24ceb39 Mon Sep 17 00:00:00 2001 From: Matthieu MOREL Date: Mon, 5 May 2025 17:41:48 +0000 Subject: [PATCH 359/439] chore: bump golangci-lint to v2.1.6 Signed-off-by: Matthieu MOREL --- 
.github/workflows/tests-template.yml | 4 +-- .github/workflows/tests_windows.yml | 4 +-- .golangci.yaml | 37 ++++++++++++++++++++++------ 3 files changed, 34 insertions(+), 11 deletions(-) diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index aaa7134ea..a425447b3 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -50,6 +50,6 @@ jobs: ;; esac - name: golangci-lint - uses: golangci/golangci-lint-action@4696ba8babb6127d732c3c6dde519db15edab9ea # v6.5.1 + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 with: - version: v1.64.5 + version: v2.1.6 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 8cccdce6e..f2173dbf0 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -40,9 +40,9 @@ jobs: esac shell: bash - name: golangci-lint - uses: golangci/golangci-lint-action@4696ba8babb6127d732c3c6dde519db15edab9ea # v6.5.1 + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 with: - version: v1.64.5 + version: v2.1.6 coverage: needs: ["test-windows"] diff --git a/.golangci.yaml b/.golangci.yaml index 822693aca..c066e249e 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -1,16 +1,39 @@ +formatters: + enable: + - gofmt + - goimports + settings: # please keep this alphabetized + goimports: + local-prefixes: + - go.etcd.io # Put imports beginning with prefix after 3rd-party packages. issues: max-same-issues: 0 linters: - disable-all: true + default: none enable: # please keep this alphabetized - errcheck - - gofmt - - goimports - - gosimple - govet - ineffassign - staticcheck - unused -linters-settings: # please keep this alphabetized - goimports: - local-prefixes: go.etcd.io # Put imports beginning with prefix after 3rd-party packages. 
+ exclusions: + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + settings: # please keep this alphabetized + staticcheck: + checks: + - all + - -QF1003 # Convert if/else-if chain to tagged switch + - -QF1004 # Use strings.ReplaceAll instead of strings.Replace with n == -1 + - -QF1010 # Convert slice of bytes to string when printing it + - -QF1011 # Omit redundant type from variable declaration + - -ST1003 # Poorly chosen identifier + - -ST1005 # Incorrectly formatted error string + - -ST1006 # Poorly chosen receiver name + - -ST1012 # Poorly chosen name for error variable + - -ST1016 # Use consistent method receiver names + - -ST1023 # Redundant type in variable declaration +version: "2" From 028d0897b3d561d4f8cdf9819b9f387488fe905a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 5 May 2025 18:26:46 +0000 Subject: [PATCH 360/439] build(deps): Bump golang.org/x/sys from 0.32.0 to 0.33.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.32.0 to 0.33.0. - [Commits](https://github.com/golang/sys/compare/v0.32.0...v0.33.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-version: 0.33.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 3142229e5..157bd0789 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.14.0 - golang.org/x/sys v0.32.0 + golang.org/x/sys v0.33.0 ) require ( diff --git a/go.sum b/go.sum index 8b43a1484..de26c3eb9 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 87e2705e388870b5c4ffb96a511445756e425c05 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Wed, 7 May 2025 14:28:40 +0200 Subject: [PATCH 361/439] Bump Go to 1.24.3 Signed-off-by: Mustafa Elbehery --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index e4a973f91..ae96cc731 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.24.2 +1.24.3 diff --git a/go.mod b/go.mod index 157bd0789..bde761ec7 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.24 -toolchain go1.24.2 +toolchain go1.24.3 
require ( github.com/spf13/cobra v1.9.1 From 96234b3415f8e8bcb55beab05d6ee591d1fe2f64 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 May 2025 14:16:52 +0000 Subject: [PATCH 362/439] build(deps): Bump actions/setup-go from 5.4.0 to 5.5.0 Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.4.0 to 5.5.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/0aaccfd150d50ccaeb58ebd88d36e91967a5f35b...d35c59abb061a4a6fb18e82ac0862c26744d6ab5) --- updated-dependencies: - dependency-name: actions/setup-go dependency-version: 5.5.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/benchmark-template.yaml | 2 +- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/robustness_template.yaml | 2 +- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_amd64.yaml | 2 +- .github/workflows/tests_arm64.yaml | 2 +- .github/workflows/tests_windows.yml | 4 ++-- 7 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/benchmark-template.yaml b/.github/workflows/benchmark-template.yaml index 6675fbd99..057286be1 100644 --- a/.github/workflows/benchmark-template.yaml +++ b/.github/workflows/benchmark-template.yaml @@ -26,7 +26,7 @@ jobs: fetch-depth: 0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 + - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: Run Benchmarks diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index 34fce4a8c..ce626ca45 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -12,7 +12,7 @@ jobs: - uses: 
actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 + - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: | diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml index 0f3ffec48..befe7dfe2 100644 --- a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 + - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: test-robustness diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index a425447b3..ad92c8c70 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -24,7 +24,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 + - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml index b0c21e7d7..7372dd7b2 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> 
"$GITHUB_OUTPUT" - - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 + - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage diff --git a/.github/workflows/tests_arm64.yaml b/.github/workflows/tests_arm64.yaml index 15f85f7e6..c89b322fc 100644 --- a/.github/workflows/tests_arm64.yaml +++ b/.github/workflows/tests_arm64.yaml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 + - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index f2173dbf0..54546e146 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -22,7 +22,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 + - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt @@ -51,7 +51,7 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5.4.0 + - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make coverage From bf4a727b926666650ee58bf632ee12df6ec1f522 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Mon, 19 May 2025 
21:10:53 +0200 Subject: [PATCH 363/439] fix QF1004, QF1011, ST1006, ST1016 and ST1023 issues Signed-off-by: Mustafa Elbehery Co-authored-by: Matthieu MOREL --- .golangci.yaml | 5 ----- cmd/bbolt/main.go | 6 +++--- internal/common/page.go | 10 +++++----- tests/robustness/powerfailure_test.go | 2 +- tx_check.go | 4 ++-- 5 files changed, 11 insertions(+), 16 deletions(-) diff --git a/.golangci.yaml b/.golangci.yaml index c066e249e..68fc13184 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -27,13 +27,8 @@ linters: checks: - all - -QF1003 # Convert if/else-if chain to tagged switch - - -QF1004 # Use strings.ReplaceAll instead of strings.Replace with n == -1 - -QF1010 # Convert slice of bytes to string when printing it - - -QF1011 # Omit redundant type from variable declaration - -ST1003 # Poorly chosen identifier - -ST1005 # Incorrectly formatted error string - - -ST1006 # Poorly chosen receiver name - -ST1012 # Poorly chosen name for error variable - - -ST1016 # Use consistent method receiver names - - -ST1023 # Redundant type in variable declaration version: "2" diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index a9256a699..37324369d 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -1768,11 +1768,11 @@ Additional options include: type cmdKvStringer struct{} -func (_ cmdKvStringer) KeyToString(key []byte) string { +func (cmdKvStringer) KeyToString(key []byte) string { return bytesToAsciiOrHex(key) } -func (_ cmdKvStringer) ValueToString(value []byte) string { +func (cmdKvStringer) ValueToString(value []byte) string { return bytesToAsciiOrHex(value) } @@ -1781,7 +1781,7 @@ func CmdKvStringer() bolt.KVStringer { } func findLastBucket(tx *bolt.Tx, bucketNames []string) (*bolt.Bucket, error) { - var lastbucket *bolt.Bucket = tx.Bucket([]byte(bucketNames[0])) + lastbucket := tx.Bucket([]byte(bucketNames[0])) if lastbucket == nil { return nil, berrors.ErrBucketNotFound } diff --git a/internal/common/page.go b/internal/common/page.go index 
ee808967c..4453160bb 100644 --- a/internal/common/page.go +++ b/internal/common/page.go @@ -335,16 +335,16 @@ func (s Pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s Pgids) Less(i, j int) bool { return s[i] < s[j] } // Merge returns the sorted union of a and b. -func (a Pgids) Merge(b Pgids) Pgids { +func (s Pgids) Merge(b Pgids) Pgids { // Return the opposite slice if one is nil. - if len(a) == 0 { + if len(s) == 0 { return b } if len(b) == 0 { - return a + return s } - merged := make(Pgids, len(a)+len(b)) - Mergepgids(merged, a, b) + merged := make(Pgids, len(s)+len(b)) + Mergepgids(merged, s, b) return merged } diff --git a/tests/robustness/powerfailure_test.go b/tests/robustness/powerfailure_test.go index d8c497e0a..4d960b325 100644 --- a/tests/robustness/powerfailure_test.go +++ b/tests/robustness/powerfailure_test.go @@ -140,7 +140,7 @@ func TestRestartFromPowerFailureXFS(t *testing.T) { } func doPowerFailure(t *testing.T, du time.Duration, fsType dmflakey.FSType, mkfsOpt string, fsMountOpt string, useFailpoint bool) { - flakey := initFlakeyDevice(t, strings.Replace(t.Name(), "/", "_", -1), fsType, mkfsOpt, fsMountOpt) + flakey := initFlakeyDevice(t, strings.ReplaceAll(t.Name(), "/", "_"), fsType, mkfsOpt, fsMountOpt) root := flakey.RootFS() dbPath := filepath.Join(root, "boltdb") diff --git a/tx_check.go b/tx_check.go index c3ecbb975..59edf3573 100644 --- a/tx_check.go +++ b/tx_check.go @@ -281,10 +281,10 @@ func HexKVStringer() KVStringer { type hexKvStringer struct{} -func (_ hexKvStringer) KeyToString(key []byte) string { +func (hexKvStringer) KeyToString(key []byte) string { return hex.EncodeToString(key) } -func (_ hexKvStringer) ValueToString(value []byte) string { +func (hexKvStringer) ValueToString(value []byte) string { return hex.EncodeToString(value) } From 2064f0547321be6326b225c641b19f37c152a681 Mon Sep 17 00:00:00 2001 From: wangxiang Date: Thu, 22 May 2025 13:47:53 +0800 Subject: [PATCH 364/439] chore: use %w to wrap returned err by 
unix funcs Signed-off-by: wangxiang --- db.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/db.go b/db.go index 9e379ac34..43b3f4f9a 100644 --- a/db.go +++ b/db.go @@ -552,7 +552,7 @@ func (db *DB) munmap() error { // return errors.New(unmapError) if err := munmap(db); err != nil { db.Logger().Errorf("[GOOS: %s, GOARCH: %s] munmap failed, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, db.datasz, err) - return fmt.Errorf("unmap error: %v", err.Error()) + return fmt.Errorf("unmap error: %w", err) } return nil @@ -600,7 +600,7 @@ func (db *DB) munlock(fileSize int) error { // return errors.New(munlockError) if err := munlock(db, fileSize); err != nil { db.Logger().Errorf("[GOOS: %s, GOARCH: %s] munlock failed, fileSize: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, fileSize, db.datasz, err) - return fmt.Errorf("munlock error: %v", err.Error()) + return fmt.Errorf("munlock error: %w", err) } return nil } @@ -610,7 +610,7 @@ func (db *DB) mlock(fileSize int) error { // return errors.New(mlockError) if err := mlock(db, fileSize); err != nil { db.Logger().Errorf("[GOOS: %s, GOARCH: %s] mlock failed, fileSize: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, fileSize, db.datasz, err) - return fmt.Errorf("mlock error: %v", err.Error()) + return fmt.Errorf("mlock error: %w", err) } return nil } From 6ab5f84f9371733d542280bd505900f14fcc7886 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Wed, 12 Feb 2025 21:55:37 +0100 Subject: [PATCH 365/439] release: add release script Signed-off-by: Mustafa Elbehery --- scripts/release.sh | 69 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) create mode 100755 scripts/release.sh diff --git a/scripts/release.sh b/scripts/release.sh new file mode 100755 index 000000000..723c51242 --- /dev/null +++ b/scripts/release.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +set -o errexit +set -o nounset +set -o pipefail + +# === Function Definitions === 
+function get_gpg_key { + local git_email + local key_id + + git_email=$(git config --get user.email) + key_id=$(gpg --list-keys --with-colons "${git_email}" | awk -F: '/^pub:/ { print $5 }') + if [[ -z "${key_id}" ]]; then + echo "Failed to load gpg key. Is gpg set up correctly for etcd releases?" + return 2 + fi + echo "${key_id}" +} + +# === Main Script Logic === +echo "enter release string according to semantic versioning (e.g. v1.2.3)." +read -r INPUT +if [[ ! "${INPUT}" =~ ^v[0-9]+.[0-9]+.[0-9]+ ]]; then + echo "Expected 'version' param of the form 'v..' but got '${INPUT}'" + exit 1 +fi + +VERSION=${INPUT#v} +RELEASE_VERSION="${VERSION}" +MINOR_VERSION=$(echo "${VERSION}" | cut -d. -f 1-2) + +REPOSITORY=${REPOSITORY:-"git@github.com:etcd-io/bbolt.git"} +REMOTE="${REMOTE:-"origin"}" + +remote_tag_exists=$(git ls-remote --tags "${REPOSITORY}" | grep -c "${INPUT}" || true) +if [ "${remote_tag_exists}" -gt 0 ]; then + echo "Release version tag exists on remote." + exit 1 +fi + +# ensuring the minor-version is identical. +source_version=$(grep -E "\s+Version\s*=" ./version/version.go | sed -e "s/.*\"\(.*\)\".*/\1/g") +if [[ "${source_version}" != "${RELEASE_VERSION}" ]]; then + source_minor_version=$(echo "${source_version}" | cut -d. -f 1-2) + if [[ "${source_minor_version}" != "${MINOR_VERSION}" ]]; then + echo "Wrong bbolt minor version in version.go. Expected ${MINOR_VERSION} but got ${source_minor_version}. Aborting." + exit 1 + fi +fi + +# bump 'version.go'. +echo "Updating version from '${source_version}' to '${RELEASE_VERSION}' in 'version.go'" +sed -i "s/${source_version}/${RELEASE_VERSION}/g" ./version/version.go + +# push 'version.go' to remote. +echo "committing 'version.go'" +git add ./version/version.go +git commit -s -m "Update version to ${VERSION}" +git push "${REMOTE}" "${INPUT}" +echo "'version.go' has been committed to remote repo." + +# create tag and push to remote. 
+echo "Creating new tag for '${INPUT}'" +key_id=$(get_gpg_key) || return 2 +git tag --local-user "${key_id}" --sign "${INPUT}" --message "${INPUT}" +git push "${REMOTE}" "${INPUT}" +echo "Tag '${INPUT}' has been created and pushed to remote repo." +echo "SUCCESS" From 624e8a28c39ddb20ee5d787fbaf2192fcc0128ae Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 5 Jun 2025 14:33:23 +0100 Subject: [PATCH 366/439] Add an unit test to reproduce the panick caused by huge value size $ go test -run TestDB_HugeValue -v seed: 44000 quick settings: count=5, items=1000, ksize=1024, vsize=1024 === RUN TestDB_HugeValue --- FAIL: TestDB_HugeValue (0.06s) panic: runtime error: slice bounds out of range [::268435459] with length 268435455 [recovered] panic: runtime error: slice bounds out of range [::268435459] with length 268435455 goroutine 7 [running]: testing.tRunner.func1.2({0x1031ae420, 0x14000016090}) /Users/wachao/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.24.3.darwin-arm64/src/testing/testing.go:1734 +0x1ac testing.tRunner.func1() /Users/wachao/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.24.3.darwin-arm64/src/testing/testing.go:1737 +0x334 panic({0x1031ae420?, 0x14000016090?}) /Users/wachao/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.24.3.darwin-arm64/src/runtime/panic.go:792 +0x124 go.etcd.io/bbolt/internal/common.UnsafeByteSlice(...) /Users/wachao/go/src/github.com/ahrtr/bbolt/internal/common/unsafe.go:26 go.etcd.io/bbolt/internal/common.WriteInodeToPage({0x14000104f80?, 0x1, 0x4?}, 0x14010210000) /Users/wachao/go/src/github.com/ahrtr/bbolt/internal/common/inode.go:81 +0x288 go.etcd.io/bbolt.(*node).write(0x1400017a000?, 0x4001?) 
/Users/wachao/go/src/github.com/ahrtr/bbolt/node.go:199 +0xa0 go.etcd.io/bbolt.(*node).spill(0x1400014e0e0) /Users/wachao/go/src/github.com/ahrtr/bbolt/node.go:334 +0x1dc go.etcd.io/bbolt.(*Bucket).spill(0x14000104f40) /Users/wachao/go/src/github.com/ahrtr/bbolt/bucket.go:786 +0x278 go.etcd.io/bbolt.(*Bucket).spill(0x1400017a018) /Users/wachao/go/src/github.com/ahrtr/bbolt/bucket.go:753 +0xc0 go.etcd.io/bbolt.(*Tx).Commit(0x1400017a000) /Users/wachao/go/src/github.com/ahrtr/bbolt/tx.go:204 +0x260 go.etcd.io/bbolt.(*DB).Update(0x1031cdf90?, 0x1400007cf28) /Users/wachao/go/src/github.com/ahrtr/bbolt/db.go:922 +0xc4 go.etcd.io/bbolt_test.TestDB_HugeValue(0x14000003c00) /Users/wachao/go/src/github.com/ahrtr/bbolt/db_test.go:1560 +0x110 testing.tRunner(0x14000003c00, 0x1031c9ef0) /Users/wachao/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.24.3.darwin-arm64/src/testing/testing.go:1792 +0xe4 created by testing.(*T).Run in goroutine 1 /Users/wachao/go/pkg/mod/golang.org/toolchain@v0.0.1-go1.24.3.darwin-arm64/src/testing/testing.go:1851 +0x374 exit status 2 FAIL go.etcd.io/bbolt 0.285s Signed-off-by: Benjamin Wang --- db_test.go | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/db_test.go b/db_test.go index 53d877e11..c6d604488 100644 --- a/db_test.go +++ b/db_test.go @@ -1547,6 +1547,27 @@ func TestDB_MaxSizeExceededDoesNotGrow(t *testing.T) { assert.Error(t, err, "Opening the DB with InitialMmapSize > MaxSize should cause an error on Windows") } +func TestDB_HugeValue(t *testing.T) { + dbPath := filepath.Join(t.TempDir(), "db") + db, err := bolt.Open(dbPath, 0600, nil) + require.NoError(t, err) + defer func() { + require.NoError(t, db.Close()) + }() + + data := make([]byte, 0xFFFFFFF+1) + + _ = db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("data")) + require.NoError(t, err) + + err = b.Put([]byte("key"), data) + require.NoError(t, err) + + return nil + }) +} + func ExampleDB_Update() { // Open the database. 
db, err := bolt.Open(tempfile(), 0600, nil) From 8f3c534f9a634679ff9b5e079afcc7a58420c103 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 5 Jun 2025 15:11:34 +0100 Subject: [PATCH 367/439] Move MaxMapSize and MaxAllocSize into internal/common Signed-off-by: Benjamin Wang --- bolt_386.go | 7 ------- bolt_amd64.go | 7 ------- bolt_arm.go | 7 ------- bolt_arm64.go | 9 --------- bolt_loong64.go | 9 --------- bolt_mips64x.go | 9 --------- bolt_mipsx.go | 9 --------- bolt_ppc.go | 9 --------- bolt_ppc64.go | 9 --------- bolt_ppc64le.go | 9 --------- bolt_riscv64.go | 9 --------- bolt_s390x.go | 9 --------- bolt_unix.go | 3 ++- bolt_windows.go | 3 ++- db.go | 8 ++++---- internal/common/bolt_386.go | 7 +++++++ internal/common/bolt_amd64.go | 7 +++++++ internal/common/bolt_arm.go | 7 +++++++ internal/common/bolt_arm64.go | 9 +++++++++ internal/common/bolt_loong64.go | 9 +++++++++ internal/common/bolt_mips64x.go | 9 +++++++++ internal/common/bolt_mipsx.go | 9 +++++++++ internal/common/bolt_ppc.go | 9 +++++++++ internal/common/bolt_ppc64.go | 9 +++++++++ internal/common/bolt_ppc64le.go | 9 +++++++++ internal/common/bolt_riscv64.go | 9 +++++++++ internal/common/bolt_s390x.go | 9 +++++++++ internal/common/types.go | 3 --- internal/common/unsafe.go | 2 +- tx.go | 4 ++-- 30 files changed, 113 insertions(+), 114 deletions(-) delete mode 100644 bolt_386.go delete mode 100644 bolt_amd64.go delete mode 100644 bolt_arm.go delete mode 100644 bolt_arm64.go delete mode 100644 bolt_loong64.go delete mode 100644 bolt_mips64x.go delete mode 100644 bolt_mipsx.go delete mode 100644 bolt_ppc.go delete mode 100644 bolt_ppc64.go delete mode 100644 bolt_ppc64le.go delete mode 100644 bolt_riscv64.go delete mode 100644 bolt_s390x.go create mode 100644 internal/common/bolt_386.go create mode 100644 internal/common/bolt_amd64.go create mode 100644 internal/common/bolt_arm.go create mode 100644 internal/common/bolt_arm64.go create mode 100644 internal/common/bolt_loong64.go create mode 100644 
internal/common/bolt_mips64x.go create mode 100644 internal/common/bolt_mipsx.go create mode 100644 internal/common/bolt_ppc.go create mode 100644 internal/common/bolt_ppc64.go create mode 100644 internal/common/bolt_ppc64le.go create mode 100644 internal/common/bolt_riscv64.go create mode 100644 internal/common/bolt_s390x.go diff --git a/bolt_386.go b/bolt_386.go deleted file mode 100644 index aee25960f..000000000 --- a/bolt_386.go +++ /dev/null @@ -1,7 +0,0 @@ -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/bolt_amd64.go b/bolt_amd64.go deleted file mode 100644 index 5dd8f3f2a..000000000 --- a/bolt_amd64.go +++ /dev/null @@ -1,7 +0,0 @@ -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/bolt_arm.go b/bolt_arm.go deleted file mode 100644 index aee25960f..000000000 --- a/bolt_arm.go +++ /dev/null @@ -1,7 +0,0 @@ -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/bolt_arm64.go b/bolt_arm64.go deleted file mode 100644 index 2c67ab10c..000000000 --- a/bolt_arm64.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build arm64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. 
-const maxAllocSize = 0x7FFFFFFF diff --git a/bolt_loong64.go b/bolt_loong64.go deleted file mode 100644 index 1ef2145c6..000000000 --- a/bolt_loong64.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build loong64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/bolt_mips64x.go b/bolt_mips64x.go deleted file mode 100644 index f28a0512a..000000000 --- a/bolt_mips64x.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build mips64 || mips64le - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x8000000000 // 512GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/bolt_mipsx.go b/bolt_mipsx.go deleted file mode 100644 index 708fccdc0..000000000 --- a/bolt_mipsx.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build mips || mipsle - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x40000000 // 1GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/bolt_ppc.go b/bolt_ppc.go deleted file mode 100644 index 6a21cf33c..000000000 --- a/bolt_ppc.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build ppc - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF diff --git a/bolt_ppc64.go b/bolt_ppc64.go deleted file mode 100644 index a32f24622..000000000 --- a/bolt_ppc64.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build ppc64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. 
-const maxAllocSize = 0x7FFFFFFF diff --git a/bolt_ppc64le.go b/bolt_ppc64le.go deleted file mode 100644 index 8fb60dddc..000000000 --- a/bolt_ppc64le.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build ppc64le - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/bolt_riscv64.go b/bolt_riscv64.go deleted file mode 100644 index a63d26ab2..000000000 --- a/bolt_riscv64.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build riscv64 - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/bolt_s390x.go b/bolt_s390x.go deleted file mode 100644 index 749ea97e3..000000000 --- a/bolt_s390x.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build s390x - -package bbolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/bolt_unix.go b/bolt_unix.go index d1922c2d9..f68e721f5 100644 --- a/bolt_unix.go +++ b/bolt_unix.go @@ -11,6 +11,7 @@ import ( "golang.org/x/sys/unix" "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" ) // flock acquires an advisory lock on a file descriptor. @@ -67,7 +68,7 @@ func mmap(db *DB, sz int) error { // Save the original byte slice and convert to a byte array pointer. 
db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(&b[0])) db.datasz = sz return nil } diff --git a/bolt_windows.go b/bolt_windows.go index bba0f8809..f810ac313 100644 --- a/bolt_windows.go +++ b/bolt_windows.go @@ -10,6 +10,7 @@ import ( "golang.org/x/sys/windows" "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" ) // fdatasync flushes written data to a file descriptor. @@ -108,7 +109,7 @@ func mmap(db *DB, sz int) error { } // Convert to a byte array. - db.data = (*[maxMapSize]byte)(unsafe.Pointer(addr)) + db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(addr)) db.datasz = sz return nil diff --git a/db.go b/db.go index 9e379ac34..394873d2b 100644 --- a/db.go +++ b/db.go @@ -131,7 +131,7 @@ type DB struct { // always fails on Windows platform. //nolint dataref []byte // mmap'ed readonly, write throws SEGV - data *[maxMapSize]byte + data *[common.MaxMapSize]byte datasz int meta0 *common.Meta meta1 *common.Meta @@ -570,7 +570,7 @@ func (db *DB) mmapSize(size int) (int, error) { } // Verify the requested size is not above the maximum allowed. - if size > maxMapSize { + if size > common.MaxMapSize { return 0, errors.New("mmap too large") } @@ -588,8 +588,8 @@ func (db *DB) mmapSize(size int) (int, error) { } // If we've exceeded the max size then only grow up to the max size. - if sz > maxMapSize { - sz = maxMapSize + if sz > common.MaxMapSize { + sz = common.MaxMapSize } return int(sz), nil diff --git a/internal/common/bolt_386.go b/internal/common/bolt_386.go new file mode 100644 index 000000000..773175de3 --- /dev/null +++ b/internal/common/bolt_386.go @@ -0,0 +1,7 @@ +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0x7FFFFFFF // 2GB + +// MaxAllocSize is the size used when creating array pointers. 
+const MaxAllocSize = 0xFFFFFFF diff --git a/internal/common/bolt_amd64.go b/internal/common/bolt_amd64.go new file mode 100644 index 000000000..9f27d9199 --- /dev/null +++ b/internal/common/bolt_amd64.go @@ -0,0 +1,7 @@ +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/internal/common/bolt_arm.go b/internal/common/bolt_arm.go new file mode 100644 index 000000000..773175de3 --- /dev/null +++ b/internal/common/bolt_arm.go @@ -0,0 +1,7 @@ +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0x7FFFFFFF // 2GB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0xFFFFFFF diff --git a/internal/common/bolt_arm64.go b/internal/common/bolt_arm64.go new file mode 100644 index 000000000..9022f6bca --- /dev/null +++ b/internal/common/bolt_arm64.go @@ -0,0 +1,9 @@ +//go:build arm64 + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/internal/common/bolt_loong64.go b/internal/common/bolt_loong64.go new file mode 100644 index 000000000..31277523c --- /dev/null +++ b/internal/common/bolt_loong64.go @@ -0,0 +1,9 @@ +//go:build loong64 + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. 
+const MaxAllocSize = 0x7FFFFFFF diff --git a/internal/common/bolt_mips64x.go b/internal/common/bolt_mips64x.go new file mode 100644 index 000000000..d930f4edd --- /dev/null +++ b/internal/common/bolt_mips64x.go @@ -0,0 +1,9 @@ +//go:build mips64 || mips64le + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0x8000000000 // 512GB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/internal/common/bolt_mipsx.go b/internal/common/bolt_mipsx.go new file mode 100644 index 000000000..8b1934368 --- /dev/null +++ b/internal/common/bolt_mipsx.go @@ -0,0 +1,9 @@ +//go:build mips || mipsle + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0x40000000 // 1GB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0xFFFFFFF diff --git a/internal/common/bolt_ppc.go b/internal/common/bolt_ppc.go new file mode 100644 index 000000000..a374e1406 --- /dev/null +++ b/internal/common/bolt_ppc.go @@ -0,0 +1,9 @@ +//go:build ppc + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0x7FFFFFFF // 2GB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0xFFFFFFF diff --git a/internal/common/bolt_ppc64.go b/internal/common/bolt_ppc64.go new file mode 100644 index 000000000..80288a83a --- /dev/null +++ b/internal/common/bolt_ppc64.go @@ -0,0 +1,9 @@ +//go:build ppc64 + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. 
+const MaxAllocSize = 0x7FFFFFFF diff --git a/internal/common/bolt_ppc64le.go b/internal/common/bolt_ppc64le.go new file mode 100644 index 000000000..77561d687 --- /dev/null +++ b/internal/common/bolt_ppc64le.go @@ -0,0 +1,9 @@ +//go:build ppc64le + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/internal/common/bolt_riscv64.go b/internal/common/bolt_riscv64.go new file mode 100644 index 000000000..2a876e5f7 --- /dev/null +++ b/internal/common/bolt_riscv64.go @@ -0,0 +1,9 @@ +//go:build riscv64 + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/internal/common/bolt_s390x.go b/internal/common/bolt_s390x.go new file mode 100644 index 000000000..982cb7558 --- /dev/null +++ b/internal/common/bolt_s390x.go @@ -0,0 +1,9 @@ +//go:build s390x + +package common + +// MaxMapSize represents the largest mmap size supported by Bolt. +const MaxMapSize = 0xFFFFFFFFFFFF // 256TB + +// MaxAllocSize is the size used when creating array pointers. +const MaxAllocSize = 0x7FFFFFFF diff --git a/internal/common/types.go b/internal/common/types.go index 8ad8279a0..18d6d69c2 100644 --- a/internal/common/types.go +++ b/internal/common/types.go @@ -17,9 +17,6 @@ const Magic uint32 = 0xED0CDAED const PgidNoFreelist Pgid = 0xffffffffffffffff -// DO NOT EDIT. Copied from the "bolt" package. -const pageMaxAllocSize = 0xFFFFFFF - // IgnoreNoSync specifies whether the NoSync field of a DB is ignored when // syncing changes to a file. 
This is required as some operating systems, // such as OpenBSD, do not have a unified buffer cache (UBC) and writes diff --git a/internal/common/unsafe.go b/internal/common/unsafe.go index 9b77dd7b2..740ffc707 100644 --- a/internal/common/unsafe.go +++ b/internal/common/unsafe.go @@ -23,5 +23,5 @@ func UnsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { // index 0. However, the wiki never says that the address must be to // the beginning of a C allocation (or even that malloc was used at // all), so this is believed to be correct. - return (*[pageMaxAllocSize]byte)(UnsafeAdd(base, offset))[i:j:j] + return (*[MaxAllocSize]byte)(UnsafeAdd(base, offset))[i:j:j] } diff --git a/tx.go b/tx.go index 7b5db7727..5eb383c4b 100644 --- a/tx.go +++ b/tx.go @@ -495,8 +495,8 @@ func (tx *Tx) write() error { // Write out page in "max allocation" sized chunks. for { sz := rem - if sz > maxAllocSize-1 { - sz = maxAllocSize - 1 + if sz > common.MaxAllocSize-1 { + sz = common.MaxAllocSize - 1 } buf := common.UnsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) From d5b5bbcd8c0f67800c0ce00749f6805cf30502bc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 9 Jun 2025 14:43:16 +0000 Subject: [PATCH 368/439] build(deps): Bump golang.org/x/sync from 0.14.0 to 0.15.0 Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.14.0 to 0.15.0. - [Commits](https://github.com/golang/sync/compare/v0.14.0...v0.15.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-version: 0.15.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index bde761ec7..0c6915bb4 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/spf13/pflag v1.0.6 github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 - golang.org/x/sync v0.14.0 + golang.org/x/sync v0.15.0 golang.org/x/sys v0.33.0 ) diff --git a/go.sum b/go.sum index de26c3eb9..10457fcd4 100644 --- a/go.sum +++ b/go.sum @@ -14,8 +14,8 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= -golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= -golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= +golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From d18aaf8060153957bc04801cd2bb571c7af621cb Mon Sep 17 00:00:00 2001 From: Roman Khimov Date: Thu, 29 May 2025 22:10:34 +0300 Subject: [PATCH 369/439] db: drop unused txs list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Seems like it was more useful before 263e75d0594f5397905c21e17cd34b68d68441d5, but now it's only used for statistics which can easily be managed in a different way. 
I see no other valid purposes for this list, a reference can have some value for GC, but if DB user loses a reference to transaction that is not closed there is not much DB can do. This improves ConcurrentView test from workers samples min avg 50% 80% 90% max 1 10 49.323µs 974.287µs 1.068978ms 1.112882ms 1.131938ms 1.131938ms 10 100 32.592µs 685.315µs 980.5µs 1.125385ms 1.137678ms 1.169789ms 100 1000 31.49µs 219.084µs 77.427µs 353.651µs 656.916µs 1.785808ms 1000 10000 30.668µs 1.639366ms 99.128µs 3.086665ms 5.031354ms 16.315849ms 10000 100000 30.818µs 40.893475ms 36.963667ms 78.650583ms 111.553136ms 302.412177ms to workers samples min avg 50% 80% 90% max 1 10 78.358µs 964.847µs 1.059159ms 1.073256ms 1.07551ms 1.07551ms 10 100 32.802µs 304.922µs 80.924µs 674.54µs 1.069298ms 1.220625ms 100 1000 30.758µs 304.541µs 64.192µs 397.094µs 1.101991ms 2.183302ms 1000 10000 30.558µs 1.05711ms 92.426µs 2.111896ms 3.317894ms 11.790014ms 10000 100000 30.548µs 10.98898ms 90.742µs 21.740659ms 33.020076ms 135.33094ms Signed-off-by: Roman Khimov --- db.go | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/db.go b/db.go index 6d5dbb05b..280ddc273 100644 --- a/db.go +++ b/db.go @@ -138,7 +138,6 @@ type DB struct { pageSize int opened bool rwtx *Tx - txs []*Tx freelist fl.Interface freelistLoad sync.Once @@ -801,9 +800,6 @@ func (db *DB) beginTx() (*Tx, error) { t := &Tx{} t.init(db) - // Keep track of transaction until it closes. - db.txs = append(db.txs, t) - n := len(db.txs) if db.freelist != nil { db.freelist.AddReadonlyTXID(t.meta.Txid()) } @@ -814,7 +810,7 @@ func (db *DB) beginTx() (*Tx, error) { // Update the transaction stats. db.statlock.Lock() db.stats.TxN++ - db.stats.OpenTxN = n + db.stats.OpenTxN++ db.statlock.Unlock() return t, nil @@ -863,17 +859,6 @@ func (db *DB) removeTx(tx *Tx) { // Use the meta lock to restrict access to the DB object. db.metalock.Lock() - // Remove the transaction. 
- for i, t := range db.txs { - if t == tx { - last := len(db.txs) - 1 - db.txs[i] = db.txs[last] - db.txs[last] = nil - db.txs = db.txs[:last] - break - } - } - n := len(db.txs) if db.freelist != nil { db.freelist.RemoveReadonlyTXID(tx.meta.Txid()) } @@ -883,7 +868,7 @@ func (db *DB) removeTx(tx *Tx) { // Merge statistics. db.statlock.Lock() - db.stats.OpenTxN = n + db.stats.OpenTxN-- db.stats.TxStats.add(&tx.stats) db.statlock.Unlock() } From fea0c9a84009590de7e0719b73464ff4fc3176f7 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 10 Jun 2025 15:03:52 +0100 Subject: [PATCH 370/439] Update changelog for v1.4.1 Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.4.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/CHANGELOG/CHANGELOG-1.4.md b/CHANGELOG/CHANGELOG-1.4.md index f5cb940d7..58d470458 100644 --- a/CHANGELOG/CHANGELOG-1.4.md +++ b/CHANGELOG/CHANGELOG-1.4.md @@ -1,6 +1,15 @@
+## v1.4.1(2025-06-10) + +### BoltDB +- [Correct the incorrect usage of debug method](https://github.com/etcd-io/bbolt/pull/905) +- [Add clarification on the option `InitialMmapSize`](https://github.com/etcd-io/bbolt/pull/943) +- [Fix the crash when writing huge values](https://github.com/etcd-io/bbolt/pull/978) + +
+ ## v1.4.0(2025-02-05) There isn't any production code change since v1.4.0-beta.0. Only some dependencies are bumped, also updated some typos in comment and readme, and removed the legacy From e39824005968b7a36edd99e6bc374ca9a3a82023 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Wed, 11 Jun 2025 09:38:03 -0700 Subject: [PATCH 371/439] Run ARM64 test workflow on Ubuntu ARM The test ARM64 workflow was running on a regular Ubuntu machine. Specify the ARM suffix to ensure that the tests run on the ARM architecture. Signed-off-by: Ivan Valdes --- .github/workflows/tests_arm64.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/tests_arm64.yaml b/.github/workflows/tests_arm64.yaml index c89b322fc..ceeb4fd96 100644 --- a/.github/workflows/tests_arm64.yaml +++ b/.github/workflows/tests_arm64.yaml @@ -5,6 +5,8 @@ on: [push, pull_request] jobs: test-linux-arm64: uses: ./.github/workflows/tests-template.yml + with: + runs-on: ubuntu-24.04-arm test-linux-arm64-race: uses: ./.github/workflows/tests-template.yml with: From b5df4c6cd7bd96248ba22689b706f3d1a2d88141 Mon Sep 17 00:00:00 2001 From: Gang Li Date: Tue, 10 Jun 2025 22:02:46 +0000 Subject: [PATCH 372/439] add page-size and initial-mmap-size flag to bench cmd Signed-off-by: Gang Li --- cmd/bbolt/main.go | 41 ++++++++++++++++++++++++----------------- 1 file changed, 24 insertions(+), 17 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 37324369d..04050fe50 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -1019,7 +1019,10 @@ func (cmd *benchCommand) Run(args ...string) error { } // Create database. 
- db, err := bolt.Open(options.Path, 0600, nil) + dbOptions := *bolt.DefaultOptions + dbOptions.PageSize = options.PageSize + dbOptions.InitialMmapSize = options.InitialMmapSize + db, err := bolt.Open(options.Path, 0600, &dbOptions) if err != nil { return err } @@ -1095,6 +1098,8 @@ func (cmd *benchCommand) ParseFlags(args []string) (*BenchOptions, error) { fs.BoolVar(&options.Work, "work", false, "") fs.StringVar(&options.Path, "path", "", "") fs.BoolVar(&options.GoBenchOutput, "gobench-output", false, "") + fs.IntVar(&options.PageSize, "page-size", common.DefaultPageSize, "Set page size in bytes.") + fs.IntVar(&options.InitialMmapSize, "initial-mmap-size", 0, "Set initial mmap size in bytes for database file.") fs.SetOutput(cmd.Stderr) if err := fs.Parse(args); err != nil { return nil, err @@ -1559,22 +1564,24 @@ func (cmd *benchCommand) stopProfiling() { // BenchOptions represents the set of options that can be passed to "bolt bench". type BenchOptions struct { - ProfileMode string - WriteMode string - ReadMode string - Iterations int64 - BatchSize int64 - KeySize int - ValueSize int - CPUProfile string - MemProfile string - BlockProfile string - StatsInterval time.Duration - FillPercent float64 - NoSync bool - Work bool - Path string - GoBenchOutput bool + ProfileMode string + WriteMode string + ReadMode string + Iterations int64 + BatchSize int64 + KeySize int + ValueSize int + CPUProfile string + MemProfile string + BlockProfile string + StatsInterval time.Duration + FillPercent float64 + NoSync bool + Work bool + Path string + GoBenchOutput bool + PageSize int + InitialMmapSize int } // BenchResults represents the performance results of the benchmark and is thread-safe. 
From 84f17a44e86cf63a982f3a769cdf3372c9db4de3 Mon Sep 17 00:00:00 2001 From: hwdef Date: Thu, 12 Jun 2025 16:32:32 +0800 Subject: [PATCH 373/439] Bump Go to 1.24.4 Signed-off-by: hwdef --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index ae96cc731..2f4320f67 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.24.3 +1.24.4 diff --git a/go.mod b/go.mod index 0c6915bb4..995366134 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.24 -toolchain go1.24.3 +toolchain go1.24.4 require ( github.com/spf13/cobra v1.9.1 From 87d0cf7deba0803ce7e07445f63f18a88fca8482 Mon Sep 17 00:00:00 2001 From: Roman Khimov Date: Thu, 29 May 2025 22:20:58 +0300 Subject: [PATCH 374/439] db: make statistics optional MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I think most Bolt users never care about this data, so we're just wasting time for nothing. This is also one of the exclusive locks that we have on the View() path. While this patch doesn't change much on its own, because the other lock is still here (subject to a different patch), once that lock is removed the difference in concurrent View() test is pretty clear. 
With NoStatistics=false: workers samples min avg 50% 80% 90% max 1 10 123.905µs 969.042µs 1.062529ms 1.065585ms 1.071537ms 1.071537ms 10 100 34.636µs 178.176µs 89.7µs 110.439µs 943.753µs 1.055165ms 100 1000 31.79µs 280.166µs 51.358µs 526.992µs 1.034306ms 2.47819ms 1000 10000 30.608µs 818.098µs 86.464µs 935.799µs 2.681115ms 10.595186ms 10000 100000 30.569µs 3.060826ms 64.132µs 6.56151ms 11.199984ms 64.855384ms NoStatistics=true: workers samples min avg 50% 80% 90% max 1 10 68.049µs 962.039µs 1.060335ms 1.064633ms 1.066087ms 1.066087ms 10 100 34.846µs 315.346µs 90.943µs 862.499µs 1.00516ms 1.08366ms 100 1000 31.45µs 225.53µs 36.88µs 236.63µs 939.115µs 1.466286ms 1000 10000 30.539µs 207.383µs 43.643µs 110.841µs 408.146µs 5.689001ms 10000 100000 30.488µs 152.603µs 39.636µs 90.622µs 145.266µs 9.28235ms The default behavior is kept for compatibility. In future the option can be extended to avoid collecting transaction statistics as well. Now that stats is a pointer we can also revert a part of 26f89a595140f163a4e8a7c86b689990f6335788 and make the structure cleaner. Signed-off-by: Roman Khimov --- db.go | 54 ++++++++++++++++++++++++++++++++++-------------------- tx.go | 16 +++++++++------- 2 files changed, 43 insertions(+), 27 deletions(-) diff --git a/db.go b/db.go index 280ddc273..5d3e26496 100644 --- a/db.go +++ b/db.go @@ -36,12 +36,6 @@ const ( // All data access is performed through transactions which can be obtained through the DB. // All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. type DB struct { - // Put `stats` at the first field to ensure it's 64-bit aligned. Note that - // the first word in an allocated struct can be relied upon to be 64-bit - // aligned. Refer to https://pkg.go.dev/sync/atomic#pkg-note-BUG. Also - // refer to discussion in https://github.com/etcd-io/bbolt/issues/577. - stats Stats - // When enabled, the database will perform a Check() after every commit. 
// A panic is issued if the database is in an inconsistent state. This // flag has a large performance impact so it should only be used for @@ -138,6 +132,7 @@ type DB struct { pageSize int opened bool rwtx *Tx + stats *Stats freelist fl.Interface freelistLoad sync.Once @@ -203,6 +198,10 @@ func Open(path string, mode os.FileMode, options *Options) (db *DB, err error) { db.MaxBatchDelay = common.DefaultMaxBatchDelay db.AllocSize = common.DefaultAllocSize + if !options.NoStatistics { + db.stats = new(Stats) + } + if options.Logger == nil { db.logger = getDiscardLogger() } else { @@ -430,7 +429,9 @@ func (db *DB) loadFreelist() { // Read free list from freelist page. db.freelist.Read(db.page(db.meta().Freelist())) } - db.stats.FreePageN = db.freelist.FreeCount() + if db.stats != nil { + db.stats.FreePageN = db.freelist.FreeCount() + } }) } @@ -808,10 +809,12 @@ func (db *DB) beginTx() (*Tx, error) { db.metalock.Unlock() // Update the transaction stats. - db.statlock.Lock() - db.stats.TxN++ - db.stats.OpenTxN++ - db.statlock.Unlock() + if db.stats != nil { + db.statlock.Lock() + db.stats.TxN++ + db.stats.OpenTxN++ + db.statlock.Unlock() + } return t, nil } @@ -867,10 +870,12 @@ func (db *DB) removeTx(tx *Tx) { db.metalock.Unlock() // Merge statistics. - db.statlock.Lock() - db.stats.OpenTxN-- - db.stats.TxStats.add(&tx.stats) - db.statlock.Unlock() + if db.stats != nil { + db.statlock.Lock() + db.stats.OpenTxN-- + db.stats.TxStats.add(&tx.stats) + db.statlock.Unlock() + } } // Update executes a function within the context of a read-write managed transaction. @@ -1088,9 +1093,13 @@ func (db *DB) Sync() (err error) { // Stats retrieves ongoing performance stats for the database. // This is only updated when a transaction closes. 
func (db *DB) Stats() Stats { - db.statlock.RLock() - defer db.statlock.RUnlock() - return db.stats + var s Stats + if db.stats != nil { + db.statlock.RLock() + s = *db.stats + db.statlock.RUnlock() + } + return s } // This is for internal access to the raw data bytes from the C cursor, use @@ -1340,6 +1349,11 @@ type Options struct { // Logger is the logger used for bbolt. Logger Logger + + // NoStatistics turns off statistics collection, Stats method will + // return empty structure in this case. This can be beneficial for + // performance under high-concurrency read-only transactions. + NoStatistics bool } func (o *Options) String() string { @@ -1347,8 +1361,8 @@ func (o *Options) String() string { return "{}" } - return fmt.Sprintf("{Timeout: %s, NoGrowSync: %t, NoFreelistSync: %t, PreLoadFreelist: %t, FreelistType: %s, ReadOnly: %t, MmapFlags: %x, InitialMmapSize: %d, PageSize: %d, MaxSize: %d, NoSync: %t, OpenFile: %p, Mlock: %t, Logger: %p}", - o.Timeout, o.NoGrowSync, o.NoFreelistSync, o.PreLoadFreelist, o.FreelistType, o.ReadOnly, o.MmapFlags, o.InitialMmapSize, o.PageSize, o.MaxSize, o.NoSync, o.OpenFile, o.Mlock, o.Logger) + return fmt.Sprintf("{Timeout: %s, NoGrowSync: %t, NoFreelistSync: %t, PreLoadFreelist: %t, FreelistType: %s, ReadOnly: %t, MmapFlags: %x, InitialMmapSize: %d, PageSize: %d, MaxSize: %d, NoSync: %t, OpenFile: %p, Mlock: %t, Logger: %p, NoStatistics: %t}", + o.Timeout, o.NoGrowSync, o.NoFreelistSync, o.PreLoadFreelist, o.FreelistType, o.ReadOnly, o.MmapFlags, o.InitialMmapSize, o.PageSize, o.MaxSize, o.NoSync, o.OpenFile, o.Mlock, o.Logger, o.NoStatistics) } diff --git a/tx.go b/tx.go index 5eb383c4b..38d34f8af 100644 --- a/tx.go +++ b/tx.go @@ -357,13 +357,15 @@ func (tx *Tx) close() { tx.db.rwlock.Unlock() // Merge statistics. 
- tx.db.statlock.Lock() - tx.db.stats.FreePageN = freelistFreeN - tx.db.stats.PendingPageN = freelistPendingN - tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize - tx.db.stats.FreelistInuse = freelistAlloc - tx.db.stats.TxStats.add(&tx.stats) - tx.db.statlock.Unlock() + if tx.db.stats != nil { + tx.db.statlock.Lock() + tx.db.stats.FreePageN = freelistFreeN + tx.db.stats.PendingPageN = freelistPendingN + tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize + tx.db.stats.FreelistInuse = freelistAlloc + tx.db.stats.TxStats.add(&tx.stats) + tx.db.statlock.Unlock() + } } else { tx.db.removeTx(tx) } From f2297c6fad41e9cefbcf216234d39d673f571375 Mon Sep 17 00:00:00 2001 From: Asutorufa <16442314+Asutorufa@users.noreply.github.com> Date: Fri, 13 Jun 2025 11:58:56 +0800 Subject: [PATCH 375/439] fix maxMapSize typo in aix, android, solaris Signed-off-by: Asutorufa <16442314+Asutorufa@users.noreply.github.com> --- bolt_aix.go | 3 ++- bolt_android.go | 3 ++- bolt_solaris.go | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/bolt_aix.go b/bolt_aix.go index 4b424ed4c..af37741da 100644 --- a/bolt_aix.go +++ b/bolt_aix.go @@ -8,6 +8,7 @@ import ( "time" "unsafe" + "go.etcd.io/bbolt/internal/common" "golang.org/x/sys/unix" ) @@ -69,7 +70,7 @@ func mmap(db *DB, sz int) error { // Save the original byte slice and convert to a byte array pointer. db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(&b[0])) db.datasz = sz return nil } diff --git a/bolt_android.go b/bolt_android.go index 11890f0d7..1d095ae9b 100644 --- a/bolt_android.go +++ b/bolt_android.go @@ -6,6 +6,7 @@ import ( "time" "unsafe" + "go.etcd.io/bbolt/internal/common" "golang.org/x/sys/unix" ) @@ -69,7 +70,7 @@ func mmap(db *DB, sz int) error { // Save the original byte slice and convert to a byte array pointer. 
db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(&b[0])) db.datasz = sz return nil } diff --git a/bolt_solaris.go b/bolt_solaris.go index babad6578..9fe1e7e92 100644 --- a/bolt_solaris.go +++ b/bolt_solaris.go @@ -6,6 +6,7 @@ import ( "time" "unsafe" + "go.etcd.io/bbolt/internal/common" "golang.org/x/sys/unix" ) @@ -67,7 +68,7 @@ func mmap(db *DB, sz int) error { // Save the original byte slice and convert to a byte array pointer. db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(&b[0])) db.datasz = sz return nil } From 01931e86ec834330b442f4f721840b820a4d7561 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sun, 15 Jun 2025 19:39:02 +0100 Subject: [PATCH 376/439] Update changelog for 1.4.2 and 1.5.0 Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.4.md | 7 +++++++ CHANGELOG/CHANGELOG-1.5.md | 10 ++++++++++ 2 files changed, 17 insertions(+) create mode 100644 CHANGELOG/CHANGELOG-1.5.md diff --git a/CHANGELOG/CHANGELOG-1.4.md b/CHANGELOG/CHANGELOG-1.4.md index 58d470458..cab08b859 100644 --- a/CHANGELOG/CHANGELOG-1.4.md +++ b/CHANGELOG/CHANGELOG-1.4.md @@ -1,6 +1,13 @@
+## v1.4.2(TBD) + +### BoltDB +- [Fix the compilation issue on aix, android and solaris due to wrong use of `maxMapSize`](https://github.com/etcd-io/bbolt/pull/990) + +
+ ## v1.4.1(2025-06-10) ### BoltDB diff --git a/CHANGELOG/CHANGELOG-1.5.md b/CHANGELOG/CHANGELOG-1.5.md new file mode 100644 index 000000000..efa387aa9 --- /dev/null +++ b/CHANGELOG/CHANGELOG-1.5.md @@ -0,0 +1,10 @@ +
+ +## v1.5.0(TBD) + +### BoltDB +- [Add support for data file size limit](https://github.com/etcd-io/bbolt/pull/929) +- [Remove the unused txs list](https://github.com/etcd-io/bbolt/pull/973) +- [Add option `NoStatistics` to make the statistics optional](https://github.com/etcd-io/bbolt/pull/977) + +
\ No newline at end of file From 55e2494a558c41c1def7879ae0973746ff5d04f0 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Sun, 15 Jun 2025 21:06:25 +0200 Subject: [PATCH 377/439] chore(CI): add goimports to linter Signed-off-by: Mustafa Elbehery --- .github/workflows/failpoint_test.yaml | 4 ++++ .github/workflows/robustness_template.yaml | 4 ++++ .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_amd64.yaml | 4 ++++ .github/workflows/tests_arm64.yaml | 4 ++++ .github/workflows/tests_windows.yml | 6 +++++- .golangci.yaml | 6 ++++++ bolt_aix.go | 3 ++- bolt_android.go | 3 ++- bolt_solaris.go | 3 ++- cmd/bbolt/main_test.go | 5 ++--- movebucket_test.go | 4 ++-- tests/dmflakey/dmflakey_test.go | 4 ++-- tests/robustness/powerfailure_test.go | 4 ++-- 14 files changed, 42 insertions(+), 14 deletions(-) diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index ce626ca45..17546a8e7 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -15,6 +15,10 @@ jobs: - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version: ${{ steps.goversion.outputs.goversion }} + - name: Run golangci-lint + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 + with: + version: v2.1.6 - run: | make gofail-enable make test-failpoint diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml index befe7dfe2..f742476ae 100644 --- a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -29,6 +29,10 @@ jobs: - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version: ${{ steps.goversion.outputs.goversion }} + - name: Run golangci-lint + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 + with: + version: v2.1.6 - name: test-robustness run: | set -euo pipefail diff --git 
a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index ad92c8c70..c39e701ea 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -49,7 +49,7 @@ jobs: exit 1 ;; esac - - name: golangci-lint + - name: Run golangci-lint uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 with: version: v2.1.6 diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml index 7372dd7b2..ae80142d5 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -23,4 +23,8 @@ jobs: - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version: ${{ steps.goversion.outputs.goversion }} + - name: Run golangci-lint + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 + with: + version: v2.1.6 - run: make coverage diff --git a/.github/workflows/tests_arm64.yaml b/.github/workflows/tests_arm64.yaml index ceeb4fd96..195786817 100644 --- a/.github/workflows/tests_arm64.yaml +++ b/.github/workflows/tests_arm64.yaml @@ -25,4 +25,8 @@ jobs: - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version: ${{ steps.goversion.outputs.goversion }} + - name: Run golangci-lint + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 + with: + version: v2.1.6 - run: make coverage diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 54546e146..79f9163c5 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -39,7 +39,7 @@ jobs: ;; esac shell: bash - - name: golangci-lint + - name: Run golangci-lint uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 with: version: v2.1.6 @@ -54,4 +54,8 @@ jobs: - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 with: go-version: ${{ 
steps.goversion.outputs.goversion }} + - name: Run golangci-lint + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 + with: + version: v2.1.6 - run: make coverage diff --git a/.golangci.yaml b/.golangci.yaml index 68fc13184..bef3f6de7 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -1,8 +1,14 @@ formatters: enable: + - gci - gofmt - goimports settings: # please keep this alphabetized + gci: + sections: + - standard + - default + - prefix(go.etcd.io) goimports: local-prefixes: - go.etcd.io # Put imports beginning with prefix after 3rd-party packages. diff --git a/bolt_aix.go b/bolt_aix.go index af37741da..596e54060 100644 --- a/bolt_aix.go +++ b/bolt_aix.go @@ -8,8 +8,9 @@ import ( "time" "unsafe" - "go.etcd.io/bbolt/internal/common" "golang.org/x/sys/unix" + + "go.etcd.io/bbolt/internal/common" ) // flock acquires an advisory lock on a file descriptor. diff --git a/bolt_android.go b/bolt_android.go index 1d095ae9b..ac64fcf5b 100644 --- a/bolt_android.go +++ b/bolt_android.go @@ -6,8 +6,9 @@ import ( "time" "unsafe" - "go.etcd.io/bbolt/internal/common" "golang.org/x/sys/unix" + + "go.etcd.io/bbolt/internal/common" ) // flock acquires an advisory lock on a file descriptor. diff --git a/bolt_solaris.go b/bolt_solaris.go index 9fe1e7e92..56b2ccab4 100644 --- a/bolt_solaris.go +++ b/bolt_solaris.go @@ -6,8 +6,9 @@ import ( "time" "unsafe" - "go.etcd.io/bbolt/internal/common" "golang.org/x/sys/unix" + + "go.etcd.io/bbolt/internal/common" ) // flock acquires an advisory lock on a file descriptor. 
diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index 727b38f55..173d6595f 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -14,14 +14,13 @@ import ( "sync" "testing" - "go.etcd.io/bbolt/internal/btesting" - "go.etcd.io/bbolt/internal/guts_cli" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/internal/btesting" + "go.etcd.io/bbolt/internal/guts_cli" ) // Ensure the "info" command can print information about a database. diff --git a/movebucket_test.go b/movebucket_test.go index a04e24c9c..9c09825e6 100644 --- a/movebucket_test.go +++ b/movebucket_test.go @@ -7,11 +7,11 @@ import ( "path/filepath" "testing" + "github.com/stretchr/testify/require" + "go.etcd.io/bbolt" "go.etcd.io/bbolt/errors" "go.etcd.io/bbolt/internal/btesting" - - "github.com/stretchr/testify/require" ) func TestTx_MoveBucket(t *testing.T) { diff --git a/tests/dmflakey/dmflakey_test.go b/tests/dmflakey/dmflakey_test.go index 99e2de062..9e4229534 100644 --- a/tests/dmflakey/dmflakey_test.go +++ b/tests/dmflakey/dmflakey_test.go @@ -12,11 +12,11 @@ import ( "testing" "time" - testutils "go.etcd.io/bbolt/tests/utils" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/sys/unix" + + testutils "go.etcd.io/bbolt/tests/utils" ) func TestMain(m *testing.M) { diff --git a/tests/robustness/powerfailure_test.go b/tests/robustness/powerfailure_test.go index 4d960b325..54c611cbf 100644 --- a/tests/robustness/powerfailure_test.go +++ b/tests/robustness/powerfailure_test.go @@ -19,11 +19,11 @@ import ( "testing" "time" - "go.etcd.io/bbolt/tests/dmflakey" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/sys/unix" + + "go.etcd.io/bbolt/tests/dmflakey" ) var panicFailpoints = []string{ From 249746fef43a12e0b5368d98006be51d3afa4d3a Mon Sep 17 00:00:00 2001 From: Roman Khimov Date: Thu, 29 May 2025 
23:46:58 +0300 Subject: [PATCH 378/439] tx: add missing lock on meta page update metalock is supposed to protect meta page, but it looks like the only place where we're modifying it is not protected in fact. Since page update is not atomic a concurrent reader (RO transaction) can get an inconsistent page. It's likely to fall back to the other one in this case, but still we better not allow this to happen. Signed-off-by: Roman Khimov --- tx.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tx.go b/tx.go index 5eb383c4b..7123ded8f 100644 --- a/tx.go +++ b/tx.go @@ -561,10 +561,13 @@ func (tx *Tx) writeMeta() error { tx.meta.Write(p) // Write the meta page to file. + tx.db.metalock.Lock() if _, err := tx.db.ops.writeAt(buf, int64(p.Id())*int64(tx.db.pageSize)); err != nil { + tx.db.metalock.Unlock() lg.Errorf("writeAt failed, pgid: %d, pageSize: %d, error: %v", p.Id(), tx.db.pageSize, err) return err } + tx.db.metalock.Unlock() if !tx.db.NoSync || common.IgnoreNoSync { // gofail: var beforeSyncMetaPage struct{} if err := fdatasync(tx.db); err != nil { From 8723401c197dfa19c7dda35093360dc6dbe296e0 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Thu, 12 Jun 2025 14:00:04 +0200 Subject: [PATCH 379/439] chore(CI): Add QEMU to CI Workflow Signed-off-by: Mustafa Elbehery --- .github/workflows/cross-arch-test.yaml | 135 +++++++++++++++++++++++++ 1 file changed, 135 insertions(+) create mode 100644 .github/workflows/cross-arch-test.yaml diff --git a/.github/workflows/cross-arch-test.yaml b/.github/workflows/cross-arch-test.yaml new file mode 100644 index 000000000..197b259df --- /dev/null +++ b/.github/workflows/cross-arch-test.yaml @@ -0,0 +1,135 @@ +--- +name: Cross-Platform Build Tests + +on: + push: + branches: [ main ] + pull_request: + branches: [ main ] + +jobs: + build-aix: + strategy: + matrix: + goarch: [ppc64] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - id: goversion + run: echo 
"goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" + - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version: ${{ steps.goversion.outputs.goversion }} + - name: Run golangci-lint + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 + with: + version: v2.1.6 + + - name: Build for aix/${{ matrix.goarch }} + run: | + GOOS=aix GOARCH=${{ matrix.GOARCH }} go build ./... + + build-android: + strategy: + matrix: + goarch: [arm64] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - id: goversion + run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" + - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version: ${{ steps.goversion.outputs.goversion }} + - name: Run golangci-lint + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 + with: + version: v2.1.6 + + - name: Build for android/${{ matrix.goarch }} + run: | + GOOS=android GOARCH=${{ matrix.goarch }} go build ./... + + build-linux: + strategy: + matrix: + goarch: [386, amd64, arm, arm64, loong64, mips, mips64, mips64le, mipsle, ppc64, ppc64le, riscv64, s390x] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - id: goversion + run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" + - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version: ${{ steps.goversion.outputs.goversion }} + - name: Run golangci-lint + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 + with: + version: v2.1.6 + + - name: Build for linux/${{ matrix.goarch }} + run: | + GOOS=linux GOARCH=${{ matrix.GOARCH }} go build ./... 
+ + build-openbsd: + strategy: + matrix: + goarch: [386, amd64, arm, arm64, ppc64, riscv64] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - id: goversion + run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" + - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version: ${{ steps.goversion.outputs.goversion }} + - name: Run golangci-lint + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 + with: + version: v2.1.6 + + - name: Build for openbsd/${{ matrix.goarch }} + run: | + GOOS=openbsd GOARCH=${{ matrix.GOARCH }} go build ./... + + build-solaris: + strategy: + matrix: + goarch: [amd64] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - id: goversion + run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" + - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version: ${{ steps.goversion.outputs.goversion }} + - name: Run golangci-lint + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 + with: + version: v2.1.6 + + - name: Build for solaris/${{ matrix.goarch }} + run: | + GOOS=solaris GOARCH=${{ matrix.GOARCH }} go build ./... 
+ + build-windows: + strategy: + matrix: + goarch: [386, amd64, arm64] + runs-on: windows-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - id: goversion + run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" + - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version: ${{ steps.goversion.outputs.goversion }} + - name: Run golangci-lint + uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 + with: + version: v2.1.6 + + - name: Build for windows/${{ matrix.goarch }} + run: | + $env:GOOS="windows"; $env:GOARCH="${{ matrix.GOARCH }}"; go build ./... From 685eda312d58d9e5f34424fbc1d43c6065c84755 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Tue, 24 Jun 2025 15:32:13 -0700 Subject: [PATCH 380/439] Add template to cross arch build tests * Create a template to simplify executing tests without repetition. * Run cross-compilation tests for Windows on Ubuntu (Linux), as every platform is being cross-compiled as a sanity check, it could also be done using Linux, and simplify the template. * Remove the GolangCI lint action, as it is linting in Linux (as this is not using QEMU, but only cross-compilation), and there's no benefit to adding this step, as it's already checked in other workflows. 
Signed-off-by: Ivan Valdes --- .github/workflows/cross-arch-template.yaml | 29 ++++ .github/workflows/cross-arch-test.yaml | 151 ++++----------------- 2 files changed, 55 insertions(+), 125 deletions(-) create mode 100644 .github/workflows/cross-arch-template.yaml diff --git a/.github/workflows/cross-arch-template.yaml b/.github/workflows/cross-arch-template.yaml new file mode 100644 index 000000000..a464e4d56 --- /dev/null +++ b/.github/workflows/cross-arch-template.yaml @@ -0,0 +1,29 @@ +--- +name: Reusable Cross-Platform Build Workflow +on: + workflow_call: + inputs: + archs: + required: true + type: string + os: + required: true + type: string +permissions: read-all + +jobs: + cross-build: + strategy: + matrix: + arch: ${{ fromJSON(inputs.archs) }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - id: goversion + run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" + - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + with: + go-version: ${{ steps.goversion.outputs.goversion }} + - name: Build for ${{ inputs.os }}/${{ matrix.arch }} + run: | + GOOS=${{ inputs.os }} GOARCH=${{ matrix.arch }} go build ./... 
diff --git a/.github/workflows/cross-arch-test.yaml b/.github/workflows/cross-arch-test.yaml index 197b259df..2402a04a7 100644 --- a/.github/workflows/cross-arch-test.yaml +++ b/.github/workflows/cross-arch-test.yaml @@ -1,135 +1,36 @@ --- name: Cross-Platform Build Tests - -on: - push: - branches: [ main ] - pull_request: - branches: [ main ] +permissions: read-all +on: [push, pull_request] jobs: build-aix: - strategy: - matrix: - goarch: [ppc64] - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - id: goversion - run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version: ${{ steps.goversion.outputs.goversion }} - - name: Run golangci-lint - uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 - with: - version: v2.1.6 - - - name: Build for aix/${{ matrix.goarch }} - run: | - GOOS=aix GOARCH=${{ matrix.GOARCH }} go build ./... - + uses: ./.github/workflows/cross-arch-template.yaml + with: + os: aix + archs: "['ppc64']" build-android: - strategy: - matrix: - goarch: [arm64] - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - id: goversion - run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version: ${{ steps.goversion.outputs.goversion }} - - name: Run golangci-lint - uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 - with: - version: v2.1.6 - - - name: Build for android/${{ matrix.goarch }} - run: | - GOOS=android GOARCH=${{ matrix.goarch }} go build ./... 
- + uses: ./.github/workflows/cross-arch-template.yaml + with: + os: android + archs: "['arm64']" build-linux: - strategy: - matrix: - goarch: [386, amd64, arm, arm64, loong64, mips, mips64, mips64le, mipsle, ppc64, ppc64le, riscv64, s390x] - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - id: goversion - run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version: ${{ steps.goversion.outputs.goversion }} - - name: Run golangci-lint - uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 - with: - version: v2.1.6 - - - name: Build for linux/${{ matrix.goarch }} - run: | - GOOS=linux GOARCH=${{ matrix.GOARCH }} go build ./... - + uses: ./.github/workflows/cross-arch-template.yaml + with: + os: linux + archs: "['386','amd64','arm','arm64','loong64','mips','mips64','mips64le','mipsle','ppc64','ppc64le','riscv64','s390x']" build-openbsd: - strategy: - matrix: - goarch: [386, amd64, arm, arm64, ppc64, riscv64] - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - id: goversion - run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version: ${{ steps.goversion.outputs.goversion }} - - name: Run golangci-lint - uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 - with: - version: v2.1.6 - - - name: Build for openbsd/${{ matrix.goarch }} - run: | - GOOS=openbsd GOARCH=${{ matrix.GOARCH }} go build ./... 
- + uses: ./.github/workflows/cross-arch-template.yaml + with: + os: openbsd + archs: "['386','amd64','arm','arm64','ppc64','riscv64']" build-solaris: - strategy: - matrix: - goarch: [amd64] - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - id: goversion - run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version: ${{ steps.goversion.outputs.goversion }} - - name: Run golangci-lint - uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 - with: - version: v2.1.6 - - - name: Build for solaris/${{ matrix.goarch }} - run: | - GOOS=solaris GOARCH=${{ matrix.GOARCH }} go build ./... - + uses: ./.github/workflows/cross-arch-template.yaml + with: + os: solaris + archs: "['amd64']" build-windows: - strategy: - matrix: - goarch: [386, amd64, arm64] - runs-on: windows-latest - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - - id: goversion - run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 - with: - go-version: ${{ steps.goversion.outputs.goversion }} - - name: Run golangci-lint - uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0 - with: - version: v2.1.6 - - - name: Build for windows/${{ matrix.goarch }} - run: | - $env:GOOS="windows"; $env:GOARCH="${{ matrix.GOARCH }}"; go build ./... + uses: ./.github/workflows/cross-arch-template.yaml + with: + os: windows + archs: "['386','amd64','arm64']" From 550aa05cd293961d69ae158381a5fbbfccc135a8 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Tue, 10 Jun 2025 12:07:49 -0700 Subject: [PATCH 381/439] release: Use stage directory to build release Create a stage directory in the temp file system, and clone the repository there. 
Signed-off-by: Ivan Valdes --- scripts/release.sh | 100 ++++++++++++++++++++++++++------------------- 1 file changed, 59 insertions(+), 41 deletions(-) diff --git a/scripts/release.sh b/scripts/release.sh index 723c51242..47fb7aba8 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -19,51 +19,69 @@ function get_gpg_key { } # === Main Script Logic === -echo "enter release string according to semantic versioning (e.g. v1.2.3)." -read -r INPUT -if [[ ! "${INPUT}" =~ ^v[0-9]+.[0-9]+.[0-9]+ ]]; then - echo "Expected 'version' param of the form 'v..' but got '${INPUT}'" - exit 1 -fi +function main { + echo "enter release string according to semantic versioning (e.g. v1.2.3)." + read -r INPUT + if [[ ! "${INPUT}" =~ ^v[0-9]+.[0-9]+.[0-9]+ ]]; then + echo "Expected 'version' param of the form 'v..' but got '${INPUT}'" + exit 1 + fi -VERSION=${INPUT#v} -RELEASE_VERSION="${VERSION}" -MINOR_VERSION=$(echo "${VERSION}" | cut -d. -f 1-2) + VERSION=${INPUT#v} + RELEASE_VERSION="${VERSION}" + MINOR_VERSION=$(echo "${VERSION}" | cut -d. -f 1-2) + RELEASE_BRANCH="release-${MINOR_VERSION}" -REPOSITORY=${REPOSITORY:-"git@github.com:etcd-io/bbolt.git"} -REMOTE="${REMOTE:-"origin"}" + REPOSITORY=${REPOSITORY:-"git@github.com:etcd-io/bbolt.git"} + REMOTE="${REMOTE:-"origin"}" -remote_tag_exists=$(git ls-remote --tags "${REPOSITORY}" | grep -c "${INPUT}" || true) -if [ "${remote_tag_exists}" -gt 0 ]; then - echo "Release version tag exists on remote." - exit 1 -fi + remote_tag_exists=$(git ls-remote --tags "${REPOSITORY}" | grep -c "${INPUT}" || true) + if [ "${remote_tag_exists}" -gt 0 ]; then + echo "Release version tag exists on remote." + exit 1 + fi -# ensuring the minor-version is identical. -source_version=$(grep -E "\s+Version\s*=" ./version/version.go | sed -e "s/.*\"\(.*\)\".*/\1/g") -if [[ "${source_version}" != "${RELEASE_VERSION}" ]]; then - source_minor_version=$(echo "${source_version}" | cut -d. 
-f 1-2) - if [[ "${source_minor_version}" != "${MINOR_VERSION}" ]]; then - echo "Wrong bbolt minor version in version.go. Expected ${MINOR_VERSION} but got ${source_minor_version}. Aborting." - exit 1 - fi -fi + # Set up release directory. + local reldir="/tmp/bbolt-release-${VERSION}" + echo "Preparing temporary directory: ${reldir}" + if [ ! -d "${reldir}/bbolt" ]; then + mkdir -p "${reldir}" + cd "${reldir}" + git clone "${REPOSITORY}" --branch "${RELEASE_BRANCH}" --depth 1 + fi + cd "${reldir}/bbolt" || exit 2 + git checkout "${RELEASE_BRANCH}" || exit 2 + git fetch origin + git reset --hard "origin/${RELEASE_BRANCH}" -# bump 'version.go'. -echo "Updating version from '${source_version}' to '${RELEASE_VERSION}' in 'version.go'" -sed -i "s/${source_version}/${RELEASE_VERSION}/g" ./version/version.go + # ensuring the minor-version is identical. + source_version=$(grep -E "\s+Version\s*=" ./version/version.go | sed -e "s/.*\"\(.*\)\".*/\1/g") + if [[ "${source_version}" != "${RELEASE_VERSION}" ]]; then + source_minor_version=$(echo "${source_version}" | cut -d. -f 1-2) + if [[ "${source_minor_version}" != "${MINOR_VERSION}" ]]; then + echo "Wrong bbolt minor version in version.go. Expected ${MINOR_VERSION} but got ${source_minor_version}. Aborting." + exit 1 + fi + fi -# push 'version.go' to remote. -echo "committing 'version.go'" -git add ./version/version.go -git commit -s -m "Update version to ${VERSION}" -git push "${REMOTE}" "${INPUT}" -echo "'version.go' has been committed to remote repo." + # bump 'version.go'. + echo "Updating version from '${source_version}' to '${RELEASE_VERSION}' in 'version.go'" + sed -i "s/${source_version}/${RELEASE_VERSION}/g" ./version/version.go + + # push 'version.go' to remote. + echo "committing 'version.go'" + git add ./version/version.go + git commit -s -m "Update version to ${VERSION}" + git push origin "${RELEASE_BRANCH}" + echo "'version.go' has been committed to remote repo." + + # create tag and push to remote. 
+ echo "Creating new tag for '${INPUT}'" + key_id=$(get_gpg_key) || return 2 + git tag --local-user "${key_id}" --sign "${INPUT}" --message "${INPUT}" + git push origin "${INPUT}" + echo "Tag '${INPUT}' has been created and pushed to remote repo." + echo "SUCCESS" +} -# create tag and push to remote. -echo "Creating new tag for '${INPUT}'" -key_id=$(get_gpg_key) || return 2 -git tag --local-user "${key_id}" --sign "${INPUT}" --message "${INPUT}" -git push "${REMOTE}" "${INPUT}" -echo "Tag '${INPUT}' has been created and pushed to remote repo." -echo "SUCCESS" +main From 979ee92ac718217e93041a827ba91bef14ef2d3c Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Wed, 25 Jun 2025 12:19:50 -0700 Subject: [PATCH 382/439] release: accept release version as argument Make it consistent with other of our release scripts. Signed-off-by: Ivan Valdes --- scripts/release.sh | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/scripts/release.sh b/scripts/release.sh index 47fb7aba8..86baa1e0f 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -20,22 +20,25 @@ function get_gpg_key { # === Main Script Logic === function main { - echo "enter release string according to semantic versioning (e.g. v1.2.3)." - read -r INPUT - if [[ ! "${INPUT}" =~ ^v[0-9]+.[0-9]+.[0-9]+ ]]; then - echo "Expected 'version' param of the form 'v..' but got '${INPUT}'" - exit 1 + VERSION="$1" + + if [ -z "${VERSION}" ]; then + read -p "Release version (e.g., v1.2.3) " -r VERSION + if [[ ! "${VERSION}" =~ ^v[0-9]+.[0-9]+.[0-9]+ ]]; then + echo "Expected 'version' param of the form 'v..' but got '${VERSION}'" + exit 1 + fi fi - VERSION=${INPUT#v} - RELEASE_VERSION="${VERSION}" - MINOR_VERSION=$(echo "${VERSION}" | cut -d. -f 1-2) + VERSION=v${VERSION#v} + RELEASE_VERSION="${VERSION#v}" + MINOR_VERSION=$(echo "${RELEASE_VERSION}" | cut -d. 
-f 1-2) RELEASE_BRANCH="release-${MINOR_VERSION}" REPOSITORY=${REPOSITORY:-"git@github.com:etcd-io/bbolt.git"} REMOTE="${REMOTE:-"origin"}" - remote_tag_exists=$(git ls-remote --tags "${REPOSITORY}" | grep -c "${INPUT}" || true) + remote_tag_exists=$(git ls-remote --tags "${REPOSITORY}" | grep -c "${VERSION}" || true) if [ "${remote_tag_exists}" -gt 0 ]; then echo "Release version tag exists on remote." exit 1 @@ -76,12 +79,12 @@ function main { echo "'version.go' has been committed to remote repo." # create tag and push to remote. - echo "Creating new tag for '${INPUT}'" + echo "Creating new tag for '${VERSION}'" key_id=$(get_gpg_key) || return 2 - git tag --local-user "${key_id}" --sign "${INPUT}" --message "${INPUT}" - git push origin "${INPUT}" - echo "Tag '${INPUT}' has been created and pushed to remote repo." + git tag --local-user "${key_id}" --sign "${VERSION}" --message "${VERSION}" + git push origin "${VERSION}" + echo "Tag '${VERSION}' has been created and pushed to remote repo." echo "SUCCESS" } -main +main "$1" From a5955bdb5afa0cd4f3ba3aed9172b57dd886b4e1 Mon Sep 17 00:00:00 2001 From: Ivan Valdes Date: Wed, 25 Jun 2025 12:24:12 -0700 Subject: [PATCH 383/439] release: narrow down existing tag search Check for the actual tag by an explicit query, rather than listing all tags available at the repository. 
Signed-off-by: Ivan Valdes --- scripts/release.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/release.sh b/scripts/release.sh index 86baa1e0f..8e702bf43 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -36,9 +36,9 @@ function main { RELEASE_BRANCH="release-${MINOR_VERSION}" REPOSITORY=${REPOSITORY:-"git@github.com:etcd-io/bbolt.git"} - REMOTE="${REMOTE:-"origin"}" - remote_tag_exists=$(git ls-remote --tags "${REPOSITORY}" | grep -c "${VERSION}" || true) + local remote_tag_exists + remote_tag_exists=$(git ls-remote "${REPOSITORY}" "refs/tags/${VERSION}" | grep -c "${VERSION}" || true) if [ "${remote_tag_exists}" -gt 0 ]; then echo "Release version tag exists on remote." exit 1 From df4e63a768d0200295acf25408d96d2728915335 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Thu, 26 Jun 2025 11:34:23 +0100 Subject: [PATCH 384/439] Update changelog to cover the meta page protection fix Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.3.md | 7 +++++++ CHANGELOG/CHANGELOG-1.4.md | 1 + 2 files changed, 8 insertions(+) diff --git a/CHANGELOG/CHANGELOG-1.3.md b/CHANGELOG/CHANGELOG-1.3.md index 23009eb95..32502d39f 100644 --- a/CHANGELOG/CHANGELOG-1.3.md +++ b/CHANGELOG/CHANGELOG-1.3.md @@ -2,6 +2,13 @@ Note that we start to track changes starting from v1.3.7.
+## v1.3.12(TBD) + +### BoltDB +- [Add protection on meta page when it's being written](https://github.com/etcd-io/bbolt/pull/1006) + +
+ ## v1.3.11(2024-08-21) ### BoltDB diff --git a/CHANGELOG/CHANGELOG-1.4.md b/CHANGELOG/CHANGELOG-1.4.md index cab08b859..749b533c4 100644 --- a/CHANGELOG/CHANGELOG-1.4.md +++ b/CHANGELOG/CHANGELOG-1.4.md @@ -5,6 +5,7 @@ ### BoltDB - [Fix the compilation issue on aix, android and solaris due to wrong use of `maxMapSize`](https://github.com/etcd-io/bbolt/pull/990) +- [Add protection on meta page when it's being written](https://github.com/etcd-io/bbolt/pull/1005)
From 2544d48bd8c373861a0d2713fd9672664d1e985c Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 27 Jun 2025 11:58:22 +0100 Subject: [PATCH 385/439] Update v1.4.2's release date Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.4.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG/CHANGELOG-1.4.md b/CHANGELOG/CHANGELOG-1.4.md index 749b533c4..66578a096 100644 --- a/CHANGELOG/CHANGELOG-1.4.md +++ b/CHANGELOG/CHANGELOG-1.4.md @@ -1,7 +1,7 @@
-## v1.4.2(TBD) +## v1.4.2(2025-06-27) ### BoltDB - [Fix the compilation issue on aix, android and solaris due to wrong use of `maxMapSize`](https://github.com/etcd-io/bbolt/pull/990) From 3019cbc2276c44d6a75a3aad5d51038e68b077a3 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Fri, 27 Jun 2025 14:39:56 +0200 Subject: [PATCH 386/439] chore(release): fix release script for MacOS Signed-off-by: Mustafa Elbehery --- scripts/release.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/release.sh b/scripts/release.sh index 8e702bf43..95a7ac5da 100755 --- a/scripts/release.sh +++ b/scripts/release.sh @@ -69,7 +69,11 @@ function main { # bump 'version.go'. echo "Updating version from '${source_version}' to '${RELEASE_VERSION}' in 'version.go'" - sed -i "s/${source_version}/${RELEASE_VERSION}/g" ./version/version.go + if [[ "$OSTYPE" == "darwin"* ]]; then + sed -i '' "s/${source_version}/${RELEASE_VERSION}/g" ./version/version.go + else + sed -i "s/${source_version}/${RELEASE_VERSION}/g" ./version/version.go + fi # push 'version.go' to remote. 
echo "committing 'version.go'" From e26dc6d02f5ae985699f1514da506bbdca00459d Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Mon, 30 Jun 2025 19:00:14 +0200 Subject: [PATCH 387/439] chore(cmd): migrate command to cobra style Signed-off-by: Mustafa Elbehery --- cmd/bbolt/command_buckets.go | 46 +++++++++++++++++++++ cmd/bbolt/command_buckets_test.go | 67 +++++++++++++++++++++++++++++++ cmd/bbolt/command_root.go | 1 + cmd/bbolt/main.go | 59 --------------------------- cmd/bbolt/main_test.go | 30 -------------- 5 files changed, 114 insertions(+), 89 deletions(-) create mode 100644 cmd/bbolt/command_buckets.go create mode 100644 cmd/bbolt/command_buckets_test.go diff --git a/cmd/bbolt/command_buckets.go b/cmd/bbolt/command_buckets.go new file mode 100644 index 000000000..b0377edb9 --- /dev/null +++ b/cmd/bbolt/command_buckets.go @@ -0,0 +1,46 @@ +package main + +import ( + "fmt" + + "github.com/spf13/cobra" + + bolt "go.etcd.io/bbolt" +) + +func newBucketsCommand() *cobra.Command { + bucketsCmd := &cobra.Command{ + Use: "buckets ", + Short: "print a list of buckets in bbolt database", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return bucketsFunc(cmd, args[0]) + }, + } + + return bucketsCmd +} + +func bucketsFunc(cmd *cobra.Command, dbPath string) error { + if _, err := checkSourceDBPath(dbPath); err != nil { + return err + } + + // Open database. + db, err := bolt.Open(dbPath, 0600, &bolt.Options{ + ReadOnly: true, + PreLoadFreelist: true, + }) + if err != nil { + return err + } + defer db.Close() + + // Print buckets. 
+ return db.View(func(tx *bolt.Tx) error { + return tx.ForEach(func(name []byte, _ *bolt.Bucket) error { + fmt.Fprintln(cmd.OutOrStdout(), string(name)) + return nil + }) + }) +} diff --git a/cmd/bbolt/command_buckets_test.go b/cmd/bbolt/command_buckets_test.go new file mode 100644 index 000000000..39ffed66f --- /dev/null +++ b/cmd/bbolt/command_buckets_test.go @@ -0,0 +1,67 @@ +package main_test + +import ( + "bytes" + "io" + "testing" + + "github.com/stretchr/testify/require" + + bolt "go.etcd.io/bbolt" + main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/internal/btesting" +) + +// Ensure the "buckets" command can print a list of buckets. +func TestBucketsCommand_Run(t *testing.T) { + + testCases := []struct { + name string + args []string + expErr error + expOutput string + }{ + { + name: "buckets all buckets in bbolt database", + args: []string{"buckets", "path"}, + expErr: nil, + expOutput: "bar\nbaz\nfoo\n", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + t.Log("Creating sample DB") + db := btesting.MustCreateDB(t) + if err := db.Update(func(tx *bolt.Tx) error { + for _, name := range []string{"foo", "bar", "baz"} { + _, err := tx.CreateBucket([]byte(name)) + if err != nil { + return err + } + } + return nil + }); err != nil { + t.Fatal(err) + } + db.Close() + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + t.Log("Running buckets cmd") + rootCmd := main.NewRootCommand() + outputBuf := bytes.NewBufferString("") + rootCmd.SetOut(outputBuf) + + tc.args[1] = db.Path() + rootCmd.SetArgs(tc.args) + err := rootCmd.Execute() + require.Equal(t, tc.expErr, err) + + t.Log("Checking output") + output, err := io.ReadAll(outputBuf) + require.NoError(t, err) + require.Containsf(t, string(output), tc.expOutput, "unexpected stdout:\n\n%s", string(output)) + }) + } +} diff --git a/cmd/bbolt/command_root.go b/cmd/bbolt/command_root.go index 0336ea36c..a278458b7 100644 --- a/cmd/bbolt/command_root.go +++ 
b/cmd/bbolt/command_root.go @@ -21,6 +21,7 @@ func NewRootCommand() *cobra.Command { newSurgeryCommand(), newInspectCommand(), newCheckCommand(), + newBucketsCommand(), ) return rootCmd diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 04050fe50..96c425e5b 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -122,8 +122,6 @@ func (m *Main) Run(args ...string) error { return ErrUsage case "bench": return newBenchCommand(m).Run(args[1:]...) - case "buckets": - return newBucketsCommand(m).Run(args[1:]...) case "compact": return newCompactCommand(m).Run(args[1:]...) case "dump": @@ -763,63 +761,6 @@ experience corruption, please submit a ticket to the etcd-io/bbolt project page: `, "\n") } -// bucketsCommand represents the "buckets" command execution. -type bucketsCommand struct { - baseCommand -} - -// newBucketsCommand returns a bucketsCommand. -func newBucketsCommand(m *Main) *bucketsCommand { - c := &bucketsCommand{} - c.baseCommand = m.baseCommand - return c -} - -// Run executes the command. -func (cmd *bucketsCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open database. - db, err := bolt.Open(path, 0600, &bolt.Options{ReadOnly: true}) - if err != nil { - return err - } - defer db.Close() - - // Print buckets. - return db.View(func(tx *bolt.Tx) error { - return tx.ForEach(func(name []byte, _ *bolt.Bucket) error { - fmt.Fprintln(cmd.Stdout, string(name)) - return nil - }) - }) -} - -// Usage returns the help message. -func (cmd *bucketsCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt buckets PATH - -Print a list of buckets. 
-`, "\n") -} - // keysCommand represents the "keys" command execution. type keysCommand struct { baseCommand diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index 173d6595f..d4cf06de0 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -273,36 +273,6 @@ func TestStatsCommand_Run(t *testing.T) { } } -// Ensure the "buckets" command can print a list of buckets. -func TestBucketsCommand_Run(t *testing.T) { - db := btesting.MustCreateDB(t) - - if err := db.Update(func(tx *bolt.Tx) error { - for _, name := range []string{"foo", "bar", "baz"} { - _, err := tx.CreateBucket([]byte(name)) - if err != nil { - return err - } - } - return nil - }); err != nil { - t.Fatal(err) - } - db.Close() - - defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) - - expected := "bar\nbaz\nfoo\n" - - // Run the command. - m := NewMain() - if err := m.Run("buckets", db.Path()); err != nil { - t.Fatal(err) - } else if actual := m.Stdout.String(); actual != expected { - t.Fatalf("unexpected stdout:\n\n%s", actual) - } -} - // Ensure the "keys" command can print a list of keys for a bucket. 
func TestKeysCommand_Run(t *testing.T) { testCases := []struct { From dcb91a881542142e9d7e0dfcefafa28b15662cc7 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 1 Jul 2025 16:30:37 +0200 Subject: [PATCH 388/439] chore(cmd): migrate info command to cobra style Signed-off-by: Mustafa Elbehery --- cmd/bbolt/command_info.go | 43 +++++++++++++++++++++++++ cmd/bbolt/command_info_test.go | 42 +++++++++++++++++++++++++ cmd/bbolt/command_root.go | 1 + cmd/bbolt/main.go | 57 ---------------------------------- cmd/bbolt/main_test.go | 14 --------- 5 files changed, 86 insertions(+), 71 deletions(-) create mode 100644 cmd/bbolt/command_info.go create mode 100644 cmd/bbolt/command_info_test.go diff --git a/cmd/bbolt/command_info.go b/cmd/bbolt/command_info.go new file mode 100644 index 000000000..b7e3922ab --- /dev/null +++ b/cmd/bbolt/command_info.go @@ -0,0 +1,43 @@ +package main + +import ( + "fmt" + + "github.com/spf13/cobra" + + bolt "go.etcd.io/bbolt" +) + +func newInfoCommand() *cobra.Command { + infoCmd := &cobra.Command{ + Use: "info ", + Short: "prints basic information about the bbolt database.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return infoFunc(cmd, args[0]) + }, + } + + return infoCmd +} + +func infoFunc(cmd *cobra.Command, dbPath string) error { + if _, err := checkSourceDBPath(dbPath); err != nil { + return err + } + + // Open database. + db, err := bolt.Open(dbPath, 0600, &bolt.Options{ + ReadOnly: true, + }) + if err != nil { + return err + } + defer db.Close() + + // Print basic database info. 
+ info := db.Info() + fmt.Fprintf(cmd.OutOrStdout(), "Page Size: %d\n", info.PageSize) + + return nil +} diff --git a/cmd/bbolt/command_info_test.go b/cmd/bbolt/command_info_test.go new file mode 100644 index 000000000..bd608043f --- /dev/null +++ b/cmd/bbolt/command_info_test.go @@ -0,0 +1,42 @@ +package main_test + +import ( + "bytes" + "errors" + "io" + "testing" + + "github.com/stretchr/testify/require" + + main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/internal/btesting" +) + +// Ensure the "info" command can print information about a database. +func TestInfoCommand_Run(t *testing.T) { + t.Log("Creating sample DB") + db := btesting.MustCreateDB(t) + db.Close() + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + t.Log("Running info cmd") + rootCmd := main.NewRootCommand() + outputBuf := bytes.NewBufferString("") + rootCmd.SetOut(outputBuf) + + rootCmd.SetArgs([]string{"info", db.Path()}) + err := rootCmd.Execute() + require.NoError(t, err) + + t.Log("Checking output") + _, err = io.ReadAll(outputBuf) + require.NoError(t, err) +} + +func TestInfoCommand_NoArgs(t *testing.T) { + expErr := errors.New("accepts 1 arg(s), received 0") + rootCmd := main.NewRootCommand() + rootCmd.SetArgs([]string{"info"}) + err := rootCmd.Execute() + require.ErrorContains(t, err, expErr.Error()) +} diff --git a/cmd/bbolt/command_root.go b/cmd/bbolt/command_root.go index a278458b7..d7ed53bb4 100644 --- a/cmd/bbolt/command_root.go +++ b/cmd/bbolt/command_root.go @@ -22,6 +22,7 @@ func NewRootCommand() *cobra.Command { newInspectCommand(), newCheckCommand(), newBucketsCommand(), + newInfoCommand(), ) return rootCmd diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 96c425e5b..0e3adc53b 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -130,8 +130,6 @@ func (m *Main) Run(args ...string) error { return newPageItemCommand(m).Run(args[1:]...) case "get": return newGetCommand(m).Run(args[1:]...) - case "info": - return newInfoCommand(m).Run(args[1:]...) 
case "keys": return newKeysCommand(m).Run(args[1:]...) case "page": @@ -177,61 +175,6 @@ Use "bbolt [command] -h" for more information about a command. `, "\n") } -// infoCommand represents the "info" command execution. -type infoCommand struct { - baseCommand -} - -// newInfoCommand returns a infoCommand. -func newInfoCommand(m *Main) *infoCommand { - c := &infoCommand{} - c.baseCommand = m.baseCommand - return c -} - -// Run executes the command. -func (cmd *infoCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open the database. - db, err := bolt.Open(path, 0600, &bolt.Options{ReadOnly: true}) - if err != nil { - return err - } - defer db.Close() - - // Print basic database info. - info := db.Info() - fmt.Fprintf(cmd.Stdout, "Page Size: %d\n", info.PageSize) - - return nil -} - -// Usage returns the help message. -func (cmd *infoCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt info PATH - -Info prints basic information about the Bolt database at PATH. -`, "\n") -} - // dumpCommand represents the "dump" command execution. type dumpCommand struct { baseCommand diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index d4cf06de0..75af3e939 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -23,20 +23,6 @@ import ( "go.etcd.io/bbolt/internal/guts_cli" ) -// Ensure the "info" command can print information about a database. -func TestInfoCommand_Run(t *testing.T) { - db := btesting.MustCreateDB(t) - db.Close() - - defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) - - // Run the info command. 
- m := NewMain() - if err := m.Run("info", db.Path()); err != nil { - t.Fatal(err) - } -} - // Ensure the "stats" command executes correctly with an empty database. func TestStatsCommand_Run_EmptyDatabase(t *testing.T) { // Ignore From 819ac0a21169c889e735e5892b6171bf0637e30d Mon Sep 17 00:00:00 2001 From: "shenmu.wy" Date: Tue, 1 Jul 2025 21:25:11 +0800 Subject: [PATCH 389/439] cmd: migrate compact command to cobra style Signed-off-by: shenmu.wy --- cmd/bbolt/command_compact.go | 94 +++++++++++++++++++++++++++ cmd/bbolt/command_compact_test.go | 101 +++++++++++++++++++++++++++++ cmd/bbolt/command_root.go | 1 + cmd/bbolt/main.go | 104 ------------------------------ cmd/bbolt/main_test.go | 88 ------------------------- 5 files changed, 196 insertions(+), 192 deletions(-) create mode 100644 cmd/bbolt/command_compact.go create mode 100644 cmd/bbolt/command_compact_test.go diff --git a/cmd/bbolt/command_compact.go b/cmd/bbolt/command_compact.go new file mode 100644 index 000000000..673e7a6f1 --- /dev/null +++ b/cmd/bbolt/command_compact.go @@ -0,0 +1,94 @@ +package main + +import ( + "errors" + "fmt" + "os" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + bolt "go.etcd.io/bbolt" +) + +type compactOptions struct { + dstPath string + txMaxSize int64 + dstNoSync bool +} + +func newCompactCommand() *cobra.Command { + var o compactOptions + var compactCmd = &cobra.Command{ + Use: "compact [options] -o ", + Short: "creates a compacted copy of the database from source path to the destination path, preserving the original.", + Long: `compact opens a database at source path and walks it recursively, copying keys +as they are found from all buckets, to a newly created database at the destination path. 
+The original database is left untouched.`, + Args: cobra.MinimumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + if err := o.Validate(args[0]); err != nil { + return err + } + return o.Run(cmd, args[0]) + }, + } + o.AddFlags(compactCmd.Flags()) + + return compactCmd +} + +func (o *compactOptions) AddFlags(fs *pflag.FlagSet) { + fs.StringVarP(&o.dstPath, "output", "o", "", "") + fs.Int64Var(&o.txMaxSize, "tx-max-size", 65536, "") + fs.BoolVar(&o.dstNoSync, "no-sync", false, "") + _ = cobra.MarkFlagRequired(fs, "output") +} + +func (o *compactOptions) Validate(srcPath string) (err error) { + if o.dstPath == "" { + return errors.New("output file required") + } + + return +} + +func (o *compactOptions) Run(cmd *cobra.Command, srcPath string) (err error) { + + // ensure source file exists. + fi, err := checkSourceDBPath(srcPath) + if err != nil { + return err + } + initialSize := fi.Size() + + // open source database. + src, err := bolt.Open(srcPath, 0400, &bolt.Options{ReadOnly: true}) + if err != nil { + return err + } + defer src.Close() + + // open destination database. + dst, err := bolt.Open(o.dstPath, fi.Mode(), &bolt.Options{NoSync: o.dstNoSync}) + if err != nil { + return err + } + defer dst.Close() + + // run compaction. + if err := bolt.Compact(dst, src, o.txMaxSize); err != nil { + return err + } + + // report stats on new size. 
+ fi, err = os.Stat(o.dstPath) + if err != nil { + return err + } else if fi.Size() == 0 { + return fmt.Errorf("zero db size") + } + fmt.Fprintf(cmd.OutOrStdout(), "%d -> %d bytes (gain=%.2fx)\n", initialSize, fi.Size(), float64(initialSize)/float64(fi.Size())) + + return +} diff --git a/cmd/bbolt/command_compact_test.go b/cmd/bbolt/command_compact_test.go new file mode 100644 index 000000000..121c1c96d --- /dev/null +++ b/cmd/bbolt/command_compact_test.go @@ -0,0 +1,101 @@ +package main_test + +import ( + crypto "crypto/rand" + "errors" + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + bolt "go.etcd.io/bbolt" + main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/internal/btesting" +) + +// Ensure the "compact" command can print a list of buckets. +func TestCompactCommand_Run(t *testing.T) { + dstdb := btesting.MustCreateDB(t) + dstdb.Close() + + t.Log("Creating sample DB") + db := btesting.MustCreateDB(t) + if err := db.Update(func(tx *bolt.Tx) error { + n := 2 + rand.Intn(5) + for i := 0; i < n; i++ { + k := []byte(fmt.Sprintf("b%d", i)) + b, err := tx.CreateBucketIfNotExists(k) + if err != nil { + return err + } + if err := b.SetSequence(uint64(i)); err != nil { + return err + } + if err := fillBucket(b, append(k, '.')); err != nil { + return err + } + } + return nil + }); err != nil { + t.Fatal(err) + } + + // make the db grow by adding large values, and delete them. 
+ if err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte("large_vals")) + if err != nil { + return err + } + n := 5 + rand.Intn(5) + for i := 0; i < n; i++ { + v := make([]byte, 1000*1000*(1+rand.Intn(5))) + _, err := crypto.Read(v) + if err != nil { + return err + } + if err := b.Put([]byte(fmt.Sprintf("l%d", i)), v); err != nil { + return err + } + } + return nil + }); err != nil { + t.Fatal(err) + } + if err := db.Update(func(tx *bolt.Tx) error { + c := tx.Bucket([]byte("large_vals")).Cursor() + for k, _ := c.First(); k != nil; k, _ = c.Next() { + if err := c.Delete(); err != nil { + return err + } + } + return tx.DeleteBucket([]byte("large_vals")) + }); err != nil { + t.Fatal(err) + } + db.Close() + dbChk, err := chkdb(db.Path()) + require.NoError(t, err) + + t.Log("Running compact cmd") + rootCmd := main.NewRootCommand() + rootCmd.SetArgs([]string{"compact", "-o", dstdb.Path(), db.Path()}) + err = rootCmd.Execute() + require.NoError(t, err) + + t.Log("Checking output") + dbChkAfterCompact, err := chkdb(db.Path()) + require.NoError(t, err) + dstdbChk, err := chkdb(dstdb.Path()) + require.NoError(t, err) + require.Equal(t, dbChk, dbChkAfterCompact, "the original db has been touched") + require.Equal(t, dbChk, dstdbChk, "the compacted db data isn't the same than the original db") +} + +func TestCompactCommand_NoArgs(t *testing.T) { + expErr := errors.New("requires at least 1 arg(s), only received 0") + rootCmd := main.NewRootCommand() + rootCmd.SetArgs([]string{"compact"}) + err := rootCmd.Execute() + require.ErrorContains(t, err, expErr.Error()) +} diff --git a/cmd/bbolt/command_root.go b/cmd/bbolt/command_root.go index d7ed53bb4..020d37005 100644 --- a/cmd/bbolt/command_root.go +++ b/cmd/bbolt/command_root.go @@ -23,6 +23,7 @@ func NewRootCommand() *cobra.Command { newCheckCommand(), newBucketsCommand(), newInfoCommand(), + newCompactCommand(), ) return rootCmd diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 
0e3adc53b..7467e9299 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -122,8 +122,6 @@ func (m *Main) Run(args ...string) error { return ErrUsage case "bench": return newBenchCommand(m).Run(args[1:]...) - case "compact": - return newCompactCommand(m).Run(args[1:]...) case "dump": return newDumpCommand(m).Run(args[1:]...) case "page-item": @@ -1555,108 +1553,6 @@ func stringToPages(strs []string) ([]uint64, error) { return a, nil } -// compactCommand represents the "compact" command execution. -type compactCommand struct { - baseCommand - - SrcPath string - DstPath string - TxMaxSize int64 - DstNoSync bool -} - -// newCompactCommand returns a CompactCommand. -func newCompactCommand(m *Main) *compactCommand { - c := &compactCommand{} - c.baseCommand = m.baseCommand - return c -} - -// Run executes the command. -func (cmd *compactCommand) Run(args ...string) (err error) { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - fs.SetOutput(io.Discard) - fs.StringVar(&cmd.DstPath, "o", "", "") - fs.Int64Var(&cmd.TxMaxSize, "tx-max-size", 65536, "") - fs.BoolVar(&cmd.DstNoSync, "no-sync", false, "") - if err := fs.Parse(args); err == flag.ErrHelp { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } else if err != nil { - return err - } else if cmd.DstPath == "" { - return errors.New("output file required") - } - - // Require database paths. - cmd.SrcPath = fs.Arg(0) - if cmd.SrcPath == "" { - return ErrPathRequired - } - - // Ensure source file exists. - fi, err := os.Stat(cmd.SrcPath) - if os.IsNotExist(err) { - return ErrFileNotFound - } else if err != nil { - return err - } - initialSize := fi.Size() - - // Open source database. - src, err := bolt.Open(cmd.SrcPath, 0400, &bolt.Options{ReadOnly: true}) - if err != nil { - return err - } - defer src.Close() - - // Open destination database. 
- dst, err := bolt.Open(cmd.DstPath, fi.Mode(), &bolt.Options{NoSync: cmd.DstNoSync}) - if err != nil { - return err - } - defer dst.Close() - - // Run compaction. - if err := bolt.Compact(dst, src, cmd.TxMaxSize); err != nil { - return err - } - - // Report stats on new size. - fi, err = os.Stat(cmd.DstPath) - if err != nil { - return err - } else if fi.Size() == 0 { - return fmt.Errorf("zero db size") - } - fmt.Fprintf(cmd.Stdout, "%d -> %d bytes (gain=%.2fx)\n", initialSize, fi.Size(), float64(initialSize)/float64(fi.Size())) - - return nil -} - -// Usage returns the help message. -func (cmd *compactCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt compact [options] -o DST SRC - -Compact opens a database at SRC path and walks it recursively, copying keys -as they are found from all buckets, to a newly created database at DST path. - -The original database is left untouched. - -Additional options include: - - -tx-max-size NUM - Specifies the maximum size of individual transactions. - Defaults to 64KB. 
- - -no-sync BOOL - Skip fsync() calls after each commit (fast but unsafe) - Defaults to false -`, "\n") -} - type cmdKvStringer struct{} func (cmdKvStringer) KeyToString(key []byte) string { diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index 75af3e939..2b07aef0c 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -498,94 +498,6 @@ func NewMain() *Main { return m } -func TestCompactCommand_Run(t *testing.T) { - dstdb := btesting.MustCreateDB(t) - dstdb.Close() - - // fill the db - db := btesting.MustCreateDB(t) - if err := db.Update(func(tx *bolt.Tx) error { - n := 2 + rand.Intn(5) - for i := 0; i < n; i++ { - k := []byte(fmt.Sprintf("b%d", i)) - b, err := tx.CreateBucketIfNotExists(k) - if err != nil { - return err - } - if err := b.SetSequence(uint64(i)); err != nil { - return err - } - if err := fillBucket(b, append(k, '.')); err != nil { - return err - } - } - return nil - }); err != nil { - t.Fatal(err) - } - - // make the db grow by adding large values, and delete them. 
- if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists([]byte("large_vals")) - if err != nil { - return err - } - n := 5 + rand.Intn(5) - for i := 0; i < n; i++ { - v := make([]byte, 1000*1000*(1+rand.Intn(5))) - _, err := crypto.Read(v) - if err != nil { - return err - } - if err := b.Put([]byte(fmt.Sprintf("l%d", i)), v); err != nil { - return err - } - } - return nil - }); err != nil { - t.Fatal(err) - } - if err := db.Update(func(tx *bolt.Tx) error { - c := tx.Bucket([]byte("large_vals")).Cursor() - for k, _ := c.First(); k != nil; k, _ = c.Next() { - if err := c.Delete(); err != nil { - return err - } - } - return tx.DeleteBucket([]byte("large_vals")) - }); err != nil { - t.Fatal(err) - } - db.Close() - - dbChk, err := chkdb(db.Path()) - if err != nil { - t.Fatal(err) - } - - m := NewMain() - if err := m.Run("compact", "-o", dstdb.Path(), db.Path()); err != nil { - t.Fatal(err) - } - - dbChkAfterCompact, err := chkdb(db.Path()) - if err != nil { - t.Fatal(err) - } - - dstdbChk, err := chkdb(dstdb.Path()) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(dbChk, dbChkAfterCompact) { - t.Error("the original db has been touched") - } - if !bytes.Equal(dbChk, dstdbChk) { - t.Error("the compacted db data isn't the same than the original db") - } -} - func TestCommands_Run_NoArgs(t *testing.T) { testCases := []struct { name string From 96f44720d0537290c8fb7bf83845a9d3d794667a Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 1 Jul 2025 17:20:16 +0200 Subject: [PATCH 390/439] chore(cmd): migrate stats command to cobra style Signed-off-by: Mustafa Elbehery # Conflicts: # cmd/bbolt/command_root.go --- cmd/bbolt/command_root.go | 1 + cmd/bbolt/command_stats.go | 132 +++++++++++++++++++++++++++ cmd/bbolt/command_stats_test.go | 154 ++++++++++++++++++++++++++++++++ cmd/bbolt/main.go | 133 --------------------------- cmd/bbolt/main_test.go | 120 ------------------------- 5 files changed, 287 insertions(+), 253 deletions(-) 
create mode 100644 cmd/bbolt/command_stats.go create mode 100644 cmd/bbolt/command_stats_test.go diff --git a/cmd/bbolt/command_root.go b/cmd/bbolt/command_root.go index 020d37005..cbc544bf9 100644 --- a/cmd/bbolt/command_root.go +++ b/cmd/bbolt/command_root.go @@ -24,6 +24,7 @@ func NewRootCommand() *cobra.Command { newBucketsCommand(), newInfoCommand(), newCompactCommand(), + newStatsCommand(), ) return rootCmd diff --git a/cmd/bbolt/command_stats.go b/cmd/bbolt/command_stats.go new file mode 100644 index 000000000..d7c37205d --- /dev/null +++ b/cmd/bbolt/command_stats.go @@ -0,0 +1,132 @@ +package main + +import ( + "bytes" + "fmt" + "strings" + + "github.com/spf13/cobra" + + bolt "go.etcd.io/bbolt" +) + +func newStatsCommand() *cobra.Command { + statsCmd := &cobra.Command{ + Use: "stats ", + Short: "print stats of bbolt database", + Long: strings.TrimLeft(` +usage: bolt stats PATH + +Stats performs an extensive search of the database to track every page +reference. It starts at the current meta page and recursively iterates +through every accessible bucket. + +The following errors can be reported: + + already freed + The page is referenced more than once in the freelist. + + unreachable unfreed + The page is not referenced by a bucket or in the freelist. + + reachable freed + The page is referenced by a bucket but is also in the freelist. + + out of bounds + A page is referenced that is above the high water mark. + + multiple references + A page is referenced by more than one other page. + + invalid type + The page type is not "meta", "leaf", "branch", or "freelist". + +No errors should occur in your database. 
However, if for some reason you +experience corruption, please submit a ticket to the etcd-io/bbolt project page: + + https://github.com/etcd-io/bbolt/issues +`, "\n"), + Args: cobra.RangeArgs(1, 2), + RunE: func(cmd *cobra.Command, args []string) error { + prefix := "" + if len(args) > 1 { + prefix = args[1] + } + + return statsFunc(cmd, args[0], prefix) + }, + } + + return statsCmd +} + +func statsFunc(cmd *cobra.Command, dbPath string, prefix string) error { + if _, err := checkSourceDBPath(dbPath); err != nil { + return err + } + + // open database. + db, err := bolt.Open(dbPath, 0600, &bolt.Options{ + ReadOnly: true, + PreLoadFreelist: true, + }) + if err != nil { + return err + } + defer db.Close() + + return db.View(func(tx *bolt.Tx) error { + var s bolt.BucketStats + var count int + if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { + if bytes.HasPrefix(name, []byte(prefix)) { + s.Add(b.Stats()) + count += 1 + } + return nil + }); err != nil { + return err + } + + fmt.Fprintf(cmd.OutOrStdout(), "Aggregate statistics for %d buckets\n\n", count) + + fmt.Fprintln(cmd.OutOrStdout(), "Page count statistics") + fmt.Fprintf(cmd.OutOrStdout(), "\tNumber of logical branch pages: %d\n", s.BranchPageN) + fmt.Fprintf(cmd.OutOrStdout(), "\tNumber of physical branch overflow pages: %d\n", s.BranchOverflowN) + fmt.Fprintf(cmd.OutOrStdout(), "\tNumber of logical leaf pages: %d\n", s.LeafPageN) + fmt.Fprintf(cmd.OutOrStdout(), "\tNumber of physical leaf overflow pages: %d\n", s.LeafOverflowN) + + fmt.Fprintln(cmd.OutOrStdout(), "Tree statistics") + fmt.Fprintf(cmd.OutOrStdout(), "\tNumber of keys/value pairs: %d\n", s.KeyN) + fmt.Fprintf(cmd.OutOrStdout(), "\tNumber of levels in B+tree: %d\n", s.Depth) + + fmt.Fprintln(cmd.OutOrStdout(), "Page size utilization") + fmt.Fprintf(cmd.OutOrStdout(), "\tBytes allocated for physical branch pages: %d\n", s.BranchAlloc) + var percentage int + if s.BranchAlloc != 0 { + percentage = int(float32(s.BranchInuse) * 100.0 / 
float32(s.BranchAlloc)) + } + fmt.Fprintf(cmd.OutOrStdout(), "\tBytes actually used for branch data: %d (%d%%)\n", s.BranchInuse, percentage) + fmt.Fprintf(cmd.OutOrStdout(), "\tBytes allocated for physical leaf pages: %d\n", s.LeafAlloc) + percentage = 0 + if s.LeafAlloc != 0 { + percentage = int(float32(s.LeafInuse) * 100.0 / float32(s.LeafAlloc)) + } + fmt.Fprintf(cmd.OutOrStdout(), "\tBytes actually used for leaf data: %d (%d%%)\n", s.LeafInuse, percentage) + + fmt.Fprintln(cmd.OutOrStdout(), "Bucket statistics") + fmt.Fprintf(cmd.OutOrStdout(), "\tTotal number of buckets: %d\n", s.BucketN) + percentage = 0 + if s.BucketN != 0 { + percentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN)) + } + fmt.Fprintf(cmd.OutOrStdout(), "\tTotal number on inlined buckets: %d (%d%%)\n", s.InlineBucketN, percentage) + percentage = 0 + if s.LeafInuse != 0 { + percentage = int(float32(s.InlineBucketInuse) * 100.0 / float32(s.LeafInuse)) + } + fmt.Fprintf(cmd.OutOrStdout(), "\tBytes used for inlined buckets: %d (%d%%)\n", s.InlineBucketInuse, percentage) + + return nil + }) +} diff --git a/cmd/bbolt/command_stats_test.go b/cmd/bbolt/command_stats_test.go new file mode 100644 index 000000000..c0b4a613e --- /dev/null +++ b/cmd/bbolt/command_stats_test.go @@ -0,0 +1,154 @@ +package main_test + +import ( + "bytes" + "errors" + "io" + "os" + "strconv" + "testing" + + "github.com/stretchr/testify/require" + + bolt "go.etcd.io/bbolt" + main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/internal/btesting" +) + +// Ensure the "stats" command executes correctly with an empty database. +func TestStatsCommand_Run_EmptyDatabase(t *testing.T) { + // ignore + if os.Getpagesize() != 4096 { + t.Skip("system does not use 4KB page size") + } + + t.Log("Creating sample DB") + db := btesting.MustCreateDB(t) + db.Close() + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + // generate expected result. 
+ exp := "Aggregate statistics for 0 buckets\n\n" + + "Page count statistics\n" + + "\tNumber of logical branch pages: 0\n" + + "\tNumber of physical branch overflow pages: 0\n" + + "\tNumber of logical leaf pages: 0\n" + + "\tNumber of physical leaf overflow pages: 0\n" + + "Tree statistics\n" + + "\tNumber of keys/value pairs: 0\n" + + "\tNumber of levels in B+tree: 0\n" + + "Page size utilization\n" + + "\tBytes allocated for physical branch pages: 0\n" + + "\tBytes actually used for branch data: 0 (0%)\n" + + "\tBytes allocated for physical leaf pages: 0\n" + + "\tBytes actually used for leaf data: 0 (0%)\n" + + "Bucket statistics\n" + + "\tTotal number of buckets: 0\n" + + "\tTotal number on inlined buckets: 0 (0%)\n" + + "\tBytes used for inlined buckets: 0 (0%)\n" + + t.Log("Running stats cmd") + rootCmd := main.NewRootCommand() + outputBuf := bytes.NewBufferString("") + rootCmd.SetOut(outputBuf) + + rootCmd.SetArgs([]string{"stats", db.Path()}) + err := rootCmd.Execute() + require.NoError(t, err) + + t.Log("Checking output") + output, err := io.ReadAll(outputBuf) + require.NoError(t, err) + require.Exactlyf(t, exp, string(output), "unexpected stdout:\n\n%s", string(output)) +} + +// Ensure the "stats" command can execute correctly. +func TestStatsCommand_Run(t *testing.T) { + // ignore + if os.Getpagesize() != 4096 { + t.Skip("system does not use 4KB page size") + } + + t.Log("Creating sample DB") + db := btesting.MustCreateDB(t) + if err := db.Update(func(tx *bolt.Tx) error { + // create "foo" bucket. + b, err := tx.CreateBucket([]byte("foo")) + if err != nil { + return err + } + for i := 0; i < 10; i++ { + if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + return err + } + } + + // create "bar" bucket. 
+ b, err = tx.CreateBucket([]byte("bar")) + if err != nil { + return err + } + for i := 0; i < 100; i++ { + if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { + return err + } + } + + // create "baz" bucket. + b, err = tx.CreateBucket([]byte("baz")) + if err != nil { + return err + } + if err := b.Put([]byte("key"), []byte("value")); err != nil { + return err + } + + return nil + }); err != nil { + t.Fatal(err) + } + db.Close() + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + // generate expected result. + exp := "Aggregate statistics for 3 buckets\n\n" + + "Page count statistics\n" + + "\tNumber of logical branch pages: 0\n" + + "\tNumber of physical branch overflow pages: 0\n" + + "\tNumber of logical leaf pages: 1\n" + + "\tNumber of physical leaf overflow pages: 0\n" + + "Tree statistics\n" + + "\tNumber of keys/value pairs: 111\n" + + "\tNumber of levels in B+tree: 1\n" + + "Page size utilization\n" + + "\tBytes allocated for physical branch pages: 0\n" + + "\tBytes actually used for branch data: 0 (0%)\n" + + "\tBytes allocated for physical leaf pages: 4096\n" + + "\tBytes actually used for leaf data: 1996 (48%)\n" + + "Bucket statistics\n" + + "\tTotal number of buckets: 3\n" + + "\tTotal number on inlined buckets: 2 (66%)\n" + + "\tBytes used for inlined buckets: 236 (11%)\n" + + t.Log("Running stats cmd") + rootCmd := main.NewRootCommand() + outputBuf := bytes.NewBufferString("") + rootCmd.SetOut(outputBuf) + + rootCmd.SetArgs([]string{"stats", db.Path()}) + err := rootCmd.Execute() + require.NoError(t, err) + + t.Log("Checking output") + output, err := io.ReadAll(outputBuf) + require.NoError(t, err) + require.Exactlyf(t, exp, string(output), "unexpected stdout:\n\n%s", string(output)) +} + +func TestStatsCommand_NoArgs(t *testing.T) { + expErr := errors.New("accepts between 1 and 2 arg(s), received 0") + rootCmd := main.NewRootCommand() + rootCmd.SetArgs([]string{"stats"}) + err := rootCmd.Execute() + 
require.ErrorContains(t, err, expErr.Error()) +} diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 7467e9299..7f48fd4cc 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -134,8 +134,6 @@ func (m *Main) Run(args ...string) error { return newPageCommand(m).Run(args[1:]...) case "pages": return newPagesCommand(m).Run(args[1:]...) - case "stats": - return newStatsCommand(m).Run(args[1:]...) default: return ErrUnknownCommand } @@ -571,137 +569,6 @@ a single page to take up multiple blocks. `, "\n") } -// statsCommand represents the "stats" command execution. -type statsCommand struct { - baseCommand -} - -// newStatsCommand returns a statsCommand. -func newStatsCommand(m *Main) *statsCommand { - c := &statsCommand{} - c.baseCommand = m.baseCommand - return c -} - -// Run executes the command. -func (cmd *statsCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path, prefix := fs.Arg(0), fs.Arg(1) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open database. 
- db, err := bolt.Open(path, 0600, &bolt.Options{ReadOnly: true}) - if err != nil { - return err - } - defer db.Close() - - return db.View(func(tx *bolt.Tx) error { - var s bolt.BucketStats - var count int - if err := tx.ForEach(func(name []byte, b *bolt.Bucket) error { - if bytes.HasPrefix(name, []byte(prefix)) { - s.Add(b.Stats()) - count += 1 - } - return nil - }); err != nil { - return err - } - - fmt.Fprintf(cmd.Stdout, "Aggregate statistics for %d buckets\n\n", count) - - fmt.Fprintln(cmd.Stdout, "Page count statistics") - fmt.Fprintf(cmd.Stdout, "\tNumber of logical branch pages: %d\n", s.BranchPageN) - fmt.Fprintf(cmd.Stdout, "\tNumber of physical branch overflow pages: %d\n", s.BranchOverflowN) - fmt.Fprintf(cmd.Stdout, "\tNumber of logical leaf pages: %d\n", s.LeafPageN) - fmt.Fprintf(cmd.Stdout, "\tNumber of physical leaf overflow pages: %d\n", s.LeafOverflowN) - - fmt.Fprintln(cmd.Stdout, "Tree statistics") - fmt.Fprintf(cmd.Stdout, "\tNumber of keys/value pairs: %d\n", s.KeyN) - fmt.Fprintf(cmd.Stdout, "\tNumber of levels in B+tree: %d\n", s.Depth) - - fmt.Fprintln(cmd.Stdout, "Page size utilization") - fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical branch pages: %d\n", s.BranchAlloc) - var percentage int - if s.BranchAlloc != 0 { - percentage = int(float32(s.BranchInuse) * 100.0 / float32(s.BranchAlloc)) - } - fmt.Fprintf(cmd.Stdout, "\tBytes actually used for branch data: %d (%d%%)\n", s.BranchInuse, percentage) - fmt.Fprintf(cmd.Stdout, "\tBytes allocated for physical leaf pages: %d\n", s.LeafAlloc) - percentage = 0 - if s.LeafAlloc != 0 { - percentage = int(float32(s.LeafInuse) * 100.0 / float32(s.LeafAlloc)) - } - fmt.Fprintf(cmd.Stdout, "\tBytes actually used for leaf data: %d (%d%%)\n", s.LeafInuse, percentage) - - fmt.Fprintln(cmd.Stdout, "Bucket statistics") - fmt.Fprintf(cmd.Stdout, "\tTotal number of buckets: %d\n", s.BucketN) - percentage = 0 - if s.BucketN != 0 { - percentage = int(float32(s.InlineBucketN) * 100.0 / 
float32(s.BucketN)) - } - fmt.Fprintf(cmd.Stdout, "\tTotal number on inlined buckets: %d (%d%%)\n", s.InlineBucketN, percentage) - percentage = 0 - if s.LeafInuse != 0 { - percentage = int(float32(s.InlineBucketInuse) * 100.0 / float32(s.LeafInuse)) - } - fmt.Fprintf(cmd.Stdout, "\tBytes used for inlined buckets: %d (%d%%)\n", s.InlineBucketInuse, percentage) - - return nil - }) -} - -// Usage returns the help message. -func (cmd *statsCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt stats PATH - -Stats performs an extensive search of the database to track every page -reference. It starts at the current meta page and recursively iterates -through every accessible bucket. - -The following errors can be reported: - - already freed - The page is referenced more than once in the freelist. - - unreachable unfreed - The page is not referenced by a bucket or in the freelist. - - reachable freed - The page is referenced by a bucket but is also in the freelist. - - out of bounds - A page is referenced that is above the high water mark. - - multiple references - A page is referenced by more than one other page. - - invalid type - The page type is not "meta", "leaf", "branch", or "freelist". - -No errors should occur in your database. However, if for some reason you -experience corruption, please submit a ticket to the etcd-io/bbolt project page: - - https://github.com/etcd-io/bbolt/issues -`, "\n") -} - // keysCommand represents the "keys" command execution. type keysCommand struct { baseCommand diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index 2b07aef0c..983277ebb 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -9,7 +9,6 @@ import ( "io" "math/rand" "os" - "strconv" "strings" "sync" "testing" @@ -23,47 +22,6 @@ import ( "go.etcd.io/bbolt/internal/guts_cli" ) -// Ensure the "stats" command executes correctly with an empty database. 
-func TestStatsCommand_Run_EmptyDatabase(t *testing.T) { - // Ignore - if os.Getpagesize() != 4096 { - t.Skip("system does not use 4KB page size") - } - - db := btesting.MustCreateDB(t) - db.Close() - - defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) - - // Generate expected result. - exp := "Aggregate statistics for 0 buckets\n\n" + - "Page count statistics\n" + - "\tNumber of logical branch pages: 0\n" + - "\tNumber of physical branch overflow pages: 0\n" + - "\tNumber of logical leaf pages: 0\n" + - "\tNumber of physical leaf overflow pages: 0\n" + - "Tree statistics\n" + - "\tNumber of keys/value pairs: 0\n" + - "\tNumber of levels in B+tree: 0\n" + - "Page size utilization\n" + - "\tBytes allocated for physical branch pages: 0\n" + - "\tBytes actually used for branch data: 0 (0%)\n" + - "\tBytes allocated for physical leaf pages: 0\n" + - "\tBytes actually used for leaf data: 0 (0%)\n" + - "Bucket statistics\n" + - "\tTotal number of buckets: 0\n" + - "\tTotal number on inlined buckets: 0 (0%)\n" + - "\tBytes used for inlined buckets: 0 (0%)\n" - - // Run the command. - m := NewMain() - if err := m.Run("stats", db.Path()); err != nil { - t.Fatal(err) - } else if m.Stdout.String() != exp { - t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) - } -} - func TestDumpCommand_Run(t *testing.T) { db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 4096}) db.Close() @@ -181,84 +139,6 @@ func TestPageItemCommand_Run(t *testing.T) { } } -// Ensure the "stats" command can execute correctly. -func TestStatsCommand_Run(t *testing.T) { - // Ignore - if os.Getpagesize() != 4096 { - t.Skip("system does not use 4KB page size") - } - - db := btesting.MustCreateDB(t) - - if err := db.Update(func(tx *bolt.Tx) error { - // Create "foo" bucket. 
- b, err := tx.CreateBucket([]byte("foo")) - if err != nil { - return err - } - for i := 0; i < 10; i++ { - if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - return err - } - } - - // Create "bar" bucket. - b, err = tx.CreateBucket([]byte("bar")) - if err != nil { - return err - } - for i := 0; i < 100; i++ { - if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil { - return err - } - } - - // Create "baz" bucket. - b, err = tx.CreateBucket([]byte("baz")) - if err != nil { - return err - } - if err := b.Put([]byte("key"), []byte("value")); err != nil { - return err - } - - return nil - }); err != nil { - t.Fatal(err) - } - db.Close() - - defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) - - // Generate expected result. - exp := "Aggregate statistics for 3 buckets\n\n" + - "Page count statistics\n" + - "\tNumber of logical branch pages: 0\n" + - "\tNumber of physical branch overflow pages: 0\n" + - "\tNumber of logical leaf pages: 1\n" + - "\tNumber of physical leaf overflow pages: 0\n" + - "Tree statistics\n" + - "\tNumber of keys/value pairs: 111\n" + - "\tNumber of levels in B+tree: 1\n" + - "Page size utilization\n" + - "\tBytes allocated for physical branch pages: 0\n" + - "\tBytes actually used for branch data: 0 (0%)\n" + - "\tBytes allocated for physical leaf pages: 4096\n" + - "\tBytes actually used for leaf data: 1996 (48%)\n" + - "Bucket statistics\n" + - "\tTotal number of buckets: 3\n" + - "\tTotal number on inlined buckets: 2 (66%)\n" + - "\tBytes used for inlined buckets: 236 (11%)\n" - - // Run the command. - m := NewMain() - if err := m.Run("stats", db.Path()); err != nil { - t.Fatal(err) - } else if m.Stdout.String() != exp { - t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String()) - } -} - // Ensure the "keys" command can print a list of keys for a bucket. 
func TestKeysCommand_Run(t *testing.T) { testCases := []struct { From 1b56d8a60048ab822387a15672e2125e06c41f11 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 1 Jul 2025 17:36:52 +0200 Subject: [PATCH 391/439] chore(cmd): migrate pages command to cobra style Signed-off-by: Mustafa Elbehery --- cmd/bbolt/command_pages.go | 84 +++++++++++++++++++++++++++++ cmd/bbolt/command_pages_test.go | 61 +++++++++++++++++++++ cmd/bbolt/command_root.go | 1 + cmd/bbolt/main.go | 95 --------------------------------- cmd/bbolt/main_test.go | 31 ----------- 5 files changed, 146 insertions(+), 126 deletions(-) create mode 100644 cmd/bbolt/command_pages.go create mode 100644 cmd/bbolt/command_pages_test.go diff --git a/cmd/bbolt/command_pages.go b/cmd/bbolt/command_pages.go new file mode 100644 index 000000000..576071c16 --- /dev/null +++ b/cmd/bbolt/command_pages.go @@ -0,0 +1,84 @@ +package main + +import ( + "fmt" + "strconv" + "strings" + + "github.com/spf13/cobra" + + bolt "go.etcd.io/bbolt" +) + +func newPagesCommand() *cobra.Command { + pagesCmd := &cobra.Command{ + Use: "pages ", + Short: "print a list of pages in bbolt database", + Long: strings.TrimLeft(` +Pages prints a table of pages with their type (meta, leaf, branch, freelist). +Leaf and branch pages will show a key count in the "items" column while the +freelist will show the number of free pages in the "items" column. + +The "overflow" column shows the number of blocks that the page spills over +into. Normally there is no overflow but large keys and values can cause +a single page to take up multiple blocks. +`, "\n"), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return pagesFunc(cmd, args[0]) + }, + } + + return pagesCmd +} + +func pagesFunc(cmd *cobra.Command, dbPath string) error { + if _, err := checkSourceDBPath(dbPath); err != nil { + return err + } + + // Open database. 
+ db, err := bolt.Open(dbPath, 0600, &bolt.Options{ + ReadOnly: true, + PreLoadFreelist: true, + }) + if err != nil { + return err + } + defer db.Close() + + // Write header. + fmt.Fprintln(cmd.OutOrStdout(), "ID TYPE ITEMS OVRFLW") + fmt.Fprintln(cmd.OutOrStdout(), "======== ========== ====== ======") + + return db.View(func(tx *bolt.Tx) error { + var id int + for { + p, err := tx.Page(id) + if err != nil { + return &PageError{ID: id, Err: err} + } else if p == nil { + break + } + + // Only display count and overflow if this is a non-free page. + var count, overflow string + if p.Type != "free" { + count = strconv.Itoa(p.Count) + if p.OverflowCount > 0 { + overflow = strconv.Itoa(p.OverflowCount) + } + } + + // Print table row. + fmt.Fprintf(cmd.OutOrStdout(), "%-8d %-10s %-6s %-6s\n", p.ID, p.Type, count, overflow) + + // Move to the next non-overflow page. + id += 1 + if p.Type != "free" { + id += p.OverflowCount + } + } + return nil + }) +} diff --git a/cmd/bbolt/command_pages_test.go b/cmd/bbolt/command_pages_test.go new file mode 100644 index 000000000..c367f7180 --- /dev/null +++ b/cmd/bbolt/command_pages_test.go @@ -0,0 +1,61 @@ +package main_test + +import ( + "bytes" + "errors" + "fmt" + "io" + "testing" + + "github.com/stretchr/testify/require" + + bolt "go.etcd.io/bbolt" + main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/internal/btesting" +) + +// Ensure the "pages" command neither panic, nor change the db file. 
+func TestPagesCommand_Run(t *testing.T) { + t.Log("Creating sample DB") + db := btesting.MustCreateDB(t) + err := db.Update(func(tx *bolt.Tx) error { + for _, name := range []string{"foo", "bar"} { + b, err := tx.CreateBucket([]byte(name)) + if err != nil { + return err + } + for i := 0; i < 3; i++ { + key := fmt.Sprintf("%s-%d", name, i) + val := fmt.Sprintf("val-%s-%d", name, i) + if err := b.Put([]byte(key), []byte(val)); err != nil { + return err + } + } + } + return nil + }) + require.NoError(t, err) + db.Close() + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + t.Log("Running pages cmd") + rootCmd := main.NewRootCommand() + outputBuf := bytes.NewBufferString("") + rootCmd.SetOut(outputBuf) + + rootCmd.SetArgs([]string{"pages", db.Path()}) + err = rootCmd.Execute() + require.NoError(t, err) + + t.Log("Checking output") + _, err = io.ReadAll(outputBuf) + require.NoError(t, err) +} + +func TestPagesCommand_NoArgs(t *testing.T) { + expErr := errors.New("accepts 1 arg(s), received 0") + rootCmd := main.NewRootCommand() + rootCmd.SetArgs([]string{"pages"}) + err := rootCmd.Execute() + require.ErrorContains(t, err, expErr.Error()) +} diff --git a/cmd/bbolt/command_root.go b/cmd/bbolt/command_root.go index cbc544bf9..7a4aa13e5 100644 --- a/cmd/bbolt/command_root.go +++ b/cmd/bbolt/command_root.go @@ -25,6 +25,7 @@ func NewRootCommand() *cobra.Command { newInfoCommand(), newCompactCommand(), newStatsCommand(), + newPagesCommand(), ) return rootCmd diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 7f48fd4cc..39d264301 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -132,8 +132,6 @@ func (m *Main) Run(args ...string) error { return newKeysCommand(m).Run(args[1:]...) case "page": return newPageCommand(m).Run(args[1:]...) - case "pages": - return newPagesCommand(m).Run(args[1:]...) default: return ErrUnknownCommand } @@ -476,99 +474,6 @@ page-item prints a page item key and value. 
`, "\n") } -// pagesCommand represents the "pages" command execution. -type pagesCommand struct { - baseCommand -} - -// newPagesCommand returns a pagesCommand. -func newPagesCommand(m *Main) *pagesCommand { - c := &pagesCommand{} - c.baseCommand = m.baseCommand - return c -} - -// Run executes the command. -func (cmd *pagesCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Open database. - db, err := bolt.Open(path, 0600, &bolt.Options{ - ReadOnly: true, - PreLoadFreelist: true, - }) - if err != nil { - return err - } - defer func() { _ = db.Close() }() - - // Write header. - fmt.Fprintln(cmd.Stdout, "ID TYPE ITEMS OVRFLW") - fmt.Fprintln(cmd.Stdout, "======== ========== ====== ======") - - return db.View(func(tx *bolt.Tx) error { - var id int - for { - p, err := tx.Page(id) - if err != nil { - return &PageError{ID: id, Err: err} - } else if p == nil { - break - } - - // Only display count and overflow if this is a non-free page. - var count, overflow string - if p.Type != "free" { - count = strconv.Itoa(p.Count) - if p.OverflowCount > 0 { - overflow = strconv.Itoa(p.OverflowCount) - } - } - - // Print table row. - fmt.Fprintf(cmd.Stdout, "%-8d %-10s %-6s %-6s\n", p.ID, p.Type, count, overflow) - - // Move to the next non-overflow page. - id += 1 - if p.Type != "free" { - id += p.OverflowCount - } - } - return nil - }) -} - -// Usage returns the help message. -func (cmd *pagesCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt pages PATH - -Pages prints a table of pages with their type (meta, leaf, branch, freelist). 
-Leaf and branch pages will show a key count in the "items" column while the -freelist will show the number of free pages in the "items" column. - -The "overflow" column shows the number of blocks that the page spills over -into. Normally there is no overflow but large keys and values can cause -a single page to take up multiple blocks. -`, "\n") -} - // keysCommand represents the "keys" command execution. type keysCommand struct { baseCommand diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index 983277ebb..4730c5fe5 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -269,37 +269,6 @@ func TestGetCommand_Run(t *testing.T) { } } -// Ensure the "pages" command neither panic, nor change the db file. -func TestPagesCommand_Run(t *testing.T) { - db := btesting.MustCreateDB(t) - - err := db.Update(func(tx *bolt.Tx) error { - for _, name := range []string{"foo", "bar"} { - b, err := tx.CreateBucket([]byte(name)) - if err != nil { - return err - } - for i := 0; i < 3; i++ { - key := fmt.Sprintf("%s-%d", name, i) - val := fmt.Sprintf("val-%s-%d", name, i) - if err := b.Put([]byte(key), []byte(val)); err != nil { - return err - } - } - } - return nil - }) - require.NoError(t, err) - db.Close() - - defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) - - // Run the command. 
- m := NewMain() - err = m.Run("pages", db.Path()) - require.NoError(t, err) -} - // Ensure the "bench" command runs and exits without errors func TestBenchCommand_Run(t *testing.T) { tests := map[string]struct { From c884fa47418c9c0342f2e17e0706e84bad7b25f7 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 1 Jul 2025 18:48:08 +0200 Subject: [PATCH 392/439] chore(cmd): migrate keys command to cobra style Signed-off-by: Mustafa Elbehery --- cmd/bbolt/command_keys.go | 60 ++++++++++++++++++++++ cmd/bbolt/command_keys_test.go | 94 ++++++++++++++++++++++++++++++++++ cmd/bbolt/command_root.go | 1 + cmd/bbolt/main.go | 81 ----------------------------- cmd/bbolt/main_test.go | 72 -------------------------- 5 files changed, 155 insertions(+), 153 deletions(-) create mode 100644 cmd/bbolt/command_keys.go create mode 100644 cmd/bbolt/command_keys_test.go diff --git a/cmd/bbolt/command_keys.go b/cmd/bbolt/command_keys.go new file mode 100644 index 000000000..396f38042 --- /dev/null +++ b/cmd/bbolt/command_keys.go @@ -0,0 +1,60 @@ +package main + +import ( + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + bolt "go.etcd.io/bbolt" +) + +type keysOptions struct { + format string +} + +func (o *keysOptions) AddFlags(fs *pflag.FlagSet) { + fs.StringVarP(&o.format, "format", "f", "auto", "Output format one of: "+FORMAT_MODES) +} + +func newKeysCommand() *cobra.Command { + var o keysOptions + + keysCmd := &cobra.Command{ + Use: "keys ", + Short: "print a list of keys in the given (sub)bucket in bbolt database", + Args: cobra.MinimumNArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return keysFunc(cmd, o, args[0], args[1:]...) + }, + } + + o.AddFlags(keysCmd.Flags()) + return keysCmd +} + +func keysFunc(cmd *cobra.Command, cfg keysOptions, dbPath string, buckets ...string) error { + if _, err := checkSourceDBPath(dbPath); err != nil { + return err + } + // Open database. 
+ db, err := bolt.Open(dbPath, 0600, &bolt.Options{ + ReadOnly: true, + }) + if err != nil { + return err + } + defer db.Close() + + // Print keys. + return db.View(func(tx *bolt.Tx) error { + // Find bucket. + lastBucket, err := findLastBucket(tx, buckets) + if err != nil { + return err + } + + // Iterate over each key. + return lastBucket.ForEach(func(key, _ []byte) error { + return writelnBytes(cmd.OutOrStdout(), key, cfg.format) + }) + }) +} diff --git a/cmd/bbolt/command_keys_test.go b/cmd/bbolt/command_keys_test.go new file mode 100644 index 000000000..3de2c25db --- /dev/null +++ b/cmd/bbolt/command_keys_test.go @@ -0,0 +1,94 @@ +package main_test + +import ( + "bytes" + "errors" + "fmt" + "io" + "testing" + + "github.com/stretchr/testify/require" + + bolt "go.etcd.io/bbolt" + main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/internal/btesting" +) + +// Ensure the "keys" command can print a list of keys for a bucket. +func TestKeysCommand_Run(t *testing.T) { + testCases := []struct { + name string + printable bool + testBucket string + expected string + }{ + { + name: "printable keys", + printable: true, + testBucket: "foo", + expected: "foo-0\nfoo-1\nfoo-2\n", + }, + { + name: "non printable keys", + printable: false, + testBucket: "bar", + expected: convertInt64KeysIntoHexString(100001, 100002, 100003), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Logf("Creating test database for subtest '%s'", tc.name) + db := btesting.MustCreateDB(t) + err := db.Update(func(tx *bolt.Tx) error { + t.Logf("creating test bucket %s", tc.testBucket) + b, bErr := tx.CreateBucketIfNotExists([]byte(tc.testBucket)) + if bErr != nil { + return fmt.Errorf("error creating test bucket %q: %v", tc.testBucket, bErr) + } + + t.Logf("inserting test data into test bucket %s", tc.testBucket) + if tc.printable { + for i := 0; i < 3; i++ { + key := fmt.Sprintf("%s-%d", tc.testBucket, i) + if pErr := b.Put([]byte(key), []byte{0}); pErr != nil { + 
return pErr + } + } + } else { + for i := 100001; i < 100004; i++ { + k := convertInt64IntoBytes(int64(i)) + if pErr := b.Put(k, []byte{0}); pErr != nil { + return pErr + } + } + } + return nil + }) + require.NoError(t, err) + db.Close() + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + t.Log("Running Keys cmd") + rootCmd := main.NewRootCommand() + outputBuf := bytes.NewBufferString("") + rootCmd.SetOut(outputBuf) + rootCmd.SetArgs([]string{"keys", db.Path(), tc.testBucket}) + err = rootCmd.Execute() + require.NoError(t, err) + + t.Log("Checking output") + output, err := io.ReadAll(outputBuf) + require.NoError(t, err) + require.Equalf(t, tc.expected, string(output), "unexpected stdout:\n\n%s", string(output)) + }) + } +} + +func TestKeyCommand_NoArgs(t *testing.T) { + expErr := errors.New("requires at least 2 arg(s), only received 0") + rootCmd := main.NewRootCommand() + rootCmd.SetArgs([]string{"keys"}) + err := rootCmd.Execute() + require.ErrorContains(t, err, expErr.Error()) +} diff --git a/cmd/bbolt/command_root.go b/cmd/bbolt/command_root.go index 7a4aa13e5..e820603c9 100644 --- a/cmd/bbolt/command_root.go +++ b/cmd/bbolt/command_root.go @@ -26,6 +26,7 @@ func NewRootCommand() *cobra.Command { newCompactCommand(), newStatsCommand(), newPagesCommand(), + newKeysCommand(), ) return rootCmd diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 39d264301..9301f01d9 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -128,8 +128,6 @@ func (m *Main) Run(args ...string) error { return newPageItemCommand(m).Run(args[1:]...) case "get": return newGetCommand(m).Run(args[1:]...) - case "keys": - return newKeysCommand(m).Run(args[1:]...) case "page": return newPageCommand(m).Run(args[1:]...) default: @@ -474,85 +472,6 @@ page-item prints a page item key and value. `, "\n") } -// keysCommand represents the "keys" command execution. -type keysCommand struct { - baseCommand -} - -// newKeysCommand returns a keysCommand. 
-func newKeysCommand(m *Main) *keysCommand { - c := &keysCommand{} - c.baseCommand = m.baseCommand - return c -} - -// Run executes the command. -func (cmd *keysCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - optionsFormat := fs.String("format", "auto", "Output format. One of: "+FORMAT_MODES+" (default: auto)") - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path and bucket. - relevantArgs := fs.Args() - if len(relevantArgs) < 2 { - return ErrNotEnoughArgs - } - path, buckets := relevantArgs[0], relevantArgs[1:] - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } else if len(buckets) == 0 { - return ErrBucketRequired - } - - // Open database. - db, err := bolt.Open(path, 0600, &bolt.Options{ReadOnly: true}) - if err != nil { - return err - } - defer db.Close() - - // Print keys. - return db.View(func(tx *bolt.Tx) error { - // Find bucket. - lastBucket, err := findLastBucket(tx, buckets) - if err != nil { - return err - } - - // Iterate over each key. - return lastBucket.ForEach(func(key, _ []byte) error { - return writelnBytes(cmd.Stdout, key, *optionsFormat) - }) - }) -} - -// Usage returns the help message. -// TODO: Use https://pkg.go.dev/flag#FlagSet.PrintDefaults to print supported flags. -func (cmd *keysCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt keys PATH [BUCKET...] - -Print a list of keys in the given (sub)bucket. -======= - -Additional options include: - - --format - Output format. One of: `+FORMAT_MODES+` (default=auto) - -Print a list of keys in the given bucket. -`, "\n") -} - // getCommand represents the "get" command execution. 
type getCommand struct { baseCommand diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index 4730c5fe5..ab344b848 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -139,73 +139,6 @@ func TestPageItemCommand_Run(t *testing.T) { } } -// Ensure the "keys" command can print a list of keys for a bucket. -func TestKeysCommand_Run(t *testing.T) { - testCases := []struct { - name string - printable bool - testBucket string - expected string - }{ - { - name: "printable keys", - printable: true, - testBucket: "foo", - expected: "foo-0\nfoo-1\nfoo-2\n", - }, - { - name: "non printable keys", - printable: false, - testBucket: "bar", - expected: convertInt64KeysIntoHexString(100001, 100002, 100003), - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - t.Logf("creating test database for subtest '%s'", tc.name) - db := btesting.MustCreateDB(t) - - err := db.Update(func(tx *bolt.Tx) error { - t.Logf("creating test bucket %s", tc.testBucket) - b, bErr := tx.CreateBucketIfNotExists([]byte(tc.testBucket)) - if bErr != nil { - return fmt.Errorf("error creating test bucket %q: %v", tc.testBucket, bErr) - } - - t.Logf("inserting test data into test bucket %s", tc.testBucket) - if tc.printable { - for i := 0; i < 3; i++ { - key := fmt.Sprintf("%s-%d", tc.testBucket, i) - if pErr := b.Put([]byte(key), []byte{0}); pErr != nil { - return pErr - } - } - } else { - for i := 100001; i < 100004; i++ { - k := convertInt64IntoBytes(int64(i)) - if pErr := b.Put(k, []byte{0}); pErr != nil { - return pErr - } - } - } - return nil - }) - require.NoError(t, err) - db.Close() - - defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) - - t.Log("running Keys cmd") - m := NewMain() - kErr := m.Run("keys", db.Path(), tc.testBucket) - require.NoError(t, kErr) - actual := m.Stdout.String() - assert.Equal(t, tc.expected, actual) - }) - } -} - // Ensure the "get" command can print the value of a key in a bucket. 
func TestGetCommand_Run(t *testing.T) { testCases := []struct { @@ -358,11 +291,6 @@ func TestCommands_Run_NoArgs(t *testing.T) { cmd: "get", expErr: main.ErrNotEnoughArgs, }, - { - name: "keys", - cmd: "keys", - expErr: main.ErrNotEnoughArgs, - }, } for _, tc := range testCases { From 290fca35f350bf0372d16fc7268157407fefd372 Mon Sep 17 00:00:00 2001 From: "shenmu.wy" Date: Wed, 2 Jul 2025 11:12:58 +0800 Subject: [PATCH 393/439] cmd: migrate dump command to cobra style Signed-off-by: shenmu.wy --- cmd/bbolt/command_dump.go | 110 ++++++++++++++++++++++++++++ cmd/bbolt/command_dump_test.go | 44 ++++++++++++ cmd/bbolt/command_root.go | 1 + cmd/bbolt/main.go | 126 --------------------------------- cmd/bbolt/main_test.go | 16 ----- 5 files changed, 155 insertions(+), 142 deletions(-) create mode 100644 cmd/bbolt/command_dump.go create mode 100644 cmd/bbolt/command_dump_test.go diff --git a/cmd/bbolt/command_dump.go b/cmd/bbolt/command_dump.go new file mode 100644 index 000000000..72826e66b --- /dev/null +++ b/cmd/bbolt/command_dump.go @@ -0,0 +1,110 @@ +package main + +import ( + "bytes" + "fmt" + "io" + "os" + + "github.com/spf13/cobra" + + "go.etcd.io/bbolt/internal/guts_cli" +) + +func newDumpCommand() *cobra.Command { + dumpCmd := &cobra.Command{ + Use: "dump pageid [pageid...]", + Short: "prints a hexadecimal dump of one or more pages of bbolt database.", + Args: cobra.MinimumNArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + dbPath := args[0] + pageIDs, err := stringToPages(args[1:]) + if err != nil { + return err + } else if len(pageIDs) == 0 { + return ErrPageIDRequired + } + return dumpFunc(cmd, dbPath, pageIDs) + }, + } + + return dumpCmd +} + +func dumpFunc(cmd *cobra.Command, dbPath string, pageIDs []uint64) (err error) { + if _, err := checkSourceDBPath(dbPath); err != nil { + return err + } + + // open database to retrieve page size. 
+ pageSize, _, err := guts_cli.ReadPageAndHWMSize(dbPath) + if err != nil { + return err + } + + // open database file handler. + f, err := os.Open(dbPath) + if err != nil { + return err + } + defer func() { _ = f.Close() }() + + // print each page listed. + for i, pageID := range pageIDs { + // print a separator. + if i > 0 { + fmt.Fprintln(cmd.OutOrStdout(), "===============================================") + } + + // print page to stdout. + if err := dumpPage(cmd.OutOrStdout(), f, pageID, uint64(pageSize)); err != nil { + return err + } + } + + return +} + +func dumpPage(w io.Writer, r io.ReaderAt, pageID uint64, pageSize uint64) error { + const bytesPerLineN = 16 + + // read page into buffer. + buf := make([]byte, pageSize) + addr := pageID * uint64(pageSize) + if n, err := r.ReadAt(buf, int64(addr)); err != nil { + return err + } else if uint64(n) != pageSize { + return io.ErrUnexpectedEOF + } + + // write out to writer in 16-byte lines. + var prev []byte + var skipped bool + for offset := uint64(0); offset < pageSize; offset += bytesPerLineN { + // retrieve current 16-byte line. + line := buf[offset : offset+bytesPerLineN] + isLastLine := (offset == (pageSize - bytesPerLineN)) + + // if it's the same as the previous line then print a skip. + if bytes.Equal(line, prev) && !isLastLine { + if !skipped { + fmt.Fprintf(w, "%07x *\n", addr+offset) + skipped = true + } + } else { + // print line as hexadecimal in 2-byte groups. + fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset, + line[0:2], line[2:4], line[4:6], line[6:8], + line[8:10], line[10:12], line[12:14], line[14:16], + ) + + skipped = false + } + + // save the previous line. 
+ prev = line + } + fmt.Fprint(w, "\n") + + return nil +} diff --git a/cmd/bbolt/command_dump_test.go b/cmd/bbolt/command_dump_test.go new file mode 100644 index 000000000..6b1820f82 --- /dev/null +++ b/cmd/bbolt/command_dump_test.go @@ -0,0 +1,44 @@ +package main_test + +import ( + "bytes" + "errors" + "io" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + bolt "go.etcd.io/bbolt" + main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/internal/btesting" +) + +func TestDumpCommand_Run(t *testing.T) { + t.Log("Creating database") + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 4096}) + require.NoError(t, db.Close()) + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + t.Log("Running dump command") + rootCmd := main.NewRootCommand() + outputBuf := bytes.NewBufferString("") + rootCmd.SetOut(outputBuf) + rootCmd.SetArgs([]string{"dump", db.Path(), "0"}) + err := rootCmd.Execute() + require.NoError(t, err) + + t.Log("Checking output") + exp := `0000010 edda 0ced 0200 0000 0010 0000 0000 0000` + output, err := io.ReadAll(outputBuf) + require.NoError(t, err) + require.True(t, strings.Contains(string(output), exp), "unexpected stdout:", string(output)) +} + +func TestDumpCommand_NoArgs(t *testing.T) { + expErr := errors.New("requires at least 2 arg(s), only received 0") + rootCmd := main.NewRootCommand() + rootCmd.SetArgs([]string{"dump"}) + err := rootCmd.Execute() + require.ErrorContains(t, err, expErr.Error()) +} diff --git a/cmd/bbolt/command_root.go b/cmd/bbolt/command_root.go index e820603c9..10b6a2592 100644 --- a/cmd/bbolt/command_root.go +++ b/cmd/bbolt/command_root.go @@ -27,6 +27,7 @@ func NewRootCommand() *cobra.Command { newStatsCommand(), newPagesCommand(), newKeysCommand(), + newDumpCommand(), ) return rootCmd diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 9301f01d9..a3a169173 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -1,7 +1,6 @@ package main import ( - "bytes" "crypto/sha256" 
"encoding/binary" "encoding/hex" @@ -122,8 +121,6 @@ func (m *Main) Run(args ...string) error { return ErrUsage case "bench": return newBenchCommand(m).Run(args[1:]...) - case "dump": - return newDumpCommand(m).Run(args[1:]...) case "page-item": return newPageItemCommand(m).Run(args[1:]...) case "get": @@ -167,129 +164,6 @@ Use "bbolt [command] -h" for more information about a command. `, "\n") } -// dumpCommand represents the "dump" command execution. -type dumpCommand struct { - baseCommand -} - -// newDumpCommand returns a dumpCommand. -func newDumpCommand(m *Main) *dumpCommand { - c := &dumpCommand{} - c.baseCommand = m.baseCommand - return c -} - -// Run executes the command. -func (cmd *dumpCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path and page id. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Read page ids. - pageIDs, err := stringToPages(fs.Args()[1:]) - if err != nil { - return err - } else if len(pageIDs) == 0 { - return ErrPageIDRequired - } - - // Open database to retrieve page size. - pageSize, _, err := guts_cli.ReadPageAndHWMSize(path) - if err != nil { - return err - } - - // Open database file handler. - f, err := os.Open(path) - if err != nil { - return err - } - defer func() { _ = f.Close() }() - - // Print each page listed. - for i, pageID := range pageIDs { - // Print a separator. - if i > 0 { - fmt.Fprintln(cmd.Stdout, "===============================================") - } - - // Print page to stdout. - if err := cmd.PrintPage(cmd.Stdout, f, pageID, uint64(pageSize)); err != nil { - return err - } - } - - return nil -} - -// PrintPage prints a given page as hexadecimal. 
-func (cmd *dumpCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID uint64, pageSize uint64) error { - const bytesPerLineN = 16 - - // Read page into buffer. - buf := make([]byte, pageSize) - addr := pageID * uint64(pageSize) - if n, err := r.ReadAt(buf, int64(addr)); err != nil { - return err - } else if uint64(n) != pageSize { - return io.ErrUnexpectedEOF - } - - // Write out to writer in 16-byte lines. - var prev []byte - var skipped bool - for offset := uint64(0); offset < pageSize; offset += bytesPerLineN { - // Retrieve current 16-byte line. - line := buf[offset : offset+bytesPerLineN] - isLastLine := (offset == (pageSize - bytesPerLineN)) - - // If it's the same as the previous line then print a skip. - if bytes.Equal(line, prev) && !isLastLine { - if !skipped { - fmt.Fprintf(w, "%07x *\n", addr+offset) - skipped = true - } - } else { - // Print line as hexadecimal in 2-byte groups. - fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset, - line[0:2], line[2:4], line[4:6], line[6:8], - line[8:10], line[10:12], line[12:14], line[14:16], - ) - - skipped = false - } - - // Save the previous line. - prev = line - } - fmt.Fprint(w, "\n") - - return nil -} - -// Usage returns the help message. -func (cmd *dumpCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt dump PATH pageid [pageid...] - -Dump prints a hexadecimal dump of one or more pages. -`, "\n") -} - // pageItemCommand represents the "page-item" command execution. 
type pageItemCommand struct { baseCommand diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index ab344b848..26d031f2b 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -22,22 +22,6 @@ import ( "go.etcd.io/bbolt/internal/guts_cli" ) -func TestDumpCommand_Run(t *testing.T) { - db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 4096}) - db.Close() - - defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) - - exp := `0000010 edda 0ced 0200 0000 0010 0000 0000 0000` - - m := NewMain() - err := m.Run("dump", db.Path(), "0") - require.NoError(t, err) - if !strings.Contains(m.Stdout.String(), exp) { - t.Fatalf("unexpected stdout:\n%s\n", m.Stdout.String()) - } -} - func TestPageCommand_Run(t *testing.T) { db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 4096}) db.Close() From 6288198fdd7747f171b98032c204984e08e5da45 Mon Sep 17 00:00:00 2001 From: "shenmu.wy" Date: Wed, 2 Jul 2025 13:47:17 +0800 Subject: [PATCH 394/439] cmd: migrate page-item command to cobra style Signed-off-by: shenmu.wy --- cmd/bbolt/command_page_item.go | 111 ++++++++++++++++++++++ cmd/bbolt/command_page_item_test.go | 106 +++++++++++++++++++++ cmd/bbolt/command_root.go | 1 + cmd/bbolt/main.go | 140 ---------------------------- cmd/bbolt/main_test.go | 75 --------------- 5 files changed, 218 insertions(+), 215 deletions(-) create mode 100644 cmd/bbolt/command_page_item.go create mode 100644 cmd/bbolt/command_page_item_test.go diff --git a/cmd/bbolt/command_page_item.go b/cmd/bbolt/command_page_item.go new file mode 100644 index 000000000..ce0af2da9 --- /dev/null +++ b/cmd/bbolt/command_page_item.go @@ -0,0 +1,111 @@ +package main + +import ( + "errors" + "fmt" + "io" + "strconv" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "go.etcd.io/bbolt/internal/common" + "go.etcd.io/bbolt/internal/guts_cli" +) + +type pageItemOptions struct { + keyOnly bool + valueOnly bool + format string +} + +func newPageItemCommand() *cobra.Command 
{ + var opt pageItemOptions + pageItemCmd := &cobra.Command{ + Use: "page-item [options] pageid itemid", + Short: "print a page item key and value in a bbolt database", + Args: cobra.ExactArgs(3), + RunE: func(cmd *cobra.Command, args []string) error { + dbPath := args[0] + pageID, err := strconv.ParseUint(args[1], 10, 64) + if err != nil { + return err + } + itemID, err := strconv.ParseUint(args[2], 10, 64) + if err != nil { + return err + } + return pageItemFunc(cmd, opt, dbPath, pageID, itemID) + }, + } + opt.AddFlags(pageItemCmd.Flags()) + + return pageItemCmd +} + +func (o *pageItemOptions) AddFlags(fs *pflag.FlagSet) { + fs.BoolVar(&o.keyOnly, "key-only", false, "Print only the key") + fs.BoolVar(&o.valueOnly, "value-only", false, "Print only the value") + fs.StringVar(&o.format, "format", "auto", "Output format one of: "+FORMAT_MODES) +} + +func pageItemFunc(cmd *cobra.Command, cfg pageItemOptions, dbPath string, pageID, itemID uint64) (err error) { + if cfg.keyOnly && cfg.valueOnly { + return errors.New("the --key-only or --value-only flag may be set, but not both") + } + + if _, err := checkSourceDBPath(dbPath); err != nil { + return err + } + + // retrieve page info and page size. 
+ _, buf, err := guts_cli.ReadPage(dbPath, pageID) + if err != nil { + return err + } + + if !cfg.valueOnly { + err := pageItemPrintLeafItemKey(cmd.OutOrStdout(), buf, uint16(itemID), cfg.format) + if err != nil { + return err + } + } + if !cfg.keyOnly { + err := pageItemPrintLeafItemValue(cmd.OutOrStdout(), buf, uint16(itemID), cfg.format) + if err != nil { + return err + } + } + + return +} + +func pageItemPrintLeafItemKey(w io.Writer, pageBytes []byte, index uint16, format string) error { + k, _, err := pageItemLeafPageElement(pageBytes, index) + if err != nil { + return err + } + + return writelnBytes(w, k, format) +} + +func pageItemPrintLeafItemValue(w io.Writer, pageBytes []byte, index uint16, format string) error { + _, v, err := pageItemLeafPageElement(pageBytes, index) + if err != nil { + return err + } + return writelnBytes(w, v, format) +} + +func pageItemLeafPageElement(pageBytes []byte, index uint16) ([]byte, []byte, error) { + p := common.LoadPage(pageBytes) + if index >= p.Count() { + return nil, nil, fmt.Errorf("leafPageElement: expected item index less than %d, but got %d", p.Count(), index) + } + if p.Typ() != "leaf" { + return nil, nil, fmt.Errorf("leafPageElement: expected page type of 'leaf', but got '%s'", p.Typ()) + } + + e := p.LeafPageElement(index) + return e.Key(), e.Value(), nil +} diff --git a/cmd/bbolt/command_page_item_test.go b/cmd/bbolt/command_page_item_test.go new file mode 100644 index 000000000..b4c583666 --- /dev/null +++ b/cmd/bbolt/command_page_item_test.go @@ -0,0 +1,106 @@ +package main_test + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/require" + + bolt "go.etcd.io/bbolt" + main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/internal/btesting" + "go.etcd.io/bbolt/internal/guts_cli" +) + +func TestPageItemCommand_Run(t *testing.T) { + testCases := []struct { + name string + printable bool + itemId string + expectedKey string + expectedValue string + }{ 
+ { + name: "printable items", + printable: true, + itemId: "0", + expectedKey: "key_0", + expectedValue: "value_0", + }, + { + name: "non printable items", + printable: false, + itemId: "0", + expectedKey: hex.EncodeToString(convertInt64IntoBytes(0 + 1)), + expectedValue: hex.EncodeToString(convertInt64IntoBytes(0 + 2)), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 4096}) + srcPath := db.Path() + + t.Log("Inserting some sample data") + err := db.Update(func(tx *bolt.Tx) error { + b, bErr := tx.CreateBucketIfNotExists([]byte("data")) + if bErr != nil { + return bErr + } + + for i := 0; i < 100; i++ { + if tc.printable { + if bErr = b.Put([]byte(fmt.Sprintf("key_%d", i)), []byte(fmt.Sprintf("value_%d", i))); bErr != nil { + return bErr + } + } else { + k, v := convertInt64IntoBytes(int64(i+1)), convertInt64IntoBytes(int64(i+2)) + if bErr = b.Put(k, v); bErr != nil { + return bErr + } + } + } + return nil + }) + require.NoError(t, err) + require.NoError(t, db.Close()) + defer requireDBNoChange(t, dbData(t, srcPath), srcPath) + + meta := readMetaPage(t, srcPath) + leafPageId := 0 + for i := 2; i < int(meta.Pgid()); i++ { + p, _, err := guts_cli.ReadPage(srcPath, uint64(i)) + require.NoError(t, err) + if p.IsLeafPage() && p.Count() > 1 { + leafPageId = int(p.Id()) + } + } + require.NotEqual(t, 0, leafPageId) + + t.Log("Running page-item command") + rootCmd := main.NewRootCommand() + outBuf := &bytes.Buffer{} + rootCmd.SetOut(outBuf) + rootCmd.SetArgs([]string{"page-item", db.Path(), fmt.Sprintf("%d", leafPageId), tc.itemId}) + err = rootCmd.Execute() + require.NoError(t, err) + + t.Log("Checking output") + output := outBuf.String() + require.True(t, strings.Contains(output, tc.expectedKey), "unexpected output:", output) + require.True(t, strings.Contains(output, tc.expectedValue), "unexpected output:", output) + }) + } +} + +func TestPageItemCommand_NoArgs(t 
*testing.T) { + expErr := errors.New("accepts 3 arg(s), received 0") + rootCmd := main.NewRootCommand() + rootCmd.SetArgs([]string{"page-item"}) + err := rootCmd.Execute() + require.ErrorContains(t, err, expErr.Error()) +} diff --git a/cmd/bbolt/command_root.go b/cmd/bbolt/command_root.go index 10b6a2592..b49b04be5 100644 --- a/cmd/bbolt/command_root.go +++ b/cmd/bbolt/command_root.go @@ -28,6 +28,7 @@ func NewRootCommand() *cobra.Command { newPagesCommand(), newKeysCommand(), newDumpCommand(), + newPageItemCommand(), ) return rootCmd diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index a3a169173..337283e03 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -23,7 +23,6 @@ import ( bolt "go.etcd.io/bbolt" berrors "go.etcd.io/bbolt/errors" "go.etcd.io/bbolt/internal/common" - "go.etcd.io/bbolt/internal/guts_cli" ) var ( @@ -121,8 +120,6 @@ func (m *Main) Run(args ...string) error { return ErrUsage case "bench": return newBenchCommand(m).Run(args[1:]...) - case "page-item": - return newPageItemCommand(m).Run(args[1:]...) case "get": return newGetCommand(m).Run(args[1:]...) case "page": @@ -164,106 +161,6 @@ Use "bbolt [command] -h" for more information about a command. `, "\n") } -// pageItemCommand represents the "page-item" command execution. -type pageItemCommand struct { - baseCommand -} - -// newPageItemCommand returns a pageItemCommand. -func newPageItemCommand(m *Main) *pageItemCommand { - c := &pageItemCommand{} - c.baseCommand = m.baseCommand - return c -} - -type pageItemOptions struct { - help bool - keyOnly bool - valueOnly bool - format string -} - -// Run executes the command. -func (cmd *pageItemCommand) Run(args ...string) error { - // Parse flags. - options := &pageItemOptions{} - fs := flag.NewFlagSet("", flag.ContinueOnError) - fs.BoolVar(&options.keyOnly, "key-only", false, "Print only the key") - fs.BoolVar(&options.valueOnly, "value-only", false, "Print only the value") - fs.StringVar(&options.format, "format", "auto", "Output format. 
One of: "+FORMAT_MODES) - fs.BoolVar(&options.help, "h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if options.help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - if options.keyOnly && options.valueOnly { - return errors.New("The --key-only or --value-only flag may be set, but not both.") - } - - // Require database path and page id. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - // Read page id. - pageID, err := strconv.ParseUint(fs.Arg(1), 10, 64) - if err != nil { - return err - } - - // Read item id. - itemID, err := strconv.ParseUint(fs.Arg(2), 10, 64) - if err != nil { - return err - } - - // Open database file handler. - f, err := os.Open(path) - if err != nil { - return err - } - defer func() { _ = f.Close() }() - - // Retrieve page info and page size. - _, buf, err := guts_cli.ReadPage(path, pageID) - if err != nil { - return err - } - - if !options.valueOnly { - err := cmd.PrintLeafItemKey(cmd.Stdout, buf, uint16(itemID), options.format) - if err != nil { - return err - } - } - if !options.keyOnly { - err := cmd.PrintLeafItemValue(cmd.Stdout, buf, uint16(itemID), options.format) - if err != nil { - return err - } - } - return nil -} - -func (cmd *pageItemCommand) leafPageElement(pageBytes []byte, index uint16) ([]byte, []byte, error) { - p := common.LoadPage(pageBytes) - if index >= p.Count() { - return nil, nil, fmt.Errorf("leafPageElement: expected item index less than %d, but got %d", p.Count(), index) - } - if p.Typ() != "leaf" { - return nil, nil, fmt.Errorf("leafPageElement: expected page type of 'leaf', but got '%s'", p.Typ()) - } - - e := p.LeafPageElement(index) - return e.Key(), e.Value(), nil -} - const FORMAT_MODES = "auto|ascii-encoded|hex|bytes|redacted" // formatBytes converts bytes into string according to format. 
@@ -309,43 +206,6 @@ func writelnBytes(w io.Writer, b []byte, format string) error { return err } -// PrintLeafItemKey writes the bytes of a leaf element's key. -func (cmd *pageItemCommand) PrintLeafItemKey(w io.Writer, pageBytes []byte, index uint16, format string) error { - k, _, err := cmd.leafPageElement(pageBytes, index) - if err != nil { - return err - } - - return writelnBytes(w, k, format) -} - -// PrintLeafItemValue writes the bytes of a leaf element's value. -func (cmd *pageItemCommand) PrintLeafItemValue(w io.Writer, pageBytes []byte, index uint16, format string) error { - _, v, err := cmd.leafPageElement(pageBytes, index) - if err != nil { - return err - } - return writelnBytes(w, v, format) -} - -// Usage returns the help message. -func (cmd *pageItemCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt page-item [options] PATH pageid itemid - -Additional options include: - - --key-only - Print only the key - --value-only - Print only the value - --format - Output format. One of: `+FORMAT_MODES+` (default=auto) - -page-item prints a page item key and value. -`, "\n") -} - // getCommand represents the "get" command execution. 
type getCommand struct { baseCommand diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index 26d031f2b..a5269119d 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -19,7 +19,6 @@ import ( bolt "go.etcd.io/bbolt" main "go.etcd.io/bbolt/cmd/bbolt" "go.etcd.io/bbolt/internal/btesting" - "go.etcd.io/bbolt/internal/guts_cli" ) func TestPageCommand_Run(t *testing.T) { @@ -49,80 +48,6 @@ func TestPageCommand_Run(t *testing.T) { } } -func TestPageItemCommand_Run(t *testing.T) { - testCases := []struct { - name string - printable bool - itemId string - expectedKey string - expectedValue string - }{ - { - name: "printable items", - printable: true, - itemId: "0", - expectedKey: "key_0", - expectedValue: "value_0", - }, - { - name: "non printable items", - printable: false, - itemId: "0", - expectedKey: hex.EncodeToString(convertInt64IntoBytes(0 + 1)), - expectedValue: hex.EncodeToString(convertInt64IntoBytes(0 + 2)), - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 4096}) - srcPath := db.Path() - - t.Log("Insert some sample data") - err := db.Update(func(tx *bolt.Tx) error { - b, bErr := tx.CreateBucketIfNotExists([]byte("data")) - if bErr != nil { - return bErr - } - - for i := 0; i < 100; i++ { - if tc.printable { - if bErr = b.Put([]byte(fmt.Sprintf("key_%d", i)), []byte(fmt.Sprintf("value_%d", i))); bErr != nil { - return bErr - } - } else { - k, v := convertInt64IntoBytes(int64(i+1)), convertInt64IntoBytes(int64(i+2)) - if bErr = b.Put(k, v); bErr != nil { - return bErr - } - } - } - return nil - }) - require.NoError(t, err) - defer requireDBNoChange(t, dbData(t, srcPath), srcPath) - - meta := readMetaPage(t, srcPath) - leafPageId := 0 - for i := 2; i < int(meta.Pgid()); i++ { - p, _, err := guts_cli.ReadPage(srcPath, uint64(i)) - require.NoError(t, err) - if p.IsLeafPage() && p.Count() > 1 { - leafPageId = int(p.Id()) - } - } - 
require.NotEqual(t, 0, leafPageId) - - m := NewMain() - err = m.Run("page-item", db.Path(), fmt.Sprintf("%d", leafPageId), tc.itemId) - require.NoError(t, err) - if !strings.Contains(m.Stdout.String(), tc.expectedKey) || !strings.Contains(m.Stdout.String(), tc.expectedValue) { - t.Fatalf("Unexpected output:\n%s\n", m.Stdout.String()) - } - }) - } -} - // Ensure the "get" command can print the value of a key in a bucket. func TestGetCommand_Run(t *testing.T) { testCases := []struct { From 6e90ee7ca757439765e3a2277e3ac04f898a80db Mon Sep 17 00:00:00 2001 From: Gang Li Date: Thu, 12 Jun 2025 16:43:31 +0000 Subject: [PATCH 395/439] add write mode "seq-del" Signed-off-by: Gang Li --- cmd/bbolt/main.go | 57 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 337283e03..b34164f8c 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -8,6 +8,7 @@ import ( "flag" "fmt" "io" + "math" "math/rand" "os" "runtime" @@ -458,6 +459,9 @@ func (cmd *benchCommand) runWrites(db *bolt.DB, options *BenchOptions, results * keys, err = cmd.runWritesSequentialNested(db, options, results) case "rnd-nest": keys, err = cmd.runWritesRandomNested(db, options, results, r) + case "seq-del": + options.DeleteFraction = 0.1 + keys, err = cmd.runWritesSequentialAndDelete(db, options, results) default: return nil, fmt.Errorf("invalid write mode: %s", options.WriteMode) } @@ -478,6 +482,11 @@ func (cmd *benchCommand) runWritesSequential(db *bolt.DB, options *BenchOptions, return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i }) } +func (cmd *benchCommand) runWritesSequentialAndDelete(db *bolt.DB, options *BenchOptions, results *BenchResults) ([]nestedKey, error) { + var i = uint32(0) + return cmd.runWritesDeletesWithSource(db, options, results, func() uint32 { i++; return i }) +} + func (cmd *benchCommand) runWritesRandom(db *bolt.DB, options *BenchOptions, results *BenchResults, r 
*rand.Rand) ([]nestedKey, error) { return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() }) } @@ -529,6 +538,53 @@ func (cmd *benchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, return keys, nil } +func (cmd *benchCommand) runWritesDeletesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) ([]nestedKey, error) { + var keys []nestedKey + deleteSize := int64(math.Ceil(float64(options.BatchSize) * options.DeleteFraction)) + var InsertedKeys [][]byte + + for i := int64(0); i < options.Iterations; i += options.BatchSize { + if err := db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists(benchBucketName) + b.FillPercent = options.FillPercent + + fmt.Fprintf(cmd.Stderr, "Starting delete iteration %d, deleteSize: %d\n", i, deleteSize) + for i := int64(0); i < deleteSize && i < int64(len(InsertedKeys)); i++ { + if err := b.Delete(InsertedKeys[i]); err != nil { + return err + } + } + InsertedKeys = InsertedKeys[:0] + fmt.Fprintf(cmd.Stderr, "Finished delete iteration %d\n", i) + + fmt.Fprintf(cmd.Stderr, "Starting write iteration %d\n", i) + for j := int64(0); j < options.BatchSize; j++ { + + key := make([]byte, options.KeySize) + value := make([]byte, options.ValueSize) + + // Write key as uint32. + binary.BigEndian.PutUint32(key, keySource()) + InsertedKeys = append(InsertedKeys, key) + + // Insert key/value. 
+ if err := b.Put(key, value); err != nil { + return err + } + if keys != nil { + keys = append(keys, nestedKey{nil, key}) + } + results.AddCompletedOps(1) + } + fmt.Fprintf(cmd.Stderr, "Finished write iteration %d\n", i) + return nil + }); err != nil { + return nil, err + } + } + return keys, nil +} + func (cmd *benchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) ([]nestedKey, error) { var keys []nestedKey if options.ReadMode == "rnd" { @@ -889,6 +945,7 @@ type BenchOptions struct { GoBenchOutput bool PageSize int InitialMmapSize int + DeleteFraction float64 // Fraction of keys of last tx to delete during writes. works only with "seq-del" write mode. } // BenchResults represents the performance results of the benchmark and is thread-safe. From 40fd0f06dd9888a8d955c1af4b8f6fce5a38895d Mon Sep 17 00:00:00 2001 From: hwdef Date: Thu, 10 Jul 2025 16:11:32 +0800 Subject: [PATCH 396/439] chore(main): Bump Go to 1.24.5 Signed-off-by: hwdef --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index 2f4320f67..6521720b4 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.24.4 +1.24.5 diff --git a/go.mod b/go.mod index 995366134..0fb246cda 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.24 -toolchain go1.24.4 +toolchain go1.24.5 require ( github.com/spf13/cobra v1.9.1 From ef0224dea1312e18eed075bafa522c9c2ed1d86f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Jul 2025 17:11:25 +0000 Subject: [PATCH 397/439] build(deps): Bump golang.org/x/sys from 0.33.0 to 0.34.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.33.0 to 0.34.0. 
- [Commits](https://github.com/golang/sys/compare/v0.33.0...v0.34.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-version: 0.34.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 0fb246cda..e67b4c19a 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.15.0 - golang.org/x/sys v0.33.0 + golang.org/x/sys v0.34.0 ) require ( diff --git a/go.sum b/go.sum index 10457fcd4..e1d37d0e6 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= -golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= +golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From 5feef26378834a9226376ecf8bb88fb9aa62c102 Mon Sep 17 00:00:00 2001 From: "shenmu.wy" Date: Wed, 16 Jul 2025 14:31:02 +0800 Subject: [PATCH 398/439] cmd: migrate page command to cobra style Signed-off-by: shenmu.wy --- cmd/bbolt/command_page.go | 225 +++++++++++++++++++++++++ cmd/bbolt/command_page_test.go | 52 ++++++ cmd/bbolt/command_root.go | 1 + 
cmd/bbolt/main.go | 2 - cmd/bbolt/main_test.go | 27 --- cmd/bbolt/page_command.go | 290 --------------------------------- 6 files changed, 278 insertions(+), 319 deletions(-) create mode 100644 cmd/bbolt/command_page.go create mode 100644 cmd/bbolt/command_page_test.go delete mode 100644 cmd/bbolt/page_command.go diff --git a/cmd/bbolt/command_page.go b/cmd/bbolt/command_page.go new file mode 100644 index 000000000..0f6439091 --- /dev/null +++ b/cmd/bbolt/command_page.go @@ -0,0 +1,225 @@ +package main + +import ( + "fmt" + "io" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "go.etcd.io/bbolt/internal/common" + "go.etcd.io/bbolt/internal/guts_cli" +) + +type getPageOptions struct { + all bool + format string +} + +func newPageCommand() *cobra.Command { + var opt getPageOptions + pageCmd := &cobra.Command{ + Use: "page [pageid...]", + Short: "page prints one or more pages in human readable format.", + Args: cobra.MinimumNArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + dbPath := args[0] + pageIDs, err := stringToPages(args[1:]) + if err != nil { + return err + } + if len(pageIDs) == 0 { + return ErrPageIDRequired + } + return pageFunc(cmd, opt, dbPath, pageIDs) + }, + } + opt.AddFlags(pageCmd.Flags()) + + return pageCmd +} + +func (o *getPageOptions) AddFlags(fs *pflag.FlagSet) { + fs.BoolVar(&o.all, "all", false, "List all pages.") + fs.StringVar(&o.format, "format-value", "auto", "Output format one of: "+FORMAT_MODES+". Applies to values on the leaf page.") +} + +func pageFunc(cmd *cobra.Command, cfg getPageOptions, dbPath string, pageIDs []uint64) (err error) { + if _, err := checkSourceDBPath(dbPath); err != nil { + return err + } + + if cfg.all { + printAllPages(cmd, dbPath, cfg.format) + } else { + printPages(cmd, pageIDs, dbPath, cfg.format) + } + + return +} + +func printPages(cmd *cobra.Command, pageIDs []uint64, path string, formatValue string) { + // print each page listed. 
+ for i, pageID := range pageIDs { + // print a separator. + if i > 0 { + fmt.Fprintln(cmd.OutOrStdout(), "===============================================") + } + _, pErr := printPage(cmd, path, pageID, formatValue) + if pErr != nil { + fmt.Fprintf(cmd.OutOrStdout(), "Prining page %d failed: %s. Continuing...\n", pageID, pErr) + } + } +} + +// printPage prints given page to cmd.Stdout and returns error or number of interpreted pages. +func printPage(cmd *cobra.Command, path string, pageID uint64, formatValue string) (numPages uint32, reterr error) { + defer func() { + if err := recover(); err != nil { + reterr = fmt.Errorf("%s", err) + } + }() + + // retrieve page info and page size. + p, buf, err := guts_cli.ReadPage(path, pageID) + if err != nil { + return 0, err + } + + // print basic page info. + stdout := cmd.OutOrStdout() + fmt.Fprintf(stdout, "Page ID: %d\n", p.Id()) + fmt.Fprintf(stdout, "Page Type: %s\n", p.Typ()) + fmt.Fprintf(stdout, "Total Size: %d bytes\n", len(buf)) + fmt.Fprintf(stdout, "Overflow pages: %d\n", p.Overflow()) + + // print type-specific data. + switch p.Typ() { + case "meta": + err = pagePrintMeta(stdout, buf) + case "leaf": + err = pagePrintLeaf(stdout, buf, formatValue) + case "branch": + err = pagePrintBranch(stdout, buf) + case "freelist": + err = pagePrintFreelist(stdout, buf) + } + if err != nil { + return 0, err + } + return p.Overflow(), nil +} + +func printAllPages(cmd *cobra.Command, path string, formatValue string) { + _, hwm, err := guts_cli.ReadPageAndHWMSize(path) + if err != nil { + fmt.Fprintf(cmd.OutOrStdout(), "cannot read number of pages: %v", err) + } + + // print each page listed. + for pageID := uint64(0); pageID < uint64(hwm); { + // print a separator. + if pageID > 0 { + fmt.Fprintln(cmd.OutOrStdout(), "===============================================") + } + overflow, pErr := printPage(cmd, path, pageID, formatValue) + if pErr != nil { + fmt.Fprintf(cmd.OutOrStdout(), "Prining page %d failed: %s. 
Continuing...\n", pageID, pErr) + pageID++ + } else { + pageID += uint64(overflow) + 1 + } + } +} + +// pagePrintMeta prints the data from the meta page. +func pagePrintMeta(w io.Writer, buf []byte) error { + m := common.LoadPageMeta(buf) + m.Print(w) + return nil +} + +// pagePrintLeaf prints the data for a leaf page. +func pagePrintLeaf(w io.Writer, buf []byte, formatValue string) error { + p := common.LoadPage(buf) + + // print number of items. + fmt.Fprintf(w, "Item Count: %d\n", p.Count()) + fmt.Fprintf(w, "\n") + + // print each key/value. + for i := uint16(0); i < p.Count(); i++ { + e := p.LeafPageElement(i) + + // format key as string. + var k string + if isPrintable(string(e.Key())) { + k = fmt.Sprintf("%q", string(e.Key())) + } else { + k = fmt.Sprintf("%x", string(e.Key())) + } + + // format value as string. + var v string + var err error + if e.IsBucketEntry() { + b := e.Bucket() + v = b.String() + } else { + v, err = formatBytes(e.Value(), formatValue) + if err != nil { + return err + } + } + + fmt.Fprintf(w, "%s: %s\n", k, v) + } + fmt.Fprintf(w, "\n") + return nil +} + +// pagePrintBranch prints the data for a leaf page. +func pagePrintBranch(w io.Writer, buf []byte) error { + p := common.LoadPage(buf) + + // print number of items. + fmt.Fprintf(w, "Item Count: %d\n", p.Count()) + fmt.Fprintf(w, "\n") + + // print each key/value. + for i := uint16(0); i < p.Count(); i++ { + e := p.BranchPageElement(i) + + // format key as string. + var k string + if isPrintable(string(e.Key())) { + k = fmt.Sprintf("%q", string(e.Key())) + } else { + k = fmt.Sprintf("%x", string(e.Key())) + } + + fmt.Fprintf(w, "%s: \n", k, e.Pgid()) + } + fmt.Fprintf(w, "\n") + return nil +} + +// pagePrintFreelist prints the data for a freelist page. +func pagePrintFreelist(w io.Writer, buf []byte) error { + p := common.LoadPage(buf) + + // print number of items. 
+ _, cnt := p.FreelistPageCount() + fmt.Fprintf(w, "Item Count: %d\n", cnt) + fmt.Fprintf(w, "Overflow: %d\n", p.Overflow()) + + fmt.Fprintf(w, "\n") + + // print each page in the freelist. + ids := p.FreelistPageIds() + for _, ids := range ids { + fmt.Fprintf(w, "%d\n", ids) + } + fmt.Fprintf(w, "\n") + return nil +} diff --git a/cmd/bbolt/command_page_test.go b/cmd/bbolt/command_page_test.go new file mode 100644 index 000000000..689b76d9a --- /dev/null +++ b/cmd/bbolt/command_page_test.go @@ -0,0 +1,52 @@ +package main_test + +import ( + "bytes" + "errors" + "testing" + + "github.com/stretchr/testify/require" + + bolt "go.etcd.io/bbolt" + main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/internal/btesting" +) + +func TestPageCommand_Run(t *testing.T) { + t.Log("Creating a new database") + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 4096}) + db.Close() + + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + exp := "Page ID: 0\n" + + "Page Type: meta\n" + + "Total Size: 4096 bytes\n" + + "Overflow pages: 0\n" + + "Version: 2\n" + + "Page Size: 4096 bytes\n" + + "Flags: 00000000\n" + + "Root: \n" + + "Freelist: \n" + + "HWM: \n" + + "Txn ID: 0\n" + + "Checksum: 07516e114689fdee\n\n" + + t.Log("Running page command") + rootCmd := main.NewRootCommand() + outBuf := &bytes.Buffer{} + rootCmd.SetOut(outBuf) + rootCmd.SetArgs([]string{"page", db.Path(), "0"}) + + err := rootCmd.Execute() + require.NoError(t, err) + require.Equal(t, exp, outBuf.String(), "unexpected stdout") +} + +func TestPageCommand_NoArgs(t *testing.T) { + expErr := errors.New("requires at least 2 arg(s), only received 0") + rootCmd := main.NewRootCommand() + rootCmd.SetArgs([]string{"page"}) + err := rootCmd.Execute() + require.ErrorContains(t, err, expErr.Error()) +} diff --git a/cmd/bbolt/command_root.go b/cmd/bbolt/command_root.go index b49b04be5..73090519f 100644 --- a/cmd/bbolt/command_root.go +++ b/cmd/bbolt/command_root.go @@ -29,6 +29,7 @@ func 
NewRootCommand() *cobra.Command { newKeysCommand(), newDumpCommand(), newPageItemCommand(), + newPageCommand(), ) return rootCmd diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index b34164f8c..3fa5bb460 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -123,8 +123,6 @@ func (m *Main) Run(args ...string) error { return newBenchCommand(m).Run(args[1:]...) case "get": return newGetCommand(m).Run(args[1:]...) - case "page": - return newPageCommand(m).Run(args[1:]...) default: return ErrUnknownCommand } diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index a5269119d..657952197 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -21,33 +21,6 @@ import ( "go.etcd.io/bbolt/internal/btesting" ) -func TestPageCommand_Run(t *testing.T) { - db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 4096}) - db.Close() - - defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) - - exp := "Page ID: 0\n" + - "Page Type: meta\n" + - "Total Size: 4096 bytes\n" + - "Overflow pages: 0\n" + - "Version: 2\n" + - "Page Size: 4096 bytes\n" + - "Flags: 00000000\n" + - "Root: \n" + - "Freelist: \n" + - "HWM: \n" + - "Txn ID: 0\n" + - "Checksum: 07516e114689fdee\n\n" - - m := NewMain() - err := m.Run("page", db.Path(), "0") - require.NoError(t, err) - if m.Stdout.String() != exp { - t.Fatalf("unexpected stdout:\n%s\n%s", m.Stdout.String(), exp) - } -} - // Ensure the "get" command can print the value of a key in a bucket. func TestGetCommand_Run(t *testing.T) { testCases := []struct { diff --git a/cmd/bbolt/page_command.go b/cmd/bbolt/page_command.go deleted file mode 100644 index 7a6ec5b9b..000000000 --- a/cmd/bbolt/page_command.go +++ /dev/null @@ -1,290 +0,0 @@ -package main - -import ( - "bytes" - "flag" - "fmt" - "io" - "os" - "strings" - - "go.etcd.io/bbolt/internal/common" - "go.etcd.io/bbolt/internal/guts_cli" -) - -// pageCommand represents the "page" command execution. 
-type pageCommand struct { - baseCommand -} - -// newPageCommand returns a pageCommand. -func newPageCommand(m *Main) *pageCommand { - c := &pageCommand{} - c.baseCommand = m.baseCommand - return c -} - -// Run executes the command. -func (cmd *pageCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - help := fs.Bool("h", false, "") - all := fs.Bool("all", false, "list all pages") - formatValue := fs.String("format-value", "auto", "One of: "+FORMAT_MODES+" . Applies to values on the leaf page.") - - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path and page id. - path := fs.Arg(0) - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } - - if !*all { - // Read page ids. - pageIDs, err := stringToPages(fs.Args()[1:]) - if err != nil { - return err - } else if len(pageIDs) == 0 { - return ErrPageIDRequired - } - cmd.printPages(pageIDs, path, formatValue) - } else { - cmd.printAllPages(path, formatValue) - } - return nil -} - -func (cmd *pageCommand) printPages(pageIDs []uint64, path string, formatValue *string) { - // Print each page listed. - for i, pageID := range pageIDs { - // Print a separator. - if i > 0 { - fmt.Fprintln(cmd.Stdout, "===============================================") - } - _, err2 := cmd.printPage(path, pageID, *formatValue) - if err2 != nil { - fmt.Fprintf(cmd.Stdout, "Prining page %d failed: %s. Continuing...\n", pageID, err2) - } - } -} - -func (cmd *pageCommand) printAllPages(path string, formatValue *string) { - _, hwm, err := guts_cli.ReadPageAndHWMSize(path) - if err != nil { - fmt.Fprintf(cmd.Stdout, "cannot read number of pages: %v", err) - } - - // Print each page listed. - for pageID := uint64(0); pageID < uint64(hwm); { - // Print a separator. 
- if pageID > 0 { - fmt.Fprintln(cmd.Stdout, "===============================================") - } - overflow, err2 := cmd.printPage(path, pageID, *formatValue) - if err2 != nil { - fmt.Fprintf(cmd.Stdout, "Prining page %d failed: %s. Continuing...\n", pageID, err2) - pageID++ - } else { - pageID += uint64(overflow) + 1 - } - } -} - -// printPage prints given page to cmd.Stdout and returns error or number of interpreted pages. -func (cmd *pageCommand) printPage(path string, pageID uint64, formatValue string) (numPages uint32, reterr error) { - defer func() { - if err := recover(); err != nil { - reterr = fmt.Errorf("%s", err) - } - }() - - // Retrieve page info and page size. - p, buf, err := guts_cli.ReadPage(path, pageID) - if err != nil { - return 0, err - } - - // Print basic page info. - fmt.Fprintf(cmd.Stdout, "Page ID: %d\n", p.Id()) - fmt.Fprintf(cmd.Stdout, "Page Type: %s\n", p.Typ()) - fmt.Fprintf(cmd.Stdout, "Total Size: %d bytes\n", len(buf)) - fmt.Fprintf(cmd.Stdout, "Overflow pages: %d\n", p.Overflow()) - - // Print type-specific data. - switch p.Typ() { - case "meta": - err = cmd.PrintMeta(cmd.Stdout, buf) - case "leaf": - err = cmd.PrintLeaf(cmd.Stdout, buf, formatValue) - case "branch": - err = cmd.PrintBranch(cmd.Stdout, buf) - case "freelist": - err = cmd.PrintFreelist(cmd.Stdout, buf) - } - if err != nil { - return 0, err - } - return p.Overflow(), nil -} - -// PrintMeta prints the data from the meta page. -func (cmd *pageCommand) PrintMeta(w io.Writer, buf []byte) error { - m := common.LoadPageMeta(buf) - m.Print(w) - return nil -} - -// PrintLeaf prints the data for a leaf page. -func (cmd *pageCommand) PrintLeaf(w io.Writer, buf []byte, formatValue string) error { - p := common.LoadPage(buf) - - // Print number of items. - fmt.Fprintf(w, "Item Count: %d\n", p.Count()) - fmt.Fprintf(w, "\n") - - // Print each key/value. - for i := uint16(0); i < p.Count(); i++ { - e := p.LeafPageElement(i) - - // Format key as string. 
- var k string - if isPrintable(string(e.Key())) { - k = fmt.Sprintf("%q", string(e.Key())) - } else { - k = fmt.Sprintf("%x", string(e.Key())) - } - - // Format value as string. - var v string - if e.IsBucketEntry() { - b := e.Bucket() - v = b.String() - } else { - var err error - v, err = formatBytes(e.Value(), formatValue) - if err != nil { - return err - } - } - - fmt.Fprintf(w, "%s: %s\n", k, v) - } - fmt.Fprintf(w, "\n") - return nil -} - -// PrintBranch prints the data for a leaf page. -func (cmd *pageCommand) PrintBranch(w io.Writer, buf []byte) error { - p := common.LoadPage(buf) - - // Print number of items. - fmt.Fprintf(w, "Item Count: %d\n", p.Count()) - fmt.Fprintf(w, "\n") - - // Print each key/value. - for i := uint16(0); i < p.Count(); i++ { - e := p.BranchPageElement(i) - - // Format key as string. - var k string - if isPrintable(string(e.Key())) { - k = fmt.Sprintf("%q", string(e.Key())) - } else { - k = fmt.Sprintf("%x", string(e.Key())) - } - - fmt.Fprintf(w, "%s: \n", k, e.Pgid()) - } - fmt.Fprintf(w, "\n") - return nil -} - -// PrintFreelist prints the data for a freelist page. -func (cmd *pageCommand) PrintFreelist(w io.Writer, buf []byte) error { - p := common.LoadPage(buf) - - // Print number of items. - _, cnt := p.FreelistPageCount() - fmt.Fprintf(w, "Item Count: %d\n", cnt) - fmt.Fprintf(w, "Overflow: %d\n", p.Overflow()) - - fmt.Fprintf(w, "\n") - - // Print each page in the freelist. - ids := p.FreelistPageIds() - for _, ids := range ids { - fmt.Fprintf(w, "%d\n", ids) - } - fmt.Fprintf(w, "\n") - return nil -} - -// PrintPage prints a given page as hexadecimal. -func (cmd *pageCommand) PrintPage(w io.Writer, r io.ReaderAt, pageID int, pageSize int) error { - const bytesPerLineN = 16 - - // Read page into buffer. 
- buf := make([]byte, pageSize) - addr := pageID * pageSize - if n, err := r.ReadAt(buf, int64(addr)); err != nil { - return err - } else if n != pageSize { - return io.ErrUnexpectedEOF - } - - // Write out to writer in 16-byte lines. - var prev []byte - var skipped bool - for offset := 0; offset < pageSize; offset += bytesPerLineN { - // Retrieve current 16-byte line. - line := buf[offset : offset+bytesPerLineN] - isLastLine := offset == (pageSize - bytesPerLineN) - - // If it's the same as the previous line then print a skip. - if bytes.Equal(line, prev) && !isLastLine { - if !skipped { - fmt.Fprintf(w, "%07x *\n", addr+offset) - skipped = true - } - } else { - // Print line as hexadecimal in 2-byte groups. - fmt.Fprintf(w, "%07x %04x %04x %04x %04x %04x %04x %04x %04x\n", addr+offset, - line[0:2], line[2:4], line[4:6], line[6:8], - line[8:10], line[10:12], line[12:14], line[14:16], - ) - - skipped = false - } - - // Save the previous line. - prev = line - } - fmt.Fprint(w, "\n") - - return nil -} - -// Usage returns the help message. -func (cmd *pageCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt page PATH pageid [pageid...] - or: bolt page --all PATH - -Additional options include: - - --all - prints all pages (only skips pages that were considered successful overflow pages) - --format-value=`+FORMAT_MODES+` (default: auto) - prints values (on the leaf page) using the given format. - -Page prints one or more pages in human readable format. 
-`, "\n") -} From 39b63a0395ce8e6a89cf344dd94048054e41fb9e Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Fri, 18 Jul 2025 17:08:42 +0200 Subject: [PATCH 399/439] chore(test): bump_windows_test_timeout Signed-off-by: Mustafa Elbehery --- .github/workflows/tests_windows.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 79f9163c5..97fbd68af 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -31,7 +31,7 @@ jobs: run: | case "${TARGET}" in windows-amd64-unit-test-4-cpu) - CPU=4 make test + CPU=4 TIMEOUT=40m make test ;; *) echo "Failed to find target" @@ -59,3 +59,5 @@ jobs: with: version: v2.1.6 - run: make coverage + env: + TIMEOUT: 40m From 985246c080f98d4b7d3c1ff89e820e7859103b49 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Thu, 17 Jul 2025 14:04:28 +0200 Subject: [PATCH 400/439] chore(cmd): add exclusive behaviour to page cmd Signed-off-by: Mustafa Elbehery --- cmd/bbolt/command_page.go | 10 ++++--- cmd/bbolt/command_page_test.go | 49 +++++++++++++++++++++++++++++++++- cmd/bbolt/main.go | 6 +++++ 3 files changed, 61 insertions(+), 4 deletions(-) diff --git a/cmd/bbolt/command_page.go b/cmd/bbolt/command_page.go index 0f6439091..1b02cc317 100644 --- a/cmd/bbolt/command_page.go +++ b/cmd/bbolt/command_page.go @@ -19,16 +19,16 @@ type getPageOptions struct { func newPageCommand() *cobra.Command { var opt getPageOptions pageCmd := &cobra.Command{ - Use: "page [pageid...]", + Use: "page [pageid...]", Short: "page prints one or more pages in human readable format.", - Args: cobra.MinimumNArgs(2), + Args: cobra.MinimumNArgs(1), RunE: func(cmd *cobra.Command, args []string) error { dbPath := args[0] pageIDs, err := stringToPages(args[1:]) if err != nil { return err } - if len(pageIDs) == 0 { + if len(pageIDs) == 0 && !opt.all { return ErrPageIDRequired } return pageFunc(cmd, opt, dbPath, pageIDs) @@ -45,6 +45,10 
@@ func (o *getPageOptions) AddFlags(fs *pflag.FlagSet) { } func pageFunc(cmd *cobra.Command, cfg getPageOptions, dbPath string, pageIDs []uint64) (err error) { + if cfg.all && len(pageIDs) != 0 { + return ErrInvalidPageArgs + } + if _, err := checkSourceDBPath(dbPath); err != nil { return err } diff --git a/cmd/bbolt/command_page_test.go b/cmd/bbolt/command_page_test.go index 689b76d9a..e900a0372 100644 --- a/cmd/bbolt/command_page_test.go +++ b/cmd/bbolt/command_page_test.go @@ -43,8 +43,55 @@ func TestPageCommand_Run(t *testing.T) { require.Equal(t, exp, outBuf.String(), "unexpected stdout") } +func TestPageCommand_ExclusiveArgs(t *testing.T) { + testCases := []struct { + name string + pageIds string + allFlag string + expErr error + }{ + { + name: "flag only", + pageIds: "", + allFlag: "--all", + expErr: nil, + }, + { + name: "pageIds only", + pageIds: "0", + allFlag: "", + expErr: nil, + }, + { + name: "pageIds and flag", + pageIds: "0", + allFlag: "--all", + expErr: main.ErrInvalidPageArgs, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Log("Creating a new database") + db := btesting.MustCreateDBWithOption(t, &bolt.Options{PageSize: 4096}) + db.Close() + + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + t.Log("Running page command") + rootCmd := main.NewRootCommand() + outBuf := &bytes.Buffer{} + rootCmd.SetOut(outBuf) + rootCmd.SetArgs([]string{"page", db.Path(), tc.pageIds, tc.allFlag}) + + err := rootCmd.Execute() + require.Equal(t, tc.expErr, err) + }) + } +} + func TestPageCommand_NoArgs(t *testing.T) { - expErr := errors.New("requires at least 2 arg(s), only received 0") + expErr := errors.New("requires at least 1 arg(s), only received 0") rootCmd := main.NewRootCommand() rootCmd.SetArgs([]string{"page"}) err := rootCmd.Execute() diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 3fa5bb460..a25a70557 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -50,6 +50,9 @@ var ( // 
ErrPageIDRequired is returned when a required page id is not specified. ErrPageIDRequired = errors.New("page id required") + // ErrInvalidPageArgs is returned when Page cmd receives pageIds and all option is true. + ErrInvalidPageArgs = errors.New("invalid args: either use '--all' or 'pageid...'") + // ErrBucketRequired is returned when a bucket is not specified. ErrBucketRequired = errors.New("bucket required") @@ -1024,6 +1027,9 @@ func stringToPage(str string) (uint64, error) { func stringToPages(strs []string) ([]uint64, error) { var a []uint64 for _, str := range strs { + if len(str) == 0 { + continue + } i, err := stringToPage(str) if err != nil { return nil, err From 051ce254db4d5d5414396acf96126e05179e493e Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Sat, 19 Jul 2025 20:20:25 +0200 Subject: [PATCH 401/439] chore(test): bump_windows_test_timeout Signed-off-by: Mustafa Elbehery --- .github/workflows/tests_windows.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 97fbd68af..3236b6b50 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -31,7 +31,7 @@ jobs: run: | case "${TARGET}" in windows-amd64-unit-test-4-cpu) - CPU=4 TIMEOUT=40m make test + CPU=4 TIMEOUT=50m make test ;; *) echo "Failed to find target" @@ -60,4 +60,4 @@ jobs: version: v2.1.6 - run: make coverage env: - TIMEOUT: 40m + TIMEOUT: 50m From 1cbe18ae7384ce5ffd586dc6d1dcb02c1af71c66 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 19 Jul 2025 20:39:34 +0000 Subject: [PATCH 402/439] build(deps): Bump golang.org/x/sync from 0.15.0 to 0.16.0 Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.15.0 to 0.16.0. 
- [Commits](https://github.com/golang/sync/compare/v0.15.0...v0.16.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-version: 0.16.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e67b4c19a..90dfe551d 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/spf13/pflag v1.0.6 github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 - golang.org/x/sync v0.15.0 + golang.org/x/sync v0.16.0 golang.org/x/sys v0.34.0 ) diff --git a/go.sum b/go.sum index e1d37d0e6..62fd7e313 100644 --- a/go.sum +++ b/go.sum @@ -14,8 +14,8 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= -golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= -golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From 09d56b1002e37e0cf67787efa2b0dc2207a8db34 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 21 Jul 2025 16:35:16 +0000 Subject: [PATCH 403/439] build(deps): Bump github.com/spf13/pflag from 1.0.6 to 1.0.7 Bumps [github.com/spf13/pflag](https://github.com/spf13/pflag) from 1.0.6 to 1.0.7. 
- [Release notes](https://github.com/spf13/pflag/releases) - [Commits](https://github.com/spf13/pflag/compare/v1.0.6...v1.0.7) --- updated-dependencies: - dependency-name: github.com/spf13/pflag dependency-version: 1.0.7 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 90dfe551d..32d358ccd 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ toolchain go1.24.5 require ( github.com/spf13/cobra v1.9.1 - github.com/spf13/pflag v1.0.6 + github.com/spf13/pflag v1.0.7 github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.16.0 diff --git a/go.sum b/go.sum index 62fd7e313..65a3766af 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,9 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= +github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= From e3a8d9d98f77f196826a50478e7ed04007469baf Mon Sep 17 00:00:00 2001 From: joshjms Date: Thu, 24 Jul 2025 12:52:14 +0800 Subject: [PATCH 404/439] refactor: migrate bench command to cobra Signed-off-by: joshjms --- cmd/bbolt/command_bench.go | 
734 ++++++++++++++++++++++++++ cmd/bbolt/command_bench_test.go | 76 +++ cmd/bbolt/command_root.go | 1 + cmd/bbolt/main.go | 701 ------------------------ cmd/bbolt/main_test.go | 35 -- scripts/compare_benchmarks.sh | 6 +- tests/robustness/powerfailure_test.go | 10 +- 7 files changed, 819 insertions(+), 744 deletions(-) create mode 100644 cmd/bbolt/command_bench.go create mode 100644 cmd/bbolt/command_bench_test.go diff --git a/cmd/bbolt/command_bench.go b/cmd/bbolt/command_bench.go new file mode 100644 index 000000000..f8beeb1c8 --- /dev/null +++ b/cmd/bbolt/command_bench.go @@ -0,0 +1,734 @@ +package main + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "math/rand" + "os" + "runtime" + "runtime/pprof" + "sync/atomic" + "testing" + "time" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + bolt "go.etcd.io/bbolt" + "go.etcd.io/bbolt/internal/common" +) + +var ( + // ErrBatchNonDivisibleBatchSize is returned when the batch size can't be evenly + // divided by the iteration count. + ErrBatchNonDivisibleBatchSize = errors.New("the number of iterations must be divisible by the batch size") + + // ErrBatchInvalidWriteMode is returned when the write mode is other than seq, rnd, seq-nest, or rnd-nest. + ErrBatchInvalidWriteMode = errors.New("the write mode should be one of seq, rnd, seq-nest, or rnd-nest") +) + +var benchBucketName = []byte("bench") + +type benchOptions struct { + profileMode string + writeMode string + readMode string + iterations int64 + batchSize int64 + keySize int + valueSize int + cpuProfile string + memProfile string + blockProfile string + fillPercent float64 + noSync bool + work bool + path string + goBenchOutput bool + pageSize int + initialMmapSize int + deleteFraction float64 // Fraction of keys of last tx to delete during writes. works only with "seq-del" write mode. 
+} + +func (o *benchOptions) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&o.profileMode, "profile-mode", "rw", "") + fs.StringVar(&o.writeMode, "write-mode", "seq", "") + fs.StringVar(&o.readMode, "read-mode", "seq", "") + fs.Int64Var(&o.iterations, "count", 1000, "") + fs.Int64Var(&o.batchSize, "batch-size", 0, "") + fs.IntVar(&o.keySize, "key-size", 8, "") + fs.IntVar(&o.valueSize, "value-size", 32, "") + fs.StringVar(&o.cpuProfile, "cpuprofile", "", "") + fs.StringVar(&o.memProfile, "memprofile", "", "") + fs.StringVar(&o.blockProfile, "blockprofile", "", "") + fs.Float64Var(&o.fillPercent, "fill-percent", bolt.DefaultFillPercent, "") + fs.BoolVar(&o.noSync, "no-sync", false, "") + fs.BoolVar(&o.work, "work", false, "") + fs.StringVar(&o.path, "path", "", "") + fs.BoolVar(&o.goBenchOutput, "gobench-output", false, "") + fs.IntVar(&o.pageSize, "page-size", common.DefaultPageSize, "Set page size in bytes.") + fs.IntVar(&o.initialMmapSize, "initial-mmap-size", 0, "Set initial mmap size in bytes for database file.") +} + +// Returns an error if `bench` options are not valid. +func (o *benchOptions) Validate() error { + // Require that batch size can be evenly divided by the iteration count if set. + if o.batchSize > 0 && o.iterations%o.batchSize != 0 { + return ErrBatchNonDivisibleBatchSize + } + + switch o.writeMode { + case "seq", "rnd", "seq-nest", "rnd-nest": + default: + return ErrBatchInvalidWriteMode + } + + // Generate temp path if one is not passed in. + if o.path == "" { + f, err := os.CreateTemp("", "bolt-bench-") + if err != nil { + return fmt.Errorf("temp file: %s", err) + } + f.Close() + os.Remove(f.Name()) + o.path = f.Name() + } + + return nil +} + +// Sets the `bench` option values that are dependent on other options. +func (o *benchOptions) SetOptionValues() error { + // Generate temp path if one is not passed in. 
+ if o.path == "" { + f, err := os.CreateTemp("", "bolt-bench-") + if err != nil { + return fmt.Errorf("error creating temp file: %s", err) + } + f.Close() + os.Remove(f.Name()) + o.path = f.Name() + } + + // Set batch size to iteration size if not set. + if o.batchSize == 0 { + o.batchSize = o.iterations + } + + return nil +} + +func newBenchCommand() *cobra.Command { + var o benchOptions + + benchCmd := &cobra.Command{ + Use: "bench", + Short: "run synthetic benchmark against bbolt", + RunE: func(cmd *cobra.Command, args []string) error { + if err := o.Validate(); err != nil { + return err + } + if err := o.SetOptionValues(); err != nil { + return err + } + return benchFunc(cmd, &o) + }, + } + + o.AddFlags(benchCmd.Flags()) + + return benchCmd +} + +func benchFunc(cmd *cobra.Command, options *benchOptions) error { + // Remove path if "-work" is not set. Otherwise keep path. + if options.work { + fmt.Fprintf(cmd.ErrOrStderr(), "work: %s\n", options.path) + } else { + defer os.Remove(options.path) + } + + // Create database. + dbOptions := *bolt.DefaultOptions + dbOptions.PageSize = options.pageSize + dbOptions.InitialMmapSize = options.initialMmapSize + db, err := bolt.Open(options.path, 0600, &dbOptions) + if err != nil { + return err + } + db.NoSync = options.noSync + defer db.Close() + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + + var writeResults benchResults + + fmt.Fprintf(cmd.ErrOrStderr(), "starting write benchmark.\n") + keys, err := runWrites(cmd, db, options, &writeResults, r) + if err != nil { + return fmt.Errorf("write: %v", err) + } + + if keys != nil { + r.Shuffle(len(keys), func(i, j int) { + keys[i], keys[j] = keys[j], keys[i] + }) + } + + var readResults benchResults + fmt.Fprintf(cmd.ErrOrStderr(), "starting read benchmark.\n") + // Read from the database. + if err := runReads(cmd, db, options, &readResults, keys); err != nil { + return fmt.Errorf("bench: read: %s", err) + } + + // Print results. 
+ if options.goBenchOutput { + // below replicates the output of testing.B benchmarks, e.g. for external tooling + benchWriteName := "BenchmarkWrite" + benchReadName := "BenchmarkRead" + maxLen := max(len(benchReadName), len(benchWriteName)) + printGoBenchResult(cmd.OutOrStdout(), writeResults, maxLen, benchWriteName) + printGoBenchResult(cmd.OutOrStdout(), readResults, maxLen, benchReadName) + } else { + fmt.Fprintf(cmd.OutOrStdout(), "# Write\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", writeResults.getCompletedOps(), writeResults.getDuration(), writeResults.opDuration(), writeResults.opsPerSecond()) + fmt.Fprintf(cmd.OutOrStdout(), "# Read\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", readResults.getCompletedOps(), readResults.getDuration(), readResults.opDuration(), readResults.opsPerSecond()) + } + fmt.Fprintln(cmd.OutOrStdout(), "") + + return nil +} + +func runWrites(cmd *cobra.Command, db *bolt.DB, options *benchOptions, results *benchResults, r *rand.Rand) ([]nestedKey, error) { + // Start profiling for writes. + if options.profileMode == "rw" || options.profileMode == "w" { + startProfiling(cmd, options) + } + + finishChan := make(chan interface{}) + go checkProgress(results, finishChan, cmd.ErrOrStderr()) + defer close(finishChan) + + t := time.Now() + + var keys []nestedKey + var err error + switch options.writeMode { + case "seq": + keys, err = runWritesSequential(cmd, db, options, results) + case "rnd": + keys, err = runWritesRandom(cmd, db, options, results, r) + case "seq-nest": + keys, err = runWritesSequentialNested(cmd, db, options, results) + case "rnd-nest": + keys, err = runWritesRandomNested(cmd, db, options, results, r) + case "seq-del": + options.deleteFraction = 0.1 + keys, err = runWritesSequentialAndDelete(cmd, db, options, results) + default: + return nil, fmt.Errorf("invalid write mode: %s", options.writeMode) + } + + // Save time to write. + results.setDuration(time.Since(t)) + + // Stop profiling for writes only. 
+ if options.profileMode == "w" { + stopProfiling(cmd) + } + + return keys, err +} + +func runWritesSequential(cmd *cobra.Command, db *bolt.DB, options *benchOptions, results *benchResults) ([]nestedKey, error) { + var i = uint32(0) + return runWritesWithSource(cmd, db, options, results, func() uint32 { i++; return i }) +} + +func runWritesSequentialAndDelete(cmd *cobra.Command, db *bolt.DB, options *benchOptions, results *benchResults) ([]nestedKey, error) { + var i = uint32(0) + return runWritesDeletesWithSource(cmd, db, options, results, func() uint32 { i++; return i }) +} + +func runWritesRandom(cmd *cobra.Command, db *bolt.DB, options *benchOptions, results *benchResults, r *rand.Rand) ([]nestedKey, error) { + return runWritesWithSource(cmd, db, options, results, func() uint32 { return r.Uint32() }) +} + +func runWritesSequentialNested(cmd *cobra.Command, db *bolt.DB, options *benchOptions, results *benchResults) ([]nestedKey, error) { + var i = uint32(0) + return runWritesNestedWithSource(cmd, db, options, results, func() uint32 { i++; return i }) +} + +func runWritesRandomNested(cmd *cobra.Command, db *bolt.DB, options *benchOptions, results *benchResults, r *rand.Rand) ([]nestedKey, error) { + return runWritesNestedWithSource(cmd, db, options, results, func() uint32 { return r.Uint32() }) +} + +func runWritesWithSource(cmd *cobra.Command, db *bolt.DB, options *benchOptions, results *benchResults, keySource func() uint32) ([]nestedKey, error) { + var keys []nestedKey + if options.readMode == "rnd" { + keys = make([]nestedKey, 0, options.iterations) + } + + for i := int64(0); i < options.iterations; i += options.batchSize { + if err := db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists(benchBucketName) + b.FillPercent = options.fillPercent + + fmt.Fprintf(cmd.ErrOrStderr(), "Starting write iteration %d\n", i) + for j := int64(0); j < options.batchSize; j++ { + key := make([]byte, options.keySize) + value := make([]byte, 
options.valueSize) + + // Write key as uint32. + binary.BigEndian.PutUint32(key, keySource()) + + // Insert key/value. + if err := b.Put(key, value); err != nil { + return err + } + if keys != nil { + keys = append(keys, nestedKey{nil, key}) + } + results.addCompletedOps(1) + } + fmt.Fprintf(cmd.ErrOrStderr(), "Finished write iteration %d\n", i) + + return nil + }); err != nil { + return nil, err + } + } + return keys, nil +} + +func runWritesDeletesWithSource(cmd *cobra.Command, db *bolt.DB, options *benchOptions, results *benchResults, keySource func() uint32) ([]nestedKey, error) { + var keys []nestedKey + deleteSize := int64(math.Ceil(float64(options.batchSize) * options.deleteFraction)) + var InsertedKeys [][]byte + + for i := int64(0); i < options.iterations; i += options.batchSize { + if err := db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists(benchBucketName) + b.FillPercent = options.fillPercent + + fmt.Fprintf(cmd.ErrOrStderr(), "Starting delete iteration %d, deleteSize: %d\n", i, deleteSize) + for i := int64(0); i < deleteSize && i < int64(len(InsertedKeys)); i++ { + if err := b.Delete(InsertedKeys[i]); err != nil { + return err + } + } + InsertedKeys = InsertedKeys[:0] + fmt.Fprintf(cmd.ErrOrStderr(), "Finished delete iteration %d\n", i) + + fmt.Fprintf(cmd.ErrOrStderr(), "Starting write iteration %d\n", i) + for j := int64(0); j < options.batchSize; j++ { + + key := make([]byte, options.keySize) + value := make([]byte, options.valueSize) + + // Write key as uint32. + binary.BigEndian.PutUint32(key, keySource()) + InsertedKeys = append(InsertedKeys, key) + + // Insert key/value. 
+ if err := b.Put(key, value); err != nil { + return err + } + if keys != nil { + keys = append(keys, nestedKey{nil, key}) + } + results.addCompletedOps(1) + } + fmt.Fprintf(cmd.ErrOrStderr(), "Finished write iteration %d\n", i) + return nil + }); err != nil { + return nil, err + } + } + return keys, nil +} + +func runWritesNestedWithSource(cmd *cobra.Command, db *bolt.DB, options *benchOptions, results *benchResults, keySource func() uint32) ([]nestedKey, error) { + var keys []nestedKey + if options.readMode == "rnd" { + keys = make([]nestedKey, 0, options.iterations) + } + + for i := int64(0); i < options.iterations; i += options.batchSize { + if err := db.Update(func(tx *bolt.Tx) error { + top, err := tx.CreateBucketIfNotExists(benchBucketName) + if err != nil { + return err + } + top.FillPercent = options.fillPercent + + // Create bucket key. + name := make([]byte, options.keySize) + binary.BigEndian.PutUint32(name, keySource()) + + // Create bucket. + b, err := top.CreateBucketIfNotExists(name) + if err != nil { + return err + } + b.FillPercent = options.fillPercent + + fmt.Fprintf(cmd.ErrOrStderr(), "Starting write iteration %d\n", i) + for j := int64(0); j < options.batchSize; j++ { + var key = make([]byte, options.keySize) + var value = make([]byte, options.valueSize) + + // Generate key as uint32. + binary.BigEndian.PutUint32(key, keySource()) + + // Insert value into subbucket. + if err := b.Put(key, value); err != nil { + return err + } + if keys != nil { + keys = append(keys, nestedKey{name, key}) + } + results.addCompletedOps(1) + } + fmt.Fprintf(cmd.ErrOrStderr(), "Finished write iteration %d\n", i) + + return nil + }); err != nil { + return nil, err + } + } + return keys, nil +} + +func runReads(cmd *cobra.Command, db *bolt.DB, options *benchOptions, results *benchResults, keys []nestedKey) error { + // Start profiling for reads. 
+ if options.profileMode == "r" { + startProfiling(cmd, options) + } + + finishChan := make(chan interface{}) + go checkProgress(results, finishChan, cmd.ErrOrStderr()) + defer close(finishChan) + + t := time.Now() + + var err error + switch options.readMode { + case "seq": + switch options.writeMode { + case "seq-nest", "rnd-nest": + err = runReadsSequentialNested(cmd, db, options, results) + default: + err = runReadsSequential(cmd, db, options, results) + } + case "rnd": + switch options.writeMode { + case "seq-nest", "rnd-nest": + err = runReadsRandomNested(cmd, db, options, keys, results) + default: + err = runReadsRandom(cmd, db, options, keys, results) + } + default: + return fmt.Errorf("invalid read mode: %s", options.readMode) + } + + // Save read time. + results.setDuration(time.Since(t)) + + // Stop profiling for reads. + if options.profileMode == "rw" || options.profileMode == "r" { + stopProfiling(cmd) + } + + return err +} + +type nestedKey struct{ bucket, key []byte } + +func runReadsSequential(cmd *cobra.Command, db *bolt.DB, options *benchOptions, results *benchResults) error { + return db.View(func(tx *bolt.Tx) error { + t := time.Now() + + for { + numReads := int64(0) + err := func() error { + defer func() { results.addCompletedOps(numReads) }() + + c := tx.Bucket(benchBucketName).Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + numReads++ + if v == nil { + return ErrInvalidValue + } + } + + return nil + }() + + if err != nil { + return err + } + + if options.writeMode == "seq" && numReads != options.iterations { + return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.iterations, numReads) + } + + // Make sure we do this for at least a second. 
+ if time.Since(t) >= time.Second { + break + } + } + + return nil + }) +} + +func runReadsRandom(cmd *cobra.Command, db *bolt.DB, options *benchOptions, keys []nestedKey, results *benchResults) error { + return db.View(func(tx *bolt.Tx) error { + t := time.Now() + + for { + numReads := int64(0) + err := func() error { + defer func() { results.addCompletedOps(numReads) }() + + b := tx.Bucket(benchBucketName) + for _, key := range keys { + v := b.Get(key.key) + numReads++ + if v == nil { + return ErrInvalidValue + } + } + + return nil + }() + + if err != nil { + return err + } + + if options.writeMode == "seq" && numReads != options.iterations { + return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.iterations, numReads) + } + + // Make sure we do this for at least a second. + if time.Since(t) >= time.Second { + break + } + } + + return nil + }) +} + +func runReadsSequentialNested(cmd *cobra.Command, db *bolt.DB, options *benchOptions, results *benchResults) error { + return db.View(func(tx *bolt.Tx) error { + t := time.Now() + + for { + numReads := int64(0) + var top = tx.Bucket(benchBucketName) + if err := top.ForEach(func(name, _ []byte) error { + defer func() { results.addCompletedOps(numReads) }() + if b := top.Bucket(name); b != nil { + c := b.Cursor() + for k, v := c.First(); k != nil; k, v = c.Next() { + numReads++ + if v == nil { + return ErrInvalidValue + } + } + } + return nil + }); err != nil { + return err + } + + if options.writeMode == "seq-nest" && numReads != options.iterations { + return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.iterations, numReads) + } + + // Make sure we do this for at least a second. 
+ if time.Since(t) >= time.Second { + break + } + } + + return nil + }) +} + +func runReadsRandomNested(cmd *cobra.Command, db *bolt.DB, options *benchOptions, nestedKeys []nestedKey, results *benchResults) error { + return db.View(func(tx *bolt.Tx) error { + t := time.Now() + + for { + numReads := int64(0) + err := func() error { + defer func() { results.addCompletedOps(numReads) }() + + var top = tx.Bucket(benchBucketName) + for _, nestedKey := range nestedKeys { + if b := top.Bucket(nestedKey.bucket); b != nil { + v := b.Get(nestedKey.key) + numReads++ + if v == nil { + return ErrInvalidValue + } + } + } + + return nil + }() + + if err != nil { + return err + } + + if options.writeMode == "seq-nest" && numReads != options.iterations { + return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.iterations, numReads) + } + + // Make sure we do this for at least a second. + if time.Since(t) >= time.Second { + break + } + } + + return nil + }) +} + +func checkProgress(results *benchResults, finishChan chan interface{}, stderr io.Writer) { + ticker := time.Tick(time.Second) + lastCompleted, lastTime := int64(0), time.Now() + for { + select { + case <-finishChan: + return + case t := <-ticker: + completed, taken := results.getCompletedOps(), t.Sub(lastTime) + fmt.Fprintf(stderr, "Completed %d requests, %d/s \n", + completed, ((completed-lastCompleted)*int64(time.Second))/int64(taken), + ) + lastCompleted, lastTime = completed, t + } + } +} + +var cpuprofile, memprofile, blockprofile *os.File + +func startProfiling(cmd *cobra.Command, options *benchOptions) { + var err error + + // Start CPU profiling. 
+ if options.cpuProfile != "" { + cpuprofile, err = os.Create(options.cpuProfile) + if err != nil { + fmt.Fprintf(cmd.ErrOrStderr(), "bench: could not create cpu profile %q: %v\n", options.cpuProfile, err) + os.Exit(1) + } + err = pprof.StartCPUProfile(cpuprofile) + if err != nil { + fmt.Fprintf(cmd.ErrOrStderr(), "bench: could not start cpu profile %q: %v\n", options.cpuProfile, err) + os.Exit(1) + } + } + + // Start memory profiling. + if options.memProfile != "" { + memprofile, err = os.Create(options.memProfile) + if err != nil { + fmt.Fprintf(cmd.ErrOrStderr(), "bench: could not create memory profile %q: %v\n", options.memProfile, err) + os.Exit(1) + } + runtime.MemProfileRate = 4096 + } + + // Start fatal profiling. + if options.blockProfile != "" { + blockprofile, err = os.Create(options.blockProfile) + if err != nil { + fmt.Fprintf(cmd.ErrOrStderr(), "bench: could not create block profile %q: %v\n", options.blockProfile, err) + os.Exit(1) + } + runtime.SetBlockProfileRate(1) + } +} + +func stopProfiling(cmd *cobra.Command) { + if cpuprofile != nil { + pprof.StopCPUProfile() + cpuprofile.Close() + cpuprofile = nil + } + + if memprofile != nil { + err := pprof.Lookup("heap").WriteTo(memprofile, 0) + if err != nil { + fmt.Fprintf(cmd.ErrOrStderr(), "bench: could not write mem profile") + } + memprofile.Close() + memprofile = nil + } + + if blockprofile != nil { + err := pprof.Lookup("block").WriteTo(blockprofile, 0) + if err != nil { + fmt.Fprintf(cmd.ErrOrStderr(), "bench: could not write block profile") + } + blockprofile.Close() + blockprofile = nil + runtime.SetBlockProfileRate(0) + } +} + +// benchResults represents the performance results of the benchmark and is thread-safe. 
+type benchResults struct { + completedOps int64 + duration int64 +} + +func (r *benchResults) addCompletedOps(amount int64) { + atomic.AddInt64(&r.completedOps, amount) +} + +func (r *benchResults) getCompletedOps() int64 { + return atomic.LoadInt64(&r.completedOps) +} + +func (r *benchResults) setDuration(dur time.Duration) { + atomic.StoreInt64(&r.duration, int64(dur)) +} + +func (r *benchResults) getDuration() time.Duration { + return time.Duration(atomic.LoadInt64(&r.duration)) +} + +// opDuration returns the duration for a single read/write operation. +func (r *benchResults) opDuration() time.Duration { + if r.getCompletedOps() == 0 { + return 0 + } + return r.getDuration() / time.Duration(r.getCompletedOps()) +} + +// opsPerSecond returns average number of read/write operations that can be performed per second. +func (r *benchResults) opsPerSecond() int { + var op = r.opDuration() + if op == 0 { + return 0 + } + return int(time.Second) / int(op) +} + +func printGoBenchResult(w io.Writer, r benchResults, maxLen int, benchName string) { + gobenchResult := testing.BenchmarkResult{} + gobenchResult.T = r.getDuration() + gobenchResult.N = int(r.getCompletedOps()) + fmt.Fprintf(w, "%-*s\t%s\n", maxLen, benchName, gobenchResult.String()) +} diff --git a/cmd/bbolt/command_bench_test.go b/cmd/bbolt/command_bench_test.go new file mode 100644 index 000000000..dc023eeba --- /dev/null +++ b/cmd/bbolt/command_bench_test.go @@ -0,0 +1,76 @@ +package main + +import ( + "bytes" + "fmt" + "strings" + "sync" + "testing" + + "github.com/stretchr/testify/require" +) + +type safeWriter struct { + buf *bytes.Buffer + mu sync.Mutex +} + +func (w *safeWriter) Write(p []byte) (n int, err error) { + w.mu.Lock() + defer w.mu.Unlock() + return w.buf.Write(p) +} + +func (w *safeWriter) String() string { + w.mu.Lock() + defer w.mu.Unlock() + return w.buf.String() +} + +func newSafeWriter() *safeWriter { + return &safeWriter{buf: bytes.NewBufferString("")} +} + +// Ensure the "bench" 
command runs and exits without errors +func TestBenchCommand_Run(t *testing.T) { + tests := map[string]struct { + args []string + }{ + "no-args": {}, + "100k count": {[]string{"--count", "100000"}}, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + // Run the command. + rootCmd := NewRootCommand() + + outputWriter := newSafeWriter() + rootCmd.SetOut(outputWriter) + + errorWriter := newSafeWriter() + rootCmd.SetErr(errorWriter) + + args := append([]string{"bench"}, test.args...) + rootCmd.SetArgs(args) + + err := rootCmd.Execute() + require.NoError(t, err) + + outStr := outputWriter.String() + errStr := errorWriter.String() + + if !strings.Contains(errStr, "starting write benchmark.") || !strings.Contains(errStr, "starting read benchmark.") { + t.Fatal(fmt.Errorf("benchmark result does not contain read/write start output:\n%s", outStr)) + } + + if strings.Contains(errStr, "iter mismatch") { + t.Fatal(fmt.Errorf("found iter mismatch in stdout:\n%s", outStr)) + } + + if !strings.Contains(outStr, "# Write") || !strings.Contains(outStr, "# Read") { + t.Fatal(fmt.Errorf("benchmark result does not contain read/write output:\n%s", outStr)) + } + }) + } +} diff --git a/cmd/bbolt/command_root.go b/cmd/bbolt/command_root.go index 73090519f..ed6ca0bcb 100644 --- a/cmd/bbolt/command_root.go +++ b/cmd/bbolt/command_root.go @@ -30,6 +30,7 @@ func NewRootCommand() *cobra.Command { newDumpCommand(), newPageItemCommand(), newPageCommand(), + newBenchCommand(), ) return rootCmd diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index a25a70557..b7b4c6ffa 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -2,28 +2,19 @@ package main import ( "crypto/sha256" - "encoding/binary" "encoding/hex" "errors" "flag" "fmt" "io" - "math" - "math/rand" "os" - "runtime" - "runtime/pprof" "strconv" "strings" - "sync/atomic" - "testing" - "time" "unicode" "unicode/utf8" bolt "go.etcd.io/bbolt" berrors "go.etcd.io/bbolt/errors" - "go.etcd.io/bbolt/internal/common" ) 
var ( @@ -122,8 +113,6 @@ func (m *Main) Run(args ...string) error { case "help": fmt.Fprintln(m.Stderr, m.Usage()) return ErrUsage - case "bench": - return newBenchCommand(m).Run(args[1:]...) case "get": return newGetCommand(m).Run(args[1:]...) default: @@ -298,696 +287,6 @@ Additional options include: `, "\n") } -var benchBucketName = []byte("bench") - -// benchCommand represents the "bench" command execution. -type benchCommand struct { - baseCommand -} - -// newBenchCommand returns a BenchCommand using the -func newBenchCommand(m *Main) *benchCommand { - c := &benchCommand{} - c.baseCommand = m.baseCommand - return c -} - -// Run executes the "bench" command. -func (cmd *benchCommand) Run(args ...string) error { - // Parse CLI arguments. - options, err := cmd.ParseFlags(args) - if err != nil { - return err - } - - // Remove path if "-work" is not set. Otherwise keep path. - if options.Work { - fmt.Fprintf(cmd.Stderr, "work: %s\n", options.Path) - } else { - defer os.Remove(options.Path) - } - - // Create database. - dbOptions := *bolt.DefaultOptions - dbOptions.PageSize = options.PageSize - dbOptions.InitialMmapSize = options.InitialMmapSize - db, err := bolt.Open(options.Path, 0600, &dbOptions) - if err != nil { - return err - } - db.NoSync = options.NoSync - defer db.Close() - - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - // Write to the database. - var writeResults BenchResults - - fmt.Fprintf(cmd.Stderr, "starting write benchmark.\n") - keys, err := cmd.runWrites(db, options, &writeResults, r) - if err != nil { - return fmt.Errorf("write: %v", err) - } - - if keys != nil { - r.Shuffle(len(keys), func(i, j int) { - keys[i], keys[j] = keys[j], keys[i] - }) - } - - var readResults BenchResults - fmt.Fprintf(cmd.Stderr, "starting read benchmark.\n") - // Read from the database. - if err := cmd.runReads(db, options, &readResults, keys); err != nil { - return fmt.Errorf("bench: read: %s", err) - } - - // Print results. 
- if options.GoBenchOutput { - // below replicates the output of testing.B benchmarks, e.g. for external tooling - benchWriteName := "BenchmarkWrite" - benchReadName := "BenchmarkRead" - maxLen := max(len(benchReadName), len(benchWriteName)) - printGoBenchResult(cmd.Stdout, writeResults, maxLen, benchWriteName) - printGoBenchResult(cmd.Stdout, readResults, maxLen, benchReadName) - } else { - fmt.Fprintf(cmd.Stdout, "# Write\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", writeResults.CompletedOps(), writeResults.Duration(), writeResults.OpDuration(), writeResults.OpsPerSecond()) - fmt.Fprintf(cmd.Stdout, "# Read\t%v(ops)\t%v\t(%v/op)\t(%v op/sec)\n", readResults.CompletedOps(), readResults.Duration(), readResults.OpDuration(), readResults.OpsPerSecond()) - } - fmt.Fprintln(cmd.Stderr, "") - - return nil -} - -func printGoBenchResult(w io.Writer, r BenchResults, maxLen int, benchName string) { - gobenchResult := testing.BenchmarkResult{} - gobenchResult.T = r.Duration() - gobenchResult.N = int(r.CompletedOps()) - fmt.Fprintf(w, "%-*s\t%s\n", maxLen, benchName, gobenchResult.String()) -} - -// ParseFlags parses the command line flags. -func (cmd *benchCommand) ParseFlags(args []string) (*BenchOptions, error) { - var options BenchOptions - - // Parse flagset. 
- fs := flag.NewFlagSet("", flag.ContinueOnError) - fs.StringVar(&options.ProfileMode, "profile-mode", "rw", "") - fs.StringVar(&options.WriteMode, "write-mode", "seq", "") - fs.StringVar(&options.ReadMode, "read-mode", "seq", "") - fs.Int64Var(&options.Iterations, "count", 1000, "") - fs.Int64Var(&options.BatchSize, "batch-size", 0, "") - fs.IntVar(&options.KeySize, "key-size", 8, "") - fs.IntVar(&options.ValueSize, "value-size", 32, "") - fs.StringVar(&options.CPUProfile, "cpuprofile", "", "") - fs.StringVar(&options.MemProfile, "memprofile", "", "") - fs.StringVar(&options.BlockProfile, "blockprofile", "", "") - fs.Float64Var(&options.FillPercent, "fill-percent", bolt.DefaultFillPercent, "") - fs.BoolVar(&options.NoSync, "no-sync", false, "") - fs.BoolVar(&options.Work, "work", false, "") - fs.StringVar(&options.Path, "path", "", "") - fs.BoolVar(&options.GoBenchOutput, "gobench-output", false, "") - fs.IntVar(&options.PageSize, "page-size", common.DefaultPageSize, "Set page size in bytes.") - fs.IntVar(&options.InitialMmapSize, "initial-mmap-size", 0, "Set initial mmap size in bytes for database file.") - fs.SetOutput(cmd.Stderr) - if err := fs.Parse(args); err != nil { - return nil, err - } - - // Set batch size to iteration size if not set. - // Require that batch size can be evenly divided by the iteration count. - if options.BatchSize == 0 { - options.BatchSize = options.Iterations - } else if options.Iterations%options.BatchSize != 0 { - return nil, ErrNonDivisibleBatchSize - } - - // Generate temp path if one is not passed in. - if options.Path == "" { - f, err := os.CreateTemp("", "bolt-bench-") - if err != nil { - return nil, fmt.Errorf("temp file: %s", err) - } - f.Close() - os.Remove(f.Name()) - options.Path = f.Name() - } - - return &options, nil -} - -// Writes to the database. -func (cmd *benchCommand) runWrites(db *bolt.DB, options *BenchOptions, results *BenchResults, r *rand.Rand) ([]nestedKey, error) { - // Start profiling for writes. 
- if options.ProfileMode == "rw" || options.ProfileMode == "w" { - cmd.startProfiling(options) - } - - finishChan := make(chan interface{}) - go checkProgress(results, finishChan, cmd.Stderr) - defer close(finishChan) - - t := time.Now() - - var keys []nestedKey - var err error - switch options.WriteMode { - case "seq": - keys, err = cmd.runWritesSequential(db, options, results) - case "rnd": - keys, err = cmd.runWritesRandom(db, options, results, r) - case "seq-nest": - keys, err = cmd.runWritesSequentialNested(db, options, results) - case "rnd-nest": - keys, err = cmd.runWritesRandomNested(db, options, results, r) - case "seq-del": - options.DeleteFraction = 0.1 - keys, err = cmd.runWritesSequentialAndDelete(db, options, results) - default: - return nil, fmt.Errorf("invalid write mode: %s", options.WriteMode) - } - - // Save time to write. - results.SetDuration(time.Since(t)) - - // Stop profiling for writes only. - if options.ProfileMode == "w" { - cmd.stopProfiling() - } - - return keys, err -} - -func (cmd *benchCommand) runWritesSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) ([]nestedKey, error) { - var i = uint32(0) - return cmd.runWritesWithSource(db, options, results, func() uint32 { i++; return i }) -} - -func (cmd *benchCommand) runWritesSequentialAndDelete(db *bolt.DB, options *BenchOptions, results *BenchResults) ([]nestedKey, error) { - var i = uint32(0) - return cmd.runWritesDeletesWithSource(db, options, results, func() uint32 { i++; return i }) -} - -func (cmd *benchCommand) runWritesRandom(db *bolt.DB, options *BenchOptions, results *BenchResults, r *rand.Rand) ([]nestedKey, error) { - return cmd.runWritesWithSource(db, options, results, func() uint32 { return r.Uint32() }) -} - -func (cmd *benchCommand) runWritesSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) ([]nestedKey, error) { - var i = uint32(0) - return cmd.runWritesNestedWithSource(db, options, results, func() uint32 { i++; return i }) -} 
- -func (cmd *benchCommand) runWritesRandomNested(db *bolt.DB, options *BenchOptions, results *BenchResults, r *rand.Rand) ([]nestedKey, error) { - return cmd.runWritesNestedWithSource(db, options, results, func() uint32 { return r.Uint32() }) -} - -func (cmd *benchCommand) runWritesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) ([]nestedKey, error) { - var keys []nestedKey - if options.ReadMode == "rnd" { - keys = make([]nestedKey, 0, options.Iterations) - } - - for i := int64(0); i < options.Iterations; i += options.BatchSize { - if err := db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists(benchBucketName) - b.FillPercent = options.FillPercent - - fmt.Fprintf(cmd.Stderr, "Starting write iteration %d\n", i) - for j := int64(0); j < options.BatchSize; j++ { - key := make([]byte, options.KeySize) - value := make([]byte, options.ValueSize) - - // Write key as uint32. - binary.BigEndian.PutUint32(key, keySource()) - - // Insert key/value. 
- if err := b.Put(key, value); err != nil { - return err - } - if keys != nil { - keys = append(keys, nestedKey{nil, key}) - } - results.AddCompletedOps(1) - } - fmt.Fprintf(cmd.Stderr, "Finished write iteration %d\n", i) - - return nil - }); err != nil { - return nil, err - } - } - return keys, nil -} - -func (cmd *benchCommand) runWritesDeletesWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) ([]nestedKey, error) { - var keys []nestedKey - deleteSize := int64(math.Ceil(float64(options.BatchSize) * options.DeleteFraction)) - var InsertedKeys [][]byte - - for i := int64(0); i < options.Iterations; i += options.BatchSize { - if err := db.Update(func(tx *bolt.Tx) error { - b, _ := tx.CreateBucketIfNotExists(benchBucketName) - b.FillPercent = options.FillPercent - - fmt.Fprintf(cmd.Stderr, "Starting delete iteration %d, deleteSize: %d\n", i, deleteSize) - for i := int64(0); i < deleteSize && i < int64(len(InsertedKeys)); i++ { - if err := b.Delete(InsertedKeys[i]); err != nil { - return err - } - } - InsertedKeys = InsertedKeys[:0] - fmt.Fprintf(cmd.Stderr, "Finished delete iteration %d\n", i) - - fmt.Fprintf(cmd.Stderr, "Starting write iteration %d\n", i) - for j := int64(0); j < options.BatchSize; j++ { - - key := make([]byte, options.KeySize) - value := make([]byte, options.ValueSize) - - // Write key as uint32. - binary.BigEndian.PutUint32(key, keySource()) - InsertedKeys = append(InsertedKeys, key) - - // Insert key/value. 
- if err := b.Put(key, value); err != nil { - return err - } - if keys != nil { - keys = append(keys, nestedKey{nil, key}) - } - results.AddCompletedOps(1) - } - fmt.Fprintf(cmd.Stderr, "Finished write iteration %d\n", i) - return nil - }); err != nil { - return nil, err - } - } - return keys, nil -} - -func (cmd *benchCommand) runWritesNestedWithSource(db *bolt.DB, options *BenchOptions, results *BenchResults, keySource func() uint32) ([]nestedKey, error) { - var keys []nestedKey - if options.ReadMode == "rnd" { - keys = make([]nestedKey, 0, options.Iterations) - } - - for i := int64(0); i < options.Iterations; i += options.BatchSize { - if err := db.Update(func(tx *bolt.Tx) error { - top, err := tx.CreateBucketIfNotExists(benchBucketName) - if err != nil { - return err - } - top.FillPercent = options.FillPercent - - // Create bucket key. - name := make([]byte, options.KeySize) - binary.BigEndian.PutUint32(name, keySource()) - - // Create bucket. - b, err := top.CreateBucketIfNotExists(name) - if err != nil { - return err - } - b.FillPercent = options.FillPercent - - fmt.Fprintf(cmd.Stderr, "Starting write iteration %d\n", i) - for j := int64(0); j < options.BatchSize; j++ { - var key = make([]byte, options.KeySize) - var value = make([]byte, options.ValueSize) - - // Generate key as uint32. - binary.BigEndian.PutUint32(key, keySource()) - - // Insert value into subbucket. - if err := b.Put(key, value); err != nil { - return err - } - if keys != nil { - keys = append(keys, nestedKey{name, key}) - } - results.AddCompletedOps(1) - } - fmt.Fprintf(cmd.Stderr, "Finished write iteration %d\n", i) - - return nil - }); err != nil { - return nil, err - } - } - return keys, nil -} - -// Reads from the database. -func (cmd *benchCommand) runReads(db *bolt.DB, options *BenchOptions, results *BenchResults, keys []nestedKey) error { - // Start profiling for reads. 
- if options.ProfileMode == "r" { - cmd.startProfiling(options) - } - - finishChan := make(chan interface{}) - go checkProgress(results, finishChan, cmd.Stderr) - defer close(finishChan) - - t := time.Now() - - var err error - switch options.ReadMode { - case "seq": - switch options.WriteMode { - case "seq-nest", "rnd-nest": - err = cmd.runReadsSequentialNested(db, options, results) - default: - err = cmd.runReadsSequential(db, options, results) - } - case "rnd": - switch options.WriteMode { - case "seq-nest", "rnd-nest": - err = cmd.runReadsRandomNested(db, options, keys, results) - default: - err = cmd.runReadsRandom(db, options, keys, results) - } - default: - return fmt.Errorf("invalid read mode: %s", options.ReadMode) - } - - // Save read time. - results.SetDuration(time.Since(t)) - - // Stop profiling for reads. - if options.ProfileMode == "rw" || options.ProfileMode == "r" { - cmd.stopProfiling() - } - - return err -} - -type nestedKey struct{ bucket, key []byte } - -func (cmd *benchCommand) runReadsSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - return db.View(func(tx *bolt.Tx) error { - t := time.Now() - - for { - numReads := int64(0) - err := func() error { - defer func() { results.AddCompletedOps(numReads) }() - - c := tx.Bucket(benchBucketName).Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - numReads++ - if v == nil { - return ErrInvalidValue - } - } - - return nil - }() - - if err != nil { - return err - } - - if options.WriteMode == "seq" && numReads != options.Iterations { - return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, numReads) - } - - // Make sure we do this for at least a second. 
- if time.Since(t) >= time.Second { - break - } - } - - return nil - }) -} - -func (cmd *benchCommand) runReadsRandom(db *bolt.DB, options *BenchOptions, keys []nestedKey, results *BenchResults) error { - return db.View(func(tx *bolt.Tx) error { - t := time.Now() - - for { - numReads := int64(0) - err := func() error { - defer func() { results.AddCompletedOps(numReads) }() - - b := tx.Bucket(benchBucketName) - for _, key := range keys { - v := b.Get(key.key) - numReads++ - if v == nil { - return ErrInvalidValue - } - } - - return nil - }() - - if err != nil { - return err - } - - if options.WriteMode == "seq" && numReads != options.Iterations { - return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, numReads) - } - - // Make sure we do this for at least a second. - if time.Since(t) >= time.Second { - break - } - } - - return nil - }) -} - -func (cmd *benchCommand) runReadsSequentialNested(db *bolt.DB, options *BenchOptions, results *BenchResults) error { - return db.View(func(tx *bolt.Tx) error { - t := time.Now() - - for { - numReads := int64(0) - var top = tx.Bucket(benchBucketName) - if err := top.ForEach(func(name, _ []byte) error { - defer func() { results.AddCompletedOps(numReads) }() - if b := top.Bucket(name); b != nil { - c := b.Cursor() - for k, v := c.First(); k != nil; k, v = c.Next() { - numReads++ - if v == nil { - return ErrInvalidValue - } - } - } - return nil - }); err != nil { - return err - } - - if options.WriteMode == "seq-nest" && numReads != options.Iterations { - return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, numReads) - } - - // Make sure we do this for at least a second. 
- if time.Since(t) >= time.Second { - break - } - } - - return nil - }) -} - -func (cmd *benchCommand) runReadsRandomNested(db *bolt.DB, options *BenchOptions, nestedKeys []nestedKey, results *BenchResults) error { - return db.View(func(tx *bolt.Tx) error { - t := time.Now() - - for { - numReads := int64(0) - err := func() error { - defer func() { results.AddCompletedOps(numReads) }() - - var top = tx.Bucket(benchBucketName) - for _, nestedKey := range nestedKeys { - if b := top.Bucket(nestedKey.bucket); b != nil { - v := b.Get(nestedKey.key) - numReads++ - if v == nil { - return ErrInvalidValue - } - } - } - - return nil - }() - - if err != nil { - return err - } - - if options.WriteMode == "seq-nest" && numReads != options.Iterations { - return fmt.Errorf("read seq-nest: iter mismatch: expected %d, got %d", options.Iterations, numReads) - } - - // Make sure we do this for at least a second. - if time.Since(t) >= time.Second { - break - } - } - - return nil - }) -} - -func checkProgress(results *BenchResults, finishChan chan interface{}, stderr io.Writer) { - ticker := time.Tick(time.Second) - lastCompleted, lastTime := int64(0), time.Now() - for { - select { - case <-finishChan: - return - case t := <-ticker: - completed, taken := results.CompletedOps(), t.Sub(lastTime) - fmt.Fprintf(stderr, "Completed %d requests, %d/s \n", - completed, ((completed-lastCompleted)*int64(time.Second))/int64(taken), - ) - lastCompleted, lastTime = completed, t - } - } -} - -// File handlers for the various profiles. -var cpuprofile, memprofile, blockprofile *os.File - -// Starts all profiles set on the options. -func (cmd *benchCommand) startProfiling(options *BenchOptions) { - var err error - - // Start CPU profiling. 
- if options.CPUProfile != "" { - cpuprofile, err = os.Create(options.CPUProfile) - if err != nil { - fmt.Fprintf(cmd.Stderr, "bench: could not create cpu profile %q: %v\n", options.CPUProfile, err) - os.Exit(1) - } - err = pprof.StartCPUProfile(cpuprofile) - if err != nil { - fmt.Fprintf(cmd.Stderr, "bench: could not start cpu profile %q: %v\n", options.CPUProfile, err) - os.Exit(1) - } - } - - // Start memory profiling. - if options.MemProfile != "" { - memprofile, err = os.Create(options.MemProfile) - if err != nil { - fmt.Fprintf(cmd.Stderr, "bench: could not create memory profile %q: %v\n", options.MemProfile, err) - os.Exit(1) - } - runtime.MemProfileRate = 4096 - } - - // Start fatal profiling. - if options.BlockProfile != "" { - blockprofile, err = os.Create(options.BlockProfile) - if err != nil { - fmt.Fprintf(cmd.Stderr, "bench: could not create block profile %q: %v\n", options.BlockProfile, err) - os.Exit(1) - } - runtime.SetBlockProfileRate(1) - } -} - -// Stops all profiles. -func (cmd *benchCommand) stopProfiling() { - if cpuprofile != nil { - pprof.StopCPUProfile() - cpuprofile.Close() - cpuprofile = nil - } - - if memprofile != nil { - err := pprof.Lookup("heap").WriteTo(memprofile, 0) - if err != nil { - fmt.Fprintf(cmd.Stderr, "bench: could not write mem profile") - } - memprofile.Close() - memprofile = nil - } - - if blockprofile != nil { - err := pprof.Lookup("block").WriteTo(blockprofile, 0) - if err != nil { - fmt.Fprintf(cmd.Stderr, "bench: could not write block profile") - } - blockprofile.Close() - blockprofile = nil - runtime.SetBlockProfileRate(0) - } -} - -// BenchOptions represents the set of options that can be passed to "bolt bench". 
-type BenchOptions struct { - ProfileMode string - WriteMode string - ReadMode string - Iterations int64 - BatchSize int64 - KeySize int - ValueSize int - CPUProfile string - MemProfile string - BlockProfile string - StatsInterval time.Duration - FillPercent float64 - NoSync bool - Work bool - Path string - GoBenchOutput bool - PageSize int - InitialMmapSize int - DeleteFraction float64 // Fraction of keys of last tx to delete during writes. works only with "seq-del" write mode. -} - -// BenchResults represents the performance results of the benchmark and is thread-safe. -type BenchResults struct { - completedOps int64 - duration int64 -} - -func (r *BenchResults) AddCompletedOps(amount int64) { - atomic.AddInt64(&r.completedOps, amount) -} - -func (r *BenchResults) CompletedOps() int64 { - return atomic.LoadInt64(&r.completedOps) -} - -func (r *BenchResults) SetDuration(dur time.Duration) { - atomic.StoreInt64(&r.duration, int64(dur)) -} - -func (r *BenchResults) Duration() time.Duration { - return time.Duration(atomic.LoadInt64(&r.duration)) -} - -// Returns the duration for a single read/write operation. -func (r *BenchResults) OpDuration() time.Duration { - if r.CompletedOps() == 0 { - return 0 - } - return r.Duration() / time.Duration(r.CompletedOps()) -} - -// Returns average number of read/write operations that can be performed per second. 
-func (r *BenchResults) OpsPerSecond() int { - var op = r.OpDuration() - if op == 0 { - return 0 - } - return int(time.Second) / int(op) -} - type PageError struct { ID int Err error diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index 657952197..ea869e2d6 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -84,41 +84,6 @@ func TestGetCommand_Run(t *testing.T) { } } -// Ensure the "bench" command runs and exits without errors -func TestBenchCommand_Run(t *testing.T) { - tests := map[string]struct { - args []string - }{ - "no-args": {}, - "100k count": {[]string{"-count", "100000"}}, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - // Run the command. - m := NewMain() - args := append([]string{"bench"}, test.args...) - if err := m.Run(args...); err != nil { - t.Fatal(err) - } - - stderr := m.Stderr.String() - stdout := m.Stdout.String() - if !strings.Contains(stderr, "starting write benchmark.") || !strings.Contains(stderr, "starting read benchmark.") { - t.Fatal(fmt.Errorf("benchmark result does not contain read/write start output:\n%s", stderr)) - } - - if strings.Contains(stderr, "iter mismatch") { - t.Fatal(fmt.Errorf("found iter mismatch in stdout:\n%s", stderr)) - } - - if !strings.Contains(stdout, "# Write") || !strings.Contains(stdout, "# Read") { - t.Fatal(fmt.Errorf("benchmark result does not contain read/write output:\n%s", stdout)) - } - }) - } -} - type ConcurrentBuffer struct { m sync.Mutex buf bytes.Buffer diff --git a/scripts/compare_benchmarks.sh b/scripts/compare_benchmarks.sh index 2b7766940..af397cb44 100755 --- a/scripts/compare_benchmarks.sh +++ b/scripts/compare_benchmarks.sh @@ -19,7 +19,7 @@ RESULT_TO_COMPARE="$(mktemp)-${BASE_TO_COMPARE}" BENCH_COUNT=${BENCH_COUNT:-10} BENCHSTAT_CONFIDENCE_LEVEL=${BENCHSTAT_CONFIDENCE_LEVEL:-0.9} BENCHSTAT_FORMAT=${BENCHSTAT_FORMAT:-"text"} -BENCH_PARAMETERS=${BENCH_PARAMETERS:-"-count 2000000 -batch-size 10000"} 
+BENCH_PARAMETERS=${BENCH_PARAMETERS:-"--count 2000000 --batch-size 10000"} if [[ "${BENCHSTAT_FORMAT}" == "csv" ]] && [[ -z "${BENCHSTAT_OUTPUT_FILE}" ]]; then echo "BENCHSTAT_FORMAT is set to csv, but BENCHSTAT_OUTPUT_FILE is not set." @@ -32,9 +32,9 @@ function bench() { make build for _ in $(seq "$BENCH_COUNT"); do - echo ./bin/bbolt bench -gobench-output -profile-mode n ${BENCH_PARAMETERS} + echo ./bin/bbolt bench --gobench-output --profile-mode n ${BENCH_PARAMETERS} # shellcheck disable=SC2086 - ./bin/bbolt bench -gobench-output -profile-mode n ${BENCH_PARAMETERS} >> "${output_file}" + ./bin/bbolt bench --gobench-output --profile-mode n ${BENCH_PARAMETERS} >> "${output_file}" done } diff --git a/tests/robustness/powerfailure_test.go b/tests/robustness/powerfailure_test.go index 54c611cbf..22da17059 100644 --- a/tests/robustness/powerfailure_test.go +++ b/tests/robustness/powerfailure_test.go @@ -146,11 +146,11 @@ func doPowerFailure(t *testing.T, du time.Duration, fsType dmflakey.FSType, mkfs dbPath := filepath.Join(root, "boltdb") args := []string{"bbolt", "bench", - "-work", // keep the database - "-path", dbPath, - "-count=1000000000", - "-batch-size=5", // separate total count into multiple truncation - "-value-size=512", + "--work", // keep the database + "--path", dbPath, + "--count=1000000000", + "--batch-size=5", // separate total count into multiple truncation + "--value-size=512", } logPath := filepath.Join(t.TempDir(), fmt.Sprintf("%s.log", t.Name())) From 0d01846493f807832b1dcd6a76bf7a396897ffa1 Mon Sep 17 00:00:00 2001 From: Lavish Pal Date: Fri, 1 Aug 2025 23:33:05 +0530 Subject: [PATCH 405/439] chore(cmd): use existing ErrKeyRequired from bbolt/errors Signed-off-by: Lavish Pal --- cmd/bbolt/command_get.go | 82 ++++++++++++++++++++++++++++++ cmd/bbolt/command_get_test.go | 86 ++++++++++++++++++++++++++++++++ cmd/bbolt/command_root.go | 1 + cmd/bbolt/main.go | 93 ----------------------------------- cmd/bbolt/main_test.go | 87 
-------------------------------- 5 files changed, 169 insertions(+), 180 deletions(-) create mode 100644 cmd/bbolt/command_get.go create mode 100644 cmd/bbolt/command_get_test.go diff --git a/cmd/bbolt/command_get.go b/cmd/bbolt/command_get.go new file mode 100644 index 000000000..fa58de4a3 --- /dev/null +++ b/cmd/bbolt/command_get.go @@ -0,0 +1,82 @@ +package main + +import ( + "fmt" + + "github.com/spf13/cobra" + + bolt "go.etcd.io/bbolt" + "go.etcd.io/bbolt/errors" +) + +type getOptions struct { + parseFormat string + format string +} + +func newGetCommand() *cobra.Command { + var opts getOptions + + cmd := &cobra.Command{ + Use: "get PATH [BUCKET..] KEY", + Short: "get the value of a key from a (sub)bucket in a bbolt database", + Args: cobra.MinimumNArgs(3), + RunE: func(cmd *cobra.Command, args []string) error { + path := args[0] + if path == "" { + return ErrPathRequired + } + buckets := args[1 : len(args)-1] + keyStr := args[len(args)-1] + + // validate input parameters + if len(buckets) == 0 { + return fmt.Errorf("bucket is required: %w", ErrBucketRequired) + } + + key, err := parseBytes(keyStr, opts.parseFormat) + if err != nil { + return err + } + + if len(key) == 0 { + return fmt.Errorf("key is required: %w", errors.ErrKeyRequired) + } + + return getFunc(cmd, path, buckets, key, opts) + }, + } + + cmd.Flags().StringVar(&opts.parseFormat, "parse-format", "ascii-encoded", "Input format one of: ascii-encoded|hex") + cmd.Flags().StringVar(&opts.format, "format", "auto", "Output format one of: "+FORMAT_MODES+" (default: auto)") + + return cmd +} + +// getFunc opens the BoltDB and retrieves the key value from the bucket path. 
+func getFunc(cmd *cobra.Command, path string, buckets []string, key []byte, opts getOptions) error { + // check if the source DB path is valid + if _, err := checkSourceDBPath(path); err != nil { + return err + } + + // open the database + db, err := bolt.Open(path, 0600, &bolt.Options{ReadOnly: true}) + if err != nil { + return err + } + defer db.Close() + + // access the database and get the value + return db.View(func(tx *bolt.Tx) error { + lastBucket, err := findLastBucket(tx, buckets) + if err != nil { + return err + } + val := lastBucket.Get(key) + if val == nil { + return fmt.Errorf("Error %w for key: %q hex: \"%x\"", ErrKeyNotFound, key, string(key)) + } + return writelnBytes(cmd.OutOrStdout(), val, opts.format) + }) +} diff --git a/cmd/bbolt/command_get_test.go b/cmd/bbolt/command_get_test.go new file mode 100644 index 000000000..fc6d268f6 --- /dev/null +++ b/cmd/bbolt/command_get_test.go @@ -0,0 +1,86 @@ +package main_test + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + "testing" + + "github.com/stretchr/testify/require" + + bolt "go.etcd.io/bbolt" + main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/internal/btesting" +) + +func TestGetCommand_Run(t *testing.T) { + testCases := []struct { + name string + printable bool + testBucket string + testKey string + expectedValue string + }{ + { + name: "printable data", + printable: true, + testBucket: "foo", + testKey: "foo-1", + expectedValue: "value-foo-1\n", + }, + { + name: "non printable data", + printable: false, + testBucket: "bar", + testKey: "100001", + expectedValue: hex.EncodeToString(convertInt64IntoBytes(100001)) + "\n", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + t.Logf("Creating test database for subtest '%s'", tc.name) + db := btesting.MustCreateDB(t) + + t.Log("Inserting test data") + err := db.Update(func(tx *bolt.Tx) error { + b, err := tx.CreateBucketIfNotExists([]byte(tc.testBucket)) + if err != nil { + return fmt.Errorf("create 
bucket %q: %w", tc.testBucket, err) + } + + if tc.printable { + return b.Put([]byte(tc.testKey), []byte("value-"+tc.testKey)) + } + + return b.Put([]byte(tc.testKey), convertInt64IntoBytes(100001)) + }) + require.NoError(t, err) + db.Close() + defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) + + t.Log("Running get command") + rootCmd := main.NewRootCommand() + outputBuf := bytes.NewBufferString("") + rootCmd.SetOut(outputBuf) + rootCmd.SetArgs([]string{"get", db.Path(), tc.testBucket, tc.testKey}) + err = rootCmd.Execute() + require.NoError(t, err) + + t.Log("Checking output") + output, err := io.ReadAll(outputBuf) + require.NoError(t, err) + require.Equalf(t, tc.expectedValue, string(output), "unexpected stdout:\n\n%s", string(output)) + }) + } +} + +func TestGetCommand_NoArgs(t *testing.T) { + expErr := errors.New("requires at least 3 arg(s), only received 0") + rootCmd := main.NewRootCommand() + rootCmd.SetArgs([]string{"get"}) + err := rootCmd.Execute() + require.ErrorContains(t, err, expErr.Error()) +} diff --git a/cmd/bbolt/command_root.go b/cmd/bbolt/command_root.go index ed6ca0bcb..23b1313dd 100644 --- a/cmd/bbolt/command_root.go +++ b/cmd/bbolt/command_root.go @@ -31,6 +31,7 @@ func NewRootCommand() *cobra.Command { newPageItemCommand(), newPageCommand(), newBenchCommand(), + newGetCommand(), ) return rootCmd diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index b7b4c6ffa..438a3e06f 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -4,7 +4,6 @@ import ( "crypto/sha256" "encoding/hex" "errors" - "flag" "fmt" "io" "os" @@ -113,8 +112,6 @@ func (m *Main) Run(args ...string) error { case "help": fmt.Fprintln(m.Stderr, m.Usage()) return ErrUsage - case "get": - return newGetCommand(m).Run(args[1:]...) default: return ErrUnknownCommand } @@ -197,96 +194,6 @@ func writelnBytes(w io.Writer, b []byte, format string) error { return err } -// getCommand represents the "get" command execution. 
-type getCommand struct { - baseCommand -} - -// newGetCommand returns a getCommand. -func newGetCommand(m *Main) *getCommand { - c := &getCommand{} - c.baseCommand = m.baseCommand - return c -} - -// Run executes the command. -func (cmd *getCommand) Run(args ...string) error { - // Parse flags. - fs := flag.NewFlagSet("", flag.ContinueOnError) - var parseFormat string - var format string - fs.StringVar(&parseFormat, "parse-format", "ascii-encoded", "Input format. One of: ascii-encoded|hex (default: ascii-encoded)") - fs.StringVar(&format, "format", "auto", "Output format. One of: "+FORMAT_MODES+" (default: auto)") - help := fs.Bool("h", false, "") - if err := fs.Parse(args); err != nil { - return err - } else if *help { - fmt.Fprintln(cmd.Stderr, cmd.Usage()) - return ErrUsage - } - - // Require database path, bucket and key. - relevantArgs := fs.Args() - if len(relevantArgs) < 3 { - return ErrNotEnoughArgs - } - path, buckets := relevantArgs[0], relevantArgs[1:len(relevantArgs)-1] - key, err := parseBytes(relevantArgs[len(relevantArgs)-1], parseFormat) - if err != nil { - return err - } - if path == "" { - return ErrPathRequired - } else if _, err := os.Stat(path); os.IsNotExist(err) { - return ErrFileNotFound - } else if len(buckets) == 0 { - return ErrBucketRequired - } else if len(key) == 0 { - return berrors.ErrKeyRequired - } - - // Open database. - db, err := bolt.Open(path, 0600, &bolt.Options{ReadOnly: true}) - if err != nil { - return err - } - defer db.Close() - - // Print value. - return db.View(func(tx *bolt.Tx) error { - // Find bucket. - lastBucket, err := findLastBucket(tx, buckets) - if err != nil { - return err - } - - // Find value for given key. 
- val := lastBucket.Get(key) - if val == nil { - return fmt.Errorf("Error %w for key: %q hex: \"%x\"", ErrKeyNotFound, key, string(key)) - } - - // TODO: In this particular case, it would be better to not terminate with '\n' - return writelnBytes(cmd.Stdout, val, format) - }) -} - -// Usage returns the help message. -func (cmd *getCommand) Usage() string { - return strings.TrimLeft(` -usage: bolt get PATH [BUCKET..] KEY - -Print the value of the given key in the given (sub)bucket. - -Additional options include: - - --format - Output format. One of: `+FORMAT_MODES+` (default=auto) - --parse-format - Input format (of key). One of: ascii-encoded|hex (default=ascii-encoded)" -`, "\n") -} - type PageError struct { ID int Err error diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index ea869e2d6..ce0da648f 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -13,77 +13,12 @@ import ( "sync" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" main "go.etcd.io/bbolt/cmd/bbolt" - "go.etcd.io/bbolt/internal/btesting" ) -// Ensure the "get" command can print the value of a key in a bucket. 
-func TestGetCommand_Run(t *testing.T) { - testCases := []struct { - name string - printable bool - testBucket string - testKey string - expectedValue string - }{ - { - name: "printable data", - printable: true, - testBucket: "foo", - testKey: "foo-1", - expectedValue: "val-foo-1\n", - }, - { - name: "non printable data", - printable: false, - testBucket: "bar", - testKey: "100001", - expectedValue: hex.EncodeToString(convertInt64IntoBytes(100001)) + "\n", - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - db := btesting.MustCreateDB(t) - - if err := db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte(tc.testBucket)) - if err != nil { - return err - } - if tc.printable { - val := fmt.Sprintf("val-%s", tc.testKey) - if err := b.Put([]byte(tc.testKey), []byte(val)); err != nil { - return err - } - } else { - if err := b.Put([]byte(tc.testKey), convertInt64IntoBytes(100001)); err != nil { - return err - } - } - return nil - }); err != nil { - t.Fatal(err) - } - db.Close() - - defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) - - // Run the command. 
- m := NewMain() - if err := m.Run("get", db.Path(), tc.testBucket, tc.testKey); err != nil { - t.Fatal(err) - } - actual := m.Stdout.String() - assert.Equal(t, tc.expectedValue, actual) - }) - } -} - type ConcurrentBuffer struct { m sync.Mutex buf bytes.Buffer @@ -127,28 +62,6 @@ func NewMain() *Main { return m } -func TestCommands_Run_NoArgs(t *testing.T) { - testCases := []struct { - name string - cmd string - expErr error - }{ - { - name: "get", - cmd: "get", - expErr: main.ErrNotEnoughArgs, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - m := NewMain() - err := m.Run(tc.cmd) - require.ErrorIs(t, err, main.ErrNotEnoughArgs) - }) - } -} - func fillBucket(b *bolt.Bucket, prefix []byte) error { n := 10 + rand.Intn(50) for i := 0; i < n; i++ { From b3ac8c516c287b16d4e3e88e969b6049481f623d Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sat, 9 Aug 2025 18:59:09 +0100 Subject: [PATCH 406/439] Fix test case TestDB_HugeValue for 32 bit systems Signed-off-by: Benjamin Wang --- db_test.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/db_test.go b/db_test.go index c6d604488..cb46e2d8b 100644 --- a/db_test.go +++ b/db_test.go @@ -24,6 +24,7 @@ import ( bolt "go.etcd.io/bbolt" berrors "go.etcd.io/bbolt/errors" "go.etcd.io/bbolt/internal/btesting" + "go.etcd.io/bbolt/internal/common" ) // pageSize is the size of one page in the data file. @@ -1555,7 +1556,14 @@ func TestDB_HugeValue(t *testing.T) { require.NoError(t, db.Close()) }() - data := make([]byte, 0xFFFFFFF+1) + maxSize := 0xFFFFFFF + 1 + // On 32 bit systems, the MaxAllocSize is 0xFFFFFFF (268435455, + // roughly 256MB), and the test will fail for sure, so we reduce + // the maxSize by half in such case. 
+ if maxSize > common.MaxAllocSize { + maxSize = maxSize / 2 + } + data := make([]byte, maxSize) _ = db.Update(func(tx *bolt.Tx) error { b, err := tx.CreateBucketIfNotExists([]byte("data")) From 5eb60d86664eb0b72b9849c5dbd22258114ca03b Mon Sep 17 00:00:00 2001 From: hwdef Date: Sun, 10 Aug 2025 20:43:15 +0800 Subject: [PATCH 407/439] Bump Go to 1.24.6 Signed-off-by: hwdef --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index 6521720b4..7a429d68a 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.24.5 +1.24.6 diff --git a/go.mod b/go.mod index 32d358ccd..903d6f384 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.24 -toolchain go1.24.5 +toolchain go1.24.6 require ( github.com/spf13/cobra v1.9.1 From 29dc07e63a517ac4243fc5c1e6ce5dfbb41db1a3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Aug 2025 20:45:46 +0000 Subject: [PATCH 408/439] build(deps): Bump golang.org/x/sys from 0.34.0 to 0.35.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.34.0 to 0.35.0. - [Commits](https://github.com/golang/sys/compare/v0.34.0...v0.35.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-version: 0.35.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 903d6f384..e63ca3d75 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.10.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.16.0 - golang.org/x/sys v0.34.0 + golang.org/x/sys v0.35.0 ) require ( diff --git a/go.sum b/go.sum index 65a3766af..20245f34b 100644 --- a/go.sum +++ b/go.sum @@ -17,8 +17,8 @@ go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= -golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From fa71487ff46fd557ddfe56036d43293a258c6f6b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Aug 2025 21:58:23 +0000 Subject: [PATCH 409/439] build(deps): Bump actions/checkout from 4.2.2 to 5.0.0 Bumps [actions/checkout](https://github.com/actions/checkout) from 4.2.2 to 5.0.0. 
- [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/11bd71901bbe5b1630ceea73d27597364c9af683...08c6903cd8c0fde910a37f88322edcfb5dd907a8) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: 5.0.0 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/benchmark-template.yaml | 2 +- .github/workflows/cross-arch-template.yaml | 2 +- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/robustness_template.yaml | 2 +- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_amd64.yaml | 2 +- .github/workflows/tests_arm64.yaml | 2 +- .github/workflows/tests_windows.yml | 4 ++-- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/benchmark-template.yaml b/.github/workflows/benchmark-template.yaml index 057286be1..0eb9aaaa0 100644 --- a/.github/workflows/benchmark-template.yaml +++ b/.github/workflows/benchmark-template.yaml @@ -21,7 +21,7 @@ jobs: benchmark: runs-on: ${{ fromJson(inputs.runs-on) }} steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 with: fetch-depth: 0 - id: goversion diff --git a/.github/workflows/cross-arch-template.yaml b/.github/workflows/cross-arch-template.yaml index a464e4d56..83229c072 100644 --- a/.github/workflows/cross-arch-template.yaml +++ b/.github/workflows/cross-arch-template.yaml @@ -18,7 +18,7 @@ jobs: arch: ${{ fromJSON(inputs.archs) }} runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: 
actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index 17546a8e7..21934adba 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -9,7 +9,7 @@ jobs: os: [ubuntu-latest] runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml index f742476ae..6e5a1e0a1 100644 --- a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -23,7 +23,7 @@ jobs: timeout-minutes: 210 runs-on: ${{ fromJson(inputs.runs-on) }} steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index c39e701ea..c2d38c6f9 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -21,7 +21,7 @@ jobs: target: ${{ fromJSON(inputs.targets) }} runs-on: ${{ inputs.runs-on }} steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml 
index ae80142d5..76b627daa 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -17,7 +17,7 @@ jobs: - test-linux-amd64-race runs-on: ubuntu-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 diff --git a/.github/workflows/tests_arm64.yaml b/.github/workflows/tests_arm64.yaml index 195786817..e10841924 100644 --- a/.github/workflows/tests_arm64.yaml +++ b/.github/workflows/tests_arm64.yaml @@ -19,7 +19,7 @@ jobs: - test-linux-arm64-race runs-on: ubuntu-24.04-arm steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 3236b6b50..cb0b50384 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -19,7 +19,7 @@ jobs: # - windows-amd64-unit-test-4-cpu-race runs-on: windows-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 @@ -48,7 +48,7 @@ jobs: needs: ["test-windows"] runs-on: windows-latest steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> 
"$GITHUB_OUTPUT" - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 From 9f864c870e52eeffb96047dfa16d814361d4e3d8 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Sun, 17 Aug 2025 16:58:39 +0100 Subject: [PATCH 410/439] Update (*Tx)WriteTo to reuse the already opened file if WriteFlag not set Signed-off-by: Benjamin Wang --- db_test.go | 108 +++++++++++++++++++++++++++++++++++++++++++++++++++++ tx.go | 68 ++++++++++++++++++++++++++------- 2 files changed, 162 insertions(+), 14 deletions(-) diff --git a/db_test.go b/db_test.go index cb46e2d8b..feb3368ce 100644 --- a/db_test.go +++ b/db_test.go @@ -761,6 +761,114 @@ func TestDB_Concurrent_WriteTo_and_ConsistentRead(t *testing.T) { } } +// TestDB_WriteTo_and_Overwrite verifies that `(tx *Tx) WriteTo` can still +// work even the underlying file is overwritten between the time a read-only +// transaction is created and the time the file is actually opened +func TestDB_WriteTo_and_Overwrite(t *testing.T) { + testCases := []struct { + name string + writeFlag int + }{ + { + name: "writeFlag not set", + writeFlag: 0, + }, + /* syscall.O_DIRECT not supported on some platforms, i.e. 
Windows and MacOS + { + name: "writeFlag set", + writeFlag: syscall.O_DIRECT, + },*/ + } + + fRead := func(db *bolt.DB, bucketName []byte) map[string]string { + data := make(map[string]string) + _ = db.View(func(tx *bolt.Tx) error { + b := tx.Bucket(bucketName) + berr := b.ForEach(func(k, v []byte) error { + data[string(k)] = string(v) + return nil + }) + require.NoError(t, berr) + return nil + }) + return data + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + db := btesting.MustCreateDBWithOption(t, &bolt.Options{ + PageSize: 4096, + }) + filePathOfDb := db.Path() + + var ( + bucketName = []byte("data") + dataExpected map[string]string + dataActual map[string]string + ) + + t.Log("Populate some data") + err := db.Update(func(tx *bolt.Tx) error { + b, berr := tx.CreateBucket(bucketName) + if berr != nil { + return berr + } + for k := 0; k < 10; k++ { + key, value := fmt.Sprintf("key_%d", rand.Intn(10)), fmt.Sprintf("value_%d", rand.Intn(100)) + if perr := b.Put([]byte(key), []byte(value)); perr != nil { + return perr + } + } + return nil + }) + require.NoError(t, err) + + t.Log("Read all the data before calling WriteTo") + dataExpected = fRead(db.DB, bucketName) + + t.Log("Create a readonly transaction for WriteTo") + rtx, rerr := db.Begin(false) + require.NoError(t, rerr) + + // Some platforms (i.e. Windows) don't support renaming a file + // when the target file already exist and is opened. 
+ if runtime.GOOS == "linux" { + t.Log("Create another empty db file") + db2 := btesting.MustCreateDBWithOption(t, &bolt.Options{ + PageSize: 4096, + }) + db2.MustClose() + filePathOfDb2 := db2.Path() + + t.Logf("Renaming the new empty db file (%s) to the original db path (%s)", filePathOfDb2, filePathOfDb) + err = os.Rename(filePathOfDb2, filePathOfDb) + require.NoError(t, err) + } else { + t.Log("Ignore renaming step on non-Linux platform") + } + + t.Logf("Call WriteTo to copy the data of the original db file") + f := filepath.Join(t.TempDir(), "-backup-db") + err = rtx.CopyFile(f, 0600) + require.NoError(t, err) + require.NoError(t, rtx.Rollback()) + + t.Logf("Read all the data from the backup db after calling WriteTo") + newDB, err := bolt.Open(f, 0600, &bolt.Options{ + ReadOnly: true, + }) + require.NoError(t, err) + dataActual = fRead(newDB, bucketName) + err = newDB.Close() + require.NoError(t, err) + + t.Log("Compare the dataExpected and dataActual") + same := reflect.DeepEqual(dataExpected, dataActual) + require.True(t, same, fmt.Sprintf("found inconsistent data, dataExpected: %v, ddataActual : %v", dataExpected, dataActual)) + }) + } +} + // Ensure that opening a transaction while the DB is closed returns an error. func TestDB_BeginRW_Closed(t *testing.T) { var db bolt.DB diff --git a/tx.go b/tx.go index f32a20931..b7df9fc03 100644 --- a/tx.go +++ b/tx.go @@ -389,16 +389,43 @@ func (tx *Tx) Copy(w io.Writer) error { // WriteTo writes the entire database to a writer. // If err == nil then exactly tx.Size() bytes will be written into the writer. 
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { - // Attempt to open reader with WriteFlag - f, err := tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) - if err != nil { - return 0, err - } - defer func() { - if cerr := f.Close(); err == nil { - err = cerr + var f *os.File + // There is a risk that between the time a read-only transaction + // is created and the time the file is actually opened, the + // underlying db file at tx.db.path may have been replaced + // (e.g. via rename). In that case, opening the file again would + // unexpectedly point to a different file, rather than the one + // the transaction was based on. + // + // To overcome this, we reuse the already opened file handle when + // WritFlag not set. When the WriteFlag is set, we reopen the file + // but verify that it still refers to the same underlying file + // (by device and inode). If it does not, we fall back to + // reusing the existing already opened file handle. + if tx.WriteFlag != 0 { + // Attempt to open reader with WriteFlag + f, err = tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) + if err != nil { + return 0, err } - }() + + if ok, err := sameFile(tx.db.file, f); !ok { + lg := tx.db.Logger() + if cerr := f.Close(); cerr != nil { + lg.Errorf("failed to close the file (%s): %v", tx.db.path, cerr) + } + lg.Warningf("The underlying file has changed, so reuse the already opened file (%s): %v", tx.db.path, err) + f = tx.db.file + } else { + defer func() { + if cerr := f.Close(); err == nil { + err = cerr + } + }() + } + } else { + f = tx.db.file + } // Generate a meta page. We use the same page data for both meta pages. buf := make([]byte, tx.db.pageSize) @@ -425,13 +452,13 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { return n, fmt.Errorf("meta 1 copy: %s", err) } - // Move past the meta pages in the file. 
- if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil { - return n, fmt.Errorf("seek: %s", err) - } + // Copy data pages using a SectionReader to avoid affecting f's offset. + dataOffset := int64(tx.db.pageSize * 2) + dataSize := tx.Size() - dataOffset + sr := io.NewSectionReader(f, dataOffset, dataSize) // Copy data pages. - wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) + wn, err := io.CopyN(w, sr, dataSize) n += wn if err != nil { return n, err @@ -440,6 +467,19 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { return n, nil } +func sameFile(f1, f2 *os.File) (bool, error) { + fi1, err := f1.Stat() + if err != nil { + return false, fmt.Errorf("failed to get fileInfo of the first file (%s): %w", f1.Name(), err) + } + fi2, err := f2.Stat() + if err != nil { + return false, fmt.Errorf("failed to get fileInfo of the second file (%s): %w", f2.Name(), err) + } + + return os.SameFile(fi1, fi2), nil +} + // CopyFile copies the entire database to file at the given path. // A reader transaction is maintained during the copy so it is safe to continue // using the database while a copy is in progress. From b8f8dda655d68387a18967e65d2a6c6d7528f55d Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Mon, 18 Aug 2025 20:41:53 +0100 Subject: [PATCH 411/439] Update 1.3 and 1.4 changelog to cover WriteTo change Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.3.md | 1 + CHANGELOG/CHANGELOG-1.4.md | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/CHANGELOG/CHANGELOG-1.3.md b/CHANGELOG/CHANGELOG-1.3.md index 32502d39f..a46279014 100644 --- a/CHANGELOG/CHANGELOG-1.3.md +++ b/CHANGELOG/CHANGELOG-1.3.md @@ -6,6 +6,7 @@ Note that we start to track changes starting from v1.3.7. ### BoltDB - [Add protection on meta page when it's being written](https://github.com/etcd-io/bbolt/pull/1006) +- Fix [potential data corruption in `(*Tx)WriteTo` if underlying db file is overwritten](https://github.com/etcd-io/bbolt/pull/1059)
diff --git a/CHANGELOG/CHANGELOG-1.4.md b/CHANGELOG/CHANGELOG-1.4.md index 66578a096..30372dfad 100644 --- a/CHANGELOG/CHANGELOG-1.4.md +++ b/CHANGELOG/CHANGELOG-1.4.md @@ -1,6 +1,13 @@
+## v1.4.3(TBD) + +### BoltDB +- Fix [potential data corruption in `(*Tx)WriteTo` if underlying db file is overwritten](https://github.com/etcd-io/bbolt/pull/1058) + +
+ ## v1.4.2(2025-06-27) ### BoltDB From c60c932c2df50f51b3f9ba3d74008c22b7f4f243 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 19 Aug 2025 18:25:58 +0100 Subject: [PATCH 412/439] Update release date for 1.3.12 and 1.4.3 Signed-off-by: Benjamin Wang --- CHANGELOG/CHANGELOG-1.3.md | 2 +- CHANGELOG/CHANGELOG-1.4.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG/CHANGELOG-1.3.md b/CHANGELOG/CHANGELOG-1.3.md index a46279014..1dcd400ca 100644 --- a/CHANGELOG/CHANGELOG-1.3.md +++ b/CHANGELOG/CHANGELOG-1.3.md @@ -2,7 +2,7 @@ Note that we start to track changes starting from v1.3.7.
-## v1.3.12(TBD) +## v1.3.12(2025-08-19) ### BoltDB - [Add protection on meta page when it's being written](https://github.com/etcd-io/bbolt/pull/1006) diff --git a/CHANGELOG/CHANGELOG-1.4.md b/CHANGELOG/CHANGELOG-1.4.md index 30372dfad..1d00ad997 100644 --- a/CHANGELOG/CHANGELOG-1.4.md +++ b/CHANGELOG/CHANGELOG-1.4.md @@ -1,7 +1,7 @@
-## v1.4.3(TBD) +## v1.4.3(2025-08-19) ### BoltDB - Fix [potential data corruption in `(*Tx)WriteTo` if underlying db file is overwritten](https://github.com/etcd-io/bbolt/pull/1058) From a257c2f8350f96c79c846478dabf0d9bbf6a1c98 Mon Sep 17 00:00:00 2001 From: "shenmu.wy" Date: Wed, 20 Aug 2025 11:03:37 +0800 Subject: [PATCH 413/439] fix a typo Signed-off-by: shenmu.wy --- tx.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tx.go b/tx.go index b7df9fc03..aa0066bd3 100644 --- a/tx.go +++ b/tx.go @@ -398,7 +398,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { // the transaction was based on. // // To overcome this, we reuse the already opened file handle when - // WritFlag not set. When the WriteFlag is set, we reopen the file + // WriteFlag not set. When the WriteFlag is set, we reopen the file // but verify that it still refers to the same underlying file // (by device and inode). If it does not, we fall back to // reusing the existing already opened file handle. From 01f4ef0bebe24a4ee1547091a56b7d6b929b4b8b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Aug 2025 04:27:50 +0000 Subject: [PATCH 414/439] build(deps): Bump github.com/stretchr/testify from 1.10.0 to 1.11.0 Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.10.0 to 1.11.0. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.10.0...v1.11.0) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-version: 1.11.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index e63ca3d75..968bdb536 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ toolchain go1.24.6 require ( github.com/spf13/cobra v1.9.1 github.com/spf13/pflag v1.0.7 - github.com/stretchr/testify v1.10.0 + github.com/stretchr/testify v1.11.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.16.0 golang.org/x/sys v0.35.0 diff --git a/go.sum b/go.sum index 20245f34b..16ddbf051 100644 --- a/go.sum +++ b/go.sum @@ -11,8 +11,8 @@ github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wx github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.0 h1:ib4sjIrwZKxE5u/Japgo/7SJV3PvgjGiRNAvTVGqQl8= +github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= From 67a91be40b2d9b72abce12f5a30367d3983a3b4f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Sep 2025 01:28:27 +0000 Subject: [PATCH 415/439] build(deps): Bump github.com/spf13/cobra from 1.9.1 to 1.10.1 Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.9.1 to 1.10.1. 
- [Release notes](https://github.com/spf13/cobra/releases) - [Commits](https://github.com/spf13/cobra/compare/v1.9.1...v1.10.1) --- updated-dependencies: - dependency-name: github.com/spf13/cobra dependency-version: 1.10.1 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 9 ++++----- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/go.mod b/go.mod index 968bdb536..8d6651429 100644 --- a/go.mod +++ b/go.mod @@ -5,8 +5,8 @@ go 1.24 toolchain go1.24.6 require ( - github.com/spf13/cobra v1.9.1 - github.com/spf13/pflag v1.0.7 + github.com/spf13/cobra v1.10.1 + github.com/spf13/pflag v1.0.9 github.com/stretchr/testify v1.11.0 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.16.0 diff --git a/go.sum b/go.sum index 16ddbf051..d05a8df97 100644 --- a/go.sum +++ b/go.sum @@ -6,11 +6,10 @@ github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLf github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= -github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= -github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 
github.com/stretchr/testify v1.11.0 h1:ib4sjIrwZKxE5u/Japgo/7SJV3PvgjGiRNAvTVGqQl8= github.com/stretchr/testify v1.11.0/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= From c5e2ed474a1756dab78ee0cc773f638b1eb0da85 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 2 Sep 2025 08:53:49 +0000 Subject: [PATCH 416/439] build(deps): Bump github.com/stretchr/testify from 1.11.0 to 1.11.1 Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.11.0 to 1.11.1. - [Release notes](https://github.com/stretchr/testify/releases) - [Commits](https://github.com/stretchr/testify/compare/v1.11.0...v1.11.1) --- updated-dependencies: - dependency-name: github.com/stretchr/testify dependency-version: 1.11.1 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8d6651429..719e1a45b 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ toolchain go1.24.6 require ( github.com/spf13/cobra v1.10.1 github.com/spf13/pflag v1.0.9 - github.com/stretchr/testify v1.11.0 + github.com/stretchr/testify v1.11.1 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.16.0 golang.org/x/sys v0.35.0 diff --git a/go.sum b/go.sum index d05a8df97..c4cf4411d 100644 --- a/go.sum +++ b/go.sum @@ -10,8 +10,8 @@ github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/testify v1.11.0 h1:ib4sjIrwZKxE5u/Japgo/7SJV3PvgjGiRNAvTVGqQl8= -github.com/stretchr/testify v1.11.0/go.mod 
h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= From ce6fcebc80e22cb32ed425cee0c84e7e7151bb20 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Sep 2025 14:06:11 +0000 Subject: [PATCH 417/439] build(deps): Bump golang.org/x/sys from 0.35.0 to 0.36.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.35.0 to 0.36.0. - [Commits](https://github.com/golang/sys/compare/v0.35.0...v0.36.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-version: 0.36.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 719e1a45b..e29330193 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module go.etcd.io/bbolt -go 1.24 +go 1.24.0 toolchain go1.24.6 @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.11.1 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.16.0 - golang.org/x/sys v0.35.0 + golang.org/x/sys v0.36.0 ) require ( diff --git a/go.sum b/go.sum index c4cf4411d..c6e24a5b2 100644 --- a/go.sum +++ b/go.sum @@ -16,8 +16,8 @@ go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= -golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= From a70fc37a0725ded15d3f68524ba3c5d578c6fb2f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Sep 2025 14:06:18 +0000 Subject: [PATCH 418/439] build(deps): Bump github.com/spf13/pflag from 1.0.9 to 1.0.10 Bumps [github.com/spf13/pflag](https://github.com/spf13/pflag) from 1.0.9 to 1.0.10. 
- [Release notes](https://github.com/spf13/pflag/releases) - [Commits](https://github.com/spf13/pflag/compare/v1.0.9...v1.0.10) --- updated-dependencies: - dependency-name: github.com/spf13/pflag dependency-version: 1.0.10 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 719e1a45b..e7bb5ca28 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ toolchain go1.24.6 require ( github.com/spf13/cobra v1.10.1 - github.com/spf13/pflag v1.0.9 + github.com/spf13/pflag v1.0.10 github.com/stretchr/testify v1.11.1 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.16.0 diff --git a/go.sum b/go.sum index c4cf4411d..85b390b74 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,9 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= -github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= From e335e2059240f90564b4208efc34d632c12dea23 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 8 Sep 2025 19:20:15 +0000 Subject: [PATCH 419/439] build(deps): Bump 
golang.org/x/sync from 0.16.0 to 0.17.0 Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.16.0 to 0.17.0. - [Commits](https://github.com/golang/sync/compare/v0.16.0...v0.17.0) --- updated-dependencies: - dependency-name: golang.org/x/sync dependency-version: 0.17.0 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 32d7d1d72..f6d0a1abc 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/spf13/pflag v1.0.10 github.com/stretchr/testify v1.11.1 go.etcd.io/gofail v0.2.0 - golang.org/x/sync v0.16.0 + golang.org/x/sync v0.17.0 golang.org/x/sys v0.36.0 ) diff --git a/go.sum b/go.sum index a1c9f62e8..3484269ab 100644 --- a/go.sum +++ b/go.sum @@ -15,8 +15,8 @@ github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From 06fbabdae6a3c997363417efd5500df742d16bb9 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 9 Sep 2025 10:14:03 +0100 Subject: [PATCH 420/439] Move the common functions into utils.go Signed-off-by: Benjamin Wang --- 
cmd/bbolt/main.go | 123 -------------------------------------------- cmd/bbolt/utils.go | 124 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 124 insertions(+), 123 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 438a3e06f..3d8d823b3 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -1,19 +1,11 @@ package main import ( - "crypto/sha256" - "encoding/hex" "errors" "fmt" "io" "os" - "strconv" "strings" - "unicode" - "unicode/utf8" - - bolt "go.etcd.io/bbolt" - berrors "go.etcd.io/bbolt/errors" ) var ( @@ -149,51 +141,6 @@ Use "bbolt [command] -h" for more information about a command. `, "\n") } -const FORMAT_MODES = "auto|ascii-encoded|hex|bytes|redacted" - -// formatBytes converts bytes into string according to format. -// Supported formats: ascii-encoded, hex, bytes. -func formatBytes(b []byte, format string) (string, error) { - switch format { - case "ascii-encoded": - return fmt.Sprintf("%q", b), nil - case "hex": - return fmt.Sprintf("%x", b), nil - case "bytes": - return string(b), nil - case "auto": - return bytesToAsciiOrHex(b), nil - case "redacted": - hash := sha256.New() - hash.Write(b) - return fmt.Sprintf("", len(b), hash.Sum(nil)), nil - default: - return "", fmt.Errorf("formatBytes: unsupported format: %s", format) - } -} - -func parseBytes(str string, format string) ([]byte, error) { - switch format { - case "ascii-encoded": - return []byte(str), nil - case "hex": - return hex.DecodeString(str) - default: - return nil, fmt.Errorf("parseBytes: unsupported format: %s", format) - } -} - -// writelnBytes writes the byte to the writer. Supported formats: ascii-encoded, hex, bytes, auto, redacted. 
-// Terminates the write with a new line symbol; -func writelnBytes(w io.Writer, b []byte, format string) error { - str, err := formatBytes(b, format) - if err != nil { - return err - } - _, err = fmt.Fprintln(w, str) - return err -} - type PageError struct { ID int Err error @@ -202,73 +149,3 @@ type PageError struct { func (e *PageError) Error() string { return fmt.Sprintf("page error: id=%d, err=%s", e.ID, e.Err) } - -// isPrintable returns true if the string is valid unicode and contains only printable runes. -func isPrintable(s string) bool { - if !utf8.ValidString(s) { - return false - } - for _, ch := range s { - if !unicode.IsPrint(ch) { - return false - } - } - return true -} - -func bytesToAsciiOrHex(b []byte) string { - sb := string(b) - if isPrintable(sb) { - return sb - } else { - return hex.EncodeToString(b) - } -} - -func stringToPage(str string) (uint64, error) { - return strconv.ParseUint(str, 10, 64) -} - -// stringToPages parses a slice of strings into page ids. -func stringToPages(strs []string) ([]uint64, error) { - var a []uint64 - for _, str := range strs { - if len(str) == 0 { - continue - } - i, err := stringToPage(str) - if err != nil { - return nil, err - } - a = append(a, i) - } - return a, nil -} - -type cmdKvStringer struct{} - -func (cmdKvStringer) KeyToString(key []byte) string { - return bytesToAsciiOrHex(key) -} - -func (cmdKvStringer) ValueToString(value []byte) string { - return bytesToAsciiOrHex(value) -} - -func CmdKvStringer() bolt.KVStringer { - return cmdKvStringer{} -} - -func findLastBucket(tx *bolt.Tx, bucketNames []string) (*bolt.Bucket, error) { - lastbucket := tx.Bucket([]byte(bucketNames[0])) - if lastbucket == nil { - return nil, berrors.ErrBucketNotFound - } - for _, bucket := range bucketNames[1:] { - lastbucket = lastbucket.Bucket([]byte(bucket)) - if lastbucket == nil { - return nil, berrors.ErrBucketNotFound - } - } - return lastbucket, nil -} diff --git a/cmd/bbolt/utils.go b/cmd/bbolt/utils.go index 
71f1a3d8c..a87328a3c 100644 --- a/cmd/bbolt/utils.go +++ b/cmd/bbolt/utils.go @@ -1,8 +1,17 @@ package main import ( + "crypto/sha256" + "encoding/hex" "fmt" + "io" "os" + "strconv" + "unicode" + "unicode/utf8" + + bolt "go.etcd.io/bbolt" + berrors "go.etcd.io/bbolt/errors" ) func checkSourceDBPath(srcPath string) (os.FileInfo, error) { @@ -14,3 +23,118 @@ func checkSourceDBPath(srcPath string) (os.FileInfo, error) { } return fi, nil } + +const FORMAT_MODES = "auto|ascii-encoded|hex|bytes|redacted" + +// formatBytes converts bytes into string according to format. +// Supported formats: ascii-encoded, hex, bytes. +func formatBytes(b []byte, format string) (string, error) { + switch format { + case "ascii-encoded": + return fmt.Sprintf("%q", b), nil + case "hex": + return fmt.Sprintf("%x", b), nil + case "bytes": + return string(b), nil + case "auto": + return bytesToAsciiOrHex(b), nil + case "redacted": + hash := sha256.New() + hash.Write(b) + return fmt.Sprintf("", len(b), hash.Sum(nil)), nil + default: + return "", fmt.Errorf("formatBytes: unsupported format: %s", format) + } +} + +func parseBytes(str string, format string) ([]byte, error) { + switch format { + case "ascii-encoded": + return []byte(str), nil + case "hex": + return hex.DecodeString(str) + default: + return nil, fmt.Errorf("parseBytes: unsupported format: %s", format) + } +} + +// writelnBytes writes the byte to the writer. Supported formats: ascii-encoded, hex, bytes, auto, redacted. +// Terminates the write with a new line symbol; +func writelnBytes(w io.Writer, b []byte, format string) error { + str, err := formatBytes(b, format) + if err != nil { + return err + } + _, err = fmt.Fprintln(w, str) + return err +} + +// isPrintable returns true if the string is valid unicode and contains only printable runes. 
+func isPrintable(s string) bool { + if !utf8.ValidString(s) { + return false + } + for _, ch := range s { + if !unicode.IsPrint(ch) { + return false + } + } + return true +} + +func bytesToAsciiOrHex(b []byte) string { + sb := string(b) + if isPrintable(sb) { + return sb + } else { + return hex.EncodeToString(b) + } +} + +func stringToPage(str string) (uint64, error) { + return strconv.ParseUint(str, 10, 64) +} + +// stringToPages parses a slice of strings into page ids. +func stringToPages(strs []string) ([]uint64, error) { + var a []uint64 + for _, str := range strs { + if len(str) == 0 { + continue + } + i, err := stringToPage(str) + if err != nil { + return nil, err + } + a = append(a, i) + } + return a, nil +} + +type cmdKvStringer struct{} + +func (cmdKvStringer) KeyToString(key []byte) string { + return bytesToAsciiOrHex(key) +} + +func (cmdKvStringer) ValueToString(value []byte) string { + return bytesToAsciiOrHex(value) +} + +func CmdKvStringer() bolt.KVStringer { + return cmdKvStringer{} +} + +func findLastBucket(tx *bolt.Tx, bucketNames []string) (*bolt.Bucket, error) { + lastbucket := tx.Bucket([]byte(bucketNames[0])) + if lastbucket == nil { + return nil, berrors.ErrBucketNotFound + } + for _, bucket := range bucketNames[1:] { + lastbucket = lastbucket.Bucket([]byte(bucket)) + if lastbucket == nil { + return nil, berrors.ErrBucketNotFound + } + } + return lastbucket, nil +} From 9fd6dd8abe41aaadfda0e6b945a3759214a511e8 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 9 Sep 2025 10:16:57 +0100 Subject: [PATCH 421/439] Move PageError into command_pages.go Signed-off-by: Benjamin Wang --- cmd/bbolt/command_pages.go | 9 +++++++++ cmd/bbolt/main.go | 9 --------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/cmd/bbolt/command_pages.go b/cmd/bbolt/command_pages.go index 576071c16..5fd1ffa9d 100644 --- a/cmd/bbolt/command_pages.go +++ b/cmd/bbolt/command_pages.go @@ -10,6 +10,15 @@ import ( bolt "go.etcd.io/bbolt" ) +type PageError 
struct { + ID int + Err error +} + +func (e *PageError) Error() string { + return fmt.Sprintf("page error: id=%d, err=%s", e.ID, e.Err) +} + func newPagesCommand() *cobra.Command { pagesCmd := &cobra.Command{ Use: "pages ", diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 3d8d823b3..91072e8ec 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -140,12 +140,3 @@ The commands are: Use "bbolt [command] -h" for more information about a command. `, "\n") } - -type PageError struct { - ID int - Err error -} - -func (e *PageError) Error() string { - return fmt.Sprintf("page error: id=%d, err=%s", e.ID, e.Err) -} From b5bd98a45dcf21caa421858a823820c8c1b9747f Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Tue, 9 Sep 2025 10:22:06 +0100 Subject: [PATCH 422/439] Cleanup the legacy Main command Signed-off-by: Benjamin Wang --- cmd/bbolt/main.go | 86 ------------------------------------------ cmd/bbolt/main_test.go | 18 --------- 2 files changed, 104 deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 91072e8ec..037c39126 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -3,9 +3,7 @@ package main import ( "errors" "fmt" - "io" "os" - "strings" ) var ( @@ -46,18 +44,6 @@ var ( ) func main() { - m := NewMain() - if err := m.Run(os.Args[1:]...); err == ErrUsage { - os.Exit(2) - } else if err == ErrUnknownCommand { - cobraExecute() - } else if err != nil { - fmt.Println(err.Error()) - os.Exit(1) - } -} - -func cobraExecute() { rootCmd := NewRootCommand() if err := rootCmd.Execute(); err != nil { if rootCmd.SilenceErrors { @@ -68,75 +54,3 @@ func cobraExecute() { } } } - -type baseCommand struct { - Stdin io.Reader - Stdout io.Writer - Stderr io.Writer -} - -// Main represents the main program execution. -type Main struct { - baseCommand -} - -// NewMain returns a new instance of Main connect to the standard input/output. 
-func NewMain() *Main { - return &Main{ - baseCommand: baseCommand{ - Stdin: os.Stdin, - Stdout: os.Stdout, - Stderr: os.Stderr, - }, - } -} - -// Run executes the program. -func (m *Main) Run(args ...string) error { - // Require a command at the beginning. - if len(args) == 0 || strings.HasPrefix(args[0], "-") { - fmt.Fprintln(m.Stderr, m.Usage()) - return ErrUsage - } - - // Execute command. - switch args[0] { - case "help": - fmt.Fprintln(m.Stderr, m.Usage()) - return ErrUsage - default: - return ErrUnknownCommand - } -} - -// Usage returns the help message. -func (m *Main) Usage() string { - return strings.TrimLeft(` -Bbolt is a tool for inspecting bbolt databases. - -Usage: - - bbolt command [arguments] - -The commands are: - - version print the current version of bbolt - bench run synthetic benchmark against bbolt - buckets print a list of buckets - check verifies integrity of bbolt database - compact copies a bbolt database, compacting it in the process - dump print a hexadecimal dump of a single page - get print the value of a key in a bucket - info print basic info - keys print a list of keys in a bucket - help print this screen - page print one or more pages in human readable format - pages print list of pages with their types - page-item print the key and value of a page item. - stats iterate over all pages and generate usage stats - inspect inspect the structure of the database - surgery perform surgery on bbolt database - -Use "bbolt [command] -h" for more information about a command. 
-`, "\n") -} diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/main_test.go index ce0da648f..d75172535 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/main_test.go @@ -16,7 +16,6 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" - main "go.etcd.io/bbolt/cmd/bbolt" ) type ConcurrentBuffer struct { @@ -45,23 +44,6 @@ func (b *ConcurrentBuffer) String() string { return b.buf.String() } -// Main represents a test wrapper for main.Main that records output. -type Main struct { - *main.Main - Stdin ConcurrentBuffer - Stdout ConcurrentBuffer - Stderr ConcurrentBuffer -} - -// NewMain returns a new instance of Main. -func NewMain() *Main { - m := &Main{Main: main.NewMain()} - m.Main.Stdin = &m.Stdin - m.Main.Stdout = &m.Stdout - m.Main.Stderr = &m.Stderr - return m -} - func fillBucket(b *bolt.Bucket, prefix []byte) error { n := 10 + rand.Intn(50) for i := 0; i < n; i++ { From a549fe844f97d151ca3eecc61da6b9e6a27de894 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Wed, 10 Sep 2025 00:26:09 +0200 Subject: [PATCH 423/439] chore(test): bump_windows_test_timeout Signed-off-by: Mustafa Elbehery --- .github/workflows/tests_windows.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index cb0b50384..878a41ee0 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -31,7 +31,7 @@ jobs: run: | case "${TARGET}" in windows-amd64-unit-test-4-cpu) - CPU=4 TIMEOUT=50m make test + CPU=4 TIMEOUT=60m make test ;; *) echo "Failed to find target" @@ -60,4 +60,4 @@ jobs: version: v2.1.6 - run: make coverage env: - TIMEOUT: 50m + TIMEOUT: 60m From 93d08dec976cdd065b4ab07b456a484d88cd7ee6 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Tue, 9 Sep 2025 21:27:56 +0200 Subject: [PATCH 424/439] refactor: remove unused errors Signed-off-by: Mustafa Elbehery --- cmd/bbolt/main.go | 17 ----------------- 1 file changed, 17 
deletions(-) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index 037c39126..f8a38f728 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -7,26 +7,12 @@ import ( ) var ( - // ErrUsage is returned when a usage message was printed and the process - // should simply exit with an error. - ErrUsage = errors.New("usage") - - // ErrUnknownCommand is returned when a CLI command is not specified. - ErrUnknownCommand = errors.New("unknown command") - // ErrPathRequired is returned when the path to a Bolt database is not specified. ErrPathRequired = errors.New("path required") - // ErrFileNotFound is returned when a Bolt database does not exist. - ErrFileNotFound = errors.New("file not found") - // ErrInvalidValue is returned when a benchmark reads an unexpected value. ErrInvalidValue = errors.New("invalid value") - // ErrNonDivisibleBatchSize is returned when the batch size can't be evenly - // divided by the iteration count. - ErrNonDivisibleBatchSize = errors.New("number of iterations must be divisible by the batch size") - // ErrPageIDRequired is returned when a required page id is not specified. ErrPageIDRequired = errors.New("page id required") @@ -38,9 +24,6 @@ var ( // ErrKeyNotFound is returned when a key is not found. ErrKeyNotFound = errors.New("key not found") - - // ErrNotEnoughArgs is returned with a cmd is being executed with fewer arguments. 
- ErrNotEnoughArgs = errors.New("not enough arguments") ) func main() { From 255bd6877f348631dc5fc57d5f3fdaf3b2e97307 Mon Sep 17 00:00:00 2001 From: Artur Melanchyk <13834276+arturmelanchyk@users.noreply.github.com> Date: Tue, 9 Sep 2025 12:03:59 +0200 Subject: [PATCH 425/439] internal/freelist: make pcache a map of struct{} Signed-off-by: Artur Melanchyk <13834276+arturmelanchyk@users.noreply.github.com> --- internal/freelist/shared.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/freelist/shared.go b/internal/freelist/shared.go index f2d113008..f30a69f10 100644 --- a/internal/freelist/shared.go +++ b/internal/freelist/shared.go @@ -220,10 +220,10 @@ func (t *shared) Reload(p *common.Page) { func (t *shared) NoSyncReload(pgIds common.Pgids) { // Build a cache of only pending pages. - pcache := make(map[common.Pgid]bool) + pcache := make(map[common.Pgid]struct{}) for _, txp := range t.pending { for _, pendingID := range txp.ids { - pcache[pendingID] = true + pcache[pendingID] = struct{}{} } } @@ -231,7 +231,7 @@ func (t *shared) NoSyncReload(pgIds common.Pgids) { // with any pages not in the pending lists. 
a := []common.Pgid{} for _, id := range pgIds { - if !pcache[id] { + if _, ok := pcache[id]; !ok { a = append(a, id) } } From 576812cd3676e811d131d4aac54ed05f4cf452cd Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Thu, 11 Sep 2025 14:02:39 +0200 Subject: [PATCH 426/439] refactor(cli): move cli errors to cmd dir Signed-off-by: Mustafa Elbehery --- cmd/bbolt/command_bench.go | 10 ---------- cmd/bbolt/command_surgery.go | 4 ---- cmd/bbolt/errors.go | 33 +++++++++++++++++++++++++++++++++ cmd/bbolt/main.go | 21 --------------------- 4 files changed, 33 insertions(+), 35 deletions(-) create mode 100644 cmd/bbolt/errors.go diff --git a/cmd/bbolt/command_bench.go b/cmd/bbolt/command_bench.go index f8beeb1c8..d2d9bbe1e 100644 --- a/cmd/bbolt/command_bench.go +++ b/cmd/bbolt/command_bench.go @@ -2,7 +2,6 @@ package main import ( "encoding/binary" - "errors" "fmt" "io" "math" @@ -21,15 +20,6 @@ import ( "go.etcd.io/bbolt/internal/common" ) -var ( - // ErrBatchNonDivisibleBatchSize is returned when the batch size can't be evenly - // divided by the iteration count. - ErrBatchNonDivisibleBatchSize = errors.New("the number of iterations must be divisible by the batch size") - - // ErrBatchInvalidWriteMode is returned when the write mode is other than seq, rnd, seq-nest, or rnd-nest. 
- ErrBatchInvalidWriteMode = errors.New("the write mode should be one of seq, rnd, seq-nest, or rnd-nest") -) - var benchBucketName = []byte("bench") type benchOptions struct { diff --git a/cmd/bbolt/command_surgery.go b/cmd/bbolt/command_surgery.go index ca369cddb..107345e4a 100644 --- a/cmd/bbolt/command_surgery.go +++ b/cmd/bbolt/command_surgery.go @@ -13,10 +13,6 @@ import ( "go.etcd.io/bbolt/internal/surgeon" ) -var ( - ErrSurgeryFreelistAlreadyExist = errors.New("the file already has freelist, please consider to abandon the freelist to forcibly rebuild it") -) - func newSurgeryCommand() *cobra.Command { surgeryCmd := &cobra.Command{ Use: "surgery ", diff --git a/cmd/bbolt/errors.go b/cmd/bbolt/errors.go new file mode 100644 index 000000000..a62a69a39 --- /dev/null +++ b/cmd/bbolt/errors.go @@ -0,0 +1,33 @@ +package main + +import "errors" + +var ( + // ErrBatchInvalidWriteMode is returned when the write mode is other than seq, rnd, seq-nest, or rnd-nest. + ErrBatchInvalidWriteMode = errors.New("the write mode should be one of seq, rnd, seq-nest, or rnd-nest") + + // ErrBatchNonDivisibleBatchSize is returned when the batch size can't be evenly + // divided by the iteration count. + ErrBatchNonDivisibleBatchSize = errors.New("the number of iterations must be divisible by the batch size") + + // ErrBucketRequired is returned when a bucket is not specified. + ErrBucketRequired = errors.New("bucket required") + + // ErrInvalidPageArgs is returned when Page cmd receives pageIds and all option is true. + ErrInvalidPageArgs = errors.New("invalid args: either use '--all' or 'pageid...'") + + // ErrInvalidValue is returned when a benchmark reads an unexpected value. + ErrInvalidValue = errors.New("invalid value") + + // ErrKeyNotFound is returned when a key is not found. + ErrKeyNotFound = errors.New("key not found") + + // ErrPageIDRequired is returned when a required page id is not specified. 
+ ErrPageIDRequired = errors.New("page id required") + + // ErrPathRequired is returned when the path to a Bolt database is not specified. + ErrPathRequired = errors.New("path required") + + // ErrSurgeryFreelistAlreadyExist is returned when boltdb database file already has a freelist. + ErrSurgeryFreelistAlreadyExist = errors.New("the file already has freelist, please consider to abandon the freelist to forcibly rebuild it") +) diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index f8a38f728..fe5d61bac 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -1,31 +1,10 @@ package main import ( - "errors" "fmt" "os" ) -var ( - // ErrPathRequired is returned when the path to a Bolt database is not specified. - ErrPathRequired = errors.New("path required") - - // ErrInvalidValue is returned when a benchmark reads an unexpected value. - ErrInvalidValue = errors.New("invalid value") - - // ErrPageIDRequired is returned when a required page id is not specified. - ErrPageIDRequired = errors.New("page id required") - - // ErrInvalidPageArgs is returned when Page cmd receives pageIds and all option is true. - ErrInvalidPageArgs = errors.New("invalid args: either use '--all' or 'pageid...'") - - // ErrBucketRequired is returned when a bucket is not specified. - ErrBucketRequired = errors.New("bucket required") - - // ErrKeyNotFound is returned when a key is not found. - ErrKeyNotFound = errors.New("key not found") -) - func main() { rootCmd := NewRootCommand() if err := rootCmd.Execute(); err != nil { From 961e0ac3657b88e1f613d88d3735245aea601e30 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 12 Sep 2025 10:43:20 +0100 Subject: [PATCH 427/439] Move all comamnds into a new package command under cmd/bbolt The intention is to ensure other tool (e.g. etcdutl) can import bbolt commands. 
Signed-off-by: Benjamin Wang --- Makefile | 4 ++-- cmd/bbolt/{ => command}/command_bench.go | 2 +- cmd/bbolt/{ => command}/command_bench_test.go | 6 ++++-- cmd/bbolt/{ => command}/command_buckets.go | 2 +- cmd/bbolt/{ => command}/command_buckets_test.go | 6 +++--- cmd/bbolt/{ => command}/command_check.go | 2 +- cmd/bbolt/{ => command}/command_check_test.go | 6 +++--- cmd/bbolt/{ => command}/command_compact.go | 2 +- cmd/bbolt/{ => command}/command_compact_test.go | 8 ++++---- cmd/bbolt/{ => command}/command_dump.go | 2 +- cmd/bbolt/{ => command}/command_dump_test.go | 8 ++++---- cmd/bbolt/{ => command}/command_get.go | 2 +- cmd/bbolt/{ => command}/command_get_test.go | 8 ++++---- cmd/bbolt/{ => command}/command_info.go | 2 +- cmd/bbolt/{ => command}/command_info_test.go | 8 ++++---- cmd/bbolt/{ => command}/command_inspect.go | 2 +- cmd/bbolt/{ => command}/command_inspect_test.go | 6 +++--- cmd/bbolt/{ => command}/command_keys.go | 2 +- cmd/bbolt/{ => command}/command_keys_test.go | 8 ++++---- cmd/bbolt/{ => command}/command_page.go | 2 +- cmd/bbolt/{ => command}/command_page_item.go | 2 +- .../{ => command}/command_page_item_test.go | 8 ++++---- cmd/bbolt/{ => command}/command_page_test.go | 12 ++++++------ cmd/bbolt/{ => command}/command_pages.go | 2 +- cmd/bbolt/{ => command}/command_pages_test.go | 8 ++++---- cmd/bbolt/{ => command}/command_root.go | 2 +- cmd/bbolt/{ => command}/command_stats.go | 2 +- cmd/bbolt/{ => command}/command_stats_test.go | 10 +++++----- cmd/bbolt/{ => command}/command_surgery.go | 2 +- .../{ => command}/command_surgery_freelist.go | 2 +- .../command_surgery_freelist_test.go | 10 +++++----- cmd/bbolt/{ => command}/command_surgery_meta.go | 2 +- .../{ => command}/command_surgery_meta_test.go | 10 +++++----- cmd/bbolt/{ => command}/command_surgery_test.go | 16 ++++++++-------- cmd/bbolt/{ => command}/command_version.go | 2 +- cmd/bbolt/{ => command}/errors.go | 2 +- cmd/bbolt/{ => command}/main_test.go | 2 +- cmd/bbolt/{ => 
command}/utils.go | 2 +- cmd/bbolt/{ => command}/utils_test.go | 2 +- cmd/bbolt/main.go | 4 +++- 40 files changed, 97 insertions(+), 93 deletions(-) rename cmd/bbolt/{ => command}/command_bench.go (99%) rename cmd/bbolt/{ => command}/command_bench_test.go (94%) rename cmd/bbolt/{ => command}/command_buckets.go (98%) rename cmd/bbolt/{ => command}/command_buckets_test.go (93%) rename cmd/bbolt/{ => command}/command_check.go (99%) rename cmd/bbolt/{ => command}/command_check_test.go (93%) rename cmd/bbolt/{ => command}/command_compact.go (99%) rename cmd/bbolt/{ => command}/command_compact_test.go (94%) rename cmd/bbolt/{ => command}/command_dump.go (99%) rename cmd/bbolt/{ => command}/command_dump_test.go (88%) rename cmd/bbolt/{ => command}/command_get.go (99%) rename cmd/bbolt/{ => command}/command_get_test.go (93%) rename cmd/bbolt/{ => command}/command_info.go (97%) rename cmd/bbolt/{ => command}/command_info_test.go (86%) rename cmd/bbolt/{ => command}/command_inspect.go (98%) rename cmd/bbolt/{ => command}/command_inspect_test.go (82%) rename cmd/bbolt/{ => command}/command_keys.go (98%) rename cmd/bbolt/{ => command}/command_keys_test.go (94%) rename cmd/bbolt/{ => command}/command_page.go (99%) rename cmd/bbolt/{ => command}/command_page_item.go (99%) rename cmd/bbolt/{ => command}/command_page_item_test.go (95%) rename cmd/bbolt/{ => command}/command_page_test.go (90%) rename cmd/bbolt/{ => command}/command_pages.go (99%) rename cmd/bbolt/{ => command}/command_pages_test.go (90%) rename cmd/bbolt/{ => command}/command_root.go (97%) rename cmd/bbolt/{ => command}/command_stats.go (99%) rename cmd/bbolt/{ => command}/command_stats_test.go (96%) rename cmd/bbolt/{ => command}/command_surgery.go (99%) rename cmd/bbolt/{ => command}/command_surgery_freelist.go (99%) rename cmd/bbolt/{ => command}/command_surgery_freelist_test.go (92%) rename cmd/bbolt/{ => command}/command_surgery_meta.go (99%) rename cmd/bbolt/{ => command}/command_surgery_meta_test.go (92%) 
rename cmd/bbolt/{ => command}/command_surgery_test.go (98%) rename cmd/bbolt/{ => command}/command_version.go (97%) rename cmd/bbolt/{ => command}/errors.go (98%) rename cmd/bbolt/{ => command}/main_test.go (99%) rename cmd/bbolt/{ => command}/utils.go (99%) rename cmd/bbolt/{ => command}/utils_test.go (98%) diff --git a/Makefile b/Makefile index f5a6703a0..eb4393dad 100644 --- a/Makefile +++ b/Makefile @@ -44,12 +44,12 @@ test: @echo "hashmap freelist test" BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT} BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./internal/... - BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt/... @echo "array freelist test" BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT} BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./internal/... - BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt/... 
.PHONY: coverage coverage: diff --git a/cmd/bbolt/command_bench.go b/cmd/bbolt/command/command_bench.go similarity index 99% rename from cmd/bbolt/command_bench.go rename to cmd/bbolt/command/command_bench.go index d2d9bbe1e..c29d29a3a 100644 --- a/cmd/bbolt/command_bench.go +++ b/cmd/bbolt/command/command_bench.go @@ -1,4 +1,4 @@ -package main +package command import ( "encoding/binary" diff --git a/cmd/bbolt/command_bench_test.go b/cmd/bbolt/command/command_bench_test.go similarity index 94% rename from cmd/bbolt/command_bench_test.go rename to cmd/bbolt/command/command_bench_test.go index dc023eeba..b54e61a11 100644 --- a/cmd/bbolt/command_bench_test.go +++ b/cmd/bbolt/command/command_bench_test.go @@ -1,4 +1,4 @@ -package main +package command_test import ( "bytes" @@ -8,6 +8,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + "go.etcd.io/bbolt/cmd/bbolt/command" ) type safeWriter struct { @@ -43,7 +45,7 @@ func TestBenchCommand_Run(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { // Run the command. 
- rootCmd := NewRootCommand() + rootCmd := command.NewRootCommand() outputWriter := newSafeWriter() rootCmd.SetOut(outputWriter) diff --git a/cmd/bbolt/command_buckets.go b/cmd/bbolt/command/command_buckets.go similarity index 98% rename from cmd/bbolt/command_buckets.go rename to cmd/bbolt/command/command_buckets.go index b0377edb9..f185fd3f1 100644 --- a/cmd/bbolt/command_buckets.go +++ b/cmd/bbolt/command/command_buckets.go @@ -1,4 +1,4 @@ -package main +package command import ( "fmt" diff --git a/cmd/bbolt/command_buckets_test.go b/cmd/bbolt/command/command_buckets_test.go similarity index 93% rename from cmd/bbolt/command_buckets_test.go rename to cmd/bbolt/command/command_buckets_test.go index 39ffed66f..afe13e26d 100644 --- a/cmd/bbolt/command_buckets_test.go +++ b/cmd/bbolt/command/command_buckets_test.go @@ -1,4 +1,4 @@ -package main_test +package command_test import ( "bytes" @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" - main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/cmd/bbolt/command" "go.etcd.io/bbolt/internal/btesting" ) @@ -49,7 +49,7 @@ func TestBucketsCommand_Run(t *testing.T) { defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) t.Log("Running buckets cmd") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() outputBuf := bytes.NewBufferString("") rootCmd.SetOut(outputBuf) diff --git a/cmd/bbolt/command_check.go b/cmd/bbolt/command/command_check.go similarity index 99% rename from cmd/bbolt/command_check.go rename to cmd/bbolt/command/command_check.go index cb6e3b47d..a5cdf0897 100644 --- a/cmd/bbolt/command_check.go +++ b/cmd/bbolt/command/command_check.go @@ -1,4 +1,4 @@ -package main +package command import ( "fmt" diff --git a/cmd/bbolt/command_check_test.go b/cmd/bbolt/command/command_check_test.go similarity index 93% rename from cmd/bbolt/command_check_test.go rename to cmd/bbolt/command/command_check_test.go index a2cdc6716..bf36d32bb 100644 --- 
a/cmd/bbolt/command_check_test.go +++ b/cmd/bbolt/command/command_check_test.go @@ -1,4 +1,4 @@ -package main_test +package command_test import ( "bytes" @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/require" - main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/cmd/bbolt/command" "go.etcd.io/bbolt/internal/btesting" "go.etcd.io/bbolt/internal/guts_cli" ) @@ -48,7 +48,7 @@ func TestCheckCommand_Run(t *testing.T) { defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) t.Log("Running check cmd") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() outputBuf := bytes.NewBufferString("") // capture output for assertion rootCmd.SetOut(outputBuf) diff --git a/cmd/bbolt/command_compact.go b/cmd/bbolt/command/command_compact.go similarity index 99% rename from cmd/bbolt/command_compact.go rename to cmd/bbolt/command/command_compact.go index 673e7a6f1..7202cd8c8 100644 --- a/cmd/bbolt/command_compact.go +++ b/cmd/bbolt/command/command_compact.go @@ -1,4 +1,4 @@ -package main +package command import ( "errors" diff --git a/cmd/bbolt/command_compact_test.go b/cmd/bbolt/command/command_compact_test.go similarity index 94% rename from cmd/bbolt/command_compact_test.go rename to cmd/bbolt/command/command_compact_test.go index 121c1c96d..88de69023 100644 --- a/cmd/bbolt/command_compact_test.go +++ b/cmd/bbolt/command/command_compact_test.go @@ -1,4 +1,4 @@ -package main_test +package command_test import ( crypto "crypto/rand" @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" - main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/cmd/bbolt/command" "go.etcd.io/bbolt/internal/btesting" ) @@ -78,7 +78,7 @@ func TestCompactCommand_Run(t *testing.T) { require.NoError(t, err) t.Log("Running compact cmd") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() rootCmd.SetArgs([]string{"compact", "-o", dstdb.Path(), db.Path()}) err = rootCmd.Execute() require.NoError(t, err) @@ -94,7 +94,7 @@ 
func TestCompactCommand_Run(t *testing.T) { func TestCompactCommand_NoArgs(t *testing.T) { expErr := errors.New("requires at least 1 arg(s), only received 0") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() rootCmd.SetArgs([]string{"compact"}) err := rootCmd.Execute() require.ErrorContains(t, err, expErr.Error()) diff --git a/cmd/bbolt/command_dump.go b/cmd/bbolt/command/command_dump.go similarity index 99% rename from cmd/bbolt/command_dump.go rename to cmd/bbolt/command/command_dump.go index 72826e66b..f0b0aeb58 100644 --- a/cmd/bbolt/command_dump.go +++ b/cmd/bbolt/command/command_dump.go @@ -1,4 +1,4 @@ -package main +package command import ( "bytes" diff --git a/cmd/bbolt/command_dump_test.go b/cmd/bbolt/command/command_dump_test.go similarity index 88% rename from cmd/bbolt/command_dump_test.go rename to cmd/bbolt/command/command_dump_test.go index 6b1820f82..6ead923ff 100644 --- a/cmd/bbolt/command_dump_test.go +++ b/cmd/bbolt/command/command_dump_test.go @@ -1,4 +1,4 @@ -package main_test +package command_test import ( "bytes" @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" - main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/cmd/bbolt/command" "go.etcd.io/bbolt/internal/btesting" ) @@ -21,7 +21,7 @@ func TestDumpCommand_Run(t *testing.T) { defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) t.Log("Running dump command") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() outputBuf := bytes.NewBufferString("") rootCmd.SetOut(outputBuf) rootCmd.SetArgs([]string{"dump", db.Path(), "0"}) @@ -37,7 +37,7 @@ func TestDumpCommand_Run(t *testing.T) { func TestDumpCommand_NoArgs(t *testing.T) { expErr := errors.New("requires at least 2 arg(s), only received 0") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() rootCmd.SetArgs([]string{"dump"}) err := rootCmd.Execute() require.ErrorContains(t, err, expErr.Error()) diff --git 
a/cmd/bbolt/command_get.go b/cmd/bbolt/command/command_get.go similarity index 99% rename from cmd/bbolt/command_get.go rename to cmd/bbolt/command/command_get.go index fa58de4a3..f9a20b165 100644 --- a/cmd/bbolt/command_get.go +++ b/cmd/bbolt/command/command_get.go @@ -1,4 +1,4 @@ -package main +package command import ( "fmt" diff --git a/cmd/bbolt/command_get_test.go b/cmd/bbolt/command/command_get_test.go similarity index 93% rename from cmd/bbolt/command_get_test.go rename to cmd/bbolt/command/command_get_test.go index fc6d268f6..46a2f5f12 100644 --- a/cmd/bbolt/command_get_test.go +++ b/cmd/bbolt/command/command_get_test.go @@ -1,4 +1,4 @@ -package main_test +package command_test import ( "bytes" @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" - main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/cmd/bbolt/command" "go.etcd.io/bbolt/internal/btesting" ) @@ -62,7 +62,7 @@ func TestGetCommand_Run(t *testing.T) { defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) t.Log("Running get command") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() outputBuf := bytes.NewBufferString("") rootCmd.SetOut(outputBuf) rootCmd.SetArgs([]string{"get", db.Path(), tc.testBucket, tc.testKey}) @@ -79,7 +79,7 @@ func TestGetCommand_Run(t *testing.T) { func TestGetCommand_NoArgs(t *testing.T) { expErr := errors.New("requires at least 3 arg(s), only received 0") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() rootCmd.SetArgs([]string{"get"}) err := rootCmd.Execute() require.ErrorContains(t, err, expErr.Error()) diff --git a/cmd/bbolt/command_info.go b/cmd/bbolt/command/command_info.go similarity index 97% rename from cmd/bbolt/command_info.go rename to cmd/bbolt/command/command_info.go index b7e3922ab..3ff4188d8 100644 --- a/cmd/bbolt/command_info.go +++ b/cmd/bbolt/command/command_info.go @@ -1,4 +1,4 @@ -package main +package command import ( "fmt" diff --git 
a/cmd/bbolt/command_info_test.go b/cmd/bbolt/command/command_info_test.go similarity index 86% rename from cmd/bbolt/command_info_test.go rename to cmd/bbolt/command/command_info_test.go index bd608043f..4192add85 100644 --- a/cmd/bbolt/command_info_test.go +++ b/cmd/bbolt/command/command_info_test.go @@ -1,4 +1,4 @@ -package main_test +package command_test import ( "bytes" @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" - main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/cmd/bbolt/command" "go.etcd.io/bbolt/internal/btesting" ) @@ -20,7 +20,7 @@ func TestInfoCommand_Run(t *testing.T) { defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) t.Log("Running info cmd") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() outputBuf := bytes.NewBufferString("") rootCmd.SetOut(outputBuf) @@ -35,7 +35,7 @@ func TestInfoCommand_Run(t *testing.T) { func TestInfoCommand_NoArgs(t *testing.T) { expErr := errors.New("accepts 1 arg(s), received 0") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() rootCmd.SetArgs([]string{"info"}) err := rootCmd.Execute() require.ErrorContains(t, err, expErr.Error()) diff --git a/cmd/bbolt/command_inspect.go b/cmd/bbolt/command/command_inspect.go similarity index 98% rename from cmd/bbolt/command_inspect.go rename to cmd/bbolt/command/command_inspect.go index 7f150835a..b1954141f 100644 --- a/cmd/bbolt/command_inspect.go +++ b/cmd/bbolt/command/command_inspect.go @@ -1,4 +1,4 @@ -package main +package command import ( "encoding/json" diff --git a/cmd/bbolt/command_inspect_test.go b/cmd/bbolt/command/command_inspect_test.go similarity index 82% rename from cmd/bbolt/command_inspect_test.go rename to cmd/bbolt/command/command_inspect_test.go index f1ec8de73..bb704daa5 100644 --- a/cmd/bbolt/command_inspect_test.go +++ b/cmd/bbolt/command/command_inspect_test.go @@ -1,4 +1,4 @@ -package main_test +package command_test import ( "testing" @@ -6,7 +6,7 @@ import ( 
"github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" - main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/cmd/bbolt/command" "go.etcd.io/bbolt/internal/btesting" ) @@ -18,7 +18,7 @@ func TestInspect(t *testing.T) { defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() rootCmd.SetArgs([]string{ "inspect", srcPath, }) diff --git a/cmd/bbolt/command_keys.go b/cmd/bbolt/command/command_keys.go similarity index 98% rename from cmd/bbolt/command_keys.go rename to cmd/bbolt/command/command_keys.go index 396f38042..fcb3b73ec 100644 --- a/cmd/bbolt/command_keys.go +++ b/cmd/bbolt/command/command_keys.go @@ -1,4 +1,4 @@ -package main +package command import ( "github.com/spf13/cobra" diff --git a/cmd/bbolt/command_keys_test.go b/cmd/bbolt/command/command_keys_test.go similarity index 94% rename from cmd/bbolt/command_keys_test.go rename to cmd/bbolt/command/command_keys_test.go index 3de2c25db..2a733b817 100644 --- a/cmd/bbolt/command_keys_test.go +++ b/cmd/bbolt/command/command_keys_test.go @@ -1,4 +1,4 @@ -package main_test +package command_test import ( "bytes" @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" - main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/cmd/bbolt/command" "go.etcd.io/bbolt/internal/btesting" ) @@ -70,7 +70,7 @@ func TestKeysCommand_Run(t *testing.T) { defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) t.Log("Running Keys cmd") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() outputBuf := bytes.NewBufferString("") rootCmd.SetOut(outputBuf) rootCmd.SetArgs([]string{"keys", db.Path(), tc.testBucket}) @@ -87,7 +87,7 @@ func TestKeysCommand_Run(t *testing.T) { func TestKeyCommand_NoArgs(t *testing.T) { expErr := errors.New("requires at least 2 arg(s), only received 0") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() rootCmd.SetArgs([]string{"keys"}) err := 
rootCmd.Execute() require.ErrorContains(t, err, expErr.Error()) diff --git a/cmd/bbolt/command_page.go b/cmd/bbolt/command/command_page.go similarity index 99% rename from cmd/bbolt/command_page.go rename to cmd/bbolt/command/command_page.go index 1b02cc317..678537e8e 100644 --- a/cmd/bbolt/command_page.go +++ b/cmd/bbolt/command/command_page.go @@ -1,4 +1,4 @@ -package main +package command import ( "fmt" diff --git a/cmd/bbolt/command_page_item.go b/cmd/bbolt/command/command_page_item.go similarity index 99% rename from cmd/bbolt/command_page_item.go rename to cmd/bbolt/command/command_page_item.go index ce0af2da9..78e277c00 100644 --- a/cmd/bbolt/command_page_item.go +++ b/cmd/bbolt/command/command_page_item.go @@ -1,4 +1,4 @@ -package main +package command import ( "errors" diff --git a/cmd/bbolt/command_page_item_test.go b/cmd/bbolt/command/command_page_item_test.go similarity index 95% rename from cmd/bbolt/command_page_item_test.go rename to cmd/bbolt/command/command_page_item_test.go index b4c583666..f692a2aa1 100644 --- a/cmd/bbolt/command_page_item_test.go +++ b/cmd/bbolt/command/command_page_item_test.go @@ -1,4 +1,4 @@ -package main_test +package command_test import ( "bytes" @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" - main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/cmd/bbolt/command" "go.etcd.io/bbolt/internal/btesting" "go.etcd.io/bbolt/internal/guts_cli" ) @@ -82,7 +82,7 @@ func TestPageItemCommand_Run(t *testing.T) { require.NotEqual(t, 0, leafPageId) t.Log("Running page-item command") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() outBuf := &bytes.Buffer{} rootCmd.SetOut(outBuf) rootCmd.SetArgs([]string{"page-item", db.Path(), fmt.Sprintf("%d", leafPageId), tc.itemId}) @@ -99,7 +99,7 @@ func TestPageItemCommand_Run(t *testing.T) { func TestPageItemCommand_NoArgs(t *testing.T) { expErr := errors.New("accepts 3 arg(s), received 0") - rootCmd := main.NewRootCommand() + 
rootCmd := command.NewRootCommand() rootCmd.SetArgs([]string{"page-item"}) err := rootCmd.Execute() require.ErrorContains(t, err, expErr.Error()) diff --git a/cmd/bbolt/command_page_test.go b/cmd/bbolt/command/command_page_test.go similarity index 90% rename from cmd/bbolt/command_page_test.go rename to cmd/bbolt/command/command_page_test.go index e900a0372..26dbae7bc 100644 --- a/cmd/bbolt/command_page_test.go +++ b/cmd/bbolt/command/command_page_test.go @@ -1,4 +1,4 @@ -package main_test +package command_test import ( "bytes" @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" - main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/cmd/bbolt/command" "go.etcd.io/bbolt/internal/btesting" ) @@ -33,7 +33,7 @@ func TestPageCommand_Run(t *testing.T) { "Checksum: 07516e114689fdee\n\n" t.Log("Running page command") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() outBuf := &bytes.Buffer{} rootCmd.SetOut(outBuf) rootCmd.SetArgs([]string{"page", db.Path(), "0"}) @@ -66,7 +66,7 @@ func TestPageCommand_ExclusiveArgs(t *testing.T) { name: "pageIds and flag", pageIds: "0", allFlag: "--all", - expErr: main.ErrInvalidPageArgs, + expErr: command.ErrInvalidPageArgs, }, } @@ -79,7 +79,7 @@ func TestPageCommand_ExclusiveArgs(t *testing.T) { defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) t.Log("Running page command") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() outBuf := &bytes.Buffer{} rootCmd.SetOut(outBuf) rootCmd.SetArgs([]string{"page", db.Path(), tc.pageIds, tc.allFlag}) @@ -92,7 +92,7 @@ func TestPageCommand_ExclusiveArgs(t *testing.T) { func TestPageCommand_NoArgs(t *testing.T) { expErr := errors.New("requires at least 1 arg(s), only received 0") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() rootCmd.SetArgs([]string{"page"}) err := rootCmd.Execute() require.ErrorContains(t, err, expErr.Error()) diff --git a/cmd/bbolt/command_pages.go 
b/cmd/bbolt/command/command_pages.go similarity index 99% rename from cmd/bbolt/command_pages.go rename to cmd/bbolt/command/command_pages.go index 5fd1ffa9d..e45e00675 100644 --- a/cmd/bbolt/command_pages.go +++ b/cmd/bbolt/command/command_pages.go @@ -1,4 +1,4 @@ -package main +package command import ( "fmt" diff --git a/cmd/bbolt/command_pages_test.go b/cmd/bbolt/command/command_pages_test.go similarity index 90% rename from cmd/bbolt/command_pages_test.go rename to cmd/bbolt/command/command_pages_test.go index c367f7180..0adaa004d 100644 --- a/cmd/bbolt/command_pages_test.go +++ b/cmd/bbolt/command/command_pages_test.go @@ -1,4 +1,4 @@ -package main_test +package command_test import ( "bytes" @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" - main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/cmd/bbolt/command" "go.etcd.io/bbolt/internal/btesting" ) @@ -39,7 +39,7 @@ func TestPagesCommand_Run(t *testing.T) { defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) t.Log("Running pages cmd") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() outputBuf := bytes.NewBufferString("") rootCmd.SetOut(outputBuf) @@ -54,7 +54,7 @@ func TestPagesCommand_Run(t *testing.T) { func TestPagesCommand_NoArgs(t *testing.T) { expErr := errors.New("accepts 1 arg(s), received 0") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() rootCmd.SetArgs([]string{"pages"}) err := rootCmd.Execute() require.ErrorContains(t, err, expErr.Error()) diff --git a/cmd/bbolt/command_root.go b/cmd/bbolt/command/command_root.go similarity index 97% rename from cmd/bbolt/command_root.go rename to cmd/bbolt/command/command_root.go index 23b1313dd..8313d77c3 100644 --- a/cmd/bbolt/command_root.go +++ b/cmd/bbolt/command/command_root.go @@ -1,4 +1,4 @@ -package main +package command import ( "github.com/spf13/cobra" diff --git a/cmd/bbolt/command_stats.go b/cmd/bbolt/command/command_stats.go similarity index 99% 
rename from cmd/bbolt/command_stats.go rename to cmd/bbolt/command/command_stats.go index d7c37205d..bff56121e 100644 --- a/cmd/bbolt/command_stats.go +++ b/cmd/bbolt/command/command_stats.go @@ -1,4 +1,4 @@ -package main +package command import ( "bytes" diff --git a/cmd/bbolt/command_stats_test.go b/cmd/bbolt/command/command_stats_test.go similarity index 96% rename from cmd/bbolt/command_stats_test.go rename to cmd/bbolt/command/command_stats_test.go index c0b4a613e..cc7fcf927 100644 --- a/cmd/bbolt/command_stats_test.go +++ b/cmd/bbolt/command/command_stats_test.go @@ -1,4 +1,4 @@ -package main_test +package command_test import ( "bytes" @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" - main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/cmd/bbolt/command" "go.etcd.io/bbolt/internal/btesting" ) @@ -48,7 +48,7 @@ func TestStatsCommand_Run_EmptyDatabase(t *testing.T) { "\tBytes used for inlined buckets: 0 (0%)\n" t.Log("Running stats cmd") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() outputBuf := bytes.NewBufferString("") rootCmd.SetOut(outputBuf) @@ -131,7 +131,7 @@ func TestStatsCommand_Run(t *testing.T) { "\tBytes used for inlined buckets: 236 (11%)\n" t.Log("Running stats cmd") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() outputBuf := bytes.NewBufferString("") rootCmd.SetOut(outputBuf) @@ -147,7 +147,7 @@ func TestStatsCommand_Run(t *testing.T) { func TestStatsCommand_NoArgs(t *testing.T) { expErr := errors.New("accepts between 1 and 2 arg(s), received 0") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() rootCmd.SetArgs([]string{"stats"}) err := rootCmd.Execute() require.ErrorContains(t, err, expErr.Error()) diff --git a/cmd/bbolt/command_surgery.go b/cmd/bbolt/command/command_surgery.go similarity index 99% rename from cmd/bbolt/command_surgery.go rename to cmd/bbolt/command/command_surgery.go index 107345e4a..57eef70ee 100644 --- 
a/cmd/bbolt/command_surgery.go +++ b/cmd/bbolt/command/command_surgery.go @@ -1,4 +1,4 @@ -package main +package command import ( "errors" diff --git a/cmd/bbolt/command_surgery_freelist.go b/cmd/bbolt/command/command_surgery_freelist.go similarity index 99% rename from cmd/bbolt/command_surgery_freelist.go rename to cmd/bbolt/command/command_surgery_freelist.go index 9b9da0b48..b57d88b9b 100644 --- a/cmd/bbolt/command_surgery_freelist.go +++ b/cmd/bbolt/command/command_surgery_freelist.go @@ -1,4 +1,4 @@ -package main +package command import ( "fmt" diff --git a/cmd/bbolt/command_surgery_freelist_test.go b/cmd/bbolt/command/command_surgery_freelist_test.go similarity index 92% rename from cmd/bbolt/command_surgery_freelist_test.go rename to cmd/bbolt/command/command_surgery_freelist_test.go index 87f274760..b4235b22d 100644 --- a/cmd/bbolt/command_surgery_freelist_test.go +++ b/cmd/bbolt/command/command_surgery_freelist_test.go @@ -1,4 +1,4 @@ -package main_test +package command_test import ( "path/filepath" @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" - main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/cmd/bbolt/command" "go.etcd.io/bbolt/internal/btesting" "go.etcd.io/bbolt/internal/common" ) @@ -20,7 +20,7 @@ func TestSurgery_Freelist_Abandon(t *testing.T) { defer requireDBNoChange(t, dbData(t, srcPath), srcPath) - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() output := filepath.Join(t.TempDir(), "db") rootCmd.SetArgs([]string{ "surgery", "freelist", "abandon", srcPath, @@ -49,7 +49,7 @@ func TestSurgery_Freelist_Rebuild(t *testing.T) { { name: "already has freelist", hasFreelist: true, - expectedError: main.ErrSurgeryFreelistAlreadyExist, + expectedError: command.ErrSurgeryFreelistAlreadyExist, }, } @@ -82,7 +82,7 @@ func TestSurgery_Freelist_Rebuild(t *testing.T) { } // Execute `surgery freelist rebuild` command - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() 
output := filepath.Join(t.TempDir(), "db") rootCmd.SetArgs([]string{ "surgery", "freelist", "rebuild", srcPath, diff --git a/cmd/bbolt/command_surgery_meta.go b/cmd/bbolt/command/command_surgery_meta.go similarity index 99% rename from cmd/bbolt/command_surgery_meta.go rename to cmd/bbolt/command/command_surgery_meta.go index 513c1fb2e..f75bbd6c3 100644 --- a/cmd/bbolt/command_surgery_meta.go +++ b/cmd/bbolt/command/command_surgery_meta.go @@ -1,4 +1,4 @@ -package main +package command import ( "fmt" diff --git a/cmd/bbolt/command_surgery_meta_test.go b/cmd/bbolt/command/command_surgery_meta_test.go similarity index 92% rename from cmd/bbolt/command_surgery_meta_test.go rename to cmd/bbolt/command/command_surgery_meta_test.go index 399cad18c..d35eab718 100644 --- a/cmd/bbolt/command_surgery_meta_test.go +++ b/cmd/bbolt/command/command_surgery_meta_test.go @@ -1,4 +1,4 @@ -package main_test +package command_test import ( "fmt" @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" - main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/cmd/bbolt/command" "go.etcd.io/bbolt/internal/btesting" "go.etcd.io/bbolt/internal/common" ) @@ -22,7 +22,7 @@ func TestSurgery_Meta_Validate(t *testing.T) { defer requireDBNoChange(t, dbData(t, db.Path()), db.Path()) // validate the meta pages - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() rootCmd.SetArgs([]string{ "surgery", "meta", "validate", srcPath, }) @@ -94,7 +94,7 @@ func TestSurgery_Meta_Update(t *testing.T) { fields = append(fields, fmt.Sprintf("pgid:%d", tc.pgid)) } - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() output := filepath.Join(t.TempDir(), "db") rootCmd.SetArgs([]string{ "surgery", "meta", "update", srcPath, @@ -105,7 +105,7 @@ func TestSurgery_Meta_Update(t *testing.T) { err := rootCmd.Execute() require.NoError(t, err) - m, _, err := main.ReadMetaPageAt(output, metaPageId, 4096) + m, _, err := command.ReadMetaPageAt(output, 
metaPageId, 4096) require.NoError(t, err) require.Equal(t, common.Magic, m.Magic()) diff --git a/cmd/bbolt/command_surgery_test.go b/cmd/bbolt/command/command_surgery_test.go similarity index 98% rename from cmd/bbolt/command_surgery_test.go rename to cmd/bbolt/command/command_surgery_test.go index dc8bdabb9..f0474ba95 100644 --- a/cmd/bbolt/command_surgery_test.go +++ b/cmd/bbolt/command/command_surgery_test.go @@ -1,4 +1,4 @@ -package main_test +package command_test import ( "fmt" @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" bolt "go.etcd.io/bbolt" - main "go.etcd.io/bbolt/cmd/bbolt" + "go.etcd.io/bbolt/cmd/bbolt/command" "go.etcd.io/bbolt/internal/btesting" "go.etcd.io/bbolt/internal/common" "go.etcd.io/bbolt/internal/guts_cli" @@ -43,7 +43,7 @@ func TestSurgery_RevertMetaPage(t *testing.T) { t.Logf("non active meta page id: %d", nonActiveMetaPageId) // revert the meta page - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() output := filepath.Join(t.TempDir(), "db") rootCmd.SetArgs([]string{ "surgery", "revert-meta-page", srcPath, @@ -78,7 +78,7 @@ func TestSurgery_CopyPage(t *testing.T) { // copy page 3 to page 2 t.Log("copy page 3 to page 2") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() output := filepath.Join(t.TempDir(), "dstdb") rootCmd.SetArgs([]string{ "surgery", "copy-page", srcPath, @@ -118,7 +118,7 @@ func TestSurgery_ClearPage(t *testing.T) { // clear page 3 t.Log("clear page 3") - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() output := filepath.Join(t.TempDir(), "dstdb") rootCmd.SetArgs([]string{ "surgery", "clear-page", srcPath, @@ -314,7 +314,7 @@ func testSurgeryClearPageElementsWithoutOverflow(t *testing.T, startIdx, endIdx } // clear elements [startIdx, endIdx) in the page - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() output := filepath.Join(t.TempDir(), "db") rootCmd.SetArgs([]string{ "surgery", "clear-page-elements", 
srcPath, @@ -523,7 +523,7 @@ func testSurgeryClearPageElementsWithOverflow(t *testing.T, startIdx, endIdx int t.Logf("The original element count: %d", elementCount) // clear elements [startIdx, endIdx) in the page - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() output := filepath.Join(t.TempDir(), "db") rootCmd.SetArgs([]string{ "surgery", "clear-page-elements", srcPath, @@ -627,7 +627,7 @@ func TestSurgeryRequiredFlags(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - rootCmd := main.NewRootCommand() + rootCmd := command.NewRootCommand() rootCmd.SetArgs(tc.args) err := rootCmd.Execute() require.ErrorContains(t, err, tc.expectedErrMsg) diff --git a/cmd/bbolt/command_version.go b/cmd/bbolt/command/command_version.go similarity index 97% rename from cmd/bbolt/command_version.go rename to cmd/bbolt/command/command_version.go index 39d756bd9..1df53f950 100644 --- a/cmd/bbolt/command_version.go +++ b/cmd/bbolt/command/command_version.go @@ -1,4 +1,4 @@ -package main +package command import ( "fmt" diff --git a/cmd/bbolt/errors.go b/cmd/bbolt/command/errors.go similarity index 98% rename from cmd/bbolt/errors.go rename to cmd/bbolt/command/errors.go index a62a69a39..1099cc333 100644 --- a/cmd/bbolt/errors.go +++ b/cmd/bbolt/command/errors.go @@ -1,4 +1,4 @@ -package main +package command import "errors" diff --git a/cmd/bbolt/main_test.go b/cmd/bbolt/command/main_test.go similarity index 99% rename from cmd/bbolt/main_test.go rename to cmd/bbolt/command/main_test.go index d75172535..529e1c5b1 100644 --- a/cmd/bbolt/main_test.go +++ b/cmd/bbolt/command/main_test.go @@ -1,4 +1,4 @@ -package main_test +package command_test import ( "bytes" diff --git a/cmd/bbolt/utils.go b/cmd/bbolt/command/utils.go similarity index 99% rename from cmd/bbolt/utils.go rename to cmd/bbolt/command/utils.go index a87328a3c..804a43d32 100644 --- a/cmd/bbolt/utils.go +++ b/cmd/bbolt/command/utils.go @@ -1,4 +1,4 @@ -package main 
+package command import ( "crypto/sha256" diff --git a/cmd/bbolt/utils_test.go b/cmd/bbolt/command/utils_test.go similarity index 98% rename from cmd/bbolt/utils_test.go rename to cmd/bbolt/command/utils_test.go index 7a7fc7c92..88da5a30a 100644 --- a/cmd/bbolt/utils_test.go +++ b/cmd/bbolt/command/utils_test.go @@ -1,4 +1,4 @@ -package main_test +package command_test import ( "os" diff --git a/cmd/bbolt/main.go b/cmd/bbolt/main.go index fe5d61bac..a77221591 100644 --- a/cmd/bbolt/main.go +++ b/cmd/bbolt/main.go @@ -3,10 +3,12 @@ package main import ( "fmt" "os" + + "go.etcd.io/bbolt/cmd/bbolt/command" ) func main() { - rootCmd := NewRootCommand() + rootCmd := command.NewRootCommand() if err := rootCmd.Execute(); err != nil { if rootCmd.SilenceErrors { fmt.Fprintln(os.Stderr, "Error:", err) From 946536d7edcb091fe992722f785432a67dbf1893 Mon Sep 17 00:00:00 2001 From: Benjamin Wang Date: Fri, 12 Sep 2025 10:45:48 +0100 Subject: [PATCH 428/439] Move all stuff in main_test.go into utitls_test.go Signed-off-by: Benjamin Wang --- cmd/bbolt/command/main_test.go | 140 -------------------------------- cmd/bbolt/command/utils_test.go | 131 ++++++++++++++++++++++++++++++ 2 files changed, 131 insertions(+), 140 deletions(-) delete mode 100644 cmd/bbolt/command/main_test.go diff --git a/cmd/bbolt/command/main_test.go b/cmd/bbolt/command/main_test.go deleted file mode 100644 index 529e1c5b1..000000000 --- a/cmd/bbolt/command/main_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package command_test - -import ( - "bytes" - crypto "crypto/rand" - "encoding/binary" - "encoding/hex" - "fmt" - "io" - "math/rand" - "os" - "strings" - "sync" - "testing" - - "github.com/stretchr/testify/require" - - bolt "go.etcd.io/bbolt" -) - -type ConcurrentBuffer struct { - m sync.Mutex - buf bytes.Buffer -} - -func (b *ConcurrentBuffer) Read(p []byte) (n int, err error) { - b.m.Lock() - defer b.m.Unlock() - - return b.buf.Read(p) -} - -func (b *ConcurrentBuffer) Write(p []byte) (n int, err error) { - 
b.m.Lock() - defer b.m.Unlock() - - return b.buf.Write(p) -} - -func (b *ConcurrentBuffer) String() string { - b.m.Lock() - defer b.m.Unlock() - - return b.buf.String() -} - -func fillBucket(b *bolt.Bucket, prefix []byte) error { - n := 10 + rand.Intn(50) - for i := 0; i < n; i++ { - v := make([]byte, 10*(1+rand.Intn(4))) - _, err := crypto.Read(v) - if err != nil { - return err - } - k := append(prefix, []byte(fmt.Sprintf("k%d", i))...) - if err := b.Put(k, v); err != nil { - return err - } - } - // limit depth of subbuckets - s := 2 + rand.Intn(4) - if len(prefix) > (2*s + 1) { - return nil - } - n = 1 + rand.Intn(3) - for i := 0; i < n; i++ { - k := append(prefix, []byte(fmt.Sprintf("b%d", i))...) - sb, err := b.CreateBucket(k) - if err != nil { - return err - } - if err := fillBucket(sb, append(k, '.')); err != nil { - return err - } - } - return nil -} - -func chkdb(path string) ([]byte, error) { - db, err := bolt.Open(path, 0600, &bolt.Options{ReadOnly: true}) - if err != nil { - return nil, err - } - defer db.Close() - var buf bytes.Buffer - err = db.View(func(tx *bolt.Tx) error { - return tx.ForEach(func(name []byte, b *bolt.Bucket) error { - return walkBucket(b, name, nil, &buf) - }) - }) - if err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func walkBucket(parent *bolt.Bucket, k []byte, v []byte, w io.Writer) error { - if _, err := fmt.Fprintf(w, "%d:%x=%x\n", parent.Sequence(), k, v); err != nil { - return err - } - - // not a bucket, exit. 
- if v != nil { - return nil - } - return parent.ForEach(func(k, v []byte) error { - if v == nil { - return walkBucket(parent.Bucket(k), k, nil, w) - } - return walkBucket(parent, k, v, w) - }) -} - -func dbData(t *testing.T, filePath string) []byte { - data, err := os.ReadFile(filePath) - require.NoError(t, err) - return data -} - -func requireDBNoChange(t *testing.T, oldData []byte, filePath string) { - newData, err := os.ReadFile(filePath) - require.NoError(t, err) - - noChange := bytes.Equal(oldData, newData) - require.True(t, noChange) -} - -func convertInt64IntoBytes(num int64) []byte { - buf := make([]byte, binary.MaxVarintLen64) - n := binary.PutVarint(buf, num) - return buf[:n] -} - -func convertInt64KeysIntoHexString(nums ...int64) string { - var res []string - for _, num := range nums { - res = append(res, hex.EncodeToString(convertInt64IntoBytes(num))) - } - return strings.Join(res, "\n") + "\n" // last newline char -} diff --git a/cmd/bbolt/command/utils_test.go b/cmd/bbolt/command/utils_test.go index 88da5a30a..a6470d46e 100644 --- a/cmd/bbolt/command/utils_test.go +++ b/cmd/bbolt/command/utils_test.go @@ -1,11 +1,21 @@ package command_test import ( + "bytes" + crypto "crypto/rand" + "encoding/binary" + "encoding/hex" + "fmt" + "io" + "math/rand" "os" + "strings" + "sync" "testing" "github.com/stretchr/testify/require" + bolt "go.etcd.io/bbolt" "go.etcd.io/bbolt/internal/common" "go.etcd.io/bbolt/internal/guts_cli" ) @@ -44,3 +54,124 @@ func readPage(t *testing.T, path string, pageId int, pageSize int) []byte { func pageDataWithoutPageId(buf []byte) []byte { return buf[8:] } + +type ConcurrentBuffer struct { + m sync.Mutex + buf bytes.Buffer +} + +func (b *ConcurrentBuffer) Read(p []byte) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + + return b.buf.Read(p) +} + +func (b *ConcurrentBuffer) Write(p []byte) (n int, err error) { + b.m.Lock() + defer b.m.Unlock() + + return b.buf.Write(p) +} + +func (b *ConcurrentBuffer) String() string { + 
b.m.Lock() + defer b.m.Unlock() + + return b.buf.String() +} + +func fillBucket(b *bolt.Bucket, prefix []byte) error { + n := 10 + rand.Intn(50) + for i := 0; i < n; i++ { + v := make([]byte, 10*(1+rand.Intn(4))) + _, err := crypto.Read(v) + if err != nil { + return err + } + k := append(prefix, []byte(fmt.Sprintf("k%d", i))...) + if err := b.Put(k, v); err != nil { + return err + } + } + // limit depth of subbuckets + s := 2 + rand.Intn(4) + if len(prefix) > (2*s + 1) { + return nil + } + n = 1 + rand.Intn(3) + for i := 0; i < n; i++ { + k := append(prefix, []byte(fmt.Sprintf("b%d", i))...) + sb, err := b.CreateBucket(k) + if err != nil { + return err + } + if err := fillBucket(sb, append(k, '.')); err != nil { + return err + } + } + return nil +} + +func chkdb(path string) ([]byte, error) { + db, err := bolt.Open(path, 0600, &bolt.Options{ReadOnly: true}) + if err != nil { + return nil, err + } + defer db.Close() + var buf bytes.Buffer + err = db.View(func(tx *bolt.Tx) error { + return tx.ForEach(func(name []byte, b *bolt.Bucket) error { + return walkBucket(b, name, nil, &buf) + }) + }) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func walkBucket(parent *bolt.Bucket, k []byte, v []byte, w io.Writer) error { + if _, err := fmt.Fprintf(w, "%d:%x=%x\n", parent.Sequence(), k, v); err != nil { + return err + } + + // not a bucket, exit. 
+ if v != nil { + return nil + } + return parent.ForEach(func(k, v []byte) error { + if v == nil { + return walkBucket(parent.Bucket(k), k, nil, w) + } + return walkBucket(parent, k, v, w) + }) +} + +func dbData(t *testing.T, filePath string) []byte { + data, err := os.ReadFile(filePath) + require.NoError(t, err) + return data +} + +func requireDBNoChange(t *testing.T, oldData []byte, filePath string) { + newData, err := os.ReadFile(filePath) + require.NoError(t, err) + + noChange := bytes.Equal(oldData, newData) + require.True(t, noChange) +} + +func convertInt64IntoBytes(num int64) []byte { + buf := make([]byte, binary.MaxVarintLen64) + n := binary.PutVarint(buf, num) + return buf[:n] +} + +func convertInt64KeysIntoHexString(nums ...int64) string { + var res []string + for _, num := range nums { + res = append(res, hex.EncodeToString(convertInt64IntoBytes(num))) + } + return strings.Join(res, "\n") + "\n" // last newline char +} From e0104c6c01acf7cd51ffe9798fa583fa0b3068fb Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Sat, 13 Sep 2025 11:20:51 +0200 Subject: [PATCH 429/439] chore: fix some typos Signed-off-by: Mustafa Elbehery --- cmd/bbolt/command/command_get.go | 2 +- cmd/bbolt/command/errors.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/bbolt/command/command_get.go b/cmd/bbolt/command/command_get.go index f9a20b165..865c79e82 100644 --- a/cmd/bbolt/command/command_get.go +++ b/cmd/bbolt/command/command_get.go @@ -53,7 +53,7 @@ func newGetCommand() *cobra.Command { return cmd } -// getFunc opens the BoltDB and retrieves the key value from the bucket path. +// getFunc opens the given bbolt db file and retrieves the key value from the bucket path. 
func getFunc(cmd *cobra.Command, path string, buckets []string, key []byte, opts getOptions) error { // check if the source DB path is valid if _, err := checkSourceDBPath(path); err != nil { diff --git a/cmd/bbolt/command/errors.go b/cmd/bbolt/command/errors.go index 1099cc333..244bda1b2 100644 --- a/cmd/bbolt/command/errors.go +++ b/cmd/bbolt/command/errors.go @@ -25,9 +25,9 @@ var ( // ErrPageIDRequired is returned when a required page id is not specified. ErrPageIDRequired = errors.New("page id required") - // ErrPathRequired is returned when the path to a Bolt database is not specified. + // ErrPathRequired is returned when the path to a bbolt database is not specified. ErrPathRequired = errors.New("path required") - // ErrSurgeryFreelistAlreadyExist is returned when boltdb database file already has a freelist. + // ErrSurgeryFreelistAlreadyExist is returned when a bbolt database file already has a freelist. ErrSurgeryFreelistAlreadyExist = errors.New("the file already has freelist, please consider to abandon the freelist to forcibly rebuild it") ) From 5bec2f2d7c168c5a01881192b5dfe217bf583569 Mon Sep 17 00:00:00 2001 From: Mustafa Elbehery Date: Sun, 14 Sep 2025 11:55:15 +0200 Subject: [PATCH 430/439] OWNERS: add Elbehery as reviewer Signed-off-by: Mustafa Elbehery --- OWNERS | 1 + cmd/bbolt/OWNERS | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/OWNERS b/OWNERS index 91f168a79..f8ab19db2 100644 --- a/OWNERS +++ b/OWNERS @@ -6,5 +6,6 @@ approvers: - ptabor # Piotr Tabor - spzala # Sahdev Zala reviewers: + - elbehery # Mustafa Elbehery - fuweid # Wei Fu - tjungblu # Thomas Jungblut diff --git a/cmd/bbolt/OWNERS b/cmd/bbolt/OWNERS index d4d42d4af..874811495 100644 --- a/cmd/bbolt/OWNERS +++ b/cmd/bbolt/OWNERS @@ -2,11 +2,11 @@ approvers: - ahrtr # Benjamin Wang + - elbehery # Mustafa Elbehery - fuweid # Wei Fu - serathius # Marek Siarkowicz - ptabor # Piotr Tabor - spzala # Sahdev Zala - tjungblu # Thomas Jungblut reviewers: - - elbehery # 
Mustafa Elbehery - ivanvc # Ivan Valdes From 1530ace14bcf5daaf48e39158d3d744822f6a8f5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20R=C3=BCger?= Date: Fri, 15 Aug 2025 17:34:20 +0200 Subject: [PATCH 431/439] chore: Use go tool MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Manuel Rüger --- Makefile | 20 ++++++-------------- go.mod | 10 ++++++++++ go.sum | 10 ++++++++++ scripts/compare_benchmarks.sh | 6 +++--- scripts/fix.sh | 2 +- 5 files changed, 30 insertions(+), 18 deletions(-) diff --git a/Makefile b/Makefile index eb4393dad..b1062cabe 100644 --- a/Makefile +++ b/Makefile @@ -33,7 +33,7 @@ fmt: @!(gofmt -l -s -d ${GOFILES} | grep '[a-z]') @echo "Verifying goimports, failures can be fixed with ./scripts/fix.sh" - @!(go run golang.org/x/tools/cmd/goimports@latest -l -d ${GOFILES} | grep '[a-z]') + @!(go tool golang.org/x/tools/cmd/goimports -l -d ${GOFILES} | grep '[a-z]') .PHONY: lint lint: @@ -71,16 +71,12 @@ clean: # Clean binaries rm -f ./bin/${BOLT_CMD} .PHONY: gofail-enable -gofail-enable: install-gofail - gofail enable . +gofail-enable: + go tool go.etcd.io/gofail enable . .PHONY: gofail-disable -gofail-disable: install-gofail - gofail disable . - -.PHONY: install-gofail -install-gofail: - go install go.etcd.io/gofail +gofail-disable: + go tool go.etcd.io/gofail disable . .PHONY: test-failpoint test-failpoint: @@ -99,10 +95,6 @@ test-robustness: gofail-enable build .PHONY: test-benchmark-compare # Runs benchmark tests on the current git ref and the given REF, and compares # the two. 
-test-benchmark-compare: install-benchstat +test-benchmark-compare: @git fetch ./scripts/compare_benchmarks.sh $(REF) - -.PHONY: install-benchstat -install-benchstat: - go install golang.org/x/perf/cmd/benchstat@latest diff --git a/go.mod b/go.mod index f6d0a1abc..255fd5c52 100644 --- a/go.mod +++ b/go.mod @@ -14,8 +14,18 @@ require ( ) require ( + github.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + golang.org/x/mod v0.27.0 // indirect + golang.org/x/perf v0.0.0-20250813145418-2f7363a06fe1 // indirect + golang.org/x/tools v0.36.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) + +tool ( + go.etcd.io/gofail + golang.org/x/perf/cmd/benchstat + golang.org/x/tools/cmd/goimports +) diff --git a/go.sum b/go.sum index 3484269ab..1056bcf8e 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,10 @@ +github.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794 h1:xlwdaKcTNVW4PtpQb8aKA4Pjy0CdJHEqvFbAnvR5m2g= +github.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794/go.mod h1:7e+I0LQFUI9AXWxOfsQROs9xPhoJtbsyWcjJqDd4KPY= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -15,10 +19,16 @@ github.com/stretchr/testify v1.11.1 
h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= go.etcd.io/gofail v0.2.0 h1:p19drv16FKK345a09a1iubchlw/vmRuksmRzgBIGjcA= go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= +golang.org/x/perf v0.0.0-20250813145418-2f7363a06fe1 h1:stGRioFgvBd3x8HoGVg9bb41lLTWLjBMFT/dMB7f4mQ= +golang.org/x/perf v0.0.0-20250813145418-2f7363a06fe1/go.mod h1:rjfRjhHXb3XNVh/9i5Jr2tXoTd0vOlZN5rzsM8cQE6k= golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/scripts/compare_benchmarks.sh b/scripts/compare_benchmarks.sh index af397cb44..6d76249f0 100755 --- a/scripts/compare_benchmarks.sh +++ b/scripts/compare_benchmarks.sh @@ -57,12 +57,12 @@ function main() { echo "BASE=${BASE_TO_COMPARE} HEAD=${REF_CURRENT}" if [[ "${BENCHSTAT_FORMAT}" == "csv" ]]; then - benchstat -format=csv -confidence="${BENCHSTAT_CONFIDENCE_LEVEL}" BASE="${RESULT_TO_COMPARE}" HEAD="${RESULT_CURRENT}" 2>/dev/null 1>"${BENCHSTAT_OUTPUT_FILE}" + go tool golang.org/x/perf/cmd/benchstat -format=csv -confidence="${BENCHSTAT_CONFIDENCE_LEVEL}" 
BASE="${RESULT_TO_COMPARE}" HEAD="${RESULT_CURRENT}" 2>/dev/null 1>"${BENCHSTAT_OUTPUT_FILE}" else if [[ -z "${BENCHSTAT_OUTPUT_FILE}" ]]; then - benchstat -confidence="${BENCHSTAT_CONFIDENCE_LEVEL}" BASE="${RESULT_TO_COMPARE}" HEAD="${RESULT_CURRENT}" + go tool golang.org/x/perf/cmd/benchstat -confidence="${BENCHSTAT_CONFIDENCE_LEVEL}" BASE="${RESULT_TO_COMPARE}" HEAD="${RESULT_CURRENT}" else - benchstat -confidence="${BENCHSTAT_CONFIDENCE_LEVEL}" BASE="${RESULT_TO_COMPARE}" HEAD="${RESULT_CURRENT}" 1>"${BENCHSTAT_OUTPUT_FILE}" + go tool golang.org/x/perf/cmd/benchstat -confidence="${BENCHSTAT_CONFIDENCE_LEVEL}" BASE="${RESULT_TO_COMPARE}" HEAD="${RESULT_CURRENT}" 1>"${BENCHSTAT_OUTPUT_FILE}" fi fi } diff --git a/scripts/fix.sh b/scripts/fix.sh index 6b933c988..13309dbaf 100755 --- a/scripts/fix.sh +++ b/scripts/fix.sh @@ -7,7 +7,7 @@ TESTGOFILES=$(${GO_CMD} list --f "{{with \$d:=.}}{{range .TestGoFiles}}{{\$d.Di XTESTGOFILES=$(${GO_CMD} list --f "{{with \$d:=.}}{{range .XTestGoFiles}}{{\$d.Dir}}/{{.}}{{\"\n\"}}{{end}}{{end}}" ./...) -echo "${GOFILES}" "${TESTGOFILES}" "${XTESTGOFILES}"| xargs -n 100 go run golang.org/x/tools/cmd/goimports@latest -w -local go.etcd.io +echo "${GOFILES}" "${TESTGOFILES}" "${XTESTGOFILES}"| xargs -n 100 go tool golang.org/x/tools/cmd/goimports -w -local go.etcd.io go fmt ./... go mod tidy From 8916debec7298c013201b8b874938e1c8864eb2c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Sep 2025 14:35:13 +0000 Subject: [PATCH 432/439] build(deps): Bump actions/github-script from 7.0.1 to 8.0.0 Bumps [actions/github-script](https://github.com/actions/github-script) from 7.0.1 to 8.0.0. 
- [Release notes](https://github.com/actions/github-script/releases) - [Commits](https://github.com/actions/github-script/compare/60a0d83039c74a4aee543508d2ffcb1c3799cdea...ed597411d8f924073f98dfc5c65a23a2325f34cd) --- updated-dependencies: - dependency-name: actions/github-script dependency-version: 8.0.0 dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/gh-workflow-approve.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/gh-workflow-approve.yaml b/.github/workflows/gh-workflow-approve.yaml index 4a51970b8..f81bbd6b8 100644 --- a/.github/workflows/gh-workflow-approve.yaml +++ b/.github/workflows/gh-workflow-approve.yaml @@ -19,7 +19,7 @@ jobs: actions: write steps: - name: Update PR - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 continue-on-error: true with: github-token: ${{ secrets.GITHUB_TOKEN }} From 47d8fb7e119e57c7ab8d4a459a965c96e0ac1ea6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Sep 2025 14:51:46 +0000 Subject: [PATCH 433/439] build(deps): Bump actions/stale from 9.1.0 to 10.0.0 Bumps [actions/stale](https://github.com/actions/stale) from 9.1.0 to 10.0.0. - [Release notes](https://github.com/actions/stale/releases) - [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/stale/compare/5bef64f19d7facfb25b37b414482c7164d639639...3a9db7e6a41a89f618792c92c0e97cc736e1b13f) --- updated-dependencies: - dependency-name: actions/stale dependency-version: 10.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/stale.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml index 1abb63ab8..492154c1a 100644 --- a/.github/workflows/stale.yaml +++ b/.github/workflows/stale.yaml @@ -11,7 +11,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@5bef64f19d7facfb25b37b414482c7164d639639 # v9.1.0 + - uses: actions/stale@3a9db7e6a41a89f618792c92c0e97cc736e1b13f # v10.0.0 with: days-before-stale: 90 days-before-close: 21 From 377442823629eb2e5603478fd7ea005821770388 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 15 Sep 2025 14:58:27 +0000 Subject: [PATCH 434/439] build(deps): Bump actions/setup-go from 5.5.0 to 6.0.0 Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5.5.0 to 6.0.0. - [Release notes](https://github.com/actions/setup-go/releases) - [Commits](https://github.com/actions/setup-go/compare/d35c59abb061a4a6fb18e82ac0862c26744d6ab5...44694675825211faa026b3c33043df3e48a5fa00) --- updated-dependencies: - dependency-name: actions/setup-go dependency-version: 6.0.0 dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] --- .github/workflows/benchmark-template.yaml | 2 +- .github/workflows/cross-arch-template.yaml | 2 +- .github/workflows/failpoint_test.yaml | 2 +- .github/workflows/robustness_template.yaml | 2 +- .github/workflows/tests-template.yml | 2 +- .github/workflows/tests_amd64.yaml | 2 +- .github/workflows/tests_arm64.yaml | 2 +- .github/workflows/tests_windows.yml | 4 ++-- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/workflows/benchmark-template.yaml b/.github/workflows/benchmark-template.yaml index 0eb9aaaa0..74f5dd3e5 100644 --- a/.github/workflows/benchmark-template.yaml +++ b/.github/workflows/benchmark-template.yaml @@ -26,7 +26,7 @@ jobs: fetch-depth: 0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: Run Benchmarks diff --git a/.github/workflows/cross-arch-template.yaml b/.github/workflows/cross-arch-template.yaml index 83229c072..4895b748b 100644 --- a/.github/workflows/cross-arch-template.yaml +++ b/.github/workflows/cross-arch-template.yaml @@ -21,7 +21,7 @@ jobs: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: Build for ${{ inputs.os }}/${{ matrix.arch }} diff --git a/.github/workflows/failpoint_test.yaml b/.github/workflows/failpoint_test.yaml index 21934adba..df457656c 100644 --- a/.github/workflows/failpoint_test.yaml +++ b/.github/workflows/failpoint_test.yaml @@ -12,7 +12,7 @@ jobs: - uses: 
actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: Run golangci-lint diff --git a/.github/workflows/robustness_template.yaml b/.github/workflows/robustness_template.yaml index 6e5a1e0a1..4dc2fb759 100644 --- a/.github/workflows/robustness_template.yaml +++ b/.github/workflows/robustness_template.yaml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: Run golangci-lint diff --git a/.github/workflows/tests-template.yml b/.github/workflows/tests-template.yml index c2d38c6f9..a3d3a67ed 100644 --- a/.github/workflows/tests-template.yml +++ b/.github/workflows/tests-template.yml @@ -24,7 +24,7 @@ jobs: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt diff --git a/.github/workflows/tests_amd64.yaml b/.github/workflows/tests_amd64.yaml index 76b627daa..35ff325df 100644 --- a/.github/workflows/tests_amd64.yaml +++ b/.github/workflows/tests_amd64.yaml @@ -20,7 +20,7 @@ jobs: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - id: goversion run: echo "goversion=$(cat 
.go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: Run golangci-lint diff --git a/.github/workflows/tests_arm64.yaml b/.github/workflows/tests_arm64.yaml index e10841924..f9f7a2cec 100644 --- a/.github/workflows/tests_arm64.yaml +++ b/.github/workflows/tests_arm64.yaml @@ -22,7 +22,7 @@ jobs: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: Run golangci-lint diff --git a/.github/workflows/tests_windows.yml b/.github/workflows/tests_windows.yml index 878a41ee0..0952dd494 100644 --- a/.github/workflows/tests_windows.yml +++ b/.github/workflows/tests_windows.yml @@ -22,7 +22,7 @@ jobs: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - run: make fmt @@ -51,7 +51,7 @@ jobs: - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - id: goversion run: echo "goversion=$(cat .go-version)" >> "$GITHUB_OUTPUT" - - uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0 + - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 with: go-version: ${{ steps.goversion.outputs.goversion }} - name: Run golangci-lint From f857d202933e759a75f33b318e0588c3e28e5ad6 Mon Sep 17 00:00:00 2001 From: hwdef 
Date: Sun, 28 Sep 2025 14:13:26 +0800 Subject: [PATCH 435/439] Bump Go to 1.24.7 Signed-off-by: hwdef --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index 7a429d68a..8407e2600 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.24.6 +1.24.7 diff --git a/go.mod b/go.mod index f6d0a1abc..4e74a43b9 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.24.0 -toolchain go1.24.6 +toolchain go1.24.7 require ( github.com/spf13/cobra v1.10.1 From 382d43e5c7dab372a5ce51b031b587178f5b5a33 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Oct 2025 14:26:24 +0000 Subject: [PATCH 436/439] build(deps): Bump actions/stale from 10.0.0 to 10.1.0 Bumps [actions/stale](https://github.com/actions/stale) from 10.0.0 to 10.1.0. - [Release notes](https://github.com/actions/stale/releases) - [Changelog](https://github.com/actions/stale/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/stale/compare/3a9db7e6a41a89f618792c92c0e97cc736e1b13f...5f858e3efba33a5ca4407a664cc011ad407f2008) --- updated-dependencies: - dependency-name: actions/stale dependency-version: 10.1.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- .github/workflows/stale.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml index 492154c1a..27a52dfd5 100644 --- a/.github/workflows/stale.yaml +++ b/.github/workflows/stale.yaml @@ -11,7 +11,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@3a9db7e6a41a89f618792c92c0e97cc736e1b13f # v10.0.0 + - uses: actions/stale@5f858e3efba33a5ca4407a664cc011ad407f2008 # v10.1.0 with: days-before-stale: 90 days-before-close: 21 From 77f9d48de706a4bee6621fea54fbba62b6570f90 Mon Sep 17 00:00:00 2001 From: hwdef Date: Wed, 8 Oct 2025 23:24:06 +0800 Subject: [PATCH 437/439] Bump go to 1.24.8 Signed-off-by: hwdef --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index 8407e2600..f666fce1a 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.24.7 +1.24.8 diff --git a/go.mod b/go.mod index 0e9646669..919ffe26b 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.24.0 -toolchain go1.24.7 +toolchain go1.24.8 require ( github.com/spf13/cobra v1.10.1 From e8582988db8d9da4285e527cc7e1a756367bb7a2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Oct 2025 14:29:01 +0000 Subject: [PATCH 438/439] build(deps): Bump golang.org/x/sys from 0.36.0 to 0.37.0 Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.36.0 to 0.37.0. - [Commits](https://github.com/golang/sys/compare/v0.36.0...v0.37.0) --- updated-dependencies: - dependency-name: golang.org/x/sys dependency-version: 0.37.0 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 919ffe26b..a1e9c1fea 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/stretchr/testify v1.11.1 go.etcd.io/gofail v0.2.0 golang.org/x/sync v0.17.0 - golang.org/x/sys v0.36.0 + golang.org/x/sys v0.37.0 ) require ( diff --git a/go.sum b/go.sum index 1056bcf8e..fd0b73776 100644 --- a/go.sum +++ b/go.sum @@ -25,8 +25,8 @@ golang.org/x/perf v0.0.0-20250813145418-2f7363a06fe1 h1:stGRioFgvBd3x8HoGVg9bb41 golang.org/x/perf v0.0.0-20250813145418-2f7363a06fe1/go.mod h1:rjfRjhHXb3XNVh/9i5Jr2tXoTd0vOlZN5rzsM8cQE6k= golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= From 2a0ae9d7f53d32bc47329d582238dc274d23c782 Mon Sep 17 00:00:00 2001 From: hwdef Date: Thu, 16 Oct 2025 11:02:15 +0800 Subject: [PATCH 439/439] Bump go to 1.24.9 Signed-off-by: hwdef --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index f666fce1a..eb716f77a 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.24.8 +1.24.9 diff --git a/go.mod b/go.mod index a1e9c1fea..3fe8d6a60 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.etcd.io/bbolt go 1.24.0 -toolchain go1.24.8 +toolchain go1.24.9 require ( 
github.com/spf13/cobra v1.10.1