Integrate BACKBEAT SDK and resolve KACHING license validation

Major integrations and fixes:
- Added BACKBEAT SDK integration for P2P operation timing
- Implemented beat-aware status tracking for distributed operations
- Added Docker secrets support for secure license management
- Resolved KACHING license validation via HTTPS/TLS
- Updated docker-compose configuration for clean stack deployment
- Disabled rollback policies to prevent deployment failures
- Added license credential storage (CHORUS-DEV-MULTI-001)

Technical improvements:
- BACKBEAT P2P operation tracking with phase management
- Enhanced configuration system with file-based secrets
- Improved error handling for license validation
- Clean separation of KACHING and CHORUS deployment stacks

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
anthonyrawlins
2025-09-06 07:56:26 +10:00
parent 543ab216f9
commit 9bdcbe0447
4730 changed files with 1480093 additions and 1916 deletions

12
vendor/go.etcd.io/bbolt/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,12 @@
*.prof
*.test
*.swp
/bin/
cover.out
cover-*.out
/.idea
*.iml
/bbolt
/cmd/bbolt/bbolt
.DS_Store

1
vendor/go.etcd.io/bbolt/.go-version generated vendored Normal file
View File

@@ -0,0 +1 @@
1.23.6

20
vendor/go.etcd.io/bbolt/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2013 Ben Johnson
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

108
vendor/go.etcd.io/bbolt/Makefile generated vendored Normal file
View File

@@ -0,0 +1,108 @@
# Makefile for bbolt (vendored from go.etcd.io/bbolt).

# Build metadata embedded into the binary via -ldflags.
# ':=' runs each git command exactly once at parse time; the original
# recursive '=' + backtick form re-invoked git on every expansion.
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
COMMIT := $(shell git rev-parse --short HEAD)
GOLDFLAGS := "-X main.branch $(BRANCH) -X main.commit $(COMMIT)"

# All Go source files. ':=' so `find` runs once, not on every reference
# (the fmt target alone expands ${GOFILES} twice).
GOFILES := $(shell find . -name \*.go)

# Race detector is off by default; enable with `make ENABLE_RACE=1 test`.
TESTFLAGS_RACE = -race=false
ifdef ENABLE_RACE
TESTFLAGS_RACE = -race=true
endif

# Optional -cpu flag, e.g. `make CPU=4 test`.
TESTFLAGS_CPU =
ifdef CPU
TESTFLAGS_CPU = -cpu=$(CPU)
endif

TESTFLAGS = $(TESTFLAGS_RACE) $(TESTFLAGS_CPU) $(EXTRA_TESTFLAGS)

# Per-run test timeout; override with `make TIMEOUT=10m test`.
TESTFLAGS_TIMEOUT = 30m
ifdef TIMEOUT
TESTFLAGS_TIMEOUT = $(TIMEOUT)
endif

TESTFLAGS_ENABLE_STRICT_MODE = false
ifdef ENABLE_STRICT_MODE
TESTFLAGS_ENABLE_STRICT_MODE = $(ENABLE_STRICT_MODE)
endif

# Export all Make variables into recipe environments so child `go test`
# processes observe TEST_ENABLE_STRICT_MODE and friends.
.EXPORT_ALL_VARIABLES:
TEST_ENABLE_STRICT_MODE = ${TESTFLAGS_ENABLE_STRICT_MODE}

.PHONY: fmt
fmt:
	@echo "Verifying gofmt, failures can be fixed with ./scripts/fix.sh"
	@!(gofmt -l -s -d ${GOFILES} | grep '[a-z]')
	@echo "Verifying goimports, failures can be fixed with ./scripts/fix.sh"
	@!(go run golang.org/x/tools/cmd/goimports@latest -l -d ${GOFILES} | grep '[a-z]')

.PHONY: lint
lint:
	golangci-lint run ./...

.PHONY: test
test:
	@echo "hashmap freelist test"
	BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT}
	BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./internal/...
	BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt
	@echo "array freelist test"
	BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT}
	BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./internal/...
	BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt

.PHONY: coverage
coverage:
	@echo "hashmap freelist test"
	TEST_FREELIST_TYPE=hashmap go test -v -timeout ${TESTFLAGS_TIMEOUT} \
		-coverprofile cover-freelist-hashmap.out -covermode atomic
	@echo "array freelist test"
	TEST_FREELIST_TYPE=array go test -v -timeout ${TESTFLAGS_TIMEOUT} \
		-coverprofile cover-freelist-array.out -covermode atomic

BOLT_CMD = bbolt

# 'build' is a command, not a file named "build": declare it phony.
# (Fix: it was the only command target without a .PHONY declaration, so a
# stray file called "build" would have silently disabled it.)
.PHONY: build
build:
	go build -o bin/${BOLT_CMD} ./cmd/${BOLT_CMD}

.PHONY: clean
clean: # Clean binaries
	rm -f ./bin/${BOLT_CMD}

.PHONY: gofail-enable
gofail-enable: install-gofail
	gofail enable .

.PHONY: gofail-disable
gofail-disable: install-gofail
	gofail disable .

.PHONY: install-gofail
install-gofail:
	go install go.etcd.io/gofail

.PHONY: test-failpoint
test-failpoint:
	@echo "[failpoint] hashmap freelist test"
	BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint
	@echo "[failpoint] array freelist test"
	BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint

.PHONY: test-robustness # Running robustness tests requires root permission for now
# TODO: Remove sudo once we fully migrate to the prow infrastructure
test-robustness: gofail-enable build
	sudo env PATH=$$PATH go test -v ${TESTFLAGS} ./tests/dmflakey -test.root
	sudo env PATH=$(PWD)/bin:$$PATH go test -v ${TESTFLAGS} ${ROBUSTNESS_TESTFLAGS} ./tests/robustness -test.root

.PHONY: test-benchmark-compare
# Runs benchmark tests on the current git ref and the given REF, and compares
# the two.
test-benchmark-compare: install-benchstat
	@git fetch
	./scripts/compare_benchmarks.sh $(REF)

.PHONY: install-benchstat
install-benchstat:
	go install golang.org/x/perf/cmd/benchstat@latest

10
vendor/go.etcd.io/bbolt/OWNERS generated vendored Normal file
View File

@@ -0,0 +1,10 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- ahrtr # Benjamin Wang <benjamin.ahrtr@gmail.com> <benjamin.wang@broadcom.com>
- serathius # Marek Siarkowicz <siarkowicz@google.com> <marek.siarkowicz@gmail.com>
- ptabor # Piotr Tabor <piotr.tabor@gmail.com>
- spzala # Sahdev Zala <spzala@us.ibm.com>
reviewers:
- fuweid # Wei Fu <fuweid89@gmail.com>
- tjungblu # Thomas Jungblut <tjungblu@redhat.com>

1032
vendor/go.etcd.io/bbolt/README.md generated vendored Normal file

File diff suppressed because it is too large Load Diff

7
vendor/go.etcd.io/bbolt/bolt_386.go generated vendored Normal file
View File

@@ -0,0 +1,7 @@
package bbolt

// NOTE(review): this file targets GOARCH=386 via the _386 filename suffix,
// so no explicit //go:build constraint is needed.

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

90
vendor/go.etcd.io/bbolt/bolt_aix.go generated vendored Normal file
View File

@@ -0,0 +1,90 @@
//go:build aix

package bbolt

import (
	"fmt"
	"syscall"
	"time"
	"unsafe"

	"golang.org/x/sys/unix"
)

// flock acquires an advisory lock on a file descriptor.
// A shared (read) lock is taken unless exclusive is true. With a non-zero
// timeout the non-blocking F_SETLK attempt is retried every
// flockRetryTimeout until the deadline, after which ErrTimeout is returned.
func flock(db *DB, exclusive bool, timeout time.Duration) error {
	var t time.Time
	if timeout != 0 {
		t = time.Now()
	}
	fd := db.file.Fd()
	var lockType int16
	if exclusive {
		lockType = syscall.F_WRLCK
	} else {
		lockType = syscall.F_RDLCK
	}
	for {
		// Attempt to obtain the lock (write or read, chosen above).
		lock := syscall.Flock_t{Type: lockType}
		err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
		if err == nil {
			return nil
		} else if err != syscall.EAGAIN {
			// Any error other than "lock held by someone else" is fatal.
			return err
		}
		// If we timed out then return an error.
		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
			return ErrTimeout
		}
		// Wait for a bit and try again.
		time.Sleep(flockRetryTimeout)
	}
}

// funlock releases an advisory lock on a file descriptor.
// Start/Len of zero with Whence 0 covers the whole file.
func funlock(db *DB) error {
	var lock syscall.Flock_t
	lock.Start = 0
	lock.Len = 0
	lock.Type = syscall.F_UNLCK
	lock.Whence = 0
	return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
}

// mmap memory maps a DB's data file.
// The mapping is read-only (PROT_READ) and shared; db.MmapFlags lets
// callers add platform flags.
func mmap(db *DB, sz int) error {
	// Map the data file to memory.
	b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
	if err != nil {
		return err
	}
	// Advise the kernel that the mmap is accessed randomly.
	if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
		return fmt.Errorf("madvise: %s", err)
	}
	// Save the original byte slice and convert to a byte array pointer.
	db.dataref = b
	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
	db.datasz = sz
	return nil
}

// munmap unmaps a DB's data file from memory.
// Safe to call when nothing is mapped; clears the cached mapping fields
// even if Munmap reports an error.
func munmap(db *DB) error {
	// Ignore the unmap if we have no mapped data.
	if db.dataref == nil {
		return nil
	}
	// Unmap using the original byte slice.
	err := unix.Munmap(db.dataref)
	db.dataref = nil
	db.data = nil
	db.datasz = 0
	return err
}

7
vendor/go.etcd.io/bbolt/bolt_amd64.go generated vendored Normal file
View File

@@ -0,0 +1,7 @@
package bbolt

// NOTE(review): targets GOARCH=amd64 via the _amd64 filename suffix.

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

90
vendor/go.etcd.io/bbolt/bolt_android.go generated vendored Normal file
View File

@@ -0,0 +1,90 @@
package bbolt

// NOTE(review): targets GOOS=android via the _android filename suffix.

import (
	"fmt"
	"syscall"
	"time"
	"unsafe"

	"golang.org/x/sys/unix"
)

// flock acquires an advisory lock on a file descriptor.
// A shared (read) lock is taken unless exclusive is true. With a non-zero
// timeout the non-blocking F_SETLK attempt is retried every
// flockRetryTimeout until the deadline, after which ErrTimeout is returned.
func flock(db *DB, exclusive bool, timeout time.Duration) error {
	var t time.Time
	if timeout != 0 {
		t = time.Now()
	}
	fd := db.file.Fd()
	var lockType int16
	if exclusive {
		lockType = syscall.F_WRLCK
	} else {
		lockType = syscall.F_RDLCK
	}
	for {
		// Attempt to obtain the lock (write or read, chosen above).
		lock := syscall.Flock_t{Type: lockType}
		err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
		if err == nil {
			return nil
		} else if err != syscall.EAGAIN {
			// Any error other than "lock held by someone else" is fatal.
			return err
		}
		// If we timed out then return an error.
		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
			return ErrTimeout
		}
		// Wait for a bit and try again.
		time.Sleep(flockRetryTimeout)
	}
}

// funlock releases an advisory lock on a file descriptor.
// Start/Len of zero with Whence 0 covers the whole file.
func funlock(db *DB) error {
	var lock syscall.Flock_t
	lock.Start = 0
	lock.Len = 0
	lock.Type = syscall.F_UNLCK
	lock.Whence = 0
	return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
}

// mmap memory maps a DB's data file.
// Unlike the aix/solaris variants, ENOSYS from madvise is tolerated here
// because some Android kernels lack the syscall but the mapping still works.
func mmap(db *DB, sz int) error {
	// Map the data file to memory.
	b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
	if err != nil {
		return err
	}
	// Advise the kernel that the mmap is accessed randomly.
	err = unix.Madvise(b, syscall.MADV_RANDOM)
	if err != nil && err != syscall.ENOSYS {
		// Ignore not implemented error in kernel because it still works.
		return fmt.Errorf("madvise: %s", err)
	}
	// Save the original byte slice and convert to a byte array pointer.
	db.dataref = b
	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
	db.datasz = sz
	return nil
}

// munmap unmaps a DB's data file from memory.
// Safe to call when nothing is mapped; clears the cached mapping fields
// even if Munmap reports an error.
func munmap(db *DB) error {
	// Ignore the unmap if we have no mapped data.
	if db.dataref == nil {
		return nil
	}
	// Unmap using the original byte slice.
	err := unix.Munmap(db.dataref)
	db.dataref = nil
	db.data = nil
	db.datasz = 0
	return err
}

7
vendor/go.etcd.io/bbolt/bolt_arm.go generated vendored Normal file
View File

@@ -0,0 +1,7 @@
package bbolt

// NOTE(review): targets GOARCH=arm (32-bit) via the _arm filename suffix.

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

9
vendor/go.etcd.io/bbolt/bolt_arm64.go generated vendored Normal file
View File

@@ -0,0 +1,9 @@
//go:build arm64

package bbolt

// 64-bit platform: mmap and allocation limits are far larger than on
// the 32-bit architectures.

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

10
vendor/go.etcd.io/bbolt/bolt_linux.go generated vendored Normal file
View File

@@ -0,0 +1,10 @@
package bbolt

import (
	"syscall"
)

// fdatasync flushes written data to a file descriptor.
// Linux exposes fdatasync(2), which flushes file data without forcing a
// full metadata sync, making it cheaper than file.Sync (fsync).
func fdatasync(db *DB) error {
	return syscall.Fdatasync(int(db.file.Fd()))
}

9
vendor/go.etcd.io/bbolt/bolt_loong64.go generated vendored Normal file
View File

@@ -0,0 +1,9 @@
//go:build loong64

package bbolt

// 64-bit platform limits (matches amd64/arm64 et al.).

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

9
vendor/go.etcd.io/bbolt/bolt_mips64x.go generated vendored Normal file
View File

@@ -0,0 +1,9 @@
//go:build mips64 || mips64le

package bbolt

// mips64 uses a smaller mmap ceiling than the other 64-bit architectures.

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x8000000000 // 512GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

9
vendor/go.etcd.io/bbolt/bolt_mipsx.go generated vendored Normal file
View File

@@ -0,0 +1,9 @@
//go:build mips || mipsle

package bbolt

// 32-bit mips: the tightest limits of all supported architectures.

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x40000000 // 1GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

16
vendor/go.etcd.io/bbolt/bolt_openbsd.go generated vendored Normal file
View File

@@ -0,0 +1,16 @@
package bbolt

import (
	"golang.org/x/sys/unix"
)

// msync flushes the memory-mapped region back to its backing file.
// MS_INVALIDATE invalidates other mappings of the same file so they pick
// up the flushed data.
func msync(db *DB) error {
	return unix.Msync(db.data[:db.datasz], unix.MS_INVALIDATE)
}

// fdatasync flushes written data to disk: via msync while the DB is
// memory-mapped, otherwise via a plain file Sync.
func fdatasync(db *DB) error {
	if db.data != nil {
		return msync(db)
	}
	return db.file.Sync()
}

9
vendor/go.etcd.io/bbolt/bolt_ppc.go generated vendored Normal file
View File

@@ -0,0 +1,9 @@
//go:build ppc

package bbolt

// 32-bit ppc limits (matches 386/arm).

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

9
vendor/go.etcd.io/bbolt/bolt_ppc64.go generated vendored Normal file
View File

@@ -0,0 +1,9 @@
//go:build ppc64

package bbolt

// 64-bit platform limits (matches amd64/arm64 et al.).

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

9
vendor/go.etcd.io/bbolt/bolt_ppc64le.go generated vendored Normal file
View File

@@ -0,0 +1,9 @@
//go:build ppc64le

package bbolt

// 64-bit platform limits (matches amd64/arm64 et al.).

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

9
vendor/go.etcd.io/bbolt/bolt_riscv64.go generated vendored Normal file
View File

@@ -0,0 +1,9 @@
//go:build riscv64

package bbolt

// 64-bit platform limits (matches amd64/arm64 et al.).

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

9
vendor/go.etcd.io/bbolt/bolt_s390x.go generated vendored Normal file
View File

@@ -0,0 +1,9 @@
//go:build s390x

package bbolt

// 64-bit platform limits (matches amd64/arm64 et al.).

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

88
vendor/go.etcd.io/bbolt/bolt_solaris.go generated vendored Normal file
View File

@@ -0,0 +1,88 @@
package bbolt

// NOTE(review): targets GOOS=solaris via the _solaris filename suffix.

import (
	"fmt"
	"syscall"
	"time"
	"unsafe"

	"golang.org/x/sys/unix"
)

// flock acquires an advisory lock on a file descriptor.
// A shared (read) lock is taken unless exclusive is true. With a non-zero
// timeout the non-blocking F_SETLK attempt is retried every
// flockRetryTimeout until the deadline, after which ErrTimeout is returned.
func flock(db *DB, exclusive bool, timeout time.Duration) error {
	var t time.Time
	if timeout != 0 {
		t = time.Now()
	}
	fd := db.file.Fd()
	var lockType int16
	if exclusive {
		lockType = syscall.F_WRLCK
	} else {
		lockType = syscall.F_RDLCK
	}
	for {
		// Attempt to obtain the lock (write or read, chosen above).
		lock := syscall.Flock_t{Type: lockType}
		err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
		if err == nil {
			return nil
		} else if err != syscall.EAGAIN {
			// Any error other than "lock held by someone else" is fatal.
			return err
		}
		// If we timed out then return an error.
		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
			return ErrTimeout
		}
		// Wait for a bit and try again.
		time.Sleep(flockRetryTimeout)
	}
}

// funlock releases an advisory lock on a file descriptor.
// Start/Len of zero with Whence 0 covers the whole file.
func funlock(db *DB) error {
	var lock syscall.Flock_t
	lock.Start = 0
	lock.Len = 0
	lock.Type = syscall.F_UNLCK
	lock.Whence = 0
	return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
}

// mmap memory maps a DB's data file.
// The mapping is read-only (PROT_READ) and shared; db.MmapFlags lets
// callers add platform flags.
func mmap(db *DB, sz int) error {
	// Map the data file to memory.
	b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
	if err != nil {
		return err
	}
	// Advise the kernel that the mmap is accessed randomly.
	if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
		return fmt.Errorf("madvise: %s", err)
	}
	// Save the original byte slice and convert to a byte array pointer.
	db.dataref = b
	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
	db.datasz = sz
	return nil
}

// munmap unmaps a DB's data file from memory.
// Safe to call when nothing is mapped; clears the cached mapping fields
// even if Munmap reports an error.
func munmap(db *DB) error {
	// Ignore the unmap if we have no mapped data.
	if db.dataref == nil {
		return nil
	}
	// Unmap using the original byte slice.
	err := unix.Munmap(db.dataref)
	db.dataref = nil
	db.data = nil
	db.datasz = 0
	return err
}

88
vendor/go.etcd.io/bbolt/bolt_unix.go generated vendored Normal file
View File

@@ -0,0 +1,88 @@
//go:build !windows && !plan9 && !solaris && !aix && !android

package bbolt

import (
	"fmt"
	"syscall"
	"time"
	"unsafe"

	"golang.org/x/sys/unix"

	"go.etcd.io/bbolt/errors"
)

// flock acquires an advisory lock on a file descriptor.
// Generic Unix path: uses flock(2) (whole-file BSD-style locks) rather
// than the fcntl(2) record locks used by the solaris/aix/android variants.
// A shared lock is taken unless exclusive is true; LOCK_NB keeps each
// attempt non-blocking so the timeout can be honoured by polling.
func flock(db *DB, exclusive bool, timeout time.Duration) error {
	var t time.Time
	if timeout != 0 {
		t = time.Now()
	}
	fd := db.file.Fd()
	flag := syscall.LOCK_NB
	if exclusive {
		flag |= syscall.LOCK_EX
	} else {
		flag |= syscall.LOCK_SH
	}
	for {
		// Attempt to obtain the lock (exclusive or shared, chosen above).
		err := syscall.Flock(int(fd), flag)
		if err == nil {
			return nil
		} else if err != syscall.EWOULDBLOCK {
			// Any error other than "lock held by someone else" is fatal.
			return err
		}
		// If we timed out then return an error.
		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
			return errors.ErrTimeout
		}
		// Wait for a bit and try again.
		time.Sleep(flockRetryTimeout)
	}
}

// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
	return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
}

// mmap memory maps a DB's data file.
// ENOSYS from madvise is tolerated because the mapping still works on
// kernels that lack the syscall.
func mmap(db *DB, sz int) error {
	// Map the data file to memory.
	b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
	if err != nil {
		return err
	}
	// Advise the kernel that the mmap is accessed randomly.
	err = unix.Madvise(b, syscall.MADV_RANDOM)
	if err != nil && err != syscall.ENOSYS {
		// Ignore not implemented error in kernel because it still works.
		return fmt.Errorf("madvise: %s", err)
	}
	// Save the original byte slice and convert to a byte array pointer.
	db.dataref = b
	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
	db.datasz = sz
	return nil
}

// munmap unmaps a DB's data file from memory.
// Safe to call when nothing is mapped; clears the cached mapping fields
// even if Munmap reports an error.
func munmap(db *DB) error {
	// Ignore the unmap if we have no mapped data.
	if db.dataref == nil {
		return nil
	}
	// Unmap using the original byte slice.
	err := unix.Munmap(db.dataref)
	db.dataref = nil
	db.data = nil
	db.datasz = 0
	return err
}

119
vendor/go.etcd.io/bbolt/bolt_windows.go generated vendored Normal file
View File

@@ -0,0 +1,119 @@
package bbolt

import (
	"fmt"
	"os"
	"syscall"
	"time"
	"unsafe"

	"golang.org/x/sys/windows"

	"go.etcd.io/bbolt/errors"
)

// fdatasync flushes written data to a file descriptor.
// Windows has no fdatasync equivalent, so a full file Sync is used.
func fdatasync(db *DB) error {
	return db.file.Sync()
}

// flock acquires an advisory lock on a file descriptor.
// Uses LockFileEx with LOCKFILE_FAIL_IMMEDIATELY so each attempt is
// non-blocking; with a non-zero timeout the attempt is retried every
// flockRetryTimeout until the deadline, then ErrTimeout is returned.
func flock(db *DB, exclusive bool, timeout time.Duration) error {
	var t time.Time
	if timeout != 0 {
		t = time.Now()
	}
	var flags uint32 = windows.LOCKFILE_FAIL_IMMEDIATELY
	if exclusive {
		flags |= windows.LOCKFILE_EXCLUSIVE_LOCK
	}
	for {
		// Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range
		// -1..0 as the lock on the database file.
		var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
		err := windows.LockFileEx(windows.Handle(db.file.Fd()), flags, 0, 1, 0, &windows.Overlapped{
			Offset:     m1,
			OffsetHigh: m1,
		})
		if err == nil {
			return nil
		} else if err != windows.ERROR_LOCK_VIOLATION {
			// Any error other than "lock held by someone else" is fatal.
			return err
		}
		// If we timed out then return an error.
		if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
			return errors.ErrTimeout
		}
		// Wait for a bit and try again.
		time.Sleep(flockRetryTimeout)
	}
}

// funlock releases an advisory lock on a file descriptor.
// Must unlock the same -1..0 byte range that flock locked.
func funlock(db *DB) error {
	var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
	return windows.UnlockFileEx(windows.Handle(db.file.Fd()), 0, 1, 0, &windows.Overlapped{
		Offset:     m1,
		OffsetHigh: m1,
	})
}

// mmap memory maps a DB's data file.
// Based on: https://github.com/edsrzf/mmap-go
func mmap(db *DB, sz int) error {
	var sizelo, sizehi uint32
	if !db.readOnly {
		// Truncate the database to the size of the mmap.
		if err := db.file.Truncate(int64(sz)); err != nil {
			return fmt.Errorf("truncate: %s", err)
		}
		sizehi = uint32(sz >> 32)
		sizelo = uint32(sz)
	}
	// Open a file mapping handle.
	h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil)
	if h == 0 {
		return os.NewSyscallError("CreateFileMapping", errno)
	}
	// Create the memory map.
	addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, 0)
	if addr == 0 {
		// Do our best and report error returned from MapViewOfFile.
		_ = syscall.CloseHandle(h)
		return os.NewSyscallError("MapViewOfFile", errno)
	}
	// Close mapping handle; the view itself stays valid until unmapped.
	if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
		return os.NewSyscallError("CloseHandle", err)
	}
	// Convert to a byte array.
	db.data = (*[maxMapSize]byte)(unsafe.Pointer(addr))
	db.datasz = sz
	return nil
}

// munmap unmaps a pointer from a file.
// Based on: https://github.com/edsrzf/mmap-go
func munmap(db *DB) error {
	if db.data == nil {
		return nil
	}
	addr := (uintptr)(unsafe.Pointer(&db.data[0]))
	var err1 error
	if err := syscall.UnmapViewOfFile(addr); err != nil {
		err1 = os.NewSyscallError("UnmapViewOfFile", err)
	}
	db.data = nil
	db.datasz = 0
	return err1
}

8
vendor/go.etcd.io/bbolt/boltsync_unix.go generated vendored Normal file
View File

@@ -0,0 +1,8 @@
//go:build !windows && !plan9 && !linux && !openbsd

package bbolt

// fdatasync flushes written data to a file descriptor.
// Fallback for platforms without a native fdatasync/msync path: perform
// a full file Sync.
func fdatasync(db *DB) error {
	return db.file.Sync()
}

1005
vendor/go.etcd.io/bbolt/bucket.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

119
vendor/go.etcd.io/bbolt/compact.go generated vendored Normal file
View File

@@ -0,0 +1,119 @@
package bbolt

// Compact will create a copy of the source DB and in the destination DB. This may
// reclaim space that the source database no longer has use for. txMaxSize can be
// used to limit the transactions size of this process and may trigger intermittent
// commits. A value of zero will ignore transaction sizes.
// TODO: merge with: https://github.com/etcd-io/etcd/blob/b7f0f52a16dbf83f18ca1d803f7892d750366a94/mvcc/backend/backend.go#L349
func Compact(dst, src *DB, txMaxSize int64) error {
	// commit regularly, or we'll run out of memory for large datasets if using one transaction.
	var size int64
	tx, err := dst.Begin(true)
	if err != nil {
		return err
	}
	// Best-effort rollback of any still-open transaction on exit.
	// NOTE(review): the assignment to the local 'err' here happens after the
	// return value has been copied (it is not a named result), so the
	// rollback error is effectively discarded — confirm against upstream
	// intent before changing.
	defer func() {
		if tempErr := tx.Rollback(); tempErr != nil {
			err = tempErr
		}
	}()
	if err := walk(src, func(keys [][]byte, k, v []byte, seq uint64) error {
		// On each key/value, check if we have exceeded tx size.
		sz := int64(len(k) + len(v))
		if size+sz > txMaxSize && txMaxSize != 0 {
			// Commit previous transaction.
			if err := tx.Commit(); err != nil {
				return err
			}
			// Start new transaction.
			tx, err = dst.Begin(true)
			if err != nil {
				return err
			}
			size = 0
		}
		size += sz
		// Create bucket on the root transaction if this is the first level.
		nk := len(keys)
		if nk == 0 {
			bkt, err := tx.CreateBucket(k)
			if err != nil {
				return err
			}
			// Preserve the source bucket's sequence counter.
			if err := bkt.SetSequence(seq); err != nil {
				return err
			}
			return nil
		}
		// Create buckets on subsequent levels, if necessary.
		b := tx.Bucket(keys[0])
		if nk > 1 {
			for _, k := range keys[1:] {
				b = b.Bucket(k)
			}
		}
		// Fill the entire page for best compaction.
		b.FillPercent = 1.0
		// If there is no value then this is a bucket call.
		if v == nil {
			bkt, err := b.CreateBucket(k)
			if err != nil {
				return err
			}
			// Preserve the source bucket's sequence counter.
			if err := bkt.SetSequence(seq); err != nil {
				return err
			}
			return nil
		}
		// Otherwise treat it as a key/value pair.
		return b.Put(k, v)
	}); err != nil {
		return err
	}
	err = tx.Commit()
	return err
}

// walkFunc is the type of the function called for keys (buckets and "normal"
// values) discovered by Walk. keys is the list of keys to descend to the bucket
// owning the discovered key/value pair k/v.
type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error

// walk walks recursively the bolt database db, calling walkFn for each key it finds.
// Runs inside a single read-only View transaction over every top-level bucket.
func walk(db *DB, walkFn walkFunc) error {
	return db.View(func(tx *Tx) error {
		return tx.ForEach(func(name []byte, b *Bucket) error {
			return walkBucket(b, nil, name, nil, b.Sequence(), walkFn)
		})
	})
}

// walkBucket recursively visits k/v (a key under bucket b, reached via
// keypath) and, when v is nil (i.e. k names a nested bucket), descends
// into that bucket.
func walkBucket(b *Bucket, keypath [][]byte, k, v []byte, seq uint64, fn walkFunc) error {
	// Execute callback.
	if err := fn(keypath, k, v, seq); err != nil {
		return err
	}
	// If this is not a bucket then stop.
	if v != nil {
		return nil
	}
	// Iterate over each child key/value.
	keypath = append(keypath, k)
	return b.ForEach(func(k, v []byte) error {
		if v == nil {
			bkt := b.Bucket(k)
			return walkBucket(bkt, keypath, k, nil, bkt.Sequence(), fn)
		}
		return walkBucket(b, keypath, k, v, b.Sequence(), fn)
	})
}

432
vendor/go.etcd.io/bbolt/cursor.go generated vendored Normal file
View File

@@ -0,0 +1,432 @@
package bbolt
import (
"bytes"
"fmt"
"sort"
"go.etcd.io/bbolt/errors"
"go.etcd.io/bbolt/internal/common"
)
// Cursor represents an iterator that can traverse over all key/value pairs in a bucket
// in lexicographical order.
// Cursors see nested buckets with value == nil.
// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
//
// Keys and values returned from the cursor are only valid for the life of the transaction.
//
// Changing data while traversing with a cursor may cause it to be invalidated
// and return unexpected keys and/or values. You must reposition your cursor
// after mutating data.
type Cursor struct {
bucket *Bucket
stack []elemRef
}
// Bucket returns the bucket that this cursor was created from.
func (c *Cursor) Bucket() *Bucket {
return c.bucket
}
// First moves the cursor to the first item in the bucket and returns its key and value.
// If the bucket is empty then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) First() (key []byte, value []byte) {
common.Assert(c.bucket.tx.db != nil, "tx closed")
k, v, flags := c.first()
if (flags & uint32(common.BucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
func (c *Cursor) first() (key []byte, value []byte, flags uint32) {
c.stack = c.stack[:0]
p, n := c.bucket.pageNode(c.bucket.RootPage())
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
c.goToFirstElementOnTheStack()
// If we land on an empty page then move to the next value.
// https://github.com/boltdb/bolt/issues/450
if c.stack[len(c.stack)-1].count() == 0 {
c.next()
}
k, v, flags := c.keyValue()
if (flags & uint32(common.BucketLeafFlag)) != 0 {
return k, nil, flags
}
return k, v, flags
}
// Last moves the cursor to the last item in the bucket and returns its key and value.
// If the bucket is empty then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Last() (key []byte, value []byte) {
common.Assert(c.bucket.tx.db != nil, "tx closed")
c.stack = c.stack[:0]
p, n := c.bucket.pageNode(c.bucket.RootPage())
ref := elemRef{page: p, node: n}
ref.index = ref.count() - 1
c.stack = append(c.stack, ref)
c.last()
// If this is an empty page (calling Delete may result in empty pages)
// we call prev to find the last page that is not empty
for len(c.stack) > 1 && c.stack[len(c.stack)-1].count() == 0 {
c.prev()
}
if len(c.stack) == 0 {
return nil, nil
}
k, v, flags := c.keyValue()
if (flags & uint32(common.BucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Next moves the cursor to the next item in the bucket and returns its key and value.
// If the cursor is at the end of the bucket then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Next() (key []byte, value []byte) {
common.Assert(c.bucket.tx.db != nil, "tx closed")
k, v, flags := c.next()
if (flags & uint32(common.BucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Prev moves the cursor to the previous item in the bucket and returns its key and value.
// If the cursor is at the beginning of the bucket then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Prev() (key []byte, value []byte) {
common.Assert(c.bucket.tx.db != nil, "tx closed")
k, v, flags := c.prev()
if (flags & uint32(common.BucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Seek moves the cursor to a given key using a b-tree search and returns it.
// If the key does not exist then the next key is used. If no keys
// follow, a nil key is returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
common.Assert(c.bucket.tx.db != nil, "tx closed")
k, v, flags := c.seek(seek)
// If we ended up after the last element of a page then move to the next one.
if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
k, v, flags = c.next()
}
if k == nil {
return nil, nil
} else if (flags & uint32(common.BucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Delete removes the current key/value under the cursor from the bucket.
// Delete fails if current key/value is a bucket or if the transaction is not writable.
func (c *Cursor) Delete() error {
if c.bucket.tx.db == nil {
return errors.ErrTxClosed
} else if !c.bucket.Writable() {
return errors.ErrTxNotWritable
}
key, _, flags := c.keyValue()
// Return an error if current value is a bucket.
if (flags & common.BucketLeafFlag) != 0 {
return errors.ErrIncompatibleValue
}
c.node().del(key)
return nil
}
// seek moves the cursor to a given key and returns it.
// If the key does not exist then the next key is used.
func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
// Start from root page/node and traverse to correct page.
c.stack = c.stack[:0]
c.search(seek, c.bucket.RootPage())
// If this is a bucket then return a nil value.
return c.keyValue()
}
// first moves the cursor to the first leaf element under the last page in the stack.
func (c *Cursor) goToFirstElementOnTheStack() {
for {
// Exit when we hit a leaf page.
var ref = &c.stack[len(c.stack)-1]
if ref.isLeaf() {
break
}
// Keep adding pages pointing to the first element to the stack.
var pgId common.Pgid
if ref.node != nil {
pgId = ref.node.inodes[ref.index].Pgid()
} else {
pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid()
}
p, n := c.bucket.pageNode(pgId)
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
}
}
// last moves the cursor to the last leaf element under the last page in the stack.
func (c *Cursor) last() {
for {
// Exit when we hit a leaf page.
ref := &c.stack[len(c.stack)-1]
if ref.isLeaf() {
break
}
// Keep adding pages pointing to the last element in the stack.
var pgId common.Pgid
if ref.node != nil {
pgId = ref.node.inodes[ref.index].Pgid()
} else {
pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid()
}
p, n := c.bucket.pageNode(pgId)
var nextRef = elemRef{page: p, node: n}
nextRef.index = nextRef.count() - 1
c.stack = append(c.stack, nextRef)
}
}
// next moves to the next leaf element and returns the key and value.
// If the cursor is at the last leaf element then it stays there and returns nil.
func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
	for {
		// Attempt to move over one element until we're successful.
		// Move up the stack as we hit the end of each page in our stack.
		var i int
		for i = len(c.stack) - 1; i >= 0; i-- {
			elem := &c.stack[i]
			if elem.index < elem.count()-1 {
				elem.index++
				break
			}
		}

		// If we've hit the root page then stop and return. This will leave the
		// cursor on the last element of the last page.
		if i == -1 {
			return nil, nil, 0
		}

		// Otherwise start from where we left off in the stack and find the
		// first element of the first leaf page.
		c.stack = c.stack[:i+1]
		c.goToFirstElementOnTheStack()

		// If this is an empty page then restart and move back up the stack.
		// https://github.com/boltdb/bolt/issues/450
		if c.stack[len(c.stack)-1].count() == 0 {
			continue
		}

		return c.keyValue()
	}
}
// prev moves the cursor to the previous item in the bucket and returns its key and value.
// If the cursor is at the beginning of the bucket then a nil key and value are returned.
func (c *Cursor) prev() (key []byte, value []byte, flags uint32) {
	// Attempt to move back one element until we're successful.
	// Move up the stack as we hit the beginning of each page in our stack.
	for i := len(c.stack) - 1; i >= 0; i-- {
		elem := &c.stack[i]
		if elem.index > 0 {
			elem.index--
			break
		}
		// If we've hit the beginning, we should stop moving the cursor,
		// and stay at the first element, so that users can continue to
		// iterate over the elements in reverse direction by calling `Next`.
		// We should return nil in such case.
		// Refer to https://github.com/etcd-io/bbolt/issues/733
		if len(c.stack) == 1 {
			c.first()
			return nil, nil, 0
		}
		c.stack = c.stack[:i]
	}

	// If we've hit the end then return nil.
	if len(c.stack) == 0 {
		return nil, nil, 0
	}

	// Move down the stack to find the last element of the last leaf under this branch.
	c.last()
	return c.keyValue()
}
// search recursively performs a binary search against a given page/node until it finds a given key.
// Each level visited is pushed onto c.stack so the cursor can later walk
// forward/backward from the located position.
func (c *Cursor) search(key []byte, pgId common.Pgid) {
	p, n := c.bucket.pageNode(pgId)
	if p != nil && !p.IsBranchPage() && !p.IsLeafPage() {
		panic(fmt.Sprintf("invalid page type: %d: %x", p.Id(), p.Flags()))
	}
	e := elemRef{page: p, node: n}
	c.stack = append(c.stack, e)

	// If we're on a leaf page/node then find the specific node.
	if e.isLeaf() {
		c.nsearch(key)
		return
	}

	// Branch level: prefer the in-memory node when present.
	if n != nil {
		c.searchNode(key, n)
		return
	}
	c.searchPage(key, p)
}
// searchNode binary-searches an in-memory branch node for key, records the
// chosen child index on the stack top, and recurses into that child.
func (c *Cursor) searchNode(key []byte, n *node) {
	var exact bool
	index := sort.Search(len(n.inodes), func(i int) bool {
		// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
		// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
		ret := bytes.Compare(n.inodes[i].Key(), key)
		if ret == 0 {
			exact = true
		}
		return ret != -1
	})
	// On a non-exact match sort.Search returns the first key *greater* than
	// the target, so step back one to the child that covers the key range.
	if !exact && index > 0 {
		index--
	}
	c.stack[len(c.stack)-1].index = index

	// Recursively search to the next page.
	c.search(key, n.inodes[index].Pgid())
}
// searchPage binary-searches an on-disk branch page for key, records the
// chosen child index on the stack top, and recurses into that child.
// Mirrors searchNode but reads elements directly from the mmap'd page.
func (c *Cursor) searchPage(key []byte, p *common.Page) {
	// Binary search for the correct range.
	inodes := p.BranchPageElements()

	var exact bool
	index := sort.Search(int(p.Count()), func(i int) bool {
		// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
		// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
		ret := bytes.Compare(inodes[i].Key(), key)
		if ret == 0 {
			exact = true
		}
		return ret != -1
	})
	// Step back to the child whose key range covers the target (see searchNode).
	if !exact && index > 0 {
		index--
	}
	c.stack[len(c.stack)-1].index = index

	// Recursively search to the next page.
	c.search(key, inodes[index].Pgid())
}
// nsearch searches the leaf node on the top of the stack for a key and stores
// the resulting index (the position of the key, or of the next greater key if
// absent) back into that stack entry.
func (c *Cursor) nsearch(key []byte) {
	e := &c.stack[len(c.stack)-1]
	p, n := e.page, e.node

	// If we have a node then search its inodes.
	if n != nil {
		index := sort.Search(len(n.inodes), func(i int) bool {
			return bytes.Compare(n.inodes[i].Key(), key) != -1
		})
		e.index = index
		return
	}

	// If we have a page then search its leaf elements.
	inodes := p.LeafPageElements()
	index := sort.Search(int(p.Count()), func(i int) bool {
		return bytes.Compare(inodes[i].Key(), key) != -1
	})
	e.index = index
}
// keyValue returns the key, value, and leaf flags of the current element, or
// (nil, nil, 0) when the cursor points past the end of the page/node.
func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
	ref := &c.stack[len(c.stack)-1]

	// If the cursor is pointing to the end of page/node then return nil.
	if ref.count() == 0 || ref.index >= ref.count() {
		return nil, nil, 0
	}

	// Retrieve value from node.
	if ref.node != nil {
		inode := &ref.node.inodes[ref.index]
		return inode.Key(), inode.Value(), inode.Flags()
	}

	// Or retrieve value from page.
	elem := ref.page.LeafPageElement(uint16(ref.index))
	return elem.Key(), elem.Value(), elem.Flags()
}
// node returns the in-memory node that the cursor is currently positioned on,
// materializing nodes down the stack from the root as needed (used by write
// operations such as Delete, which must mutate nodes rather than mmap'd pages).
func (c *Cursor) node() *node {
	common.Assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")

	// If the top of the stack is a leaf node then just return it.
	if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
		return ref.node
	}

	// Start from root and traverse down the hierarchy.
	var n = c.stack[0].node
	if n == nil {
		n = c.bucket.node(c.stack[0].page.Id(), nil)
	}
	// Walk every branch level recorded on the stack (all but the last entry),
	// instantiating each child node along the way.
	for _, ref := range c.stack[:len(c.stack)-1] {
		common.Assert(!n.isLeaf, "expected branch node")
		n = n.childAt(ref.index)
	}
	common.Assert(n.isLeaf, "expected leaf node")
	return n
}
// elemRef is a lightweight reference to one element position within either an
// in-memory node or an mmap'd page; at most one of page/node is consulted.
type elemRef struct {
	page  *common.Page
	node  *node
	index int
}

// isLeaf reports whether the referenced page/node is a leaf.
func (r *elemRef) isLeaf() bool {
	if r.node == nil {
		return r.page.IsLeafPage()
	}
	return r.node.isLeaf
}

// count returns the number of inodes or page elements at this reference.
func (r *elemRef) count() int {
	if r.node == nil {
		return int(r.page.Count())
	}
	return len(r.node.inodes)
}

1392
vendor/go.etcd.io/bbolt/db.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

40
vendor/go.etcd.io/bbolt/doc.go generated vendored Normal file
View File

@@ -0,0 +1,40 @@
/*
package bbolt implements a low-level key/value store in pure Go. It supports
fully serializable transactions, ACID semantics, and lock-free MVCC with
multiple readers and a single writer. Bolt can be used for projects that
want a simple data store without the need to add large dependencies such as
Postgres or MySQL.
Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
optimized for fast read access and does not require recovery in the event of a
system crash. Transactions which have not finished committing will simply be
rolled back in the event of a crash.
The design of Bolt is based on Howard Chu's LMDB database project.
Bolt currently works on Windows, Mac OS X, and Linux.
# Basics
There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
a collection of buckets and is represented by a single file on disk. A bucket is
a collection of unique keys that are associated with values.
Transactions provide either read-only or read-write access to the database.
Read-only transactions can retrieve key/value pairs and can use Cursors to
iterate over the dataset sequentially. Read-write transactions can create and
delete buckets and can insert and remove keys. Only one read-write transaction
is allowed at a time.
# Caveats
The database uses a read-only, memory-mapped data file to ensure that
applications cannot corrupt the database, however, this means that keys and
values returned from Bolt cannot be changed. Writing to a read-only byte slice
will cause Go to panic.
Keys and values retrieved from the database are only valid for the life of
the transaction. When used outside the transaction, these byte slices can
point to different data or can point to invalid memory which will cause a panic.
*/
package bbolt

108
vendor/go.etcd.io/bbolt/errors.go generated vendored Normal file
View File

@@ -0,0 +1,108 @@
package bbolt

import "go.etcd.io/bbolt/errors"

// Every variable in this file is a backward-compatibility alias for the
// canonical definitions in the go.etcd.io/bbolt/errors package; new code
// should import that package directly.

// These errors can be returned when opening or calling methods on a DB.
var (
	// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
	// is opened or after it is closed.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrDatabaseNotOpen = errors.ErrDatabaseNotOpen

	// ErrInvalid is returned when both meta pages on a database are invalid.
	// This typically occurs when a file is not a bolt database.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrInvalid = errors.ErrInvalid

	// ErrInvalidMapping is returned when the database file fails to get mapped.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrInvalidMapping = errors.ErrInvalidMapping

	// ErrVersionMismatch is returned when the data file was created with a
	// different version of Bolt.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrVersionMismatch = errors.ErrVersionMismatch

	// ErrChecksum is returned when a checksum mismatch occurs on either of the two meta pages.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrChecksum = errors.ErrChecksum

	// ErrTimeout is returned when a database cannot obtain an exclusive lock
	// on the data file after the timeout passed to Open().
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrTimeout = errors.ErrTimeout
)

// These errors can occur when beginning or committing a Tx.
var (
	// ErrTxNotWritable is returned when performing a write operation on a
	// read-only transaction.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrTxNotWritable = errors.ErrTxNotWritable

	// ErrTxClosed is returned when committing or rolling back a transaction
	// that has already been committed or rolled back.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrTxClosed = errors.ErrTxClosed

	// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
	// read-only database.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrDatabaseReadOnly = errors.ErrDatabaseReadOnly

	// ErrFreePagesNotLoaded is returned when a readonly transaction without
	// preloading the free pages is trying to access the free pages.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrFreePagesNotLoaded = errors.ErrFreePagesNotLoaded
)

// These errors can occur when putting or deleting a value or a bucket.
var (
	// ErrBucketNotFound is returned when trying to access a bucket that has
	// not been created yet.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrBucketNotFound = errors.ErrBucketNotFound

	// ErrBucketExists is returned when creating a bucket that already exists.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrBucketExists = errors.ErrBucketExists

	// ErrBucketNameRequired is returned when creating a bucket with a blank name.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrBucketNameRequired = errors.ErrBucketNameRequired

	// ErrKeyRequired is returned when inserting a zero-length key.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrKeyRequired = errors.ErrKeyRequired

	// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrKeyTooLarge = errors.ErrKeyTooLarge

	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrValueTooLarge = errors.ErrValueTooLarge

	// ErrIncompatibleValue is returned when trying create or delete a bucket
	// on an existing non-bucket key or when trying to create or delete a
	// non-bucket key on an existing bucket key.
	//
	// Deprecated: Use the error variables defined in the bbolt/errors package.
	ErrIncompatibleValue = errors.ErrIncompatibleValue
)

84
vendor/go.etcd.io/bbolt/errors/errors.go generated vendored Normal file
View File

@@ -0,0 +1,84 @@
// Package errors defines the error variables that may be returned
// during bbolt operations.
package errors

import "errors"

// These errors can be returned when opening or calling methods on a DB.
var (
	// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
	// is opened or after it is closed.
	ErrDatabaseNotOpen = errors.New("database not open")

	// ErrInvalid is returned when both meta pages on a database are invalid.
	// This typically occurs when a file is not a bolt database.
	ErrInvalid = errors.New("invalid database")

	// ErrInvalidMapping is returned when the database file fails to get mapped.
	ErrInvalidMapping = errors.New("database isn't correctly mapped")

	// ErrVersionMismatch is returned when the data file was created with a
	// different version of Bolt.
	ErrVersionMismatch = errors.New("version mismatch")

	// ErrChecksum is returned when a checksum mismatch occurs on either of the two meta pages.
	ErrChecksum = errors.New("checksum error")

	// ErrTimeout is returned when a database cannot obtain an exclusive lock
	// on the data file after the timeout passed to Open().
	ErrTimeout = errors.New("timeout")
)

// These errors can occur when beginning or committing a Tx.
var (
	// ErrTxNotWritable is returned when performing a write operation on a
	// read-only transaction.
	ErrTxNotWritable = errors.New("tx not writable")

	// ErrTxClosed is returned when committing or rolling back a transaction
	// that has already been committed or rolled back.
	ErrTxClosed = errors.New("tx closed")

	// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
	// read-only database.
	ErrDatabaseReadOnly = errors.New("database is in read-only mode")

	// ErrFreePagesNotLoaded is returned when a readonly transaction without
	// preloading the free pages is trying to access the free pages.
	ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded")
)

// These errors can occur when putting or deleting a value or a bucket.
var (
	// ErrBucketNotFound is returned when trying to access a bucket that has
	// not been created yet.
	ErrBucketNotFound = errors.New("bucket not found")

	// ErrBucketExists is returned when creating a bucket that already exists.
	ErrBucketExists = errors.New("bucket already exists")

	// ErrBucketNameRequired is returned when creating a bucket with a blank name.
	ErrBucketNameRequired = errors.New("bucket name required")

	// ErrKeyRequired is returned when inserting a zero-length key.
	ErrKeyRequired = errors.New("key required")

	// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
	ErrKeyTooLarge = errors.New("key too large")

	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
	ErrValueTooLarge = errors.New("value too large")

	// ErrIncompatibleValue is returned when trying to create or delete a bucket
	// on an existing non-bucket key or when trying to create or delete a
	// non-bucket key on an existing bucket key.
	ErrIncompatibleValue = errors.New("incompatible value")

	// ErrSameBuckets is returned when trying to move a sub-bucket between
	// source and target buckets, while source and target buckets are the same.
	ErrSameBuckets = errors.New("the source and target are the same bucket")

	// ErrDifferentDB is returned when trying to move a sub-bucket between
	// source and target buckets, while source and target buckets are in different database files.
	ErrDifferentDB = errors.New("the source and target buckets are in different database files")
)

54
vendor/go.etcd.io/bbolt/internal/common/bucket.go generated vendored Normal file
View File

@@ -0,0 +1,54 @@
package common

import (
	"fmt"
	"unsafe"
)

// BucketHeaderSize is the byte size of the InBucket header that precedes the
// optional inline page data inside a bucket's stored value.
const BucketHeaderSize = int(unsafe.Sizeof(InBucket{}))

// InBucket represents the on-file representation of a bucket.
// This is stored as the "value" of a bucket key. If the bucket is small enough,
// then its root page can be stored inline in the "value", after the bucket
// header. In the case of inline buckets, the "root" will be 0.
type InBucket struct {
	root     Pgid   // page id of the bucket's root-level page
	sequence uint64 // monotonically incrementing, used by NextSequence()
}

// NewInBucket constructs an InBucket header with the given root page and
// sequence counter.
func NewInBucket(root Pgid, seq uint64) InBucket {
	return InBucket{
		root:     root,
		sequence: seq,
	}
}

// RootPage returns the page id of the bucket's root page (0 for inline buckets).
func (b *InBucket) RootPage() Pgid {
	return b.root
}

// SetRootPage updates the bucket's root page id.
func (b *InBucket) SetRootPage(id Pgid) {
	b.root = id
}

// InSequence returns the sequence. The reason why not naming it `Sequence`
// is to avoid duplicated name as `(*Bucket) Sequence()`
func (b *InBucket) InSequence() uint64 {
	return b.sequence
}

// SetInSequence sets the sequence counter to v.
func (b *InBucket) SetInSequence(v uint64) {
	b.sequence = v
}

// IncSequence increments the sequence counter by one.
func (b *InBucket) IncSequence() {
	b.sequence++
}

// InlinePage interprets the bytes immediately following the bucket header in v
// as an inline Page. v must be a full bucket value (header + page data).
func (b *InBucket) InlinePage(v []byte) *Page {
	return (*Page)(unsafe.Pointer(&v[BucketHeaderSize]))
}

// String returns a compact human-readable form for debugging.
func (b *InBucket) String() string {
	return fmt.Sprintf("<pgid=%d,seq=%d>", b.root, b.sequence)
}

115
vendor/go.etcd.io/bbolt/internal/common/inode.go generated vendored Normal file
View File

@@ -0,0 +1,115 @@
package common

import "unsafe"

// Inode represents an internal node inside of a node.
// It can be used to point to elements in a page or point
// to an element which hasn't been added to a page yet.
type Inode struct {
	flags uint32 // leaf element flags (e.g. BucketLeafFlag); unused for branch entries
	pgid  Pgid   // child page id; only meaningful for branch entries
	key   []byte
	value []byte // only meaningful for leaf entries
}

type Inodes []Inode

// Flags returns the inode's leaf flags.
func (in *Inode) Flags() uint32 {
	return in.flags
}

// SetFlags sets the inode's leaf flags.
func (in *Inode) SetFlags(flags uint32) {
	in.flags = flags
}

// Pgid returns the child page id (branch entries only).
func (in *Inode) Pgid() Pgid {
	return in.pgid
}

// SetPgid sets the child page id.
func (in *Inode) SetPgid(id Pgid) {
	in.pgid = id
}

// Key returns the inode's key bytes.
func (in *Inode) Key() []byte {
	return in.key
}

// SetKey sets the inode's key bytes.
func (in *Inode) SetKey(key []byte) {
	in.key = key
}

// Value returns the inode's value bytes (leaf entries only).
func (in *Inode) Value() []byte {
	return in.value
}

// SetValue sets the inode's value bytes.
func (in *Inode) SetValue(value []byte) {
	in.value = value
}

// ReadInodeFromPage deserializes all elements of a branch or leaf page into a
// slice of Inodes. The returned key/value slices alias the page's memory.
func ReadInodeFromPage(p *Page) Inodes {
	inodes := make(Inodes, int(p.Count()))
	isLeaf := p.IsLeafPage()
	for i := 0; i < int(p.Count()); i++ {
		inode := &inodes[i]
		if isLeaf {
			elem := p.LeafPageElement(uint16(i))
			inode.SetFlags(elem.Flags())
			inode.SetKey(elem.Key())
			inode.SetValue(elem.Value())
		} else {
			elem := p.BranchPageElement(uint16(i))
			inode.SetPgid(elem.Pgid())
			inode.SetKey(elem.Key())
		}
		Assert(len(inode.Key()) > 0, "read: zero-length inode key")
	}

	return inodes
}

// WriteInodeToPage serializes inodes onto page p (element headers first, then
// packed key/value data) and returns the total number of bytes used.
func WriteInodeToPage(inodes Inodes, p *Page) uint32 {
	// Loop over each item and write it to the page.
	// off tracks the offset into p of the start of the next data.
	off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes))
	isLeaf := p.IsLeafPage()
	for i, item := range inodes {
		Assert(len(item.Key()) > 0, "write: zero-length inode key")

		// Create a slice to write into of needed size and advance
		// byte pointer for next iteration.
		sz := len(item.Key()) + len(item.Value())
		b := UnsafeByteSlice(unsafe.Pointer(p), off, 0, sz)
		off += uintptr(sz)

		// Write the page element. Positions are stored relative to the
		// element header itself, not to the page start.
		if isLeaf {
			elem := p.LeafPageElement(uint16(i))
			elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))))
			elem.SetFlags(item.Flags())
			elem.SetKsize(uint32(len(item.Key())))
			elem.SetVsize(uint32(len(item.Value())))
		} else {
			elem := p.BranchPageElement(uint16(i))
			elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))))
			elem.SetKsize(uint32(len(item.Key())))
			elem.SetPgid(item.Pgid())
			Assert(elem.Pgid() != p.Id(), "write: circular dependency occurred")
		}

		// Write data for the element to the end of the page.
		l := copy(b, item.Key())
		copy(b[l:], item.Value())
	}

	return uint32(off)
}

// UsedSpaceInPage computes the number of bytes the given inodes would occupy
// on page p (header + element headers + key/value data) without writing them.
func UsedSpaceInPage(inodes Inodes, p *Page) uint32 {
	off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes))
	for _, item := range inodes {
		sz := len(item.Key()) + len(item.Value())
		off += uintptr(sz)
	}

	return uint32(off)
}

161
vendor/go.etcd.io/bbolt/internal/common/meta.go generated vendored Normal file
View File

@@ -0,0 +1,161 @@
package common

import (
	"fmt"
	"hash/fnv"
	"io"
	"unsafe"

	"go.etcd.io/bbolt/errors"
)

// Meta is the in-memory mirror of a database meta page. Two copies exist on
// disk (pages 0 and 1); the one with the higher valid txid wins on open.
type Meta struct {
	magic    uint32   // must equal Magic for a valid bolt file
	version  uint32   // data file format version
	pageSize uint32   // page size the file was created with
	flags    uint32
	root     InBucket // root bucket header
	freelist Pgid     // freelist page id, or PgidNoFreelist when not persisted
	pgid     Pgid     // high-water mark: first unallocated page id
	txid     Txid
	checksum uint64   // FNV-64a over all preceding fields
}

// Validate checks the marker bytes and version of the meta page to ensure it matches this binary.
func (m *Meta) Validate() error {
	if m.magic != Magic {
		return errors.ErrInvalid
	} else if m.version != Version {
		return errors.ErrVersionMismatch
	} else if m.checksum != m.Sum64() {
		return errors.ErrChecksum
	}
	return nil
}

// Copy copies one meta object to another.
func (m *Meta) Copy(dest *Meta) {
	*dest = *m
}

// Write writes the meta onto a page, refreshing the checksum first.
// It panics if internal page ids are inconsistent with the high-water mark.
func (m *Meta) Write(p *Page) {
	if m.root.root >= m.pgid {
		panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
	} else if m.freelist >= m.pgid && m.freelist != PgidNoFreelist {
		// TODO: reject pgidNoFreeList if !NoFreelistSync
		panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
	}

	// Page id is either going to be 0 or 1 which we can determine by the transaction ID.
	p.id = Pgid(m.txid % 2)
	p.SetFlags(MetaPageFlag)

	// Calculate the checksum.
	m.checksum = m.Sum64()

	m.Copy(p.Meta())
}

// Sum64 generates the checksum for the meta.
// It hashes every field up to (but excluding) the checksum field itself.
func (m *Meta) Sum64() uint64 {
	var h = fnv.New64a()
	_, _ = h.Write((*[unsafe.Offsetof(Meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
	return h.Sum64()
}

// Magic returns the file magic marker.
func (m *Meta) Magic() uint32 {
	return m.magic
}

// SetMagic sets the file magic marker.
func (m *Meta) SetMagic(v uint32) {
	m.magic = v
}

// Version returns the data file format version.
func (m *Meta) Version() uint32 {
	return m.version
}

// SetVersion sets the data file format version.
func (m *Meta) SetVersion(v uint32) {
	m.version = v
}

// PageSize returns the page size recorded in the meta.
func (m *Meta) PageSize() uint32 {
	return m.pageSize
}

// SetPageSize sets the recorded page size.
func (m *Meta) SetPageSize(v uint32) {
	m.pageSize = v
}

// Flags returns the meta flags.
func (m *Meta) Flags() uint32 {
	return m.flags
}

// SetFlags sets the meta flags.
func (m *Meta) SetFlags(v uint32) {
	m.flags = v
}

// SetRootBucket sets the root bucket header.
func (m *Meta) SetRootBucket(b InBucket) {
	m.root = b
}

// RootBucket returns a pointer to the root bucket header.
func (m *Meta) RootBucket() *InBucket {
	return &m.root
}

// Freelist returns the freelist page id.
func (m *Meta) Freelist() Pgid {
	return m.freelist
}

// SetFreelist sets the freelist page id.
func (m *Meta) SetFreelist(v Pgid) {
	m.freelist = v
}

// IsFreelistPersisted reports whether a freelist page was written to disk.
func (m *Meta) IsFreelistPersisted() bool {
	return m.freelist != PgidNoFreelist
}

// Pgid returns the high-water mark (first unallocated page id).
func (m *Meta) Pgid() Pgid {
	return m.pgid
}

// SetPgid sets the high-water mark.
func (m *Meta) SetPgid(id Pgid) {
	m.pgid = id
}

// Txid returns the transaction id.
func (m *Meta) Txid() Txid {
	return m.txid
}

// SetTxid sets the transaction id.
func (m *Meta) SetTxid(id Txid) {
	m.txid = id
}

// IncTxid increments the transaction id.
func (m *Meta) IncTxid() {
	m.txid += 1
}

// DecTxid decrements the transaction id.
func (m *Meta) DecTxid() {
	m.txid -= 1
}

// Checksum returns the stored checksum.
func (m *Meta) Checksum() uint64 {
	return m.checksum
}

// SetChecksum sets the stored checksum.
func (m *Meta) SetChecksum(v uint64) {
	m.checksum = v
}

// Print writes a human-readable dump of the meta fields to w.
func (m *Meta) Print(w io.Writer) {
	fmt.Fprintf(w, "Version:    %d\n", m.version)
	fmt.Fprintf(w, "Page Size:  %d bytes\n", m.pageSize)
	fmt.Fprintf(w, "Flags:      %08x\n", m.flags)
	fmt.Fprintf(w, "Root:       <pgid=%d>\n", m.root.root)
	fmt.Fprintf(w, "Freelist:   <pgid=%d>\n", m.freelist)
	fmt.Fprintf(w, "HWM:        <pgid=%d>\n", m.pgid)
	fmt.Fprintf(w, "Txn ID:     %d\n", m.txid)
	fmt.Fprintf(w, "Checksum:   %016x\n", m.checksum)
	fmt.Fprintf(w, "\n")
}

391
vendor/go.etcd.io/bbolt/internal/common/page.go generated vendored Normal file
View File

@@ -0,0 +1,391 @@
package common

import (
	"fmt"
	"os"
	"sort"
	"unsafe"
)

// PageHeaderSize is the size of the fixed header at the start of every page.
const PageHeaderSize = unsafe.Sizeof(Page{})

// MinKeysPerPage is the minimum number of keys a non-root page must hold.
const MinKeysPerPage = 2

const BranchPageElementSize = unsafe.Sizeof(branchPageElement{})
const LeafPageElementSize = unsafe.Sizeof(leafPageElement{})
const pgidSize = unsafe.Sizeof(Pgid(0))

// Page type flags stored in Page.flags; exactly one must be set per page.
const (
	BranchPageFlag   = 0x01
	LeafPageFlag     = 0x02
	MetaPageFlag     = 0x04
	FreelistPageFlag = 0x10
)

// BucketLeafFlag marks a leaf element whose value is a nested bucket.
const (
	BucketLeafFlag = 0x01
)
// Pgid is a page identifier: the page's index within the database file.
type Pgid uint64

// Page is the fixed header at the start of every on-disk page; the element
// headers and data follow it immediately in memory.
type Page struct {
	id       Pgid
	flags    uint16 // page type, one of the *PageFlag constants
	count    uint16 // number of elements (0xFFFF marks an overflowed freelist count)
	overflow uint32 // number of additional contiguous pages used by this page
}

// NewPage constructs a Page header with the given fields.
func NewPage(id Pgid, flags, count uint16, overflow uint32) *Page {
	return &Page{
		id:       id,
		flags:    flags,
		count:    count,
		overflow: overflow,
	}
}

// Typ returns a human-readable page type string used for debugging.
func (p *Page) Typ() string {
	if p.IsBranchPage() {
		return "branch"
	} else if p.IsLeafPage() {
		return "leaf"
	} else if p.IsMetaPage() {
		return "meta"
	} else if p.IsFreelistPage() {
		return "freelist"
	}
	return fmt.Sprintf("unknown<%02x>", p.flags)
}

// IsBranchPage reports whether this is a branch page.
func (p *Page) IsBranchPage() bool {
	return p.flags == BranchPageFlag
}

// IsLeafPage reports whether this is a leaf page.
func (p *Page) IsLeafPage() bool {
	return p.flags == LeafPageFlag
}

// IsMetaPage reports whether this is a meta page.
func (p *Page) IsMetaPage() bool {
	return p.flags == MetaPageFlag
}

// IsFreelistPage reports whether this is a freelist page.
func (p *Page) IsFreelistPage() bool {
	return p.flags == FreelistPageFlag
}

// Meta returns a pointer to the metadata section of the page.
func (p *Page) Meta() *Meta {
	return (*Meta)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
}
// FastCheck asserts that the page identifies itself as id and carries exactly
// one recognized page-type flag; it panics on corruption.
func (p *Page) FastCheck(id Pgid) {
	Assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id)
	// Only one flag of page-type can be set.
	Assert(p.IsBranchPage() ||
		p.IsLeafPage() ||
		p.IsMetaPage() ||
		p.IsFreelistPage(),
		"page %v: has unexpected type/flags: %x", p.id, p.flags)
}

// LeafPageElement retrieves the leaf node by index
func (p *Page) LeafPageElement(index uint16) *leafPageElement {
	return (*leafPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
		LeafPageElementSize, int(index)))
}

// LeafPageElements retrieves a list of leaf nodes.
// The returned slice aliases the page's memory.
func (p *Page) LeafPageElements() []leafPageElement {
	if p.count == 0 {
		return nil
	}
	data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
	elems := unsafe.Slice((*leafPageElement)(data), int(p.count))
	return elems
}

// BranchPageElement retrieves the branch node by index
func (p *Page) BranchPageElement(index uint16) *branchPageElement {
	return (*branchPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
		unsafe.Sizeof(branchPageElement{}), int(index)))
}

// BranchPageElements retrieves a list of branch nodes.
// The returned slice aliases the page's memory.
func (p *Page) BranchPageElements() []branchPageElement {
	if p.count == 0 {
		return nil
	}
	data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
	elems := unsafe.Slice((*branchPageElement)(data), int(p.count))
	return elems
}

// FreelistPageCount returns the index of the first pgid element and the number
// of page ids stored on a freelist page.
func (p *Page) FreelistPageCount() (int, int) {
	Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page count from a non-freelist page: %2x", p.flags))

	// If the page.count is at the max uint16 value (64k) then it's considered
	// an overflow and the size of the freelist is stored as the first element.
	var idx, count = 0, int(p.count)
	if count == 0xFFFF {
		idx = 1
		c := *(*Pgid)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
		count = int(c)
		if count < 0 {
			panic(fmt.Sprintf("leading element count %d overflows int", c))
		}
	}

	return idx, count
}

// FreelistPageIds returns the page ids stored on a freelist page.
// The returned slice aliases the page's memory.
func (p *Page) FreelistPageIds() []Pgid {
	Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page IDs from a non-freelist page: %2x", p.flags))

	idx, count := p.FreelistPageCount()

	if count == 0 {
		return nil
	}

	data := UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), pgidSize, idx)
	ids := unsafe.Slice((*Pgid)(data), count)

	return ids
}
// hexdump writes n bytes of the page to STDERR as hex output (debug aid).
func (p *Page) hexdump(n int) {
	buf := UnsafeByteSlice(unsafe.Pointer(p), 0, 0, n)
	fmt.Fprintf(os.Stderr, "%x\n", buf)
}

// PageElementSize returns the per-element header size for this page's type.
func (p *Page) PageElementSize() uintptr {
	if p.IsLeafPage() {
		return LeafPageElementSize
	}
	return BranchPageElementSize
}

// Id returns the page id.
func (p *Page) Id() Pgid {
	return p.id
}

// SetId sets the page id.
func (p *Page) SetId(target Pgid) {
	p.id = target
}

// Flags returns the page-type flags.
func (p *Page) Flags() uint16 {
	return p.flags
}

// SetFlags sets the page-type flags.
func (p *Page) SetFlags(v uint16) {
	p.flags = v
}

// Count returns the element count.
func (p *Page) Count() uint16 {
	return p.count
}

// SetCount sets the element count.
func (p *Page) SetCount(target uint16) {
	p.count = target
}

// Overflow returns the number of additional overflow pages.
func (p *Page) Overflow() uint32 {
	return p.overflow
}

// SetOverflow sets the number of additional overflow pages.
func (p *Page) SetOverflow(target uint32) {
	p.overflow = target
}

// String returns a compact human-readable description for debugging.
func (p *Page) String() string {
	return fmt.Sprintf("ID: %d, Type: %s, count: %d, overflow: %d", p.id, p.Typ(), p.count, p.overflow)
}

// Pages implements sort.Interface, ordering pages by ascending id.
type Pages []*Page

func (s Pages) Len() int           { return len(s) }
func (s Pages) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s Pages) Less(i, j int) bool { return s[i].id < s[j].id }
// branchPageElement represents a node on a branch page.
// pos is the byte offset of the key, relative to this element header.
type branchPageElement struct {
	pos   uint32
	ksize uint32
	pgid  Pgid
}

// Pos returns the key's offset relative to this element header.
func (n *branchPageElement) Pos() uint32 {
	return n.pos
}

// SetPos sets the key's offset relative to this element header.
func (n *branchPageElement) SetPos(v uint32) {
	n.pos = v
}

// Ksize returns the key length in bytes.
func (n *branchPageElement) Ksize() uint32 {
	return n.ksize
}

// SetKsize sets the key length in bytes.
func (n *branchPageElement) SetKsize(v uint32) {
	n.ksize = v
}

// Pgid returns the child page id this element points to.
func (n *branchPageElement) Pgid() Pgid {
	return n.pgid
}

// SetPgid sets the child page id.
func (n *branchPageElement) SetPgid(v Pgid) {
	n.pgid = v
}

// Key returns a byte slice of the node key.
// The slice aliases the page's memory.
func (n *branchPageElement) Key() []byte {
	return UnsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize))
}

// leafPageElement represents a node on a leaf page.
// pos is the byte offset of the key, relative to this element header; the
// value is stored immediately after the key.
type leafPageElement struct {
	flags uint32 // BucketLeafFlag marks a nested-bucket value
	pos   uint32
	ksize uint32
	vsize uint32
}

// NewLeafPageElement constructs a leafPageElement with the given fields.
func NewLeafPageElement(flags, pos, ksize, vsize uint32) *leafPageElement {
	return &leafPageElement{
		flags: flags,
		pos:   pos,
		ksize: ksize,
		vsize: vsize,
	}
}

// Flags returns the element flags.
func (n *leafPageElement) Flags() uint32 {
	return n.flags
}

// SetFlags sets the element flags.
func (n *leafPageElement) SetFlags(v uint32) {
	n.flags = v
}

// Pos returns the key's offset relative to this element header.
func (n *leafPageElement) Pos() uint32 {
	return n.pos
}

// SetPos sets the key's offset relative to this element header.
func (n *leafPageElement) SetPos(v uint32) {
	n.pos = v
}

// Ksize returns the key length in bytes.
func (n *leafPageElement) Ksize() uint32 {
	return n.ksize
}

// SetKsize sets the key length in bytes.
func (n *leafPageElement) SetKsize(v uint32) {
	n.ksize = v
}

// Vsize returns the value length in bytes.
func (n *leafPageElement) Vsize() uint32 {
	return n.vsize
}

// SetVsize sets the value length in bytes.
func (n *leafPageElement) SetVsize(v uint32) {
	n.vsize = v
}

// Key returns a byte slice of the node key.
// The slice aliases the page's memory.
func (n *leafPageElement) Key() []byte {
	i := int(n.pos)
	j := i + int(n.ksize)
	return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j)
}

// Value returns a byte slice of the node value.
// The value is stored directly after the key; the slice aliases page memory.
func (n *leafPageElement) Value() []byte {
	i := int(n.pos) + int(n.ksize)
	j := i + int(n.vsize)
	return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j)
}

// IsBucketEntry reports whether this element's value is a nested bucket.
func (n *leafPageElement) IsBucketEntry() bool {
	return n.flags&uint32(BucketLeafFlag) != 0
}

// Bucket returns the nested bucket header stored in this element's value,
// or nil when the element is a plain key/value pair.
func (n *leafPageElement) Bucket() *InBucket {
	if n.IsBucketEntry() {
		return LoadBucket(n.Value())
	} else {
		return nil
	}
}
// PageInfo represents human readable information about a page.
type PageInfo struct {
	ID            int
	Type          string
	Count         int
	OverflowCount int
}

// Pgids implements sort.Interface, ordering page ids ascending.
type Pgids []Pgid

func (s Pgids) Len() int           { return len(s) }
func (s Pgids) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s Pgids) Less(i, j int) bool { return s[i] < s[j] }
// Merge returns the sorted union of a and b.
// When either input is empty the other is returned directly (no copy).
func (a Pgids) Merge(b Pgids) Pgids {
	switch {
	case len(a) == 0:
		// Return the opposite slice if one is nil.
		return b
	case len(b) == 0:
		return a
	}
	out := make(Pgids, len(a)+len(b))
	Mergepgids(out, a, b)
	return out
}
// Mergepgids copies the sorted union of a and b into dst.
// If dst is too small, it panics. Both inputs must already be sorted.
func Mergepgids(dst, a, b Pgids) {
	if len(dst) < len(a)+len(b) {
		panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
	}
	// Copy in the opposite slice if one is nil.
	if len(a) == 0 {
		copy(dst, b)
		return
	}
	if len(b) == 0 {
		copy(dst, a)
		return
	}

	// Merged will hold all elements from both lists.
	// Note: merged shares dst's backing array, so appending to it (within
	// capacity) writes the result directly into dst.
	merged := dst[:0]

	// Assign lead to the slice with a lower starting value, follow to the higher value.
	lead, follow := a, b
	if b[0] < a[0] {
		lead, follow = b, a
	}

	// Continue while there are elements in the lead.
	for len(lead) > 0 {
		// Merge largest prefix of lead that is ahead of follow[0].
		n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
		merged = append(merged, lead[:n]...)
		if n >= len(lead) {
			break
		}

		// Swap lead and follow.
		lead, follow = follow, lead[n:]
	}

	// Append what's left in follow.
	// The result is discarded because the append lands in dst's backing array.
	_ = append(merged, follow...)
}

40
vendor/go.etcd.io/bbolt/internal/common/types.go generated vendored Normal file
View File

@@ -0,0 +1,40 @@
package common

import (
	"os"
	"runtime"
	"time"
)

// MaxMmapStep is the largest step that can be taken when remapping the mmap.
const MaxMmapStep = 1 << 30 // 1GB

// Version represents the data file format version.
const Version uint32 = 2

// Magic represents a marker value to indicate that a file is a Bolt DB.
const Magic uint32 = 0xED0CDAED

// PgidNoFreelist is the sentinel freelist pgid meaning "freelist not persisted".
const PgidNoFreelist Pgid = 0xffffffffffffffff

// DO NOT EDIT. Copied from the "bolt" package.
const pageMaxAllocSize = 0xFFFFFFF

// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
// syncing changes to a file. This is required as some operating systems,
// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
// must be synchronized using the msync(2) syscall.
const IgnoreNoSync = runtime.GOOS == "openbsd"

// Default values if not set in a DB instance.
const (
	DefaultMaxBatchSize  int = 1000
	DefaultMaxBatchDelay     = 10 * time.Millisecond
	DefaultAllocSize         = 16 * 1024 * 1024
)

// DefaultPageSize is the default page size for db which is set to the OS page size.
var DefaultPageSize = os.Getpagesize()

// Txid represents the internal transaction identifier.
type Txid uint64

27
vendor/go.etcd.io/bbolt/internal/common/unsafe.go generated vendored Normal file
View File

@@ -0,0 +1,27 @@
package common
import (
"unsafe"
)
// UnsafeAdd returns base advanced by offset bytes.
func UnsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(base) + offset)
}

// UnsafeIndex returns a pointer to the n-th element of size elemsz in the
// array that begins offset bytes past base.
func UnsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer {
	return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz)
}

// UnsafeByteSlice reinterprets the memory starting offset bytes past base
// as a byte array and returns its [i:j] subslice (capacity capped at j).
// No copy is made; the caller must guarantee the memory stays valid.
func UnsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
	// See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices
	//
	// This memory is not allocated from C, but it is unmanaged by Go's
	// garbage collector and should behave similarly, and the compiler
	// should produce similar code. Note that this conversion allows a
	// subslice to begin after the base address, with an optional offset,
	// while the URL above does not cover this case and only slices from
	// index 0. However, the wiki never says that the address must be to
	// the beginning of a C allocation (or even that malloc was used at
	// all), so this is believed to be correct.
	return (*[pageMaxAllocSize]byte)(UnsafeAdd(base, offset))[i:j:j]
}

64
vendor/go.etcd.io/bbolt/internal/common/utils.go generated vendored Normal file
View File

@@ -0,0 +1,64 @@
package common
import (
"fmt"
"io"
"os"
"unsafe"
)
// LoadBucket reinterprets the start of buf as an in-file bucket header.
// No copy is made; panics if buf is empty.
func LoadBucket(buf []byte) *InBucket {
	return (*InBucket)(unsafe.Pointer(&buf[0]))
}

// LoadPage reinterprets the start of buf as a Page. No copy is made.
func LoadPage(buf []byte) *Page {
	return (*Page)(unsafe.Pointer(&buf[0]))
}

// LoadPageMeta returns the Meta record located immediately after the page
// header at the start of buf. No copy is made.
func LoadPageMeta(buf []byte) *Meta {
	return (*Meta)(unsafe.Pointer(&buf[PageHeaderSize]))
}
func CopyFile(srcPath, dstPath string) error {
// Ensure source file exists.
_, err := os.Stat(srcPath)
if os.IsNotExist(err) {
return fmt.Errorf("source file %q not found", srcPath)
} else if err != nil {
return err
}
// Ensure output file not exist.
_, err = os.Stat(dstPath)
if err == nil {
return fmt.Errorf("output file %q already exists", dstPath)
} else if !os.IsNotExist(err) {
return err
}
srcDB, err := os.Open(srcPath)
if err != nil {
return fmt.Errorf("failed to open source file %q: %w", srcPath, err)
}
defer srcDB.Close()
dstDB, err := os.Create(dstPath)
if err != nil {
return fmt.Errorf("failed to create output file %q: %w", dstPath, err)
}
defer dstDB.Close()
written, err := io.Copy(dstDB, srcDB)
if err != nil {
return fmt.Errorf("failed to copy database file from %q to %q: %w", srcPath, dstPath, err)
}
srcFi, err := srcDB.Stat()
if err != nil {
return fmt.Errorf("failed to get source file info %q: %w", srcPath, err)
}
initialSize := srcFi.Size()
if initialSize != written {
return fmt.Errorf("the byte copied (%q: %d) isn't equal to the initial db size (%q: %d)", dstPath, written, srcPath, initialSize)
}
return nil
}

67
vendor/go.etcd.io/bbolt/internal/common/verify.go generated vendored Normal file
View File

@@ -0,0 +1,67 @@
// Copied from https://github.com/etcd-io/etcd/blob/main/client/pkg/verify/verify.go
package common
import (
"fmt"
"os"
"strings"
)
// ENV_VERIFY is the environment variable that selects which optional
// (expensive) runtime verifications are enabled.
const ENV_VERIFY = "BBOLT_VERIFY"

// VerificationType names a category of optional runtime verification.
type VerificationType string

const (
	ENV_VERIFY_VALUE_ALL    VerificationType = "all"
	ENV_VERIFY_VALUE_ASSERT VerificationType = "assert"
)

// getEnvVerify returns the value of ENV_VERIFY, lower-cased.
func getEnvVerify() string {
	return strings.ToLower(os.Getenv(ENV_VERIFY))
}

// IsVerificationEnabled reports whether the given verification category
// is switched on, either individually or via "all".
func IsVerificationEnabled(verification VerificationType) bool {
	env := getEnvVerify()
	return env == string(ENV_VERIFY_VALUE_ALL) || env == strings.ToLower(string(verification))
}

// EnableVerifications sets `ENV_VERIFY` and returns a function that
// can be used to bring the original settings.
func EnableVerifications(verification VerificationType) func() {
	previousEnv := getEnvVerify()
	os.Setenv(ENV_VERIFY, string(verification))
	return func() {
		os.Setenv(ENV_VERIFY, previousEnv)
	}
}

// EnableAllVerifications enables verification and returns a function
// that can be used to bring the original settings.
func EnableAllVerifications() func() {
	return EnableVerifications(ENV_VERIFY_VALUE_ALL)
}

// DisableVerifications unsets `ENV_VERIFY` and returns a function that
// can be used to bring the original settings.
func DisableVerifications() func() {
	previousEnv := getEnvVerify()
	os.Unsetenv(ENV_VERIFY)
	return func() {
		os.Setenv(ENV_VERIFY, previousEnv)
	}
}

// Verify performs verification if the assertions are enabled.
// In the default setup running in tests and skipped in the production code.
func Verify(f func()) {
	if IsVerificationEnabled(ENV_VERIFY_VALUE_ASSERT) {
		f()
	}
}

// Assert will panic with a given formatted message if the given condition is false.
func Assert(condition bool, msg string, v ...any) {
	if !condition {
		panic(fmt.Sprintf("assertion failed: "+msg, v...))
	}
}

108
vendor/go.etcd.io/bbolt/internal/freelist/array.go generated vendored Normal file
View File

@@ -0,0 +1,108 @@
package freelist
import (
"fmt"
"sort"
"go.etcd.io/bbolt/internal/common"
)
// array is a freelist backend that keeps every free page id in a single
// sorted slice. Simple and compact, but allocation is a linear scan.
type array struct {
	*shared

	ids []common.Pgid // all free and available free page ids.
}

// Init replaces the freelist contents with ids and rebuilds the lookup
// cache via reindex.
func (f *array) Init(ids common.Pgids) {
	f.ids = ids
	f.reindex()
}

// Allocate scans for a run of n contiguous free pages. On success the run
// is removed from f.ids, evicted from the cache, recorded in f.allocs for
// txid, and its starting id is returned; otherwise it returns 0.
func (f *array) Allocate(txid common.Txid, n int) common.Pgid {
	if len(f.ids) == 0 {
		return 0
	}
	var initial, previd common.Pgid
	for i, id := range f.ids {
		if id <= 1 {
			// Pages 0 and 1 are meta pages and must never appear here.
			panic(fmt.Sprintf("invalid page allocation: %d", id))
		}
		// Reset initial page if this is not contiguous.
		if previd == 0 || id-previd != 1 {
			initial = id
		}
		// If we found a contiguous block then remove it and return it.
		if (id-initial)+1 == common.Pgid(n) {
			// If we're allocating off the beginning then take the fast path
			// and just adjust the existing slice. This will use extra memory
			// temporarily but the append() in free() will realloc the slice
			// as is necessary.
			if (i + 1) == n {
				f.ids = f.ids[i+1:]
			} else {
				copy(f.ids[i-n+1:], f.ids[i+1:])
				f.ids = f.ids[:len(f.ids)-n]
			}
			// Remove from the free cache.
			for i := common.Pgid(0); i < common.Pgid(n); i++ {
				delete(f.cache, initial+i)
			}
			f.allocs[initial] = txid
			return initial
		}
		previd = id
	}
	return 0
}

// FreeCount returns the number of free pages.
func (f *array) FreeCount() int {
	return len(f.ids)
}

// freePageIds returns the sorted free page ids. The underlying slice is
// shared, not copied.
func (f *array) freePageIds() common.Pgids {
	return f.ids
}

// mergeSpans merges the incoming ids into the sorted freelist. Under
// BBOLT_VERIFY it first checks for duplicates and overlaps with f.ids.
func (f *array) mergeSpans(ids common.Pgids) {
	sort.Sort(ids)
	common.Verify(func() {
		idsIdx := make(map[common.Pgid]struct{})
		for _, id := range f.ids {
			// The existing f.ids shouldn't have duplicated free ID.
			if _, ok := idsIdx[id]; ok {
				panic(fmt.Sprintf("detected duplicated free page ID: %d in existing f.ids: %v", id, f.ids))
			}
			idsIdx[id] = struct{}{}
		}
		prev := common.Pgid(0)
		for _, id := range ids {
			// The ids shouldn't have duplicated free ID. Note page 0 and 1
			// are reserved for meta pages, so they can never be free page IDs.
			if prev == id {
				panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids))
			}
			prev = id
			// The ids shouldn't have any overlap with the existing f.ids.
			if _, ok := idsIdx[id]; ok {
				panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.ids: %v", id, ids, f.ids))
			}
		}
	})
	f.ids = common.Pgids(f.ids).Merge(ids)
}

// NewArrayFreelist constructs an empty array-backed freelist. The shared
// core's Interface field is pointed back at the array so backend-specific
// methods dispatch correctly.
func NewArrayFreelist() Interface {
	a := &array{
		shared: newShared(),
		ids:    []common.Pgid{},
	}
	a.Interface = a
	return a
}

82
vendor/go.etcd.io/bbolt/internal/freelist/freelist.go generated vendored Normal file
View File

@@ -0,0 +1,82 @@
package freelist
import (
"go.etcd.io/bbolt/internal/common"
)
// ReadWriter is the serialization half of a freelist: loading it from and
// storing it to a database page.
type ReadWriter interface {
	// Read calls Init with the page ids stored in the given page.
	Read(page *common.Page)

	// Write writes the freelist into the given page.
	Write(page *common.Page)

	// EstimatedWritePageSize returns the size in bytes of the freelist after serialization in Write.
	// This should never underestimate the size.
	EstimatedWritePageSize() int
}

// Interface is the full freelist contract implemented by the array and
// hashmap backends (exported methods) together with the shared core
// (unexported methods).
type Interface interface {
	ReadWriter

	// Init initializes this freelist with the given list of pages.
	Init(ids common.Pgids)

	// Allocate tries to allocate the given number of contiguous pages
	// from the free list pages. It returns the starting page ID if
	// available; otherwise, it returns 0.
	Allocate(txid common.Txid, numPages int) common.Pgid

	// Count returns the number of free and pending pages.
	Count() int

	// FreeCount returns the number of free pages.
	FreeCount() int

	// PendingCount returns the number of pending pages.
	PendingCount() int

	// AddReadonlyTXID adds a given read-only transaction id for pending page tracking.
	AddReadonlyTXID(txid common.Txid)

	// RemoveReadonlyTXID removes a given read-only transaction id for pending page tracking.
	RemoveReadonlyTXID(txid common.Txid)

	// ReleasePendingPages releases any pages associated with closed read-only transactions.
	ReleasePendingPages()

	// Free releases a page and its overflow for a given transaction id.
	// If the page is already free or is one of the meta pages, then a panic will occur.
	Free(txId common.Txid, p *common.Page)

	// Freed returns whether a given page is in the free list.
	Freed(pgId common.Pgid) bool

	// Rollback removes the pages from a given pending tx.
	Rollback(txId common.Txid)

	// Copyall copies a list of all free ids and all pending ids in one sorted list.
	// f.count returns the minimum length required for dst.
	Copyall(dst []common.Pgid)

	// Reload reads the freelist from a page and filters out pending items.
	Reload(p *common.Page)

	// NoSyncReload reads the freelist from Pgids and filters out pending items.
	NoSyncReload(pgIds common.Pgids)

	// freePageIds returns the IDs of all free pages. Returns an empty slice if no free pages are available.
	freePageIds() common.Pgids

	// pendingPageIds returns all pending pages by transaction id.
	pendingPageIds() map[common.Txid]*txPending

	// release moves all page ids for a transaction id (or older) to the freelist.
	release(txId common.Txid)

	// releaseRange moves pending pages allocated within an extent [begin,end] to the free list.
	releaseRange(begin, end common.Txid)

	// mergeSpans is merging the given pages into the freelist
	mergeSpans(ids common.Pgids)
}

292
vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go generated vendored Normal file
View File

@@ -0,0 +1,292 @@
package freelist
import (
"fmt"
"reflect"
"sort"
"go.etcd.io/bbolt/internal/common"
)
// pidSet holds the set of starting pgids which have the same span size
type pidSet map[common.Pgid]struct{}

// hashMap is a freelist backend that tracks free pages as spans of
// contiguous ids, indexed three ways (by size, by start, by end) so that
// allocation and adjacent-span merging avoid linear scans.
type hashMap struct {
	*shared

	freePagesCount uint64                 // count of free pages(hashmap version)
	freemaps       map[uint64]pidSet      // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size
	forwardMap     map[common.Pgid]uint64 // key is start pgid, value is its span size
	backwardMap    map[common.Pgid]uint64 // key is end pgid, value is its span size
}

// Init rebuilds all three span indexes from the given sorted list of free
// page ids, coalescing consecutive ids into spans.
func (f *hashMap) Init(pgids common.Pgids) {
	// reset the counter when freelist init
	f.freePagesCount = 0
	f.freemaps = make(map[uint64]pidSet)
	f.forwardMap = make(map[common.Pgid]uint64)
	f.backwardMap = make(map[common.Pgid]uint64)

	if len(pgids) == 0 {
		return
	}

	if !sort.SliceIsSorted([]common.Pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) {
		panic("pgids not sorted")
	}

	size := uint64(1)
	start := pgids[0]

	for i := 1; i < len(pgids); i++ {
		// continuous page
		if pgids[i] == pgids[i-1]+1 {
			size++
		} else {
			f.addSpan(start, size)
			size = 1
			start = pgids[i]
		}
	}

	// init the tail
	if size != 0 && start != 0 {
		f.addSpan(start, size)
	}
	f.reindex()
}

// Allocate returns the starting page id of n contiguous free pages, or 0
// if none are available. An exact-size span is preferred; otherwise the
// first larger span found is split and its remainder re-added.
func (f *hashMap) Allocate(txid common.Txid, n int) common.Pgid {
	if n == 0 {
		return 0
	}

	// if we have a exact size match just return short path
	if bm, ok := f.freemaps[uint64(n)]; ok {
		for pid := range bm {
			// remove the span
			f.delSpan(pid, uint64(n))

			f.allocs[pid] = txid

			for i := common.Pgid(0); i < common.Pgid(n); i++ {
				delete(f.cache, pid+i)
			}
			return pid
		}
	}

	// lookup the map to find larger span
	for size, bm := range f.freemaps {
		if size < uint64(n) {
			continue
		}

		for pid := range bm {
			// remove the initial
			f.delSpan(pid, size)

			f.allocs[pid] = txid

			remain := size - uint64(n)

			// add remain span
			f.addSpan(pid+common.Pgid(n), remain)

			for i := common.Pgid(0); i < common.Pgid(n); i++ {
				delete(f.cache, pid+i)
			}
			return pid
		}
	}

	return 0
}

// FreeCount returns the cached number of free pages; under BBOLT_VERIFY
// it cross-checks the counter against a full recount.
func (f *hashMap) FreeCount() int {
	common.Verify(func() {
		expectedFreePageCount := f.hashmapFreeCountSlow()
		common.Assert(int(f.freePagesCount) == expectedFreePageCount,
			"freePagesCount (%d) is out of sync with free pages map (%d)", f.freePagesCount, expectedFreePageCount)
	})
	return int(f.freePagesCount)
}

// freePageIds expands every span into individual page ids, in ascending
// order, as a freshly allocated slice.
func (f *hashMap) freePageIds() common.Pgids {
	count := f.FreeCount()
	if count == 0 {
		return common.Pgids{}
	}

	m := make([]common.Pgid, 0, count)

	startPageIds := make([]common.Pgid, 0, len(f.forwardMap))
	for k := range f.forwardMap {
		startPageIds = append(startPageIds, k)
	}
	sort.Sort(common.Pgids(startPageIds))

	for _, start := range startPageIds {
		if size, ok := f.forwardMap[start]; ok {
			for i := 0; i < int(size); i++ {
				m = append(m, start+common.Pgid(i))
			}
		}
	}

	return m
}

// hashmapFreeCountSlow recounts the free pages by summing all span sizes.
func (f *hashMap) hashmapFreeCountSlow() int {
	count := 0
	for _, size := range f.forwardMap {
		count += int(size)
	}
	return count
}

// addSpan records a span of `size` pages starting at `start` in all three
// indexes and bumps the free-page counter.
func (f *hashMap) addSpan(start common.Pgid, size uint64) {
	f.backwardMap[start-1+common.Pgid(size)] = size
	f.forwardMap[start] = size
	if _, ok := f.freemaps[size]; !ok {
		f.freemaps[size] = make(map[common.Pgid]struct{})
	}

	f.freemaps[size][start] = struct{}{}
	f.freePagesCount += size
}

// delSpan removes the span from all three indexes and decrements the
// free-page counter. Empty size buckets are pruned from freemaps.
func (f *hashMap) delSpan(start common.Pgid, size uint64) {
	delete(f.forwardMap, start)
	delete(f.backwardMap, start+common.Pgid(size-1))
	delete(f.freemaps[size], start)
	if len(f.freemaps[size]) == 0 {
		delete(f.freemaps, size)
	}
	f.freePagesCount -= size
}

// mergeSpans folds each freed id into adjacent spans. Under BBOLT_VERIFY
// it first checks that all three indexes agree and that the incoming ids
// contain no duplicates or overlaps with the existing free set.
func (f *hashMap) mergeSpans(ids common.Pgids) {
	common.Verify(func() {
		ids1Freemap := f.idsFromFreemaps()
		ids2Forward := f.idsFromForwardMap()
		ids3Backward := f.idsFromBackwardMap()

		if !reflect.DeepEqual(ids1Freemap, ids2Forward) {
			panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.forwardMap: %v", f.freemaps, f.forwardMap))
		}
		if !reflect.DeepEqual(ids1Freemap, ids3Backward) {
			panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.backwardMap: %v", f.freemaps, f.backwardMap))
		}

		sort.Sort(ids)
		prev := common.Pgid(0)
		for _, id := range ids {
			// The ids shouldn't have duplicated free ID.
			if prev == id {
				panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids))
			}
			prev = id

			// The ids shouldn't have any overlap with the existing f.freemaps.
			if _, ok := ids1Freemap[id]; ok {
				panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.freemaps: %v", id, ids, f.freemaps))
			}
		}
	})
	for _, id := range ids {
		// try to see if we can merge and update
		f.mergeWithExistingSpan(id)
	}
}

// mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward
func (f *hashMap) mergeWithExistingSpan(pid common.Pgid) {
	prev := pid - 1
	next := pid + 1

	preSize, mergeWithPrev := f.backwardMap[prev]
	nextSize, mergeWithNext := f.forwardMap[next]
	newStart := pid
	newSize := uint64(1)

	if mergeWithPrev {
		//merge with previous span
		start := prev + 1 - common.Pgid(preSize)
		f.delSpan(start, preSize)

		newStart -= common.Pgid(preSize)
		newSize += preSize
	}

	if mergeWithNext {
		// merge with next span
		f.delSpan(next, nextSize)
		newSize += nextSize
	}

	f.addSpan(newStart, newSize)
}

// idsFromFreemaps get all free page IDs from f.freemaps.
// used by test only.
func (f *hashMap) idsFromFreemaps() map[common.Pgid]struct{} {
	ids := make(map[common.Pgid]struct{})
	for size, idSet := range f.freemaps {
		for start := range idSet {
			for i := 0; i < int(size); i++ {
				id := start + common.Pgid(i)
				if _, ok := ids[id]; ok {
					panic(fmt.Sprintf("detected duplicated free page ID: %d in f.freemaps: %v", id, f.freemaps))
				}
				ids[id] = struct{}{}
			}
		}
	}
	return ids
}

// idsFromForwardMap get all free page IDs from f.forwardMap.
// used by test only.
func (f *hashMap) idsFromForwardMap() map[common.Pgid]struct{} {
	ids := make(map[common.Pgid]struct{})
	for start, size := range f.forwardMap {
		for i := 0; i < int(size); i++ {
			id := start + common.Pgid(i)
			if _, ok := ids[id]; ok {
				panic(fmt.Sprintf("detected duplicated free page ID: %d in f.forwardMap: %v", id, f.forwardMap))
			}
			ids[id] = struct{}{}
		}
	}
	return ids
}

// idsFromBackwardMap get all free page IDs from f.backwardMap.
// used by test only.
func (f *hashMap) idsFromBackwardMap() map[common.Pgid]struct{} {
	ids := make(map[common.Pgid]struct{})
	for end, size := range f.backwardMap {
		for i := 0; i < int(size); i++ {
			id := end - common.Pgid(i)
			if _, ok := ids[id]; ok {
				panic(fmt.Sprintf("detected duplicated free page ID: %d in f.backwardMap: %v", id, f.backwardMap))
			}
			ids[id] = struct{}{}
		}
	}
	return ids
}

// NewHashMapFreelist constructs an empty hashmap-backed freelist. The
// shared core's Interface field is pointed back at the hashMap so
// backend-specific methods dispatch correctly.
func NewHashMapFreelist() Interface {
	hm := &hashMap{
		shared:      newShared(),
		freemaps:    make(map[uint64]pidSet),
		forwardMap:  make(map[common.Pgid]uint64),
		backwardMap: make(map[common.Pgid]uint64),
	}
	hm.Interface = hm
	return hm
}

310
vendor/go.etcd.io/bbolt/internal/freelist/shared.go generated vendored Normal file
View File

@@ -0,0 +1,310 @@
package freelist
import (
"fmt"
"math"
"sort"
"unsafe"
"go.etcd.io/bbolt/internal/common"
)
// txPending tracks the pages freed by one writing transaction; they stay
// pending until no open read-only transaction can still see them.
type txPending struct {
	ids              []common.Pgid
	alloctx          []common.Txid // txids allocating the ids
	lastReleaseBegin common.Txid   // beginning txid of last matching releaseRange
}

// shared implements the backend-independent parts of the freelist
// (pending-page tracking, serialization, cache maintenance). The embedded
// Interface is set to the concrete backend (array or hashMap) so the
// span-level operations dispatch to it.
type shared struct {
	Interface

	readonlyTXIDs []common.Txid               // all readonly transaction IDs.
	allocs        map[common.Pgid]common.Txid // mapping of Txid that allocated a pgid.
	cache         map[common.Pgid]struct{}    // fast lookup of all free and pending page ids.
	pending       map[common.Txid]*txPending  // mapping of soon-to-be free page ids by tx.
}

// newShared returns a shared core with empty tracking maps; the caller
// must still assign the Interface field to the concrete backend.
func newShared() *shared {
	return &shared{
		pending: make(map[common.Txid]*txPending),
		allocs:  make(map[common.Pgid]common.Txid),
		cache:   make(map[common.Pgid]struct{}),
	}
}

// pendingPageIds returns the pending pages grouped by transaction id.
func (t *shared) pendingPageIds() map[common.Txid]*txPending {
	return t.pending
}

// PendingCount returns the total number of pages awaiting release.
func (t *shared) PendingCount() int {
	var count int
	for _, txp := range t.pending {
		count += len(txp.ids)
	}
	return count
}

// Count returns free pages plus pending pages.
func (t *shared) Count() int {
	return t.FreeCount() + t.PendingCount()
}

// Freed reports whether pgId is currently free or pending (cache hit).
func (t *shared) Freed(pgId common.Pgid) bool {
	_, ok := t.cache[pgId]
	return ok
}

// Free marks page p and its overflow pages as pending for txid.
// Panics for the meta pages (ids 0 and 1) and on double free.
func (t *shared) Free(txid common.Txid, p *common.Page) {
	if p.Id() <= 1 {
		panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.Id()))
	}

	// Free page and all its overflow pages.
	txp := t.pending[txid]
	if txp == nil {
		txp = &txPending{}
		t.pending[txid] = txp
	}
	allocTxid, ok := t.allocs[p.Id()]
	common.Verify(func() {
		if allocTxid == txid {
			panic(fmt.Sprintf("free: freed page (%d) was allocated by the same transaction (%d)", p.Id(), txid))
		}
	})
	if ok {
		delete(t.allocs, p.Id())
	}

	for id := p.Id(); id <= p.Id()+common.Pgid(p.Overflow()); id++ {
		// Verify that page is not already free.
		if _, ok := t.cache[id]; ok {
			panic(fmt.Sprintf("page %d already freed", id))
		}
		// Add to the freelist and cache.
		txp.ids = append(txp.ids, id)
		txp.alloctx = append(txp.alloctx, allocTxid)
		t.cache[id] = struct{}{}
	}
}

// Rollback undoes all pending frees and allocations made by txid.
func (t *shared) Rollback(txid common.Txid) {
	// Remove page ids from cache.
	txp := t.pending[txid]
	if txp == nil {
		return
	}
	for i, pgid := range txp.ids {
		delete(t.cache, pgid)
		tx := txp.alloctx[i]
		if tx == 0 {
			continue
		}
		if tx != txid {
			// Pending free aborted; restore page back to alloc list.
			t.allocs[pgid] = tx
		} else {
			// A writing TXN should never free a page which was allocated by itself.
			panic(fmt.Sprintf("rollback: freed page (%d) was allocated by the same transaction (%d)", pgid, txid))
		}
	}
	// Remove pages from pending list and mark as free if allocated by txid.
	delete(t.pending, txid)

	// Remove pgids which are allocated by this txid
	for pgid, tid := range t.allocs {
		if tid == txid {
			delete(t.allocs, pgid)
		}
	}
}

// AddReadonlyTXID registers an open read-only transaction.
func (t *shared) AddReadonlyTXID(tid common.Txid) {
	t.readonlyTXIDs = append(t.readonlyTXIDs, tid)
}

// RemoveReadonlyTXID unregisters a read-only transaction (swap-remove;
// order of readonlyTXIDs is not preserved).
func (t *shared) RemoveReadonlyTXID(tid common.Txid) {
	for i := range t.readonlyTXIDs {
		if t.readonlyTXIDs[i] == tid {
			last := len(t.readonlyTXIDs) - 1
			t.readonlyTXIDs[i] = t.readonlyTXIDs[last]
			t.readonlyTXIDs = t.readonlyTXIDs[:last]
			break
		}
	}
}

// txIDx adapts a txid slice to sort.Interface.
type txIDx []common.Txid

func (t txIDx) Len() int           { return len(t) }
func (t txIDx) Swap(i, j int)      { t[i], t[j] = t[j], t[i] }
func (t txIDx) Less(i, j int) bool { return t[i] < t[j] }

// ReleasePendingPages moves every pending page that can no longer be seen
// by any open read-only transaction back to the free list.
func (t *shared) ReleasePendingPages() {
	// Free all pending pages prior to the earliest open transaction.
	sort.Sort(txIDx(t.readonlyTXIDs))
	minid := common.Txid(math.MaxUint64)
	if len(t.readonlyTXIDs) > 0 {
		minid = t.readonlyTXIDs[0]
	}
	if minid > 0 {
		t.release(minid - 1)
	}
	// Release unused txid extents.
	for _, tid := range t.readonlyTXIDs {
		t.releaseRange(minid, tid-1)
		minid = tid + 1
	}
	t.releaseRange(minid, common.Txid(math.MaxUint64))
	// Any page both allocated and freed in an extent is safe to release.
}

// release moves all pages pending for txid (or any older transaction) to
// the free list.
func (t *shared) release(txid common.Txid) {
	m := make(common.Pgids, 0)
	for tid, txp := range t.pending {
		if tid <= txid {
			// Move transaction's pending pages to the available freelist.
			// Don't remove from the cache since the page is still free.
			m = append(m, txp.ids...)
			delete(t.pending, tid)
		}
	}
	t.mergeSpans(m)
}

// releaseRange frees pending pages whose allocating transaction also lies
// within [begin, end] — i.e. pages both allocated and freed inside the
// extent, which no reader outside it can observe.
func (t *shared) releaseRange(begin, end common.Txid) {
	if begin > end {
		return
	}
	m := common.Pgids{}
	for tid, txp := range t.pending {
		if tid < begin || tid > end {
			continue
		}
		// Don't recompute freed pages if ranges haven't updated.
		if txp.lastReleaseBegin == begin {
			continue
		}
		for i := 0; i < len(txp.ids); i++ {
			if atx := txp.alloctx[i]; atx < begin || atx > end {
				continue
			}
			// Swap-remove the released entry from both parallel slices.
			m = append(m, txp.ids[i])
			txp.ids[i] = txp.ids[len(txp.ids)-1]
			txp.ids = txp.ids[:len(txp.ids)-1]
			txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1]
			txp.alloctx = txp.alloctx[:len(txp.alloctx)-1]
			i--
		}
		txp.lastReleaseBegin = begin
		if len(txp.ids) == 0 {
			delete(t.pending, tid)
		}
	}
	t.mergeSpans(m)
}

// Copyall copies a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
func (t *shared) Copyall(dst []common.Pgid) {
	m := make(common.Pgids, 0, t.PendingCount())
	for _, txp := range t.pendingPageIds() {
		m = append(m, txp.ids...)
	}
	sort.Sort(m)
	common.Mergepgids(dst, t.freePageIds(), m)
}

// Reload re-reads the freelist from a page, then filters out pages that
// are still pending.
func (t *shared) Reload(p *common.Page) {
	t.Read(p)
	t.NoSyncReload(t.freePageIds())
}

// NoSyncReload rebuilds the free list from pgIds, excluding any page that
// is currently pending.
func (t *shared) NoSyncReload(pgIds common.Pgids) {
	// Build a cache of only pending pages.
	pcache := make(map[common.Pgid]bool)
	for _, txp := range t.pending {
		for _, pendingID := range txp.ids {
			pcache[pendingID] = true
		}
	}

	// Check each page in the freelist and build a new available freelist
	// with any pages not in the pending lists.
	a := []common.Pgid{}
	for _, id := range pgIds {
		if !pcache[id] {
			a = append(a, id)
		}
	}

	t.Init(a)
}

// reindex rebuilds the free cache based on available and pending free lists.
func (t *shared) reindex() {
	free := t.freePageIds()
	pending := t.pendingPageIds()
	t.cache = make(map[common.Pgid]struct{}, len(free))
	for _, id := range free {
		t.cache[id] = struct{}{}
	}
	for _, txp := range pending {
		for _, pendingID := range txp.ids {
			t.cache[pendingID] = struct{}{}
		}
	}
}

// Read initializes the freelist from a freelist page, copying and sorting
// the stored page ids. Panics if p is not a freelist page.
func (t *shared) Read(p *common.Page) {
	if !p.IsFreelistPage() {
		panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.Id(), p.Typ()))
	}

	ids := p.FreelistPageIds()

	// Copy the list of page ids from the freelist.
	if len(ids) == 0 {
		t.Init([]common.Pgid{})
	} else {
		// copy the ids, so we don't modify on the freelist page directly
		idsCopy := make([]common.Pgid, len(ids))
		copy(idsCopy, ids)
		// Make sure they're sorted.
		sort.Sort(common.Pgids(idsCopy))

		t.Init(idsCopy)
	}
}

// EstimatedWritePageSize returns the serialized size of the freelist,
// accounting for the extra leading count element used when the entry
// count does not fit in the 16-bit page count field.
func (t *shared) EstimatedWritePageSize() int {
	n := t.Count()
	if n >= 0xFFFF {
		// The first element will be used to store the count. See freelist.write.
		n++
	}
	return int(common.PageHeaderSize) + (int(unsafe.Sizeof(common.Pgid(0))) * n)
}

// Write serializes the freelist (free + pending ids) into page p.
func (t *shared) Write(p *common.Page) {
	// Combine the old free pgids and pgids waiting on an open transaction.

	// Update the header flag.
	p.SetFlags(common.FreelistPageFlag)

	// The page.count can only hold up to 64k elements so if we overflow that
	// number then we handle it by putting the size in the first element.
	l := t.Count()
	if l == 0 {
		p.SetCount(uint16(l))
	} else if l < 0xFFFF {
		p.SetCount(uint16(l))
		data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
		ids := unsafe.Slice((*common.Pgid)(data), l)
		t.Copyall(ids)
	} else {
		p.SetCount(0xFFFF)
		data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
		ids := unsafe.Slice((*common.Pgid)(data), l+1)
		ids[0] = common.Pgid(l)
		t.Copyall(ids[1:])
	}
}

113
vendor/go.etcd.io/bbolt/logger.go generated vendored Normal file
View File

@@ -0,0 +1,113 @@
package bbolt
// See https://github.com/etcd-io/raft/blob/main/logger.go
import (
"fmt"
"io"
"log"
"os"
)
// Logger is the leveled logging interface used by bbolt, mirroring
// https://github.com/etcd-io/raft/blob/main/logger.go.
type Logger interface {
	Debug(v ...interface{})
	Debugf(format string, v ...interface{})

	Error(v ...interface{})
	Errorf(format string, v ...interface{})

	Info(v ...interface{})
	Infof(format string, v ...interface{})

	Warning(v ...interface{})
	Warningf(format string, v ...interface{})

	Fatal(v ...interface{})
	Fatalf(format string, v ...interface{})

	Panic(v ...interface{})
	Panicf(format string, v ...interface{})
}

// getDiscardLogger returns the shared no-op logger.
func getDiscardLogger() Logger {
	return discardLogger
}

var (
	// discardLogger writes to io.Discard, dropping all output.
	discardLogger = &DefaultLogger{Logger: log.New(io.Discard, "", 0)}
)

const (
	// calldepth makes log.Output attribute messages to the caller of the
	// DefaultLogger method rather than to the wrapper itself.
	calldepth = 2
)

// DefaultLogger is a default implementation of the Logger interface.
type DefaultLogger struct {
	*log.Logger

	debug bool // when false, Debug/Debugf are suppressed
}

// EnableTimestamps adds date and time to every log line.
func (l *DefaultLogger) EnableTimestamps() {
	l.SetFlags(l.Flags() | log.Ldate | log.Ltime)
}

// EnableDebug turns on Debug/Debugf output.
func (l *DefaultLogger) EnableDebug() {
	l.debug = true
}

func (l *DefaultLogger) Debug(v ...interface{}) {
	if l.debug {
		_ = l.Output(calldepth, header("DEBUG", fmt.Sprint(v...)))
	}
}

func (l *DefaultLogger) Debugf(format string, v ...interface{}) {
	if l.debug {
		_ = l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...)))
	}
}

func (l *DefaultLogger) Info(v ...interface{}) {
	_ = l.Output(calldepth, header("INFO", fmt.Sprint(v...)))
}

func (l *DefaultLogger) Infof(format string, v ...interface{}) {
	_ = l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...)))
}

func (l *DefaultLogger) Error(v ...interface{}) {
	_ = l.Output(calldepth, header("ERROR", fmt.Sprint(v...)))
}

func (l *DefaultLogger) Errorf(format string, v ...interface{}) {
	_ = l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...)))
}

func (l *DefaultLogger) Warning(v ...interface{}) {
	_ = l.Output(calldepth, header("WARN", fmt.Sprint(v...)))
}

func (l *DefaultLogger) Warningf(format string, v ...interface{}) {
	_ = l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...)))
}

// Fatal logs at FATAL level and terminates the process with exit code 1.
func (l *DefaultLogger) Fatal(v ...interface{}) {
	_ = l.Output(calldepth, header("FATAL", fmt.Sprint(v...)))
	os.Exit(1)
}

// Fatalf logs at FATAL level and terminates the process with exit code 1.
func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
	_ = l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...)))
	os.Exit(1)
}

func (l *DefaultLogger) Panic(v ...interface{}) {
	l.Logger.Panic(v...)
}

func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
	l.Logger.Panicf(format, v...)
}
// header prefixes a log message with its severity level,
// producing e.g. "INFO: message".
func header(lvl, msg string) string {
	return fmt.Sprint(lvl, ": ", msg)
}

36
vendor/go.etcd.io/bbolt/mlock_unix.go generated vendored Normal file
View File

@@ -0,0 +1,36 @@
//go:build !windows
package bbolt
import "golang.org/x/sys/unix"
// mlock locks memory of db file
// The locked range is capped at db.datasz, since only the mmapped region
// can be pinned.
func mlock(db *DB, fileSize int) error {
	sizeToLock := fileSize
	if sizeToLock > db.datasz {
		// Can't lock more than mmaped slice
		sizeToLock = db.datasz
	}
	if err := unix.Mlock(db.dataref[:sizeToLock]); err != nil {
		return err
	}
	return nil
}

// munlock unlocks memory of db file
// It is a no-op when there is no mmapped data.
func munlock(db *DB, fileSize int) error {
	if db.dataref == nil {
		return nil
	}
	sizeToUnlock := fileSize
	if sizeToUnlock > db.datasz {
		// Can't unlock more than mmaped slice
		sizeToUnlock = db.datasz
	}
	if err := unix.Munlock(db.dataref[:sizeToUnlock]); err != nil {
		return err
	}
	return nil
}

11
vendor/go.etcd.io/bbolt/mlock_windows.go generated vendored Normal file
View File

@@ -0,0 +1,11 @@
package bbolt
// mlock locks memory of db file
// Windows stub: the Mlock DB option is unsupported here and panics if used.
func mlock(_ *DB, _ int) error {
	panic("mlock is supported only on UNIX systems")
}

// munlock unlocks memory of db file
// Windows stub: the Mlock DB option is unsupported here and panics if used.
func munlock(_ *DB, _ int) error {
	panic("munlock is supported only on UNIX systems")
}

538
vendor/go.etcd.io/bbolt/node.go generated vendored Normal file
View File

@@ -0,0 +1,538 @@
package bbolt
import (
"bytes"
"fmt"
"sort"
"go.etcd.io/bbolt/internal/common"
)
// node represents an in-memory, deserialized page.
type node struct {
bucket *Bucket
isLeaf bool
unbalanced bool
spilled bool
key []byte
pgid common.Pgid
parent *node
children nodes
inodes common.Inodes
}
// root returns the top-level node this node is attached to.
func (n *node) root() *node {
if n.parent == nil {
return n
}
return n.parent.root()
}
// minKeys returns the minimum number of inodes this node should have.
func (n *node) minKeys() int {
if n.isLeaf {
return 1
}
return 2
}
// size returns the size of the node after serialization.
func (n *node) size() int {
sz, elsz := common.PageHeaderSize, n.pageElementSize()
for i := 0; i < len(n.inodes); i++ {
item := &n.inodes[i]
sz += elsz + uintptr(len(item.Key())) + uintptr(len(item.Value()))
}
return int(sz)
}
// sizeLessThan returns true if the node is less than a given size.
// This is an optimization to avoid calculating a large node when we only need
// to know if it fits inside a certain page size.
func (n *node) sizeLessThan(v uintptr) bool {
sz, elsz := common.PageHeaderSize, n.pageElementSize()
for i := 0; i < len(n.inodes); i++ {
item := &n.inodes[i]
sz += elsz + uintptr(len(item.Key())) + uintptr(len(item.Value()))
if sz >= v {
return false
}
}
return true
}
// pageElementSize returns the size of each page element based on the type of node.
func (n *node) pageElementSize() uintptr {
if n.isLeaf {
return common.LeafPageElementSize
}
return common.BranchPageElementSize
}
// childAt returns the child node at a given index.
func (n *node) childAt(index int) *node {
if n.isLeaf {
panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
}
return n.bucket.node(n.inodes[index].Pgid(), n)
}
// childIndex returns the index of a given child node.
func (n *node) childIndex(child *node) int {
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), child.key) != -1 })
return index
}
// numChildren returns the number of children.
func (n *node) numChildren() int {
return len(n.inodes)
}
// nextSibling returns the next node with the same parent.
func (n *node) nextSibling() *node {
if n.parent == nil {
return nil
}
index := n.parent.childIndex(n)
if index >= n.parent.numChildren()-1 {
return nil
}
return n.parent.childAt(index + 1)
}
// prevSibling returns the previous node with the same parent.
func (n *node) prevSibling() *node {
if n.parent == nil {
return nil
}
index := n.parent.childIndex(n)
if index == 0 {
return nil
}
return n.parent.childAt(index - 1)
}
// put inserts a key/value.
func (n *node) put(oldKey, newKey, value []byte, pgId common.Pgid, flags uint32) {
if pgId >= n.bucket.tx.meta.Pgid() {
panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.Pgid()))
} else if len(oldKey) <= 0 {
panic("put: zero-length old key")
} else if len(newKey) <= 0 {
panic("put: zero-length new key")
}
// Find insertion index.
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), oldKey) != -1 })
// Add capacity and shift nodes if we don't have an exact match and need to insert.
exact := len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].Key(), oldKey)
if !exact {
n.inodes = append(n.inodes, common.Inode{})
copy(n.inodes[index+1:], n.inodes[index:])
}
inode := &n.inodes[index]
inode.SetFlags(flags)
inode.SetKey(newKey)
inode.SetValue(value)
inode.SetPgid(pgId)
common.Assert(len(inode.Key()) > 0, "put: zero-length inode key")
}
// del removes the entry with the given key from the node. When an entry is
// actually removed, the node is flagged so rebalance() will later visit it.
func (n *node) del(key []byte) {
	// Locate the first inode whose key is >= key.
	i := sort.Search(len(n.inodes), func(j int) bool {
		return bytes.Compare(n.inodes[j].Key(), key) >= 0
	})
	// Nothing to do if the key is absent.
	if i >= len(n.inodes) || !bytes.Equal(n.inodes[i].Key(), key) {
		return
	}
	// Splice the inode out and mark the node as needing rebalancing.
	n.inodes = append(n.inodes[:i], n.inodes[i+1:]...)
	n.unbalanced = true
}
// read initializes the node from a page, copying the page id, leaf flag and
// inodes into the in-memory representation.
func (n *node) read(p *common.Page) {
	n.pgid = p.Id()
	n.isLeaf = p.IsLeafPage()
	n.inodes = common.ReadInodeFromPage(p)
	// Save first key, so we can find the node in the parent when we spill.
	if len(n.inodes) > 0 {
		n.key = n.inodes[0].Key()
		common.Assert(len(n.key) > 0, "read: zero-length node key")
	} else {
		// An empty node has no identifying key.
		n.key = nil
	}
}
// write writes the items onto one or more pages.
// The page should have p.id (might be 0 for meta or bucket-inline page) and p.overflow set
// and the rest should be zeroed.
func (n *node) write(p *common.Page) {
	common.Assert(p.Count() == 0 && p.Flags() == 0, "node cannot be written into a not empty page")
	// Initialize page.
	if n.isLeaf {
		p.SetFlags(common.LeafPageFlag)
	} else {
		p.SetFlags(common.BranchPageFlag)
	}
	// The page element count is stored as a uint16, so more than 0xFFFF-1
	// inodes cannot be represented.
	if len(n.inodes) >= 0xFFFF {
		panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.Id()))
	}
	p.SetCount(uint16(len(n.inodes)))
	// Stop here if there are no items to write.
	if p.Count() == 0 {
		return
	}
	common.WriteInodeToPage(n.inodes, p)
	// DEBUG ONLY: n.dump()
}
// split breaks up a node into multiple smaller nodes, if appropriate.
// This should only be called from the spill() function.
func (n *node) split(pageSize uintptr) []*node {
	var result []*node
	// Repeatedly shave a page-sized node off the front until splitTwo
	// reports there is nothing left to split.
	for current := n; current != nil; {
		head, rest := current.splitTwo(pageSize)
		result = append(result, head)
		current = rest
	}
	return result
}
// splitTwo breaks up a node into two smaller nodes, if appropriate.
// This should only be called from the split() function.
// Returns (n, nil) when no split is needed, otherwise (n, next) where next
// holds the inodes from the split index onward.
func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
	// Ignore the split if the page doesn't have at least enough nodes for
	// two pages or if the nodes can fit in a single page.
	if len(n.inodes) <= (common.MinKeysPerPage*2) || n.sizeLessThan(pageSize) {
		return n, nil
	}
	// Determine the threshold before starting a new node.
	// The bucket's FillPercent is clamped to the allowed range first.
	var fillPercent = n.bucket.FillPercent
	if fillPercent < minFillPercent {
		fillPercent = minFillPercent
	} else if fillPercent > maxFillPercent {
		fillPercent = maxFillPercent
	}
	threshold := int(float64(pageSize) * fillPercent)
	// Determine split position and sizes of the two pages.
	splitIndex, _ := n.splitIndex(threshold)
	// Split node into two separate nodes.
	// If there's no parent then we'll need to create one.
	if n.parent == nil {
		n.parent = &node{bucket: n.bucket, children: []*node{n}}
	}
	// Create a new node and add it to the parent.
	next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
	n.parent.children = append(n.parent.children, next)
	// Split inodes across two nodes.
	next.inodes = n.inodes[splitIndex:]
	n.inodes = n.inodes[:splitIndex]
	// Update the statistics.
	n.bucket.tx.stats.IncSplit(1)
	return n, next
}
// splitIndex finds the position where a page will fill a given threshold.
// It returns the index as well as the size of the first page.
// This is only be called from split().
func (n *node) splitIndex(threshold int) (index, sz uintptr) {
	sz = common.PageHeaderSize
	// Loop until we only have the minimum number of keys required for the second page.
	for i := 0; i < len(n.inodes)-common.MinKeysPerPage; i++ {
		index = uintptr(i)
		inode := n.inodes[i]
		// Size of this element: per-element header plus key and value bytes.
		elsize := n.pageElementSize() + uintptr(len(inode.Key())) + uintptr(len(inode.Value()))
		// If we have at least the minimum number of keys and adding another
		// node would put us over the threshold then exit and return.
		if index >= common.MinKeysPerPage && sz+elsize > uintptr(threshold) {
			break
		}
		// Add the element size to the total size.
		sz += elsize
	}
	return
}
// spill writes the nodes to dirty pages and splits nodes as it goes.
// Returns an error if dirty pages cannot be allocated.
func (n *node) spill() error {
	var tx = n.bucket.tx
	if n.spilled {
		return nil
	}
	// Spill child nodes first. Child nodes can materialize sibling nodes in
	// the case of split-merge so we cannot use a range loop. We have to check
	// the children size on every loop iteration.
	sort.Sort(n.children)
	for i := 0; i < len(n.children); i++ {
		if err := n.children[i].spill(); err != nil {
			return err
		}
	}
	// We no longer need the child list because it's only used for spill tracking.
	n.children = nil
	// Split nodes into appropriate sizes. The first node will always be n.
	var nodes = n.split(uintptr(tx.db.pageSize))
	for _, node := range nodes {
		// Add node's page to the freelist if it's not new.
		if node.pgid > 0 {
			tx.db.freelist.Free(tx.meta.Txid(), tx.page(node.pgid))
			node.pgid = 0
		}
		// Allocate contiguous space for the node.
		// (ceiling division: number of whole pages needed for node.size() bytes)
		p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize)
		if err != nil {
			return err
		}
		// Write the node.
		if p.Id() >= tx.meta.Pgid() {
			panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.Id(), tx.meta.Pgid()))
		}
		node.pgid = p.Id()
		node.write(p)
		node.spilled = true
		// Insert into parent inodes. The first argument is the key the parent
		// currently knows this node by; the second is its (possibly new) first key.
		if node.parent != nil {
			var key = node.key
			if key == nil {
				key = node.inodes[0].Key()
			}
			node.parent.put(key, node.inodes[0].Key(), nil, node.pgid, 0)
			node.key = node.inodes[0].Key()
			common.Assert(len(node.key) > 0, "spill: zero-length node key")
		}
		// Update the statistics.
		tx.stats.IncSpill(1)
	}
	// If the root node split and created a new root then we need to spill that
	// as well. We'll clear out the children to make sure it doesn't try to respill.
	if n.parent != nil && n.parent.pgid == 0 {
		n.children = nil
		return n.parent.spill()
	}
	return nil
}
// rebalance attempts to combine the node with sibling nodes if the node fill
// size is below a threshold or if there are not enough keys.
// It is a no-op unless a prior del() marked the node as unbalanced.
func (n *node) rebalance() {
	if !n.unbalanced {
		return
	}
	n.unbalanced = false
	// Update statistics.
	n.bucket.tx.stats.IncRebalance(1)
	// Ignore if node is above threshold (25% when FillPercent is set to DefaultFillPercent) and has enough keys.
	var threshold = int(float64(n.bucket.tx.db.pageSize)*n.bucket.FillPercent) / 2
	if n.size() > threshold && len(n.inodes) > n.minKeys() {
		return
	}
	// Root node has special handling.
	if n.parent == nil {
		// If root node is a branch and only has one node then collapse it.
		if !n.isLeaf && len(n.inodes) == 1 {
			// Move root's child up.
			child := n.bucket.node(n.inodes[0].Pgid(), n)
			n.isLeaf = child.isLeaf
			n.inodes = child.inodes[:]
			n.children = child.children
			// Reparent all child nodes being moved.
			for _, inode := range n.inodes {
				if child, ok := n.bucket.nodes[inode.Pgid()]; ok {
					child.parent = n
				}
			}
			// Remove old child.
			child.parent = nil
			delete(n.bucket.nodes, child.pgid)
			child.free()
		}
		return
	}
	// If node has no keys then just remove it.
	if n.numChildren() == 0 {
		n.parent.del(n.key)
		n.parent.removeChild(n)
		delete(n.bucket.nodes, n.pgid)
		n.free()
		// Removing an entry from the parent may leave it unbalanced too.
		n.parent.rebalance()
		return
	}
	common.Assert(n.parent.numChildren() > 1, "parent must have at least 2 children")
	// Merge with right sibling if idx == 0, otherwise left sibling.
	var leftNode, rightNode *node
	var useNextSibling = n.parent.childIndex(n) == 0
	if useNextSibling {
		leftNode = n
		rightNode = n.nextSibling()
	} else {
		leftNode = n.prevSibling()
		rightNode = n
	}
	// If both nodes are too small then merge them.
	// Reparent all child nodes being moved.
	for _, inode := range rightNode.inodes {
		if child, ok := n.bucket.nodes[inode.Pgid()]; ok {
			child.parent.removeChild(child)
			child.parent = leftNode
			child.parent.children = append(child.parent.children, child)
		}
	}
	// Copy over inodes from right node to left node and remove right node.
	leftNode.inodes = append(leftNode.inodes, rightNode.inodes...)
	n.parent.del(rightNode.key)
	n.parent.removeChild(rightNode)
	delete(n.bucket.nodes, rightNode.pgid)
	rightNode.free()
	// Either this node or the sibling node was deleted from the parent so rebalance it.
	n.parent.rebalance()
}
// removeChild drops target from the in-memory children list.
// The node's inodes are left untouched.
func (n *node) removeChild(target *node) {
	for i := range n.children {
		if n.children[i] != target {
			continue
		}
		n.children = append(n.children[:i], n.children[i+1:]...)
		return
	}
}
// dereference causes the node to copy all its inode key/value references to heap memory.
// This is required when the mmap is reallocated so inodes are not pointing to stale data.
func (n *node) dereference() {
	if n.key != nil {
		key := make([]byte, len(n.key))
		copy(key, n.key)
		n.key = key
		common.Assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
	}
	// Copy every inode's key and value off the mmap'd region.
	for i := range n.inodes {
		inode := &n.inodes[i]
		key := make([]byte, len(inode.Key()))
		copy(key, inode.Key())
		inode.SetKey(key)
		common.Assert(len(inode.Key()) > 0, "dereference: zero-length inode key")
		value := make([]byte, len(inode.Value()))
		copy(value, inode.Value())
		inode.SetValue(value)
	}
	// Recursively dereference children.
	for _, child := range n.children {
		child.dereference()
	}
	// Update statistics.
	n.bucket.tx.stats.IncNodeDeref(1)
}
// free adds the node's underlying page to the freelist and resets the page
// id so the node is treated as new. No-op for nodes without a page.
func (n *node) free() {
	if n.pgid != 0 {
		n.bucket.tx.db.freelist.Free(n.bucket.tx.meta.Txid(), n.bucket.tx.page(n.pgid))
		n.pgid = 0
	}
}
// dump writes the contents of the node to STDERR for debugging purposes.
/*
func (n *node) dump() {
// Write node header.
var typ = "branch"
if n.isLeaf {
typ = "leaf"
}
warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))
// Write out abbreviated version of each item.
for _, item := range n.inodes {
if n.isLeaf {
if item.flags&bucketLeafFlag != 0 {
bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
} else {
warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
}
} else {
warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
}
}
warn("")
}
*/
// compareKeys orders two keys lexicographically, returning a negative,
// zero, or positive result as left is less than, equal to, or greater
// than right (a nil key compares equal to an empty one).
func compareKeys(left, right []byte) int {
	result := bytes.Compare(left, right)
	return result
}
// nodes is a sortable collection of nodes, ordered by each node's first key.
type nodes []*node

func (s nodes) Len() int      { return len(s) }
func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s nodes) Less(i, j int) bool {
	return bytes.Compare(s[i].inodes[0].Key(), s[j].inodes[0].Key()) < 0
}

858
vendor/go.etcd.io/bbolt/tx.go generated vendored Normal file
View File

@@ -0,0 +1,858 @@
package bbolt
import (
"errors"
"fmt"
"io"
"os"
"runtime"
"sort"
"strings"
"sync/atomic"
"time"
"unsafe"
berrors "go.etcd.io/bbolt/errors"
"go.etcd.io/bbolt/internal/common"
)
// Tx represents a read-only or read/write transaction on the database.
// Read-only transactions can be used for retrieving values for keys and creating cursors.
// Read/write transactions can create and remove buckets and create and remove keys.
//
// IMPORTANT: You must commit or rollback transactions when you are done with
// them. Pages can not be reclaimed by the writer until no more transactions
// are using them. A long running read transaction can cause the database to
// quickly grow.
type Tx struct {
	writable bool // true for read/write transactions
	managed  bool // true for DB-managed transactions; manual Commit/Rollback is then disallowed
	db       *DB  // owning database; nil once the transaction is closed
	meta     *common.Meta // private copy of the meta page
	root     Bucket       // root bucket, materialized from the meta's root InBucket
	pages    map[common.Pgid]*common.Page // dirty-page cache; non-nil only for writable transactions
	stats    TxStats      // per-transaction statistics, merged into the DB stats on close
	commitHandlers []func() // callbacks run after a successful Commit
	// WriteFlag specifies the flag for write-related methods like WriteTo().
	// Tx opens the database file with the specified flag to copy the data.
	//
	// By default, the flag is unset, which works well for mostly in-memory
	// workloads. For databases that are much larger than available RAM,
	// set the flag to syscall.O_DIRECT to avoid trashing the page cache.
	WriteFlag int
}
// init initializes the transaction against the given database, taking a
// private snapshot of the meta page and root bucket so concurrent writers
// cannot affect this transaction's view.
func (tx *Tx) init(db *DB) {
	tx.db = db
	tx.pages = nil
	// Copy the meta page since it can be changed by the writer.
	tx.meta = &common.Meta{}
	db.meta().Copy(tx.meta)
	// Copy over the root bucket.
	tx.root = newBucket(tx)
	tx.root.InBucket = &common.InBucket{}
	*tx.root.InBucket = *(tx.meta.RootBucket())
	// Increment the transaction id and add a page cache for writable transactions.
	if tx.writable {
		tx.pages = make(map[common.Pgid]*common.Page)
		tx.meta.IncTxid()
	}
}
// ID returns the transaction id, or -1 when the transaction (or its meta
// snapshot) is no longer available.
func (tx *Tx) ID() int {
	if tx != nil && tx.meta != nil {
		return int(tx.meta.Txid())
	}
	return -1
}
// DB returns a reference to the database that created the transaction.
// It is nil once the transaction has been closed.
func (tx *Tx) DB() *DB {
	return tx.db
}
// Size returns current database size in bytes as seen by this transaction,
// computed as the transaction's high-water page id times the page size.
func (tx *Tx) Size() int64 {
	return int64(tx.meta.Pgid()) * int64(tx.db.pageSize)
}
// Writable returns whether the transaction can perform write operations
// (i.e. it was opened as a read/write transaction).
func (tx *Tx) Writable() bool {
	return tx.writable
}
// Cursor creates a cursor associated with the root bucket.
// All items in the cursor will return a nil value because all root bucket keys point to buckets.
// The cursor is only valid as long as the transaction is open.
// Do not use a cursor after the transaction is closed.
func (tx *Tx) Cursor() *Cursor {
	return tx.root.Cursor()
}
// Stats retrieves a copy of the current transaction statistics.
// The returned TxStats is a value copy, detached from further updates.
func (tx *Tx) Stats() TxStats {
	return tx.stats
}
// Inspect returns the structure of the database, starting from the root bucket.
func (tx *Tx) Inspect() BucketStructure {
	return tx.root.Inspect()
}
// Bucket retrieves a top-level bucket by name.
// Returns nil if the bucket does not exist.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) Bucket(name []byte) *Bucket {
	return tx.root.Bucket(name)
}
// CreateBucket creates a new top-level bucket.
// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
	return tx.root.CreateBucket(name)
}
// CreateBucketIfNotExists creates a new top-level bucket if it doesn't already exist.
// Returns an error if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
	return tx.root.CreateBucketIfNotExists(name)
}
// DeleteBucket deletes a top-level bucket.
// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
func (tx *Tx) DeleteBucket(name []byte) error {
	return tx.root.DeleteBucket(name)
}
// MoveBucket moves a sub-bucket from the source bucket to the destination bucket.
// Returns an error if
//  1. the sub-bucket cannot be found in the source bucket;
//  2. or the key already exists in the destination bucket;
//  3. the key represents a non-bucket value.
//
// If src is nil, it means moving a top level bucket into the target bucket.
// If dst is nil, it means converting the child bucket into a top level bucket.
func (tx *Tx) MoveBucket(child []byte, src *Bucket, dst *Bucket) error {
	// A nil bucket on either side stands for the transaction's root bucket.
	srcBucket, dstBucket := src, dst
	if srcBucket == nil {
		srcBucket = &tx.root
	}
	if dstBucket == nil {
		dstBucket = &tx.root
	}
	return srcBucket.MoveBucket(child, dstBucket)
}
// ForEach executes a function for each top-level bucket in the root.
// If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller.
// Note the raw value from the root cursor is ignored; the bucket is looked
// up by name so fn always receives a usable *Bucket.
func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
	return tx.root.ForEach(func(k, v []byte) error {
		return fn(k, tx.root.Bucket(k))
	})
}
// OnCommit adds a handler function to be executed after the transaction successfully commits.
// Handlers are not invoked when the transaction is rolled back or fails.
func (tx *Tx) OnCommit(fn func()) {
	tx.commitHandlers = append(tx.commitHandlers, fn)
}
// Commit writes all changes to disk, updates the meta page and closes the transaction.
// Returns an error if a disk write error occurs, or if Commit is
// called on a read-only transaction.
//
// The ordering below is deliberate: dirty data pages are written and synced
// before the meta page, so an interrupted commit leaves the previous meta
// (and therefore the previous consistent tree) in effect.
func (tx *Tx) Commit() (err error) {
	txId := tx.ID()
	lg := tx.db.Logger()
	if lg != discardLogger {
		lg.Debugf("Committing transaction %d", txId)
		defer func() {
			if err != nil {
				lg.Errorf("Committing transaction failed: %v", err)
			} else {
				lg.Debugf("Committing transaction %d successfully", txId)
			}
		}()
	}
	common.Assert(!tx.managed, "managed tx commit not allowed")
	if tx.db == nil {
		return berrors.ErrTxClosed
	} else if !tx.writable {
		return berrors.ErrTxNotWritable
	}
	// TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
	// Rebalance nodes which have had deletions.
	var startTime = time.Now()
	tx.root.rebalance()
	if tx.stats.GetRebalance() > 0 {
		tx.stats.IncRebalanceTime(time.Since(startTime))
	}
	opgid := tx.meta.Pgid()
	// spill data onto dirty pages.
	startTime = time.Now()
	if err = tx.root.spill(); err != nil {
		lg.Errorf("spilling data onto dirty pages failed: %v", err)
		tx.rollback()
		return err
	}
	tx.stats.IncSpillTime(time.Since(startTime))
	// Free the old root bucket.
	tx.meta.RootBucket().SetRootPage(tx.root.RootPage())
	// Free the old freelist because commit writes out a fresh freelist.
	if tx.meta.Freelist() != common.PgidNoFreelist {
		tx.db.freelist.Free(tx.meta.Txid(), tx.db.page(tx.meta.Freelist()))
	}
	if !tx.db.NoFreelistSync {
		err = tx.commitFreelist()
		if err != nil {
			lg.Errorf("committing freelist failed: %v", err)
			return err
		}
	} else {
		tx.meta.SetFreelist(common.PgidNoFreelist)
	}
	// If the high water mark has moved up then attempt to grow the database.
	if tx.meta.Pgid() > opgid {
		// Presumably keeps the errors import referenced for the gofail
		// failpoint code below — confirm against the gofail tooling.
		_ = errors.New("")
		// gofail: var lackOfDiskSpace string
		// tx.rollback()
		// return errors.New(lackOfDiskSpace)
		if err = tx.db.grow(int(tx.meta.Pgid()+1) * tx.db.pageSize); err != nil {
			lg.Errorf("growing db size failed, pgid: %d, pagesize: %d, error: %v", tx.meta.Pgid(), tx.db.pageSize, err)
			tx.rollback()
			return err
		}
	}
	// Write dirty pages to disk.
	startTime = time.Now()
	if err = tx.write(); err != nil {
		lg.Errorf("writing data failed: %v", err)
		tx.rollback()
		return err
	}
	// If strict mode is enabled then perform a consistency check.
	if tx.db.StrictMode {
		ch := tx.Check()
		var errs []string
		for {
			chkErr, ok := <-ch
			if !ok {
				break
			}
			errs = append(errs, chkErr.Error())
		}
		if len(errs) > 0 {
			panic("check fail: " + strings.Join(errs, "\n"))
		}
	}
	// Write meta to disk.
	if err = tx.writeMeta(); err != nil {
		lg.Errorf("writeMeta failed: %v", err)
		tx.rollback()
		return err
	}
	tx.stats.IncWriteTime(time.Since(startTime))
	// Finalize the transaction.
	tx.close()
	// Execute commit handlers now that the locks have been removed.
	for _, fn := range tx.commitHandlers {
		fn()
	}
	return nil
}
// commitFreelist writes the current freelist to freshly allocated pages and
// records the new freelist page id in the transaction's meta.
func (tx *Tx) commitFreelist() error {
	// Allocate new pages for the new free list. This will overestimate
	// the size of the freelist but not underestimate the size (which would be bad).
	p, err := tx.allocate((tx.db.freelist.EstimatedWritePageSize() / tx.db.pageSize) + 1)
	if err != nil {
		tx.rollback()
		return err
	}
	tx.db.freelist.Write(p)
	tx.meta.SetFreelist(p.Id())
	return nil
}
// Rollback closes the transaction and ignores all previous updates. Read-only
// transactions must be rolled back and not committed.
func (tx *Tx) Rollback() error {
	common.Assert(!tx.managed, "managed tx rollback not allowed")
	if tx.db != nil {
		tx.nonPhysicalRollback()
		return nil
	}
	return berrors.ErrTxClosed
}
// nonPhysicalRollback is called when user calls Rollback directly, in this case we do not need to reload the free pages from disk.
func (tx *Tx) nonPhysicalRollback() {
	// Already closed; nothing to undo.
	if tx.db == nil {
		return
	}
	// Discard any pages this transaction marked pending in the freelist.
	if tx.writable {
		tx.db.freelist.Rollback(tx.meta.Txid())
	}
	tx.close()
}
// rollback needs to reload the free pages from disk in case some system error happens like fsync error.
func (tx *Tx) rollback() {
	if tx.db == nil {
		return
	}
	if tx.writable {
		tx.db.freelist.Rollback(tx.meta.Txid())
		// When mmap fails, the `data`, `dataref` and `datasz` may be reset to
		// zero values, and there is no way to reload free page IDs in this case.
		if tx.db.data != nil {
			if !tx.db.hasSyncedFreelist() {
				// Reconstruct free page list by scanning the DB to get the whole free page list.
				// Note: scanning the whole db is heavy if your db size is large in NoSyncFreeList mode.
				tx.db.freelist.NoSyncReload(tx.db.freepages())
			} else {
				// Read free page list from freelist page.
				tx.db.freelist.Reload(tx.db.page(tx.db.meta().Freelist()))
			}
		}
	}
	tx.close()
}
// close releases the transaction's resources: for writable transactions it
// releases the writer lock and merges stats into the DB; for read-only
// transactions it removes itself from the DB's reader list. Safe to call on
// an already-closed transaction.
func (tx *Tx) close() {
	if tx.db == nil {
		return
	}
	if tx.writable {
		// Grab freelist stats.
		var freelistFreeN = tx.db.freelist.FreeCount()
		var freelistPendingN = tx.db.freelist.PendingCount()
		var freelistAlloc = tx.db.freelist.EstimatedWritePageSize()
		// Remove transaction ref & writer lock.
		tx.db.rwtx = nil
		tx.db.rwlock.Unlock()
		// Merge statistics.
		tx.db.statlock.Lock()
		tx.db.stats.FreePageN = freelistFreeN
		tx.db.stats.PendingPageN = freelistPendingN
		tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
		tx.db.stats.FreelistInuse = freelistAlloc
		tx.db.stats.TxStats.add(&tx.stats)
		tx.db.statlock.Unlock()
	} else {
		tx.db.removeTx(tx)
	}
	// Clear all references.
	tx.db = nil
	tx.meta = nil
	tx.root = Bucket{tx: tx}
	tx.pages = nil
}
// Copy writes the entire database to a writer.
// This function exists for backwards compatibility.
//
// Deprecated: Use WriteTo() instead.
func (tx *Tx) Copy(w io.Writer) error {
	_, err := tx.WriteTo(w)
	return err
}
// WriteTo writes the entire database to a writer.
// If err == nil then exactly tx.Size() bytes will be written into the writer.
// The two meta pages are regenerated from this transaction's meta snapshot;
// the remaining data pages are streamed straight from the database file.
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
	// Attempt to open reader with WriteFlag
	f, err := tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
	if err != nil {
		return 0, err
	}
	defer func() {
		if cerr := f.Close(); err == nil {
			err = cerr
		}
	}()
	// Generate a meta page. We use the same page data for both meta pages.
	buf := make([]byte, tx.db.pageSize)
	page := (*common.Page)(unsafe.Pointer(&buf[0]))
	page.SetFlags(common.MetaPageFlag)
	*page.Meta() = *tx.meta
	// Write meta 0.
	page.SetId(0)
	page.Meta().SetChecksum(page.Meta().Sum64())
	nn, err := w.Write(buf)
	n += int64(nn)
	if err != nil {
		return n, fmt.Errorf("meta 0 copy: %s", err)
	}
	// Write meta 1 with a lower transaction id.
	page.SetId(1)
	page.Meta().DecTxid()
	page.Meta().SetChecksum(page.Meta().Sum64())
	nn, err = w.Write(buf)
	n += int64(nn)
	if err != nil {
		return n, fmt.Errorf("meta 1 copy: %s", err)
	}
	// Move past the meta pages in the file.
	if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil {
		return n, fmt.Errorf("seek: %s", err)
	}
	// Copy data pages.
	wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
	n += wn
	if err != nil {
		return n, err
	}
	return n, nil
}
// CopyFile copies the entire database to file at the given path.
// A reader transaction is maintained during the copy so it is safe to continue
// using the database while a copy is in progress.
func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
	f, err := tx.db.openFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
	if err != nil {
		return err
	}
	// On a write failure, close the file but surface the write error.
	if _, werr := tx.WriteTo(f); werr != nil {
		_ = f.Close()
		return werr
	}
	return f.Close()
}
// allocate returns a contiguous block of memory starting at a given page.
// The allocated page is recorded in the transaction's dirty-page cache so
// subsequent lookups via tx.page() see it before the mmap'd data.
func (tx *Tx) allocate(count int) (*common.Page, error) {
	lg := tx.db.Logger()
	p, err := tx.db.allocate(tx.meta.Txid(), count)
	if err != nil {
		lg.Errorf("allocating failed, txid: %d, count: %d, error: %v", tx.meta.Txid(), count, err)
		return nil, err
	}
	// Save to our page cache.
	tx.pages[p.Id()] = p
	// Update statistics.
	tx.stats.IncPageCount(int64(count))
	tx.stats.IncPageAlloc(int64(count * tx.db.pageSize))
	return p, nil
}
// write writes any dirty pages to disk, in page-id order, then syncs unless
// NoSync is set. Single pages are zeroed and returned to the page pool.
func (tx *Tx) write() error {
	// Sort pages by id.
	lg := tx.db.Logger()
	pages := make(common.Pages, 0, len(tx.pages))
	for _, p := range tx.pages {
		pages = append(pages, p)
	}
	// Clear out page cache early.
	tx.pages = make(map[common.Pgid]*common.Page)
	sort.Sort(pages)
	// Write pages to disk in order.
	for _, p := range pages {
		// Total bytes for this page including its overflow pages.
		rem := (uint64(p.Overflow()) + 1) * uint64(tx.db.pageSize)
		offset := int64(p.Id()) * int64(tx.db.pageSize)
		var written uintptr
		// Write out page in "max allocation" sized chunks.
		for {
			sz := rem
			if sz > maxAllocSize-1 {
				sz = maxAllocSize - 1
			}
			buf := common.UnsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz))
			if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
				lg.Errorf("writeAt failed, offset: %d: %w", offset, err)
				return err
			}
			// Update statistics.
			tx.stats.IncWrite(1)
			// Exit inner for loop if we've written all the chunks.
			rem -= sz
			if rem == 0 {
				break
			}
			// Otherwise move offset forward and move pointer to next chunk.
			offset += int64(sz)
			written += uintptr(sz)
		}
	}
	// Ignore file sync if flag is set on DB.
	if !tx.db.NoSync || common.IgnoreNoSync {
		// gofail: var beforeSyncDataPages struct{}
		if err := fdatasync(tx.db); err != nil {
			lg.Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err)
			return err
		}
	}
	// Put small pages back to page pool.
	for _, p := range pages {
		// Ignore page sizes over 1 page.
		// These are allocated using make() instead of the page pool.
		if int(p.Overflow()) != 0 {
			continue
		}
		buf := common.UnsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize)
		// Zero the buffer before pooling so stale data cannot leak.
		// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
		for i := range buf {
			buf[i] = 0
		}
		tx.db.pagePool.Put(buf) //nolint:staticcheck
	}
	return nil
}
// writeMeta writes the meta page to disk and syncs it, finalizing the commit.
func (tx *Tx) writeMeta() error {
	// gofail: var beforeWriteMetaError string
	// return errors.New(beforeWriteMetaError)
	// Create a temporary buffer for the meta page.
	lg := tx.db.Logger()
	buf := make([]byte, tx.db.pageSize)
	p := tx.db.pageInBuffer(buf, 0)
	tx.meta.Write(p)
	// Write the meta page to file.
	if _, err := tx.db.ops.writeAt(buf, int64(p.Id())*int64(tx.db.pageSize)); err != nil {
		lg.Errorf("writeAt failed, pgid: %d, pageSize: %d, error: %v", p.Id(), tx.db.pageSize, err)
		return err
	}
	// Sync unless disabled, so the commit is durable once this returns.
	if !tx.db.NoSync || common.IgnoreNoSync {
		// gofail: var beforeSyncMetaPage struct{}
		if err := fdatasync(tx.db); err != nil {
			lg.Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err)
			return err
		}
	}
	// Update statistics.
	tx.stats.IncWrite(1)
	return nil
}
// page returns a reference to the page with a given id.
// If page has been written to then a temporary buffered page is returned.
func (tx *Tx) page(id common.Pgid) *common.Page {
	// Check the dirty pages first.
	if tx.pages != nil {
		if p, ok := tx.pages[id]; ok {
			p.FastCheck(id)
			return p
		}
	}
	// Otherwise return directly from the mmap.
	p := tx.db.page(id)
	p.FastCheck(id)
	return p
}
// forEachPage walks the page tree rooted at pgidnum, invoking fn for each
// page visited together with its depth and the stack of ancestor page ids.
func (tx *Tx) forEachPage(pgidnum common.Pgid, fn func(*common.Page, int, []common.Pgid)) {
	// Seed the traversal stack with the root page id; the spare capacity
	// lets the recursion append without reallocating for shallow trees.
	stack := make([]common.Pgid, 1, 10)
	stack[0] = pgidnum
	tx.forEachPageInternal(stack, fn)
}
// forEachPageInternal visits the page at the top of pgidstack, then recurses
// into each child of branch pages with the child's id pushed onto the stack.
// fn receives the page, its depth (stack length - 1) and the ancestor stack.
func (tx *Tx) forEachPageInternal(pgidstack []common.Pgid, fn func(*common.Page, int, []common.Pgid)) {
	p := tx.page(pgidstack[len(pgidstack)-1])
	// Execute function.
	fn(p, len(pgidstack)-1, pgidstack)
	// Recursively loop over children.
	if p.IsBranchPage() {
		for i := 0; i < int(p.Count()); i++ {
			elem := p.BranchPageElement(uint16(i))
			tx.forEachPageInternal(append(pgidstack, elem.Pgid()), fn)
		}
	}
}
// Page returns page information for a given page number.
// This is only safe for concurrent use when used by a writable transaction.
// Returns (nil, nil) when the id is beyond the transaction's high water mark.
func (tx *Tx) Page(id int) (*common.PageInfo, error) {
	if tx.db == nil {
		return nil, berrors.ErrTxClosed
	} else if common.Pgid(id) >= tx.meta.Pgid() {
		return nil, nil
	}
	if tx.db.freelist == nil {
		return nil, berrors.ErrFreePagesNotLoaded
	}
	// Build the page info.
	p := tx.db.page(common.Pgid(id))
	info := &common.PageInfo{
		ID:            id,
		Count:         int(p.Count()),
		OverflowCount: int(p.Overflow()),
	}
	// Determine the type (or if it's free).
	if tx.db.freelist.Freed(common.Pgid(id)) {
		info.Type = "free"
	} else {
		info.Type = p.Typ()
	}
	return info, nil
}
// TxStats represents statistics about the actions performed by the transaction.
//
// All fields may be read/updated concurrently and are accessed via
// sync/atomic; use the Get*/Inc* accessors rather than the fields directly.
type TxStats struct {
	// Page statistics.
	//
	// DEPRECATED: Use GetPageCount() or IncPageCount()
	PageCount int64 // number of page allocations
	// DEPRECATED: Use GetPageAlloc() or IncPageAlloc()
	PageAlloc int64 // total bytes allocated
	// Cursor statistics.
	//
	// DEPRECATED: Use GetCursorCount() or IncCursorCount()
	CursorCount int64 // number of cursors created
	// Node statistics
	//
	// DEPRECATED: Use GetNodeCount() or IncNodeCount()
	NodeCount int64 // number of node allocations
	// DEPRECATED: Use GetNodeDeref() or IncNodeDeref()
	NodeDeref int64 // number of node dereferences
	// Rebalance statistics.
	//
	// DEPRECATED: Use GetRebalance() or IncRebalance()
	Rebalance int64 // number of node rebalances
	// DEPRECATED: Use GetRebalanceTime() or IncRebalanceTime()
	RebalanceTime time.Duration // total time spent rebalancing
	// Split/Spill statistics.
	//
	// DEPRECATED: Use GetSplit() or IncSplit()
	Split int64 // number of nodes split
	// DEPRECATED: Use GetSpill() or IncSpill()
	Spill int64 // number of nodes spilled
	// DEPRECATED: Use GetSpillTime() or IncSpillTime()
	SpillTime time.Duration // total time spent spilling
	// Write statistics.
	//
	// DEPRECATED: Use GetWrite() or IncWrite()
	Write int64 // number of writes performed
	// DEPRECATED: Use GetWriteTime() or IncWriteTime()
	WriteTime time.Duration // total time spent writing to disk
}
// add accumulates every counter from other into s, field by field,
// using the atomic accessors on both sides.
func (s *TxStats) add(other *TxStats) {
	s.IncPageCount(other.GetPageCount())
	s.IncPageAlloc(other.GetPageAlloc())
	s.IncCursorCount(other.GetCursorCount())
	s.IncNodeCount(other.GetNodeCount())
	s.IncNodeDeref(other.GetNodeDeref())
	s.IncRebalance(other.GetRebalance())
	s.IncRebalanceTime(other.GetRebalanceTime())
	s.IncSplit(other.GetSplit())
	s.IncSpill(other.GetSpill())
	s.IncSpillTime(other.GetSpillTime())
	s.IncWrite(other.GetWrite())
	s.IncWriteTime(other.GetWriteTime())
}
// Sub calculates and returns the difference between two sets of transaction stats.
// This is useful when obtaining stats at two different points and time and
// you need the performance counters that occurred within that time span.
// Each field is read atomically, but the result as a whole is not a snapshot.
func (s *TxStats) Sub(other *TxStats) TxStats {
	var diff TxStats
	diff.PageCount = s.GetPageCount() - other.GetPageCount()
	diff.PageAlloc = s.GetPageAlloc() - other.GetPageAlloc()
	diff.CursorCount = s.GetCursorCount() - other.GetCursorCount()
	diff.NodeCount = s.GetNodeCount() - other.GetNodeCount()
	diff.NodeDeref = s.GetNodeDeref() - other.GetNodeDeref()
	diff.Rebalance = s.GetRebalance() - other.GetRebalance()
	diff.RebalanceTime = s.GetRebalanceTime() - other.GetRebalanceTime()
	diff.Split = s.GetSplit() - other.GetSplit()
	diff.Spill = s.GetSpill() - other.GetSpill()
	diff.SpillTime = s.GetSpillTime() - other.GetSpillTime()
	diff.Write = s.GetWrite() - other.GetWrite()
	diff.WriteTime = s.GetWriteTime() - other.GetWriteTime()
	return diff
}
// GetPageCount returns PageCount atomically.
func (s *TxStats) GetPageCount() int64 {
	return atomic.LoadInt64(&s.PageCount)
}

// IncPageCount increases PageCount atomically and returns the new value.
func (s *TxStats) IncPageCount(delta int64) int64 {
	return atomic.AddInt64(&s.PageCount, delta)
}

// GetPageAlloc returns PageAlloc atomically.
func (s *TxStats) GetPageAlloc() int64 {
	return atomic.LoadInt64(&s.PageAlloc)
}

// IncPageAlloc increases PageAlloc atomically and returns the new value.
func (s *TxStats) IncPageAlloc(delta int64) int64 {
	return atomic.AddInt64(&s.PageAlloc, delta)
}

// GetCursorCount returns CursorCount atomically.
func (s *TxStats) GetCursorCount() int64 {
	return atomic.LoadInt64(&s.CursorCount)
}

// IncCursorCount increases CursorCount atomically and returns the new value.
func (s *TxStats) IncCursorCount(delta int64) int64 {
	return atomic.AddInt64(&s.CursorCount, delta)
}

// GetNodeCount returns NodeCount atomically.
func (s *TxStats) GetNodeCount() int64 {
	return atomic.LoadInt64(&s.NodeCount)
}

// IncNodeCount increases NodeCount atomically and returns the new value.
func (s *TxStats) IncNodeCount(delta int64) int64 {
	return atomic.AddInt64(&s.NodeCount, delta)
}

// GetNodeDeref returns NodeDeref atomically.
func (s *TxStats) GetNodeDeref() int64 {
	return atomic.LoadInt64(&s.NodeDeref)
}

// IncNodeDeref increases NodeDeref atomically and returns the new value.
func (s *TxStats) IncNodeDeref(delta int64) int64 {
	return atomic.AddInt64(&s.NodeDeref, delta)
}

// GetRebalance returns Rebalance atomically.
func (s *TxStats) GetRebalance() int64 {
	return atomic.LoadInt64(&s.Rebalance)
}

// IncRebalance increases Rebalance atomically and returns the new value.
func (s *TxStats) IncRebalance(delta int64) int64 {
	return atomic.AddInt64(&s.Rebalance, delta)
}

// GetRebalanceTime returns RebalanceTime atomically.
// Durations are stored as int64 nanoseconds; see atomicLoadDuration.
func (s *TxStats) GetRebalanceTime() time.Duration {
	return atomicLoadDuration(&s.RebalanceTime)
}

// IncRebalanceTime increases RebalanceTime atomically and returns the new value.
func (s *TxStats) IncRebalanceTime(delta time.Duration) time.Duration {
	return atomicAddDuration(&s.RebalanceTime, delta)
}
// GetSplit returns Split atomically.
func (s *TxStats) GetSplit() int64 {
	return atomic.LoadInt64(&s.Split)
}

// IncSplit increases Split atomically and returns the new value.
func (s *TxStats) IncSplit(delta int64) int64 {
	return atomic.AddInt64(&s.Split, delta)
}

// GetSpill returns Spill atomically.
func (s *TxStats) GetSpill() int64 {
	return atomic.LoadInt64(&s.Spill)
}

// IncSpill increases Spill atomically and returns the new value.
func (s *TxStats) IncSpill(delta int64) int64 {
	return atomic.AddInt64(&s.Spill, delta)
}

// GetSpillTime returns SpillTime atomically.
// Durations are stored as int64 nanoseconds; see atomicLoadDuration.
func (s *TxStats) GetSpillTime() time.Duration {
	return atomicLoadDuration(&s.SpillTime)
}

// IncSpillTime increases SpillTime atomically and returns the new value.
func (s *TxStats) IncSpillTime(delta time.Duration) time.Duration {
	return atomicAddDuration(&s.SpillTime, delta)
}

// GetWrite returns Write atomically.
func (s *TxStats) GetWrite() int64 {
	return atomic.LoadInt64(&s.Write)
}

// IncWrite increases Write atomically and returns the new value.
func (s *TxStats) IncWrite(delta int64) int64 {
	return atomic.AddInt64(&s.Write, delta)
}

// GetWriteTime returns WriteTime atomically.
// Durations are stored as int64 nanoseconds; see atomicLoadDuration.
func (s *TxStats) GetWriteTime() time.Duration {
	return atomicLoadDuration(&s.WriteTime)
}

// IncWriteTime increases WriteTime atomically and returns the new value.
func (s *TxStats) IncWriteTime(delta time.Duration) time.Duration {
	return atomicAddDuration(&s.WriteTime, delta)
}
// atomicAddDuration atomically adds du to *ptr and returns the new value.
// It relies on time.Duration being defined as int64, so the pointer can be
// reinterpreted for use with the sync/atomic int64 primitives.
func atomicAddDuration(ptr *time.Duration, du time.Duration) time.Duration {
	return time.Duration(atomic.AddInt64((*int64)(unsafe.Pointer(ptr)), int64(du)))
}

// atomicLoadDuration atomically loads *ptr via the same int64 reinterpretation
// used by atomicAddDuration.
func atomicLoadDuration(ptr *time.Duration) time.Duration {
	return time.Duration(atomic.LoadInt64((*int64)(unsafe.Pointer(ptr))))
}

290
vendor/go.etcd.io/bbolt/tx_check.go generated vendored Normal file
View File

@@ -0,0 +1,290 @@
package bbolt
import (
"encoding/hex"
"fmt"
"go.etcd.io/bbolt/internal/common"
)
// Check performs several consistency checks on the database for this transaction.
// An error is returned if any inconsistency is found.
//
// It can be safely run concurrently on a writable transaction. However, this
// incurs a high cost for large databases and databases with a lot of subbuckets
// because of caching. This overhead can be removed if running on a read-only
// transaction, however, it is not safe to execute other writer transactions at
// the same time.
//
// It also allows users to provide a customized `KVStringer` implementation,
// so that bolt can generate human-readable diagnostic messages.
func (tx *Tx) Check(options ...CheckOption) <-chan error {
	// Start from the defaults (hex rendering, whole-database scan) and let
	// each supplied option mutate the configuration in turn.
	cfg := checkConfig{
		kvStringer: HexKVStringer(),
	}
	for _, apply := range options {
		apply(&cfg)
	}

	errCh := make(chan error)
	go func() {
		// Closing the channel signals completion to the reader.
		defer close(errCh)
		tx.check(cfg, errCh)
	}()
	return errCh
}
// check runs the actual consistency checks and reports every problem it finds
// on ch. It does not close ch; the caller (Check) is responsible for that.
func (tx *Tx) check(cfg checkConfig, ch chan error) {
	// Force loading free list if opened in ReadOnly mode.
	tx.db.loadFreelist()

	// Check if any pages are double freed.
	freed := make(map[common.Pgid]bool)
	all := make([]common.Pgid, tx.db.freelist.Count())
	tx.db.freelist.Copyall(all)
	for _, id := range all {
		if freed[id] {
			ch <- fmt.Errorf("page %d: already freed", id)
		}
		freed[id] = true
	}

	// Track every reachable page.
	reachable := make(map[common.Pgid]*common.Page)
	reachable[0] = tx.page(0) // meta0
	reachable[1] = tx.page(1) // meta1
	// The freelist page and its overflow pages are reachable too, unless the
	// database has no persisted freelist.
	if tx.meta.Freelist() != common.PgidNoFreelist {
		for i := uint32(0); i <= tx.page(tx.meta.Freelist()).Overflow(); i++ {
			reachable[tx.meta.Freelist()+common.Pgid(i)] = tx.page(tx.meta.Freelist())
		}
	}

	if cfg.pageId == 0 {
		// Check the whole db file, starting from the root bucket and
		// recursively check all child buckets.
		tx.recursivelyCheckBucket(&tx.root, reachable, freed, cfg.kvStringer, ch)

		// Ensure all pages below high water mark are either reachable or freed.
		for i := common.Pgid(0); i < tx.meta.Pgid(); i++ {
			_, isReachable := reachable[i]
			if !isReachable && !freed[i] {
				ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
			}
		}
	} else {
		// Check the db file starting from a specified pageId.
		// Pages 0 and 1 are the meta pages, and anything at or above the high
		// water mark does not exist, so reject those page IDs up front.
		if cfg.pageId < 2 || cfg.pageId >= uint64(tx.meta.Pgid()) {
			ch <- fmt.Errorf("page ID (%d) out of range [%d, %d)", cfg.pageId, 2, tx.meta.Pgid())
			return
		}

		tx.recursivelyCheckPage(common.Pgid(cfg.pageId), reachable, freed, cfg.kvStringer, ch)
	}
}
// recursivelyCheckPage checks the subtree rooted at pageId: first the
// structural invariants (page reachability and key ordering), then any nested
// buckets found in its leaf pages.
func (tx *Tx) recursivelyCheckPage(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool,
	kvStringer KVStringer, ch chan error) {
	tx.checkInvariantProperties(pageId, reachable, freed, kvStringer, ch)
	tx.recursivelyCheckBucketInPage(pageId, reachable, freed, kvStringer, ch)
}
// recursivelyCheckBucketInPage walks the B-tree rooted at pageId and, for
// every bucket entry found in a leaf page, descends into the child bucket.
// Unexpected page types are reported on ch.
func (tx *Tx) recursivelyCheckBucketInPage(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool,
	kvStringer KVStringer, ch chan error) {
	p := tx.page(pageId)

	switch {
	case p.IsBranchPage():
		// Branch pages only reference deeper pages; recurse into each child.
		for i := range p.BranchPageElements() {
			elem := p.BranchPageElement(uint16(i))
			tx.recursivelyCheckBucketInPage(elem.Pgid(), reachable, freed, kvStringer, ch)
		}
	case p.IsLeafPage():
		for i := range p.LeafPageElements() {
			elem := p.LeafPageElement(uint16(i))
			if elem.IsBucketEntry() {
				// Build a throwaway Bucket rooted at this leaf page so that
				// Bucket(elem.Key()) can resolve the child bucket entry.
				inBkt := common.NewInBucket(pageId, 0)
				tmpBucket := Bucket{
					InBucket:    &inBkt,
					rootNode:    &node{isLeaf: p.IsLeafPage()},
					FillPercent: DefaultFillPercent,
					tx:          tx,
				}
				if child := tmpBucket.Bucket(elem.Key()); child != nil {
					tx.recursivelyCheckBucket(child, reachable, freed, kvStringer, ch)
				}
			}
		}
	default:
		ch <- fmt.Errorf("unexpected page type (flags: %x) for pgId:%d", p.Flags(), pageId)
	}
}
// recursivelyCheckBucket verifies the invariants of bucket b's own page tree
// and then descends into every child bucket it contains.
func (tx *Tx) recursivelyCheckBucket(b *Bucket, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool,
	kvStringer KVStringer, ch chan error) {
	// Inline buckets have no root page of their own; nothing to verify.
	if b.RootPage() == 0 {
		return
	}

	tx.checkInvariantProperties(b.RootPage(), reachable, freed, kvStringer, ch)

	// Recurse into every child bucket. The iteration error is discarded;
	// inconsistencies are reported through ch instead.
	_ = b.ForEachBucket(func(name []byte) error {
		if sub := b.Bucket(name); sub != nil {
			tx.recursivelyCheckBucket(sub, reachable, freed, kvStringer, ch)
		}
		return nil
	})
}
// checkInvariantProperties verifies the two per-subtree invariants for the
// tree rooted at pageId: every page is reachable exactly once and not freed
// (verifyPageReachable), and keys are stored in strictly ascending order
// (recursivelyCheckPageKeyOrder).
func (tx *Tx) checkInvariantProperties(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool,
	kvStringer KVStringer, ch chan error) {
	tx.forEachPage(pageId, func(p *common.Page, _ int, stack []common.Pgid) {
		verifyPageReachable(p, tx.meta.Pgid(), stack, reachable, freed, ch)
	})

	tx.recursivelyCheckPageKeyOrder(pageId, kvStringer.KeyToString, ch)
}
// verifyPageReachable records p (and its overflow pages) in reachable, and
// reports on ch when the page lies beyond the high water mark (hwm), is
// referenced more than once, was already freed, or has an unexpected type.
// stack is the page-ID path from the root, included in diagnostics.
func verifyPageReachable(p *common.Page, hwm common.Pgid, stack []common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, ch chan error) {
	if p.Id() > hwm {
		ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.Id()), int(hwm), stack)
	}

	// Ensure each page is only referenced once.
	for i := common.Pgid(0); i <= common.Pgid(p.Overflow()); i++ {
		var id = p.Id() + i
		if _, ok := reachable[id]; ok {
			ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack)
		}
		reachable[id] = p
	}

	// We should only encounter un-freed leaf and branch pages.
	if freed[p.Id()] {
		ch <- fmt.Errorf("page %d: reachable freed", int(p.Id()))
	} else if !p.IsBranchPage() && !p.IsLeafPage() {
		ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.Id()), p.Typ(), stack)
	}
}
// recursivelyCheckPageKeyOrder verifies database consistency with respect to b-tree
// key order constraints:
//   - keys on pages must be sorted
//   - keys on children pages are between 2 consecutive keys on the parent's branch page).
//
// Violations are reported on ch; keyToString renders keys in the messages.
func (tx *Tx) recursivelyCheckPageKeyOrder(pgId common.Pgid, keyToString func([]byte) string, ch chan error) {
	// Start with an unbounded key range and an empty page stack.
	tx.recursivelyCheckPageKeyOrderInternal(pgId, nil, nil, nil, keyToString, ch)
}
// recursivelyCheckPageKeyOrderInternal verifies that all keys in the subtree rooted at `pgid` are:
//   - >=`minKeyClosed` (can be nil)
//   - <`maxKeyOpen` (can be nil)
//   - Are in right ordering relationship to their parents.
//
// `pagesStack` is expected to contain IDs of pages from the tree root to `pgid` for the clean debugging message.
// It returns the maximum key found in the subtree (nil for an empty leaf), so
// the caller can thread it through as the lower bound for the next sibling.
func (tx *Tx) recursivelyCheckPageKeyOrderInternal(
	pgId common.Pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []common.Pgid,
	keyToString func([]byte) string, ch chan error) (maxKeyInSubtree []byte) {

	p := tx.page(pgId)
	pagesStack = append(pagesStack, pgId)
	switch {
	case p.IsBranchPage():
		// For branch page we navigate ranges of all subpages.
		runningMin := minKeyClosed
		for i := range p.BranchPageElements() {
			elem := p.BranchPageElement(uint16(i))
			verifyKeyOrder(elem.Pgid(), "branch", i, elem.Key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack)

			// The open upper bound for child i is the key of element i+1, or
			// this page's own upper bound for the last child.
			maxKey := maxKeyOpen
			if i < len(p.BranchPageElements())-1 {
				maxKey = p.BranchPageElement(uint16(i + 1)).Key()
			}
			maxKeyInSubtree = tx.recursivelyCheckPageKeyOrderInternal(elem.Pgid(), elem.Key(), maxKey, pagesStack, keyToString, ch)
			// The next child's keys must come after everything seen so far.
			runningMin = maxKeyInSubtree
		}
		return maxKeyInSubtree
	case p.IsLeafPage():
		// Leaf keys must be strictly increasing and within (minKeyClosed, maxKeyOpen).
		runningMin := minKeyClosed
		for i := range p.LeafPageElements() {
			elem := p.LeafPageElement(uint16(i))
			verifyKeyOrder(pgId, "leaf", i, elem.Key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack)
			runningMin = elem.Key()
		}
		if p.Count() > 0 {
			return p.LeafPageElement(p.Count() - 1).Key()
		}
	default:
		ch <- fmt.Errorf("unexpected page type (flags: %x) for pgId:%d", p.Flags(), pgId)
	}
	// Empty leaf (or unexpected page): no key to propagate.
	return maxKeyInSubtree
}
// verifyKeyOrder checks whether the entry at #index on pgId (pageType:
// "branch"|"leaf") with the given key lies within the range determined by
// (previousKey..maxKeyOpen), and reports found violations to the channel ch.
// pagesStack (root-to-pgId page IDs) is included in diagnostics.
func verifyKeyOrder(pgId common.Pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []common.Pgid) {
	// The first key on a page must not sort below the lower bound inherited
	// from the ancestor branch element.
	if index == 0 && previousKey != nil && compareKeys(previousKey, key) > 0 {
		ch <- fmt.Errorf("the first key[%d]=(hex)%s on %s page(%d) needs to be >= the key in the ancestor (%s). Stack: %v",
			index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
	}
	// Subsequent keys must be strictly greater than their predecessor.
	if index > 0 {
		cmpRet := compareKeys(previousKey, key)
		if cmpRet > 0 {
			ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be > (found <) than previous element (hex)%s. Stack: %v",
				index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
		}
		if cmpRet == 0 {
			ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be > (found =) than previous element (hex)%s. Stack: %v",
				index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
		}
	}
	// Every key must stay strictly below the open upper bound taken from the
	// next element in the ancestor branch page.
	if maxKeyOpen != nil && compareKeys(key, maxKeyOpen) >= 0 {
		// Fix: report the violated upper bound (maxKeyOpen), which is the
		// value the message describes, rather than previousKey.
		ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be < than key of the next element in ancestor (hex)%s. Pages stack: %v",
			index, keyToString(key), pageType, pgId, keyToString(maxKeyOpen), pagesStack)
	}
}
// ===========================================================================================

// checkConfig holds the options accumulated from CheckOption values.
type checkConfig struct {
	// kvStringer renders keys and values in diagnostic messages.
	kvStringer KVStringer
	// pageId is the page to start checking from; 0 means check the whole db.
	pageId uint64
}

// CheckOption customizes the behaviour of Tx.Check.
type CheckOption func(options *checkConfig)
// WithKVStringer sets the KVStringer used to render keys and values in the
// diagnostic messages produced by Tx.Check.
func WithKVStringer(kvStringer KVStringer) CheckOption {
	return func(c *checkConfig) {
		c.kvStringer = kvStringer
	}
}
// WithPageId sets a page ID from which the check command starts to check.
// A pageId of 0 (the default) checks the entire database.
func WithPageId(pageId uint64) CheckOption {
	return func(c *checkConfig) {
		c.pageId = pageId
	}
}
// KVStringer allows to prepare human-readable diagnostic messages.
type KVStringer interface {
	// KeyToString renders a key for display.
	KeyToString([]byte) string
	// ValueToString renders a value for display.
	ValueToString([]byte) string
}
// HexKVStringer serializes both key & value to hex representation.
// It is the default KVStringer used by Tx.Check.
func HexKVStringer() KVStringer {
	return hexKvStringer{}
}
// hexKvStringer renders both keys and values as lowercase hexadecimal.
type hexKvStringer struct{}

// KeyToString returns the hex encoding of key.
func (hexKvStringer) KeyToString(key []byte) string {
	encoded := hex.EncodeToString(key)
	return encoded
}

// ValueToString returns the hex encoding of value.
func (hexKvStringer) ValueToString(value []byte) string {
	encoded := hex.EncodeToString(value)
	return encoded
}