Integrate BACKBEAT SDK and resolve KACHING license validation

Major integrations and fixes:
- Added BACKBEAT SDK integration for P2P operation timing
- Implemented beat-aware status tracking for distributed operations
- Added Docker secrets support for secure license management
- Resolved KACHING license validation via HTTPS/TLS
- Updated docker-compose configuration for clean stack deployment
- Disabled rollback policies to prevent deployment failures
- Added license credential storage (CHORUS-DEV-MULTI-001)

Technical improvements:
- BACKBEAT P2P operation tracking with phase management
- Enhanced configuration system with file-based secrets (see the sketch below)
- Improved error handling for license validation
- Clean separation of KACHING and CHORUS deployment stacks
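
As a hedged illustration of the file-based secrets handling listed above, here is a minimal Go sketch. The variable name KACHING_LICENSE_ID and the loadSecret helper are hypothetical, not the actual CHORUS configuration keys; the pattern (read <NAME>_FILE if set, else fall back to the plain environment variable) is the conventional Docker secrets idiom.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// loadSecret reads a credential from the file named by <name>_FILE,
// falling back to the plain environment variable. With Docker secrets,
// <name>_FILE would typically point at a mount under /run/secrets/.
func loadSecret(name string) (string, error) {
	if path := os.Getenv(name + "_FILE"); path != "" {
		b, err := os.ReadFile(path)
		if err != nil {
			return "", fmt.Errorf("read secret %s: %w", name, err)
		}
		return strings.TrimSpace(string(b)), nil
	}
	if v := os.Getenv(name); v != "" {
		return v, nil
	}
	return "", fmt.Errorf("secret %s not set", name)
}

func main() {
	// Hypothetical key name; fail fast if the credential is missing,
	// since license validation cannot proceed without it.
	license, err := loadSecret("KACHING_LICENSE_ID")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("loaded license credential of length", len(license))
}
```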

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
Author: anthonyrawlins
Date: 2025-09-06 07:56:26 +10:00
parent 543ab216f9
commit 9bdcbe0447
4730 changed files with 1480093 additions and 1916 deletions

1
vendor/github.com/ipfs/go-datastore/.gitignore generated vendored Normal file

@@ -0,0 +1 @@
*.swp

21
vendor/github.com/ipfs/go-datastore/LICENSE generated vendored Normal file

@@ -0,0 +1,21 @@
The MIT License
Copyright (c) 2016 Juan Batiz-Benet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

47
vendor/github.com/ipfs/go-datastore/README.md generated vendored Normal file

@@ -0,0 +1,47 @@
# go-datastore
[![](https://img.shields.io/badge/made%20by-Protocol%20Labs-blue.svg?style=flat-square)](http://ipn.io)
[![](https://img.shields.io/badge/project-IPFS-blue.svg?style=flat-square)](http://ipfs.io/)
[![](https://img.shields.io/badge/freenode-%23ipfs-blue.svg?style=flat-square)](http://webchat.freenode.net/?channels=%23ipfs)
[![standard-readme compliant](https://img.shields.io/badge/standard--readme-OK-green.svg?style=flat-square)](https://github.com/RichardLitt/standard-readme)
[![GoDoc](https://godoc.org/github.com/ipfs/go-datastore?status.svg)](https://godoc.org/github.com/ipfs/go-datastore)
> key-value datastore interfaces
## Lead Maintainer
[Steven Allen](https://github.com/Stebalien)
## Table of Contents
- [Background](#background)
- [Documentation](#documentation)
- [Contribute](#contribute)
- [License](#license)
## Background
Datastore is a generic layer of abstraction for data store and database access. It is a simple API with the aim to enable application development in a datastore-agnostic way, allowing datastores to be swapped seamlessly without changing application code. Thus, one can leverage different datastores with different strengths without committing the application to one datastore throughout its lifetime.
In addition, grouped datastores significantly simplify interesting data access patterns (such as caching and sharding).
Based on [datastore.py](https://github.com/datastore/datastore).
## Documentation
https://godoc.org/github.com/ipfs/go-datastore
## Contribute
Feel free to join in. All welcome. Open an [issue](https://github.com/ipfs/go-datastore/issues)!
This repository falls under the IPFS [Code of Conduct](https://github.com/ipfs/community/blob/master/code-of-conduct.md).
### Want to hack on IPFS?
[![](https://cdn.rawgit.com/jbenet/contribute-ipfs-gif/master/img/contribute.gif)](https://github.com/ipfs/community/blob/master/contributing.md)
## License
MIT

19
vendor/github.com/ipfs/go-datastore/autobatch/README.md generated vendored Normal file

@@ -0,0 +1,19 @@
# autobatch
Autobatch is an implementation of
[go-datastore](https://github.com/ipfs/go-datastore) that automatically batches
together writes by holding puts in memory until a certain threshold is met.
This can improve disk performance at the cost of memory in certain situations.
## Usage
Simply wrap your existing datastore in an autobatching layer like so:
```go
bds := NewAutoBatching(basedstore, 128)
```
And make all future calls to the autobatching object.
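A slightly fuller sketch (assuming an in-memory `MapDatastore` as the child store; everything here comes from `go-datastore` itself):

```go
package main

import (
	"context"
	"fmt"

	ds "github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/autobatch"
)

func main() {
	ctx := context.Background()

	// Any Batching datastore works as the child; MapDatastore is the
	// simplest choice for a demonstration.
	base := ds.NewMapDatastore()

	// Puts are buffered in memory and flushed once the buffer exceeds
	// 128 entries.
	bds := autobatch.NewAutoBatching(base, 128)

	if err := bds.Put(ctx, ds.NewKey("/hello"), []byte("world")); err != nil {
		panic(err)
	}

	// Reads consult the buffer first, so the value is visible immediately.
	v, err := bds.Get(ctx, ds.NewKey("/hello"))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(v))

	// Flush explicitly before shutdown (Close also flushes).
	if err := bds.Flush(ctx); err != nil {
		panic(err)
	}
}
```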
## License
MIT

174
vendor/github.com/ipfs/go-datastore/autobatch/autobatch.go generated vendored Normal file

@@ -0,0 +1,174 @@
// Package autobatch provides a go-datastore implementation that
// automatically batches together writes by holding puts in memory until
// a certain threshold is met.
package autobatch
import (
"context"
ds "github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-datastore/query"
)
// Datastore implements a go-datastore.
type Datastore struct {
child ds.Batching
// TODO: discuss making ds.Batch implement the full ds.Datastore interface
buffer map[ds.Key]op
maxBufferEntries int
}
var _ ds.Datastore = (*Datastore)(nil)
var _ ds.PersistentDatastore = (*Datastore)(nil)
type op struct {
delete bool
value []byte
}
// NewAutoBatching returns a new datastore that automatically
// batches writes using the given Batching datastore. The size
// of the memory pool is given by size.
func NewAutoBatching(d ds.Batching, size int) *Datastore {
return &Datastore{
child: d,
buffer: make(map[ds.Key]op, size),
maxBufferEntries: size,
}
}
// Delete deletes a key/value
func (d *Datastore) Delete(ctx context.Context, k ds.Key) error {
d.buffer[k] = op{delete: true}
if len(d.buffer) > d.maxBufferEntries {
return d.Flush(ctx)
}
return nil
}
// Get retrieves a value given a key.
func (d *Datastore) Get(ctx context.Context, k ds.Key) ([]byte, error) {
o, ok := d.buffer[k]
if ok {
if o.delete {
return nil, ds.ErrNotFound
}
return o.value, nil
}
return d.child.Get(ctx, k)
}
// Put stores a key/value.
func (d *Datastore) Put(ctx context.Context, k ds.Key, val []byte) error {
d.buffer[k] = op{value: val}
if len(d.buffer) > d.maxBufferEntries {
return d.Flush(ctx)
}
return nil
}
// Sync flushes all operations on keys at or under the prefix
// from the current batch to the underlying datastore
func (d *Datastore) Sync(ctx context.Context, prefix ds.Key) error {
b, err := d.child.Batch(ctx)
if err != nil {
return err
}
for k, o := range d.buffer {
if !(k.Equal(prefix) || k.IsDescendantOf(prefix)) {
continue
}
var err error
if o.delete {
err = b.Delete(ctx, k)
} else {
err = b.Put(ctx, k, o.value)
}
if err != nil {
return err
}
delete(d.buffer, k)
}
return b.Commit(ctx)
}
// Flush flushes the current batch to the underlying datastore.
func (d *Datastore) Flush(ctx context.Context) error {
b, err := d.child.Batch(ctx)
if err != nil {
return err
}
for k, o := range d.buffer {
var err error
if o.delete {
err = b.Delete(ctx, k)
} else {
err = b.Put(ctx, k, o.value)
}
if err != nil {
return err
}
}
// clear out buffer
d.buffer = make(map[ds.Key]op, d.maxBufferEntries)
return b.Commit(ctx)
}
// Has checks if a key is stored.
func (d *Datastore) Has(ctx context.Context, k ds.Key) (bool, error) {
o, ok := d.buffer[k]
if ok {
return !o.delete, nil
}
return d.child.Has(ctx, k)
}
// GetSize implements Datastore.GetSize
func (d *Datastore) GetSize(ctx context.Context, k ds.Key) (int, error) {
o, ok := d.buffer[k]
if ok {
if o.delete {
return -1, ds.ErrNotFound
}
return len(o.value), nil
}
return d.child.GetSize(ctx, k)
}
// Query performs a query
func (d *Datastore) Query(ctx context.Context, q dsq.Query) (dsq.Results, error) {
err := d.Flush(ctx)
if err != nil {
return nil, err
}
return d.child.Query(ctx, q)
}
// DiskUsage implements the PersistentDatastore interface.
func (d *Datastore) DiskUsage(ctx context.Context) (uint64, error) {
return ds.DiskUsage(ctx, d.child)
}
func (d *Datastore) Close() error {
ctx := context.Background()
err1 := d.Flush(ctx)
err2 := d.child.Close()
if err1 != nil {
return err1
}
if err2 != nil {
return err2
}
return nil
}

248
vendor/github.com/ipfs/go-datastore/basic_ds.go generated vendored Normal file

@@ -0,0 +1,248 @@
package datastore
import (
"context"
"log"
dsq "github.com/ipfs/go-datastore/query"
)
// Here are some basic datastore implementations.
// MapDatastore uses a standard Go map for internal storage.
type MapDatastore struct {
values map[Key][]byte
}
var _ Datastore = (*MapDatastore)(nil)
var _ Batching = (*MapDatastore)(nil)
// NewMapDatastore constructs a MapDatastore. It is _not_ thread-safe by
// default, wrap using sync.MutexWrap if you need thread safety (the answer here
// is usually yes).
func NewMapDatastore() (d *MapDatastore) {
return &MapDatastore{
values: make(map[Key][]byte),
}
}
// Put implements Datastore.Put
func (d *MapDatastore) Put(ctx context.Context, key Key, value []byte) (err error) {
d.values[key] = value
return nil
}
// Sync implements Datastore.Sync
func (d *MapDatastore) Sync(ctx context.Context, prefix Key) error {
return nil
}
// Get implements Datastore.Get
func (d *MapDatastore) Get(ctx context.Context, key Key) (value []byte, err error) {
val, found := d.values[key]
if !found {
return nil, ErrNotFound
}
return val, nil
}
// Has implements Datastore.Has
func (d *MapDatastore) Has(ctx context.Context, key Key) (exists bool, err error) {
_, found := d.values[key]
return found, nil
}
// GetSize implements Datastore.GetSize
func (d *MapDatastore) GetSize(ctx context.Context, key Key) (size int, err error) {
if v, found := d.values[key]; found {
return len(v), nil
}
return -1, ErrNotFound
}
// Delete implements Datastore.Delete
func (d *MapDatastore) Delete(ctx context.Context, key Key) (err error) {
delete(d.values, key)
return nil
}
// Query implements Datastore.Query
func (d *MapDatastore) Query(ctx context.Context, q dsq.Query) (dsq.Results, error) {
re := make([]dsq.Entry, 0, len(d.values))
for k, v := range d.values {
e := dsq.Entry{Key: k.String(), Size: len(v)}
if !q.KeysOnly {
e.Value = v
}
re = append(re, e)
}
r := dsq.ResultsWithEntries(q, re)
r = dsq.NaiveQueryApply(q, r)
return r, nil
}
func (d *MapDatastore) Batch(ctx context.Context) (Batch, error) {
return NewBasicBatch(d), nil
}
func (d *MapDatastore) Close() error {
return nil
}
// LogDatastore logs all accesses through the datastore.
type LogDatastore struct {
Name string
child Datastore
}
var _ Datastore = (*LogDatastore)(nil)
var _ Batching = (*LogDatastore)(nil)
var _ GCDatastore = (*LogDatastore)(nil)
var _ PersistentDatastore = (*LogDatastore)(nil)
var _ ScrubbedDatastore = (*LogDatastore)(nil)
var _ CheckedDatastore = (*LogDatastore)(nil)
var _ Shim = (*LogDatastore)(nil)
// Shim is a datastore which has a child.
type Shim interface {
Datastore
Children() []Datastore
}
// NewLogDatastore constructs a log datastore.
func NewLogDatastore(ds Datastore, name string) *LogDatastore {
if len(name) < 1 {
name = "LogDatastore"
}
return &LogDatastore{Name: name, child: ds}
}
// Children implements Shim
func (d *LogDatastore) Children() []Datastore {
return []Datastore{d.child}
}
// Put implements Datastore.Put
func (d *LogDatastore) Put(ctx context.Context, key Key, value []byte) (err error) {
log.Printf("%s: Put %s\n", d.Name, key)
// log.Printf("%s: Put %s ```%s```", d.Name, key, value)
return d.child.Put(ctx, key, value)
}
// Sync implements Datastore.Sync
func (d *LogDatastore) Sync(ctx context.Context, prefix Key) error {
log.Printf("%s: Sync %s\n", d.Name, prefix)
return d.child.Sync(ctx, prefix)
}
// Get implements Datastore.Get
func (d *LogDatastore) Get(ctx context.Context, key Key) (value []byte, err error) {
log.Printf("%s: Get %s\n", d.Name, key)
return d.child.Get(ctx, key)
}
// Has implements Datastore.Has
func (d *LogDatastore) Has(ctx context.Context, key Key) (exists bool, err error) {
log.Printf("%s: Has %s\n", d.Name, key)
return d.child.Has(ctx, key)
}
// GetSize implements Datastore.GetSize
func (d *LogDatastore) GetSize(ctx context.Context, key Key) (size int, err error) {
log.Printf("%s: GetSize %s\n", d.Name, key)
return d.child.GetSize(ctx, key)
}
// Delete implements Datastore.Delete
func (d *LogDatastore) Delete(ctx context.Context, key Key) (err error) {
log.Printf("%s: Delete %s\n", d.Name, key)
return d.child.Delete(ctx, key)
}
// DiskUsage implements the PersistentDatastore interface.
func (d *LogDatastore) DiskUsage(ctx context.Context) (uint64, error) {
log.Printf("%s: DiskUsage\n", d.Name)
return DiskUsage(ctx, d.child)
}
// Query implements Datastore.Query
func (d *LogDatastore) Query(ctx context.Context, q dsq.Query) (dsq.Results, error) {
log.Printf("%s: Query\n", d.Name)
log.Printf("%s: q.Prefix: %s\n", d.Name, q.Prefix)
log.Printf("%s: q.KeysOnly: %v\n", d.Name, q.KeysOnly)
log.Printf("%s: q.Filters: %d\n", d.Name, len(q.Filters))
log.Printf("%s: q.Orders: %d\n", d.Name, len(q.Orders))
log.Printf("%s: q.Offset: %d\n", d.Name, q.Offset)
return d.child.Query(ctx, q)
}
// LogBatch logs all accesses through the batch.
type LogBatch struct {
Name string
child Batch
}
var _ Batch = (*LogBatch)(nil)
func (d *LogDatastore) Batch(ctx context.Context) (Batch, error) {
log.Printf("%s: Batch\n", d.Name)
if bds, ok := d.child.(Batching); ok {
b, err := bds.Batch(ctx)
if err != nil {
return nil, err
}
return &LogBatch{
Name: d.Name,
child: b,
}, nil
}
return nil, ErrBatchUnsupported
}
// Put implements Batch.Put
func (d *LogBatch) Put(ctx context.Context, key Key, value []byte) (err error) {
log.Printf("%s: BatchPut %s\n", d.Name, key)
// log.Printf("%s: Put %s ```%s```", d.Name, key, value)
return d.child.Put(ctx, key, value)
}
// Delete implements Batch.Delete
func (d *LogBatch) Delete(ctx context.Context, key Key) (err error) {
log.Printf("%s: BatchDelete %s\n", d.Name, key)
return d.child.Delete(ctx, key)
}
// Commit implements Batch.Commit
func (d *LogBatch) Commit(ctx context.Context) (err error) {
log.Printf("%s: BatchCommit\n", d.Name)
return d.child.Commit(ctx)
}
func (d *LogDatastore) Close() error {
log.Printf("%s: Close\n", d.Name)
return d.child.Close()
}
func (d *LogDatastore) Check(ctx context.Context) error {
if c, ok := d.child.(CheckedDatastore); ok {
return c.Check(ctx)
}
return nil
}
func (d *LogDatastore) Scrub(ctx context.Context) error {
if c, ok := d.child.(ScrubbedDatastore); ok {
return c.Scrub(ctx)
}
return nil
}
func (d *LogDatastore) CollectGarbage(ctx context.Context) error {
if c, ok := d.child.(GCDatastore); ok {
return c.CollectGarbage(ctx)
}
return nil
}
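
A brief usage sketch for the file above: LogDatastore wraps any child datastore and logs every operation, which is useful for tracing calls during development.

```go
package main

import (
	"context"

	ds "github.com/ipfs/go-datastore"
)

func main() {
	ctx := context.Background()

	// Wrap a MapDatastore so every operation is logged with a name tag.
	logged := ds.NewLogDatastore(ds.NewMapDatastore(), "trace")

	_ = logged.Put(ctx, ds.NewKey("/a"), []byte("1")) // logs "trace: Put /a"
	_, _ = logged.Get(ctx, ds.NewKey("/a"))           // logs "trace: Get /a"
	_ = logged.Delete(ctx, ds.NewKey("/a"))           // logs "trace: Delete /a"
	_ = logged.Close()                                // logs "trace: Close"
}
```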

53
vendor/github.com/ipfs/go-datastore/batch.go generated vendored Normal file

@@ -0,0 +1,53 @@
package datastore
import (
"context"
)
type op struct {
delete bool
value []byte
}
// basicBatch implements the transaction interface for datastores who do
// not have any sort of underlying transactional support
type basicBatch struct {
ops map[Key]op
target Datastore
}
var _ Batch = (*basicBatch)(nil)
func NewBasicBatch(ds Datastore) Batch {
return &basicBatch{
ops: make(map[Key]op),
target: ds,
}
}
func (bt *basicBatch) Put(ctx context.Context, key Key, val []byte) error {
bt.ops[key] = op{value: val}
return nil
}
func (bt *basicBatch) Delete(ctx context.Context, key Key) error {
bt.ops[key] = op{delete: true}
return nil
}
func (bt *basicBatch) Commit(ctx context.Context) error {
var err error
for k, op := range bt.ops {
if op.delete {
err = bt.target.Delete(ctx, k)
} else {
err = bt.target.Put(ctx, k, op.value)
}
if err != nil {
break
}
}
return err
}
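
A short sketch of the batch flow implemented above: writes accumulate in the batch's ops map and only reach the target datastore on Commit.

```go
package main

import (
	"context"

	ds "github.com/ipfs/go-datastore"
)

func main() {
	ctx := context.Background()
	store := ds.NewMapDatastore()

	b, err := store.Batch(ctx) // MapDatastore hands back a basic batch
	if err != nil {
		panic(err)
	}

	// Neither mutation touches the store yet; both are held in memory.
	_ = b.Put(ctx, ds.NewKey("/x"), []byte("1"))
	_ = b.Delete(ctx, ds.NewKey("/y"))

	// Commit replays the buffered ops against the target datastore.
	if err := b.Commit(ctx); err != nil {
		panic(err)
	}
}
```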

237
vendor/github.com/ipfs/go-datastore/datastore.go generated vendored Normal file

@@ -0,0 +1,237 @@
package datastore
import (
"context"
"errors"
"io"
query "github.com/ipfs/go-datastore/query"
)
/*
Datastore represents storage for any key-value pair.
Datastores are general enough to be backed by all kinds of different storage:
in-memory caches, databases, a remote datastore, flat files on disk, etc.
The general idea is to wrap a more complicated storage facility in a simple,
uniform interface, keeping the freedom of using the right tools for the job.
In particular, a Datastore can aggregate other datastores in interesting ways,
like sharded (to distribute load) or tiered access (caches before databases).
While Datastores should be written general enough to accept all sorts of
values, some implementations will undoubtedly have to be specific (e.g. SQL
databases where fields should be decomposed into columns), particularly to
support queries efficiently. Moreover, certain datastores may enforce certain
types of values (e.g. requiring an io.Reader, a specific struct, etc) or
serialization formats (JSON, Protobufs, etc).
IMPORTANT: No Datastore should ever Panic! This is a cross-module interface,
and thus it should behave predictably and handle exceptional conditions with
proper error reporting. Thus, all Datastore calls may return errors, which
should be checked by callers.
*/
type Datastore interface {
Read
Write
// Sync guarantees that any Put or Delete calls under prefix that returned
// before Sync(prefix) was called will be observed after Sync(prefix)
// returns, even if the program crashes. If Put/Delete operations already
// satisfy these requirements then Sync may be a no-op.
//
// If the prefix fails to Sync this method returns an error.
Sync(ctx context.Context, prefix Key) error
io.Closer
}
// Write is the write-side of the Datastore interface.
type Write interface {
// Put stores the object `value` named by `key`.
//
// The generalized Datastore interface does not impose a value type,
// allowing various datastore middleware implementations (which do not
// handle the values directly) to be composed together.
//
// Ultimately, the lowest-level datastore will need to do some value checking
// or risk getting incorrect values. It may also be useful to expose a more
// type-safe interface to your application, and do the checking up-front.
Put(ctx context.Context, key Key, value []byte) error
// Delete removes the value for given `key`. If the key is not in the
// datastore, this method returns no error.
Delete(ctx context.Context, key Key) error
}
// Read is the read-side of the Datastore interface.
type Read interface {
// Get retrieves the object `value` named by `key`.
// Get will return ErrNotFound if the key is not mapped to a value.
Get(ctx context.Context, key Key) (value []byte, err error)
// Has returns whether the `key` is mapped to a `value`.
// In some contexts, it may be much cheaper only to check for existence of
// a value, rather than retrieving the value itself. (e.g. HTTP HEAD).
// The default implementation is found in `GetBackedHas`.
Has(ctx context.Context, key Key) (exists bool, err error)
// GetSize returns the size of the `value` named by `key`.
// In some contexts, it may be much cheaper to only get the size of the
// value rather than retrieving the value itself.
GetSize(ctx context.Context, key Key) (size int, err error)
// Query searches the datastore and returns a query result. This function
// may return before the query actually runs. To wait for the query:
//
// result, _ := ds.Query(q)
//
// // use the channel interface; result may come in at different times
// for entry := range result.Next() { ... }
//
// // or wait for the query to be completely done
// entries, _ := result.Rest()
// for entry := range entries { ... }
//
Query(ctx context.Context, q query.Query) (query.Results, error)
}
// Batching datastores support deferred, grouped updates to the database.
// `Batch`es do NOT have transactional semantics: updates to the underlying
// datastore are not guaranteed to occur in the same iota of time. Similarly,
// batched updates will not be flushed to the underlying datastore until
// `Commit` has been called. `Txn`s from a `TxnDatastore` have all the
// capabilities of a `Batch`, but the reverse is NOT true.
type Batching interface {
Datastore
BatchingFeature
}
// ErrBatchUnsupported is returned by Batch if the Datastore doesn't
// actually support batching.
var ErrBatchUnsupported = errors.New("this datastore does not support batching")
// CheckedDatastore is an interface that should be implemented by datastores
// which may need checking on-disk data integrity.
type CheckedDatastore interface {
Datastore
CheckedFeature
}
// ScrubbedDatastore is an interface that should be implemented by datastores
// which want to provide a mechanism to check data integrity and/or
// error correction.
type ScrubbedDatastore interface {
Datastore
ScrubbedFeature
}
// GCDatastore is an interface that should be implemented by datastores which
// don't free disk space by just removing data from them.
type GCDatastore interface {
Datastore
GCFeature
}
// PersistentDatastore is an interface that should be implemented by datastores
// which can report disk usage.
type PersistentDatastore interface {
Datastore
PersistentFeature
}
// DiskUsage checks if a Datastore is a
// PersistentDatastore and returns its DiskUsage(),
// otherwise returns 0.
func DiskUsage(ctx context.Context, d Datastore) (uint64, error) {
persDs, ok := d.(PersistentDatastore)
if !ok {
return 0, nil
}
return persDs.DiskUsage(ctx)
}
// TTLDatastore is an interface that should be implemented by datastores that
// support expiring entries.
type TTLDatastore interface {
Datastore
TTL
}
// Txn extends the Datastore type. Txns allow users to batch queries and
// mutations to the Datastore into atomic groups, or transactions. Actions
// performed on a transaction will not take hold until a successful call to
// Commit has been made. Likewise, transactions can be aborted by calling
// Discard before a successful Commit has been made.
type Txn interface {
Read
Write
// Commit finalizes a transaction, attempting to commit it to the Datastore.
// May return an error if the transaction has gone stale. The presence of an
// error is an indication that the data was not committed to the Datastore.
Commit(ctx context.Context) error
// Discard throws away changes recorded in a transaction without committing
// them to the underlying Datastore. Any calls made to Discard after Commit
// has been successfully called will have no effect on the transaction and
// state of the Datastore, making it safe to defer.
Discard(ctx context.Context)
}
// TxnDatastore is an interface that should be implemented by datastores that
// support transactions.
type TxnDatastore interface {
Datastore
TxnFeature
}
// Errors
type dsError struct {
error
isNotFound bool
}
func (e *dsError) NotFound() bool {
return e.isNotFound
}
// ErrNotFound is returned by Get and GetSize when a datastore does not map the
// given key to a value.
var ErrNotFound error = &dsError{error: errors.New("datastore: key not found"), isNotFound: true}
// GetBackedHas provides a default Datastore.Has implementation.
// It exists so Datastore.Has implementations can use it, like so:
//
// func (*d SomeDatastore) Has(key Key) (exists bool, err error) {
// return GetBackedHas(d, key)
// }
func GetBackedHas(ctx context.Context, ds Read, key Key) (bool, error) {
_, err := ds.Get(ctx, key)
switch err {
case nil:
return true, nil
case ErrNotFound:
return false, nil
default:
return false, err
}
}
// GetBackedSize provides a default Datastore.GetSize implementation.
// It exists so Datastore.GetSize implementations can use it, like so:
//
// func (*d SomeDatastore) GetSize(key Key) (size int, err error) {
// return GetBackedSize(d, key)
// }
func GetBackedSize(ctx context.Context, ds Read, key Key) (int, error) {
value, err := ds.Get(ctx, key)
if err == nil {
return len(value), nil
}
return -1, err
}
type Batch interface {
Write
Commit(ctx context.Context) error
}
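
A sketch of how a custom read-only store might lean on the GetBackedHas/GetBackedSize helpers defined above; the memStore type is hypothetical.

```go
// Package memstore sketches a store that defers Has and GetSize to the
// Get-backed default implementations in go-datastore.
package memstore

import (
	"context"

	ds "github.com/ipfs/go-datastore"
	dsq "github.com/ipfs/go-datastore/query"
)

type memStore struct {
	m map[ds.Key][]byte
}

var _ ds.Read = (*memStore)(nil)

func (s *memStore) Get(ctx context.Context, key ds.Key) ([]byte, error) {
	if v, ok := s.m[key]; ok {
		return v, nil
	}
	return nil, ds.ErrNotFound
}

// Has and GetSize defer to the helpers, which issue a Get and translate
// ErrNotFound into (false, nil) and (-1, err) respectively.
func (s *memStore) Has(ctx context.Context, key ds.Key) (bool, error) {
	return ds.GetBackedHas(ctx, s, key)
}

func (s *memStore) GetSize(ctx context.Context, key ds.Key) (int, error) {
	return ds.GetBackedSize(ctx, s, key)
}

func (s *memStore) Query(ctx context.Context, q dsq.Query) (dsq.Results, error) {
	entries := make([]dsq.Entry, 0, len(s.m))
	for k, v := range s.m {
		entries = append(entries, dsq.Entry{Key: k.String(), Value: v, Size: len(v)})
	}
	return dsq.NaiveQueryApply(q, dsq.ResultsWithEntries(q, entries)), nil
}
```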

132
vendor/github.com/ipfs/go-datastore/features.go generated vendored Normal file

@@ -0,0 +1,132 @@
package datastore
import (
"context"
"reflect"
"time"
)
const (
FeatureNameBatching = "Batching"
FeatureNameChecked = "Checked"
FeatureNameGC = "GC"
FeatureNamePersistent = "Persistent"
FeatureNameScrubbed = "Scrubbed"
FeatureNameTTL = "TTL"
FeatureNameTransaction = "Transaction"
)
type BatchingFeature interface {
Batch(ctx context.Context) (Batch, error)
}
type CheckedFeature interface {
Check(ctx context.Context) error
}
type ScrubbedFeature interface {
Scrub(ctx context.Context) error
}
type GCFeature interface {
CollectGarbage(ctx context.Context) error
}
type PersistentFeature interface {
// DiskUsage returns the space used by a datastore, in bytes.
DiskUsage(ctx context.Context) (uint64, error)
}
// TTL encapsulates the methods that deal with entries with time-to-live.
type TTL interface {
PutWithTTL(ctx context.Context, key Key, value []byte, ttl time.Duration) error
SetTTL(ctx context.Context, key Key, ttl time.Duration) error
GetExpiration(ctx context.Context, key Key) (time.Time, error)
}
type TxnFeature interface {
NewTransaction(ctx context.Context, readOnly bool) (Txn, error)
}
// Feature contains metadata about a datastore Feature.
type Feature struct {
Name string
// Interface is the nil interface of the feature.
Interface interface{}
// DatastoreInterface is the nil interface of the feature's corresponding datastore interface.
DatastoreInterface interface{}
}
var featuresByName map[string]Feature
func init() {
featuresByName = map[string]Feature{}
for _, f := range Features() {
featuresByName[f.Name] = f
}
}
// Features returns a list of all known datastore features.
// This serves both to provide an authoritative list of features,
// and to define a canonical ordering of features.
func Features() []Feature {
// for backwards compatibility, only append to this list
return []Feature{
{
Name: FeatureNameBatching,
Interface: (*BatchingFeature)(nil),
DatastoreInterface: (*Batching)(nil),
},
{
Name: FeatureNameChecked,
Interface: (*CheckedFeature)(nil),
DatastoreInterface: (*CheckedDatastore)(nil),
},
{
Name: FeatureNameGC,
Interface: (*GCFeature)(nil),
DatastoreInterface: (*GCDatastore)(nil),
},
{
Name: FeatureNamePersistent,
Interface: (*PersistentFeature)(nil),
DatastoreInterface: (*PersistentDatastore)(nil),
},
{
Name: FeatureNameScrubbed,
Interface: (*ScrubbedFeature)(nil),
DatastoreInterface: (*ScrubbedDatastore)(nil),
},
{
Name: FeatureNameTTL,
Interface: (*TTL)(nil),
DatastoreInterface: (*TTLDatastore)(nil),
},
{
Name: FeatureNameTransaction,
Interface: (*TxnFeature)(nil),
DatastoreInterface: (*TxnDatastore)(nil),
},
}
}
// FeatureByName returns the feature with the given name, if known.
func FeatureByName(name string) (Feature, bool) {
feat, known := featuresByName[name]
return feat, known
}
// FeaturesForDatastore returns the features supported by the given datastore.
func FeaturesForDatastore(dstore Datastore) (features []Feature) {
if dstore == nil {
return nil
}
dstoreType := reflect.TypeOf(dstore)
for _, f := range Features() {
fType := reflect.TypeOf(f.Interface).Elem()
if dstoreType.Implements(fType) {
features = append(features, f)
}
}
return
}
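
A small sketch of feature introspection with the helpers above.

```go
package main

import (
	"fmt"

	ds "github.com/ipfs/go-datastore"
)

func main() {
	// MapDatastore implements Batching, so FeatureNameBatching is reported.
	store := ds.NewMapDatastore()
	for _, f := range ds.FeaturesForDatastore(store) {
		fmt.Println("supports:", f.Name)
	}

	// Lookup by name returns the canonical Feature metadata.
	if feat, ok := ds.FeatureByName(ds.FeatureNameTTL); ok {
		fmt.Println("known feature:", feat.Name)
	}
}
```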

309
vendor/github.com/ipfs/go-datastore/key.go generated vendored Normal file

@@ -0,0 +1,309 @@
package datastore
import (
"encoding/json"
"path"
"strings"
dsq "github.com/ipfs/go-datastore/query"
"github.com/google/uuid"
)
/*
A Key represents the unique identifier of an object.
Our Key scheme is inspired by file systems and Google App Engine key model.
Keys are meant to be unique across a system. Keys are hierarchical,
incorporating more and more specific namespaces. Thus keys can be deemed
'children' or 'ancestors' of other keys::
Key("/Comedy")
Key("/Comedy/MontyPython")
Also, every namespace can be parametrized to embed relevant object
information. For example, the Key `name` (most specific namespace) could
include the object type::
Key("/Comedy/MontyPython/Actor:JohnCleese")
Key("/Comedy/MontyPython/Sketch:CheeseShop")
Key("/Comedy/MontyPython/Sketch:CheeseShop/Character:Mousebender")
*/
type Key struct {
string
}
// NewKey constructs a key from a string. It will clean the value.
func NewKey(s string) Key {
k := Key{s}
k.Clean()
return k
}
// RawKey creates a new Key without safety checking the input. Use with care.
func RawKey(s string) Key {
// accept an empty string and fix it to avoid special cases
// elsewhere
if len(s) == 0 {
return Key{"/"}
}
// perform a quick sanity check that the key is in the correct
// format, if it is not then it is a programmer error and it is
// okay to panic
if len(s) == 0 || s[0] != '/' || (len(s) > 1 && s[len(s)-1] == '/') {
panic("invalid datastore key: " + s)
}
return Key{s}
}
// KeyWithNamespaces constructs a key out of a namespace slice.
func KeyWithNamespaces(ns []string) Key {
return NewKey(strings.Join(ns, "/"))
}
// Clean up a Key, using path.Clean.
func (k *Key) Clean() {
switch {
case len(k.string) == 0:
k.string = "/"
case k.string[0] == '/':
k.string = path.Clean(k.string)
default:
k.string = path.Clean("/" + k.string)
}
}
// String is the string value of Key
func (k Key) String() string {
return k.string
}
// Bytes returns the string value of Key as a []byte
func (k Key) Bytes() []byte {
return []byte(k.string)
}
// Equal checks equality of two keys
func (k Key) Equal(k2 Key) bool {
return k.string == k2.string
}
// Less checks whether this key is sorted lower than another.
func (k Key) Less(k2 Key) bool {
list1 := k.List()
list2 := k2.List()
for i, c1 := range list1 {
if len(list2) < (i + 1) {
return false
}
c2 := list2[i]
if c1 < c2 {
return true
} else if c1 > c2 {
return false
}
// c1 == c2, continue
}
// list1 is shorter or exactly the same.
return len(list1) < len(list2)
}
// List returns the `list` representation of this Key.
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").List()
// ["Comedy", "MontyPythong", "Actor:JohnCleese"]
func (k Key) List() []string {
return strings.Split(k.string, "/")[1:]
}
// Reverse returns the reverse of this Key.
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Reverse()
// NewKey("/Actor:JohnCleese/MontyPython/Comedy")
func (k Key) Reverse() Key {
l := k.List()
r := make([]string, len(l))
for i, e := range l {
r[len(l)-i-1] = e
}
return KeyWithNamespaces(r)
}
// Namespaces returns the `namespaces` making up this Key.
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Namespaces()
// ["Comedy", "MontyPython", "Actor:JohnCleese"]
func (k Key) Namespaces() []string {
return k.List()
}
// BaseNamespace returns the "base" namespace of this key (path.Base(filename))
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").BaseNamespace()
// "Actor:JohnCleese"
func (k Key) BaseNamespace() string {
n := k.Namespaces()
return n[len(n)-1]
}
// Type returns the "type" of this key (value of last namespace).
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Type()
// "Actor"
func (k Key) Type() string {
return NamespaceType(k.BaseNamespace())
}
// Name returns the "name" of this key (field of last namespace).
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Name()
// "JohnCleese"
func (k Key) Name() string {
return NamespaceValue(k.BaseNamespace())
}
// Instance returns an "instance" of this type key (appends value to namespace).
// NewKey("/Comedy/MontyPython/Actor").Instance("JohnClesse")
// NewKey("/Comedy/MontyPython/Actor:JohnCleese")
func (k Key) Instance(s string) Key {
return NewKey(k.string + ":" + s)
}
// Path returns the "path" of this key (parent + type).
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Path()
// NewKey("/Comedy/MontyPython/Actor")
func (k Key) Path() Key {
s := k.Parent().string + "/" + NamespaceType(k.BaseNamespace())
return NewKey(s)
}
// Parent returns the `parent` Key of this Key.
// NewKey("/Comedy/MontyPython/Actor:JohnCleese").Parent()
// NewKey("/Comedy/MontyPython")
func (k Key) Parent() Key {
n := k.List()
if len(n) == 1 {
return RawKey("/")
}
return NewKey(strings.Join(n[:len(n)-1], "/"))
}
// Child returns the `child` Key of this Key.
// NewKey("/Comedy/MontyPython").Child(NewKey("Actor:JohnCleese"))
// NewKey("/Comedy/MontyPython/Actor:JohnCleese")
func (k Key) Child(k2 Key) Key {
switch {
case k.string == "/":
return k2
case k2.string == "/":
return k
default:
return RawKey(k.string + k2.string)
}
}
// ChildString returns the `child` Key of this Key -- string helper.
// NewKey("/Comedy/MontyPython").ChildString("Actor:JohnCleese")
// NewKey("/Comedy/MontyPython/Actor:JohnCleese")
func (k Key) ChildString(s string) Key {
return NewKey(k.string + "/" + s)
}
// IsAncestorOf returns whether this key is a prefix of `other`
// NewKey("/Comedy").IsAncestorOf("/Comedy/MontyPython")
// true
func (k Key) IsAncestorOf(other Key) bool {
// equivalent to HasPrefix(other, k.string + "/")
if len(other.string) <= len(k.string) {
// We're not long enough to be a child.
return false
}
if k.string == "/" {
// We're the root and the other key is longer.
return true
}
// "other" starts with /k.string/
return other.string[len(k.string)] == '/' && other.string[:len(k.string)] == k.string
}
// IsDescendantOf returns whether this key contains another as a prefix.
// NewKey("/Comedy/MontyPython").IsDescendantOf("/Comedy")
// true
func (k Key) IsDescendantOf(other Key) bool {
return other.IsAncestorOf(k)
}
// IsTopLevel returns whether this key has only one namespace.
func (k Key) IsTopLevel() bool {
return len(k.List()) == 1
}
// MarshalJSON implements the json.Marshaler interface,
// keys are represented as JSON strings
func (k Key) MarshalJSON() ([]byte, error) {
return json.Marshal(k.String())
}
// UnmarshalJSON implements the json.Unmarshaler interface,
// keys will parse any value specified as a key to a string
func (k *Key) UnmarshalJSON(data []byte) error {
var key string
if err := json.Unmarshal(data, &key); err != nil {
return err
}
*k = NewKey(key)
return nil
}
// RandomKey returns a randomly (uuid) generated key.
// RandomKey()
// NewKey("/f98719ea086343f7b71f32ea9d9d521d")
func RandomKey() Key {
return NewKey(strings.Replace(uuid.New().String(), "-", "", -1))
}
/*
A Key Namespace is like a path element.
A namespace can optionally include a type (delimited by ':')
> NamespaceValue("Song:PhilosopherSong")
PhilosopherSong
> NamespaceType("Song:PhilosopherSong")
Song
> NamespaceType("Music:Song:PhilosopherSong")
Music:Song
*/
// NamespaceType is the first component of a namespace. `foo` in `foo:bar`
func NamespaceType(namespace string) string {
parts := strings.Split(namespace, ":")
if len(parts) < 2 {
return ""
}
return strings.Join(parts[0:len(parts)-1], ":")
}
// NamespaceValue returns the last component of a namespace. `baz` in `f:b:baz`
func NamespaceValue(namespace string) string {
parts := strings.Split(namespace, ":")
return parts[len(parts)-1]
}
// KeySlice attaches the methods of sort.Interface to []Key,
// sorting in increasing order.
type KeySlice []Key
func (p KeySlice) Len() int { return len(p) }
func (p KeySlice) Less(i, j int) bool { return p[i].Less(p[j]) }
func (p KeySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// EntryKeys
func EntryKeys(e []dsq.Entry) []Key {
ks := make([]Key, len(e))
for i, e := range e {
ks[i] = NewKey(e.Key)
}
return ks
}
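
A short sketch exercising the hierarchical Key operations documented above.

```go
package main

import (
	"fmt"

	ds "github.com/ipfs/go-datastore"
)

func main() {
	k := ds.NewKey("/Comedy/MontyPython/Actor:JohnCleese")

	fmt.Println(k.Parent())        // /Comedy/MontyPython
	fmt.Println(k.BaseNamespace()) // Actor:JohnCleese
	fmt.Println(k.Type())          // Actor
	fmt.Println(k.Name())          // JohnCleese

	// Ancestry checks compare path prefixes on namespace boundaries.
	fmt.Println(ds.NewKey("/Comedy").IsAncestorOf(k))    // true
	fmt.Println(k.IsDescendantOf(ds.NewKey("/Comedy"))) // true
}
```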

120
vendor/github.com/ipfs/go-datastore/null_ds.go generated vendored Normal file

@@ -0,0 +1,120 @@
package datastore
import (
"context"
dsq "github.com/ipfs/go-datastore/query"
)
// NullDatastore stores nothing, but conforms to the API.
// Useful to test with.
type NullDatastore struct {
}
var _ Datastore = (*NullDatastore)(nil)
var _ Batching = (*NullDatastore)(nil)
var _ ScrubbedDatastore = (*NullDatastore)(nil)
var _ CheckedDatastore = (*NullDatastore)(nil)
var _ PersistentDatastore = (*NullDatastore)(nil)
var _ GCDatastore = (*NullDatastore)(nil)
var _ TxnDatastore = (*NullDatastore)(nil)
// NewNullDatastore constructs a null datastore
func NewNullDatastore() *NullDatastore {
return &NullDatastore{}
}
// Put implements Datastore.Put
func (d *NullDatastore) Put(ctx context.Context, key Key, value []byte) (err error) {
return nil
}
// Sync implements Datastore.Sync
func (d *NullDatastore) Sync(ctx context.Context, prefix Key) error {
return nil
}
// Get implements Datastore.Get
func (d *NullDatastore) Get(ctx context.Context, key Key) (value []byte, err error) {
return nil, ErrNotFound
}
// Has implements Datastore.Has
func (d *NullDatastore) Has(ctx context.Context, key Key) (exists bool, err error) {
return false, nil
}
// GetSize implements Datastore.GetSize
func (d *NullDatastore) GetSize(ctx context.Context, key Key) (size int, err error) {
return -1, ErrNotFound
}
// Delete implements Datastore.Delete
func (d *NullDatastore) Delete(ctx context.Context, key Key) (err error) {
return nil
}
func (d *NullDatastore) Scrub(ctx context.Context) error {
return nil
}
func (d *NullDatastore) Check(ctx context.Context) error {
return nil
}
// Query implements Datastore.Query
func (d *NullDatastore) Query(ctx context.Context, q dsq.Query) (dsq.Results, error) {
return dsq.ResultsWithEntries(q, nil), nil
}
func (d *NullDatastore) Batch(ctx context.Context) (Batch, error) {
return NewBasicBatch(d), nil
}
func (d *NullDatastore) CollectGarbage(ctx context.Context) error {
return nil
}
func (d *NullDatastore) DiskUsage(ctx context.Context) (uint64, error) {
return 0, nil
}
func (d *NullDatastore) Close() error {
return nil
}
func (d *NullDatastore) NewTransaction(ctx context.Context, readOnly bool) (Txn, error) {
return &nullTxn{}, nil
}
type nullTxn struct{}
func (t *nullTxn) Get(ctx context.Context, key Key) (value []byte, err error) {
return nil, nil
}
func (t *nullTxn) Has(ctx context.Context, key Key) (exists bool, err error) {
return false, nil
}
func (t *nullTxn) GetSize(ctx context.Context, key Key) (size int, err error) {
return 0, nil
}
func (t *nullTxn) Query(ctx context.Context, q dsq.Query) (dsq.Results, error) {
return dsq.ResultsWithEntries(q, nil), nil
}
func (t *nullTxn) Put(ctx context.Context, key Key, value []byte) error {
return nil
}
func (t *nullTxn) Delete(ctx context.Context, key Key) error {
return nil
}
func (t *nullTxn) Commit(ctx context.Context) error {
return nil
}
func (t *nullTxn) Discard(ctx context.Context) {}

102
vendor/github.com/ipfs/go-datastore/query/filter.go generated vendored Normal file

@@ -0,0 +1,102 @@
package query
import (
"bytes"
"fmt"
"strings"
)
// Filter is an object that tests ResultEntries
type Filter interface {
// Filter returns whether an entry passes the filter
Filter(e Entry) bool
}
// Op is a comparison operator
type Op string
var (
Equal = Op("==")
NotEqual = Op("!=")
GreaterThan = Op(">")
GreaterThanOrEqual = Op(">=")
LessThan = Op("<")
LessThanOrEqual = Op("<=")
)
// FilterValueCompare is used to signal to datastores they
// should apply internal comparisons. Unfortunately, there
// is no way to apply comparisons* to interface{} types in
// Go, so if the datastore doesn't have a special way to
// handle these comparisons, you must provide the
// TypedFilter to actually do filtering.
//
// [*] other than == and !=, which use reflect.DeepEqual.
type FilterValueCompare struct {
Op Op
Value []byte
}
func (f FilterValueCompare) Filter(e Entry) bool {
cmp := bytes.Compare(e.Value, f.Value)
switch f.Op {
case Equal:
return cmp == 0
case NotEqual:
return cmp != 0
case LessThan:
return cmp < 0
case LessThanOrEqual:
return cmp <= 0
case GreaterThan:
return cmp > 0
case GreaterThanOrEqual:
return cmp >= 0
default:
panic(fmt.Errorf("unknown operation: %s", f.Op))
}
}
func (f FilterValueCompare) String() string {
return fmt.Sprintf("VALUE %s %q", f.Op, string(f.Value))
}
type FilterKeyCompare struct {
Op Op
Key string
}
func (f FilterKeyCompare) Filter(e Entry) bool {
switch f.Op {
case Equal:
return e.Key == f.Key
case NotEqual:
return e.Key != f.Key
case GreaterThan:
return e.Key > f.Key
case GreaterThanOrEqual:
return e.Key >= f.Key
case LessThan:
return e.Key < f.Key
case LessThanOrEqual:
return e.Key <= f.Key
default:
panic(fmt.Errorf("unknown op '%s'", f.Op))
}
}
func (f FilterKeyCompare) String() string {
return fmt.Sprintf("KEY %s %q", f.Op, f.Key)
}
type FilterKeyPrefix struct {
Prefix string
}
func (f FilterKeyPrefix) Filter(e Entry) bool {
return strings.HasPrefix(e.Key, f.Prefix)
}
func (f FilterKeyPrefix) String() string {
return fmt.Sprintf("PREFIX(%q)", f.Prefix)
}
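
A sketch of the filters above, used both directly and attached to a Query (filters in a Query are applied sequentially).

```go
package main

import (
	"fmt"

	dsq "github.com/ipfs/go-datastore/query"
)

func main() {
	f := dsq.FilterKeyPrefix{Prefix: "/users/"}

	entries := []dsq.Entry{
		{Key: "/users/alice"},
		{Key: "/posts/1"},
	}
	for _, e := range entries {
		fmt.Println(e.Key, f.Filter(e)) // /users/alice true, /posts/1 false
	}

	// Filters also slot into a Query for datastores to apply.
	q := dsq.Query{Filters: []dsq.Filter{f}}
	fmt.Println(q.String())
}
```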

94
vendor/github.com/ipfs/go-datastore/query/order.go generated vendored Normal file

@@ -0,0 +1,94 @@
package query
import (
"bytes"
"sort"
"strings"
)
// Order is an object used to order objects
type Order interface {
Compare(a, b Entry) int
}
// OrderByFunction orders the results based on the result of the given function.
type OrderByFunction func(a, b Entry) int
func (o OrderByFunction) Compare(a, b Entry) int {
return o(a, b)
}
func (OrderByFunction) String() string {
return "FN"
}
// OrderByValue is used to signal to datastores they should apply internal
// orderings.
type OrderByValue struct{}
func (o OrderByValue) Compare(a, b Entry) int {
return bytes.Compare(a.Value, b.Value)
}
func (OrderByValue) String() string {
return "VALUE"
}
// OrderByValueDescending is used to signal to datastores they
// should apply internal orderings.
type OrderByValueDescending struct{}
func (o OrderByValueDescending) Compare(a, b Entry) int {
return -bytes.Compare(a.Value, b.Value)
}
func (OrderByValueDescending) String() string {
return "desc(VALUE)"
}
// OrderByKey
type OrderByKey struct{}
func (o OrderByKey) Compare(a, b Entry) int {
return strings.Compare(a.Key, b.Key)
}
func (OrderByKey) String() string {
return "KEY"
}
// OrderByKeyDescending
type OrderByKeyDescending struct{}
func (o OrderByKeyDescending) Compare(a, b Entry) int {
return -strings.Compare(a.Key, b.Key)
}
func (OrderByKeyDescending) String() string {
return "desc(KEY)"
}
// Less returns true if a comes before b with the requested orderings.
func Less(orders []Order, a, b Entry) bool {
for _, cmp := range orders {
switch cmp.Compare(a, b) {
case 0:
case -1:
return true
case 1:
return false
}
}
// This gives us a *stable* sort for free. We don't care about
// preserving the order from the underlying datastore
// because it's undefined.
return a.Key < b.Key
}
// Sort sorts the given entries using the given orders.
func Sort(orders []Order, entries []Entry) {
sort.Slice(entries, func(i int, j int) bool {
return Less(orders, entries[i], entries[j])
})
}
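
A sketch of the orderings above: orders are applied hierarchically, so later orders only break ties left by earlier ones.

```go
package main

import (
	"fmt"

	dsq "github.com/ipfs/go-datastore/query"
)

func main() {
	entries := []dsq.Entry{
		{Key: "/b", Value: []byte("1")},
		{Key: "/a", Value: []byte("2")},
		{Key: "/c", Value: []byte("1")},
	}

	// Primary order: by value; ties broken by descending key.
	dsq.Sort([]dsq.Order{
		dsq.OrderByValue{},
		dsq.OrderByKeyDescending{},
	}, entries)

	for _, e := range entries {
		fmt.Println(e.Key, string(e.Value)) // /c 1, /b 1, /a 2
	}
}
```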

426
vendor/github.com/ipfs/go-datastore/query/query.go generated vendored Normal file

@@ -0,0 +1,426 @@
package query
import (
"fmt"
"time"
goprocess "github.com/jbenet/goprocess"
)
/*
Query represents storage for any key-value pair.
tl;dr:
queries are supported across datastores.
Cheap on top of relational dbs, and expensive otherwise.
Pick the right tool for the job!
In addition to the key-value store get and set semantics, datastore
provides an interface to retrieve multiple records at a time through
the use of queries. The datastore Query model gleans a common set of
operations performed when querying. To avoid pasting here years of
database research, let's summarize the operations datastore supports.
Query Operations, applied in-order:
* prefix - scope the query to a given path prefix
* filters - select a subset of values by applying constraints
* orders - sort the results by applying sort conditions, hierarchically.
* offset - skip a number of results (for efficient pagination)
* limit - impose a numeric limit on the number of results
Datastore combines these operations into a simple Query class that allows
applications to define their constraints in a simple, generic, way without
introducing datastore specific calls, languages, etc.
However, take heed: not all datastores support efficiently performing these
operations. Pick a datastore based on your needs. If you need efficient look-ups,
go for a simple key/value store. If you need efficient queries, consider an SQL
backed datastore.
Notes:
* Prefix: When a query filters by prefix, it selects keys that are strict
children of the prefix. For example, a prefix "/foo" would select "/foo/bar"
but not "/foobar" or "/foo",
* Orders: Orders are applied hierarchically. Results are sorted by the first
ordering, then entries equal under the first ordering are sorted with the
second ordering, etc.
* Limits & Offset: Limits and offsets are applied after everything else.
*/
type Query struct {
Prefix string // namespaces the query to results whose keys have Prefix
Filters []Filter // filter results. apply sequentially
Orders []Order // order results. apply hierarchically
Limit int // maximum number of results
Offset int // skip given number of results
KeysOnly bool // return only keys.
ReturnExpirations bool // return expirations (see TTLDatastore)
ReturnsSizes bool // always return sizes. If not set, datastore impl can return
// it anyway if it doesn't involve a performance cost. If KeysOnly
// is not set, Size should always be set.
}
// String returns a string representation of the Query for debugging/validation
// purposes. Do not use it for SQL queries.
func (q Query) String() string {
s := "SELECT keys"
if !q.KeysOnly {
s += ",vals"
}
if q.ReturnExpirations {
s += ",exps"
}
s += " "
if q.Prefix != "" {
s += fmt.Sprintf("FROM %q ", q.Prefix)
}
if len(q.Filters) > 0 {
s += fmt.Sprintf("FILTER [%s", q.Filters[0])
for _, f := range q.Filters[1:] {
s += fmt.Sprintf(", %s", f)
}
s += "] "
}
if len(q.Orders) > 0 {
s += fmt.Sprintf("ORDER [%s", q.Orders[0])
for _, f := range q.Orders[1:] {
s += fmt.Sprintf(", %s", f)
}
s += "] "
}
if q.Offset > 0 {
s += fmt.Sprintf("OFFSET %d ", q.Offset)
}
if q.Limit > 0 {
s += fmt.Sprintf("LIMIT %d ", q.Limit)
}
// Will always end with a space, strip it.
return s[:len(s)-1]
}
// Entry is a query result entry.
type Entry struct {
Key string // can't be ds.Key because of circular imports ...!!!
Value []byte // Will be nil if KeysOnly has been passed.
Expiration time.Time // Entry expiration timestamp if requested and supported (see TTLDatastore).
Size int // Might be -1 if the datastore doesn't support listing the size with KeysOnly
// or if ReturnsSizes is not set
}
// Result is a special entry that includes an error, so that the client
// may be warned about internal errors. If Error is non-nil, Entry must be
// empty.
type Result struct {
Entry
Error error
}
// Results is a set of Query results. This is the interface for clients.
// Example:
//
// qr, _ := myds.Query(q)
// for r := range qr.Next() {
// if r.Error != nil {
// // handle.
// break
// }
//
// fmt.Println(r.Entry.Key, r.Entry.Value)
// }
//
// or, wait on all results at once:
//
// qr, _ := myds.Query(q)
// es, _ := qr.Rest()
// for _, e := range es {
// fmt.Println(e.Key, e.Value)
// }
//
type Results interface {
Query() Query // the query these Results correspond to
Next() <-chan Result // returns a channel to wait for the next result
NextSync() (Result, bool) // blocks and waits to return the next result, second parameter returns false when results are exhausted
Rest() ([]Entry, error) // waits till processing finishes, returns all entries at once.
Close() error // client may call Close to signal early exit
// Process returns a goprocess.Process associated with these results.
// most users will not need this function (Close is all they want),
// but it's here in case you want to connect the results to other
// goprocess-friendly things.
Process() goprocess.Process
}
// results implements Results
type results struct {
query Query
proc goprocess.Process
res <-chan Result
}
func (r *results) Next() <-chan Result {
return r.res
}
func (r *results) NextSync() (Result, bool) {
val, ok := <-r.res
return val, ok
}
func (r *results) Rest() ([]Entry, error) {
var es []Entry
for e := range r.res {
if e.Error != nil {
return es, e.Error
}
es = append(es, e.Entry)
}
<-r.proc.Closed() // wait till the processing finishes.
return es, nil
}
func (r *results) Process() goprocess.Process {
return r.proc
}
func (r *results) Close() error {
return r.proc.Close()
}
func (r *results) Query() Query {
return r.query
}
// ResultBuilder is what implementors use to construct results
// Implementors of datastores and their clients must respect the
// Process of the Request:
//
// * clients must call r.Process().Close() on an early exit, so
// implementations can reclaim resources.
// * if the Entries are read to completion (channel closed), Process
// should be closed automatically.
// * datastores must respect <-Process.Closing(), which intermediates
// an early close signal from the client.
//
type ResultBuilder struct {
Query Query
Process goprocess.Process
Output chan Result
}
// Results returns a Results corresponding to this builder.
func (rb *ResultBuilder) Results() Results {
return &results{
query: rb.Query,
proc: rb.Process,
res: rb.Output,
}
}
const NormalBufSize = 1
const KeysOnlyBufSize = 128
func NewResultBuilder(q Query) *ResultBuilder {
bufSize := NormalBufSize
if q.KeysOnly {
bufSize = KeysOnlyBufSize
}
b := &ResultBuilder{
Query: q,
Output: make(chan Result, bufSize),
}
b.Process = goprocess.WithTeardown(func() error {
close(b.Output)
return nil
})
return b
}
// ResultsWithChan returns a Results object from a channel
// of Result entries.
//
// DEPRECATED: This iterator is impossible to cancel correctly. Canceling it
// will leave anything trying to write to the result channel hanging.
func ResultsWithChan(q Query, res <-chan Result) Results {
return ResultsWithProcess(q, func(worker goprocess.Process, out chan<- Result) {
for {
select {
case <-worker.Closing(): // client told us to close early
return
case e, more := <-res:
if !more {
return
}
select {
case out <- e:
case <-worker.Closing(): // client told us to close early
return
}
}
}
})
}
// ResultsWithProcess returns a Results object with the results generated by the
// passed subprocess.
func ResultsWithProcess(q Query, proc func(goprocess.Process, chan<- Result)) Results {
b := NewResultBuilder(q)
// go consume all the entries and add them to the results.
b.Process.Go(func(worker goprocess.Process) {
proc(worker, b.Output)
})
go b.Process.CloseAfterChildren() //nolint
return b.Results()
}
// ResultsWithEntries returns a Results object from a list of entries
func ResultsWithEntries(q Query, res []Entry) Results {
i := 0
return ResultsFromIterator(q, Iterator{
Next: func() (Result, bool) {
if i >= len(res) {
return Result{}, false
}
next := res[i]
i++
return Result{Entry: next}, true
},
})
}
func ResultsReplaceQuery(r Results, q Query) Results {
switch r := r.(type) {
case *results:
// note: not using field names to make sure all fields are copied
return &results{q, r.proc, r.res}
case *resultsIter:
// note: not using field names to make sure all fields are copied
lr := r.legacyResults
if lr != nil {
lr = &results{q, lr.proc, lr.res}
}
return &resultsIter{q, r.next, r.close, lr}
default:
panic("unknown results type")
}
}
//
// ResultsFromIterator provides an alternative way to construct
// results without the use of channels.
//
func ResultsFromIterator(q Query, iter Iterator) Results {
if iter.Close == nil {
iter.Close = noopClose
}
return &resultsIter{
query: q,
next: iter.Next,
close: iter.Close,
}
}
func noopClose() error {
return nil
}
type Iterator struct {
Next func() (Result, bool)
Close func() error // note: might be called more than once
}
type resultsIter struct {
query Query
next func() (Result, bool)
close func() error
legacyResults *results
}
func (r *resultsIter) Next() <-chan Result {
r.useLegacyResults()
return r.legacyResults.Next()
}
func (r *resultsIter) NextSync() (Result, bool) {
if r.legacyResults != nil {
return r.legacyResults.NextSync()
} else {
res, ok := r.next()
if !ok {
r.close()
}
return res, ok
}
}
func (r *resultsIter) Rest() ([]Entry, error) {
var es []Entry
for {
e, ok := r.NextSync()
if !ok {
break
}
if e.Error != nil {
return es, e.Error
}
es = append(es, e.Entry)
}
return es, nil
}
func (r *resultsIter) Process() goprocess.Process {
r.useLegacyResults()
return r.legacyResults.Process()
}
func (r *resultsIter) Close() error {
if r.legacyResults != nil {
return r.legacyResults.Close()
} else {
return r.close()
}
}
func (r *resultsIter) Query() Query {
return r.query
}
func (r *resultsIter) useLegacyResults() {
if r.legacyResults != nil {
return
}
b := NewResultBuilder(r.query)
// go consume all the entries and add them to the results.
b.Process.Go(func(worker goprocess.Process) {
defer r.close()
for {
e, ok := r.next()
if !ok {
break
}
select {
case b.Output <- e:
case <-worker.Closing(): // client told us to close early
return
}
}
})
go b.Process.CloseAfterChildren() //nolint
r.legacyResults = b.Results().(*results)
}
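
A sketch tying Query and Results together against the in-memory MapDatastore from basic_ds.go.

```go
package main

import (
	"context"
	"fmt"

	ds "github.com/ipfs/go-datastore"
	dsq "github.com/ipfs/go-datastore/query"
)

func main() {
	ctx := context.Background()
	store := ds.NewMapDatastore()
	_ = store.Put(ctx, ds.NewKey("/users/alice"), []byte("a"))
	_ = store.Put(ctx, ds.NewKey("/users/bob"), []byte("b"))
	_ = store.Put(ctx, ds.NewKey("/posts/1"), []byte("p"))

	// Prefix scopes to strict children; orders sort hierarchically.
	res, err := store.Query(ctx, dsq.Query{
		Prefix: "/users",
		Orders: []dsq.Order{dsq.OrderByKey{}},
	})
	if err != nil {
		panic(err)
	}
	defer res.Close()

	entries, err := res.Rest() // wait for all results at once
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		fmt.Println(e.Key, string(e.Value)) // /users/alice a, /users/bob b
	}
}
```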

158
vendor/github.com/ipfs/go-datastore/query/query_impl.go generated vendored Normal file

@@ -0,0 +1,158 @@
package query
import (
"path"
goprocess "github.com/jbenet/goprocess"
)
// NaiveFilter applies a filter to the results.
func NaiveFilter(qr Results, filter Filter) Results {
return ResultsFromIterator(qr.Query(), Iterator{
Next: func() (Result, bool) {
for {
e, ok := qr.NextSync()
if !ok {
return Result{}, false
}
if e.Error != nil || filter.Filter(e.Entry) {
return e, true
}
}
},
Close: func() error {
return qr.Close()
},
})
}
// NaiveLimit truncates the results to a given int limit
func NaiveLimit(qr Results, limit int) Results {
if limit == 0 {
// 0 means no limit
return qr
}
closed := false
return ResultsFromIterator(qr.Query(), Iterator{
Next: func() (Result, bool) {
if limit == 0 {
if !closed {
closed = true
err := qr.Close()
if err != nil {
return Result{Error: err}, true
}
}
return Result{}, false
}
limit--
return qr.NextSync()
},
Close: func() error {
if closed {
return nil
}
closed = true
return qr.Close()
},
})
}
// NaiveOffset skips a given number of results
func NaiveOffset(qr Results, offset int) Results {
return ResultsFromIterator(qr.Query(), Iterator{
Next: func() (Result, bool) {
for ; offset > 0; offset-- {
res, ok := qr.NextSync()
if !ok || res.Error != nil {
return res, ok
}
}
return qr.NextSync()
},
Close: func() error {
return qr.Close()
},
})
}
// NaiveOrder reorders results according to given orders.
// WARNING: this is the only non-stream friendly operation!
func NaiveOrder(qr Results, orders ...Order) Results {
// Short circuit.
if len(orders) == 0 {
return qr
}
return ResultsWithProcess(qr.Query(), func(worker goprocess.Process, out chan<- Result) {
defer qr.Close()
var entries []Entry
collect:
for {
select {
case <-worker.Closing():
return
case e, ok := <-qr.Next():
if !ok {
break collect
}
if e.Error != nil {
out <- e
continue
}
entries = append(entries, e.Entry)
}
}
Sort(orders, entries)
for _, e := range entries {
select {
case <-worker.Closing():
return
case out <- Result{Entry: e}:
}
}
})
}
func NaiveQueryApply(q Query, qr Results) Results {
if q.Prefix != "" {
// Clean the prefix as a key and append / so a prefix of /bar
// only finds /bar/baz, not /barbaz.
prefix := q.Prefix
if len(prefix) == 0 {
prefix = "/"
} else {
if prefix[0] != '/' {
prefix = "/" + prefix
}
prefix = path.Clean(prefix)
}
// If the prefix is empty, ignore it.
if prefix != "/" {
qr = NaiveFilter(qr, FilterKeyPrefix{prefix + "/"})
}
}
for _, f := range q.Filters {
qr = NaiveFilter(qr, f)
}
if len(q.Orders) > 0 {
qr = NaiveOrder(qr, q.Orders...)
}
if q.Offset != 0 {
qr = NaiveOffset(qr, q.Offset)
}
if q.Limit != 0 {
qr = NaiveLimit(qr, q.Limit)
}
return qr
}
func ResultEntriesFrom(keys []string, vals [][]byte) []Entry {
re := make([]Entry, len(keys))
for i, k := range keys {
re[i] = Entry{Key: k, Size: len(vals[i]), Value: vals[i]}
}
return re
}

185
vendor/github.com/ipfs/go-datastore/sync/sync.go generated vendored Normal file

@@ -0,0 +1,185 @@
package sync
import (
"context"
"sync"
ds "github.com/ipfs/go-datastore"
dsq "github.com/ipfs/go-datastore/query"
)
// MutexDatastore contains a child datastore and a mutex.
// Used for coarse-grained synchronization.
type MutexDatastore struct {
sync.RWMutex
child ds.Datastore
}
var _ ds.Datastore = (*MutexDatastore)(nil)
var _ ds.Batching = (*MutexDatastore)(nil)
var _ ds.Shim = (*MutexDatastore)(nil)
var _ ds.PersistentDatastore = (*MutexDatastore)(nil)
var _ ds.CheckedDatastore = (*MutexDatastore)(nil)
var _ ds.ScrubbedDatastore = (*MutexDatastore)(nil)
var _ ds.GCDatastore = (*MutexDatastore)(nil)
// MutexWrap constructs a datastore with a coarse lock around the entire
// datastore, for every single operation.
func MutexWrap(d ds.Datastore) *MutexDatastore {
return &MutexDatastore{child: d}
}
// Children implements Shim
func (d *MutexDatastore) Children() []ds.Datastore {
return []ds.Datastore{d.child}
}
// Put implements Datastore.Put
func (d *MutexDatastore) Put(ctx context.Context, key ds.Key, value []byte) (err error) {
d.Lock()
defer d.Unlock()
return d.child.Put(ctx, key, value)
}
// Sync implements Datastore.Sync
func (d *MutexDatastore) Sync(ctx context.Context, prefix ds.Key) error {
d.Lock()
defer d.Unlock()
return d.child.Sync(ctx, prefix)
}
// Get implements Datastore.Get
func (d *MutexDatastore) Get(ctx context.Context, key ds.Key) (value []byte, err error) {
d.RLock()
defer d.RUnlock()
return d.child.Get(ctx, key)
}
// Has implements Datastore.Has
func (d *MutexDatastore) Has(ctx context.Context, key ds.Key) (exists bool, err error) {
d.RLock()
defer d.RUnlock()
return d.child.Has(ctx, key)
}
// GetSize implements Datastore.GetSize
func (d *MutexDatastore) GetSize(ctx context.Context, key ds.Key) (size int, err error) {
d.RLock()
defer d.RUnlock()
return d.child.GetSize(ctx, key)
}
// Delete implements Datastore.Delete
func (d *MutexDatastore) Delete(ctx context.Context, key ds.Key) (err error) {
d.Lock()
defer d.Unlock()
return d.child.Delete(ctx, key)
}
// Query implements Datastore.Query
func (d *MutexDatastore) Query(ctx context.Context, q dsq.Query) (dsq.Results, error) {
d.RLock()
defer d.RUnlock()
// Apply the entire query while locked. Non-sync datastores may not
// allow concurrent queries.
results, err := d.child.Query(ctx, q)
if err != nil {
return nil, err
}
entries, err1 := results.Rest()
err2 := results.Close()
switch {
case err1 != nil:
return nil, err1
case err2 != nil:
return nil, err2
}
return dsq.ResultsWithEntries(q, entries), nil
}
func (d *MutexDatastore) Batch(ctx context.Context) (ds.Batch, error) {
d.RLock()
defer d.RUnlock()
bds, ok := d.child.(ds.Batching)
if !ok {
return nil, ds.ErrBatchUnsupported
}
b, err := bds.Batch(ctx)
if err != nil {
return nil, err
}
return &syncBatch{
batch: b,
mds: d,
}, nil
}
func (d *MutexDatastore) Close() error {
d.RWMutex.Lock()
defer d.RWMutex.Unlock()
return d.child.Close()
}
// DiskUsage implements the PersistentDatastore interface.
func (d *MutexDatastore) DiskUsage(ctx context.Context) (uint64, error) {
d.RLock()
defer d.RUnlock()
return ds.DiskUsage(ctx, d.child)
}
type syncBatch struct {
batch ds.Batch
mds *MutexDatastore
}
var _ ds.Batch = (*syncBatch)(nil)
func (b *syncBatch) Put(ctx context.Context, key ds.Key, val []byte) error {
b.mds.Lock()
defer b.mds.Unlock()
return b.batch.Put(ctx, key, val)
}
func (b *syncBatch) Delete(ctx context.Context, key ds.Key) error {
b.mds.Lock()
defer b.mds.Unlock()
return b.batch.Delete(ctx, key)
}
func (b *syncBatch) Commit(ctx context.Context) error {
b.mds.Lock()
defer b.mds.Unlock()
return b.batch.Commit(ctx)
}
func (d *MutexDatastore) Check(ctx context.Context) error {
if c, ok := d.child.(ds.CheckedDatastore); ok {
d.RWMutex.Lock()
defer d.RWMutex.Unlock()
return c.Check(ctx)
}
return nil
}
func (d *MutexDatastore) Scrub(ctx context.Context) error {
if c, ok := d.child.(ds.ScrubbedDatastore); ok {
d.RWMutex.Lock()
defer d.RWMutex.Unlock()
return c.Scrub(ctx)
}
return nil
}
func (d *MutexDatastore) CollectGarbage(ctx context.Context) error {
if c, ok := d.child.(ds.GCDatastore); ok {
d.RWMutex.Lock()
defer d.RWMutex.Unlock()
return c.CollectGarbage(ctx)
}
return nil
}
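
A sketch of the coarse-locking wrapper above, making a MapDatastore safe for concurrent writers.

```go
package main

import (
	"context"
	"sync"

	ds "github.com/ipfs/go-datastore"
	dssync "github.com/ipfs/go-datastore/sync"
)

func main() {
	ctx := context.Background()

	// MapDatastore is not thread-safe on its own; MutexWrap serializes
	// every operation behind a single RWMutex.
	store := dssync.MutexWrap(ds.NewMapDatastore())

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			key := ds.NewKey("/worker").Instance(string(rune('a' + n)))
			_ = store.Put(ctx, key, []byte{byte(n)})
		}(i)
	}
	wg.Wait()
}
```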

3
vendor/github.com/ipfs/go-datastore/version.json generated vendored Normal file

@@ -0,0 +1,3 @@
{
"version": "v0.6.0"
}