Integrate BACKBEAT SDK and resolve KACHING license validation
Major integrations and fixes:

- Added BACKBEAT SDK integration for P2P operation timing
- Implemented beat-aware status tracking for distributed operations
- Added Docker secrets support for secure license management
- Resolved KACHING license validation via HTTPS/TLS
- Updated docker-compose configuration for clean stack deployment
- Disabled rollback policies to prevent deployment failures
- Added license credential storage (CHORUS-DEV-MULTI-001)

Technical improvements:

- BACKBEAT P2P operation tracking with phase management
- Enhanced configuration system with file-based secrets (see the sketch below)
- Improved error handling for license validation
- Clean separation of KACHING and CHORUS deployment stacks

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
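The file-based secrets bullet above refers to the common Docker secrets pattern: the license credential is mounted as a file inside the container and read at startup instead of being passed as a plain environment variable. Below is a minimal sketch of that pattern in Go; the LICENSE_ID / LICENSE_ID_FILE names and the loadSecret helper are assumptions for illustration, not the actual CHORUS configuration code.

package main

import (
	"fmt"
	"os"
	"strings"
)

// loadSecret returns a credential, preferring a Docker secret file
// (e.g. LICENSE_ID_FILE=/run/secrets/chorus_license_id) over a plain
// environment variable. All names here are illustrative.
func loadSecret(envVar string) (string, error) {
	if path := os.Getenv(envVar + "_FILE"); path != "" {
		data, err := os.ReadFile(path)
		if err != nil {
			return "", fmt.Errorf("read secret file %s: %w", path, err)
		}
		return strings.TrimSpace(string(data)), nil
	}
	if v := os.Getenv(envVar); v != "" {
		return v, nil
	}
	return "", fmt.Errorf("%s is not set", envVar)
}

func main() {
	licenseID, err := loadSecret("LICENSE_ID")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("validating license", licenseID) // e.g. CHORUS-DEV-MULTI-001
}

In docker-compose, the corresponding secret would be declared under secrets: and would surface inside the service container at /run/secrets/<name>.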
vendor/github.com/ipfs/go-datastore/query/filter.go (new file, generated, vendored, 102 lines)
@@ -0,0 +1,102 @@

package query

import (
	"bytes"
	"fmt"
	"strings"
)

// Filter is an object that tests ResultEntries
type Filter interface {
	// Filter returns whether an entry passes the filter
	Filter(e Entry) bool
}

// Op is a comparison operator
type Op string

var (
	Equal              = Op("==")
	NotEqual           = Op("!=")
	GreaterThan        = Op(">")
	GreaterThanOrEqual = Op(">=")
	LessThan           = Op("<")
	LessThanOrEqual    = Op("<=")
)

// FilterValueCompare is used to signal to datastores they
// should apply internal comparisons. Unfortunately, there
// is no way to apply comparisons* to interface{} types in
// Go, so if the datastore doesn't have a special way to
// handle these comparisons, you must provide the
// TypedFilter to actually do filtering.
//
// [*] other than == and !=, which use reflect.DeepEqual.
type FilterValueCompare struct {
	Op    Op
	Value []byte
}

func (f FilterValueCompare) Filter(e Entry) bool {
	cmp := bytes.Compare(e.Value, f.Value)
	switch f.Op {
	case Equal:
		return cmp == 0
	case NotEqual:
		return cmp != 0
	case LessThan:
		return cmp < 0
	case LessThanOrEqual:
		return cmp <= 0
	case GreaterThan:
		return cmp > 0
	case GreaterThanOrEqual:
		return cmp >= 0
	default:
		panic(fmt.Errorf("unknown operation: %s", f.Op))
	}
}

func (f FilterValueCompare) String() string {
	return fmt.Sprintf("VALUE %s %q", f.Op, string(f.Value))
}

type FilterKeyCompare struct {
	Op  Op
	Key string
}

func (f FilterKeyCompare) Filter(e Entry) bool {
	switch f.Op {
	case Equal:
		return e.Key == f.Key
	case NotEqual:
		return e.Key != f.Key
	case GreaterThan:
		return e.Key > f.Key
	case GreaterThanOrEqual:
		return e.Key >= f.Key
	case LessThan:
		return e.Key < f.Key
	case LessThanOrEqual:
		return e.Key <= f.Key
	default:
		panic(fmt.Errorf("unknown op '%s'", f.Op))
	}
}

func (f FilterKeyCompare) String() string {
	return fmt.Sprintf("KEY %s %q", f.Op, f.Key)
}

type FilterKeyPrefix struct {
	Prefix string
}

func (f FilterKeyPrefix) Filter(e Entry) bool {
	return strings.HasPrefix(e.Key, f.Prefix)
}

func (f FilterKeyPrefix) String() string {
	return fmt.Sprintf("PREFIX(%q)", f.Prefix)
}
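For orientation, a minimal sketch (not part of the vendored file) of how these filter types evaluate entries; the keys and values are made up:

package main

import (
	"fmt"

	"github.com/ipfs/go-datastore/query"
)

func main() {
	entries := []query.Entry{
		{Key: "/foo/a", Value: []byte("1")},
		{Key: "/foo/b", Value: []byte("2")},
		{Key: "/bar/c", Value: []byte("3")},
	}

	// Keep only keys under /foo/ whose value compares greater than "1".
	filters := []query.Filter{
		query.FilterKeyPrefix{Prefix: "/foo/"},
		query.FilterValueCompare{Op: query.GreaterThan, Value: []byte("1")},
	}

	for _, e := range entries {
		pass := true
		for _, f := range filters {
			if !f.Filter(e) {
				pass = false
				break
			}
		}
		if pass {
			fmt.Println(e.Key) // prints /foo/b
		}
	}
}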
vendor/github.com/ipfs/go-datastore/query/order.go (new file, generated, vendored, 94 lines)
@@ -0,0 +1,94 @@

package query

import (
	"bytes"
	"sort"
	"strings"
)

// Order is an object used to order objects
type Order interface {
	Compare(a, b Entry) int
}

// OrderByFunction orders the results based on the result of the given function.
type OrderByFunction func(a, b Entry) int

func (o OrderByFunction) Compare(a, b Entry) int {
	return o(a, b)
}

func (OrderByFunction) String() string {
	return "FN"
}

// OrderByValue is used to signal to datastores they should apply internal
// orderings.
type OrderByValue struct{}

func (o OrderByValue) Compare(a, b Entry) int {
	return bytes.Compare(a.Value, b.Value)
}

func (OrderByValue) String() string {
	return "VALUE"
}

// OrderByValueDescending is used to signal to datastores they
// should apply internal orderings.
type OrderByValueDescending struct{}

func (o OrderByValueDescending) Compare(a, b Entry) int {
	return -bytes.Compare(a.Value, b.Value)
}

func (OrderByValueDescending) String() string {
	return "desc(VALUE)"
}

// OrderByKey orders entries by key, ascending.
type OrderByKey struct{}

func (o OrderByKey) Compare(a, b Entry) int {
	return strings.Compare(a.Key, b.Key)
}

func (OrderByKey) String() string {
	return "KEY"
}

// OrderByKeyDescending orders entries by key, descending.
type OrderByKeyDescending struct{}

func (o OrderByKeyDescending) Compare(a, b Entry) int {
	return -strings.Compare(a.Key, b.Key)
}

func (OrderByKeyDescending) String() string {
	return "desc(KEY)"
}

// Less returns true if a comes before b with the requested orderings.
func Less(orders []Order, a, b Entry) bool {
	for _, cmp := range orders {
		switch cmp.Compare(a, b) {
		case 0:
		case -1:
			return true
		case 1:
			return false
		}
	}

	// This gives us a *stable* sort for free. We don't care about
	// preserving the order from the underlying datastore
	// because it's undefined.
	return a.Key < b.Key
}

// Sort sorts the given entries using the given orders.
func Sort(orders []Order, entries []Entry) {
	sort.Slice(entries, func(i int, j int) bool {
		return Less(orders, entries[i], entries[j])
	})
}
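A brief illustrative sketch (not part of the vendored file) of the hierarchical ordering that Sort and Less implement; the entries are made up:

package main

import (
	"fmt"

	"github.com/ipfs/go-datastore/query"
)

func main() {
	entries := []query.Entry{
		{Key: "/b", Value: []byte("2")},
		{Key: "/a", Value: []byte("2")},
		{Key: "/c", Value: []byte("1")},
	}

	// Order hierarchically: by value ascending first, then by key
	// descending among entries whose values are equal.
	query.Sort([]query.Order{
		query.OrderByValue{},
		query.OrderByKeyDescending{},
	}, entries)

	for _, e := range entries {
		fmt.Println(e.Key, string(e.Value)) // /c 1, /b 2, /a 2
	}
}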
vendor/github.com/ipfs/go-datastore/query/query.go (new file, generated, vendored, 426 lines)
@@ -0,0 +1,426 @@

package query

import (
	"fmt"
	"time"

	goprocess "github.com/jbenet/goprocess"
)

/*
Query represents storage for any key-value pair.

tl;dr:

	queries are supported across datastores.
	Cheap on top of relational dbs, and expensive otherwise.
	Pick the right tool for the job!

In addition to the key-value store get and set semantics, datastore
provides an interface to retrieve multiple records at a time through
the use of queries. The datastore Query model gleans a common set of
operations performed when querying. To avoid pasting here years of
database research, let's summarize the operations datastore supports.

Query Operations, applied in-order:

  * prefix - scope the query to a given path prefix
  * filters - select a subset of values by applying constraints
  * orders - sort the results by applying sort conditions, hierarchically.
  * offset - skip a number of results (for efficient pagination)
  * limit - impose a numeric limit on the number of results

Datastore combines these operations into a simple Query class that allows
applications to define their constraints in a simple, generic, way without
introducing datastore specific calls, languages, etc.

However, take heed: not all datastores support efficiently performing these
operations. Pick a datastore based on your needs. If you need efficient look-ups,
go for a simple key/value store. If you need efficient queries, consider an SQL
backed datastore.

Notes:

  * Prefix: When a query filters by prefix, it selects keys that are strict
    children of the prefix. For example, a prefix "/foo" would select "/foo/bar"
    but not "/foobar" or "/foo".
  * Orders: Orders are applied hierarchically. Results are sorted by the first
    ordering, then entries equal under the first ordering are sorted with the
    second ordering, etc.
  * Limits & Offset: Limits and offsets are applied after everything else.
*/
type Query struct {
	Prefix            string   // namespaces the query to results whose keys have Prefix
	Filters           []Filter // filter results. apply sequentially
	Orders            []Order  // order results. apply hierarchically
	Limit             int      // maximum number of results
	Offset            int      // skip given number of results
	KeysOnly          bool     // return only keys.
	ReturnExpirations bool     // return expirations (see TTLDatastore)
	ReturnsSizes      bool     // always return sizes. If not set, datastore impl can return
	// it anyway if it doesn't involve a performance cost. If KeysOnly
	// is not set, Size should always be set.
}

// String returns a string representation of the Query for debugging/validation
// purposes. Do not use it for SQL queries.
func (q Query) String() string {
	s := "SELECT keys"
	if !q.KeysOnly {
		s += ",vals"
	}
	if q.ReturnExpirations {
		s += ",exps"
	}

	s += " "

	if q.Prefix != "" {
		s += fmt.Sprintf("FROM %q ", q.Prefix)
	}

	if len(q.Filters) > 0 {
		s += fmt.Sprintf("FILTER [%s", q.Filters[0])
		for _, f := range q.Filters[1:] {
			s += fmt.Sprintf(", %s", f)
		}
		s += "] "
	}

	if len(q.Orders) > 0 {
		s += fmt.Sprintf("ORDER [%s", q.Orders[0])
		for _, f := range q.Orders[1:] {
			s += fmt.Sprintf(", %s", f)
		}
		s += "] "
	}

	if q.Offset > 0 {
		s += fmt.Sprintf("OFFSET %d ", q.Offset)
	}

	if q.Limit > 0 {
		s += fmt.Sprintf("LIMIT %d ", q.Limit)
	}
	// Will always end with a space, strip it.
	return s[:len(s)-1]
}

// Entry is a query result entry.
type Entry struct {
	Key        string    // can't be ds.Key because circular imports ...!!!
	Value      []byte    // Will be nil if KeysOnly has been passed.
	Expiration time.Time // Entry expiration timestamp if requested and supported (see TTLDatastore).
	Size       int       // Might be -1 if the datastore doesn't support listing the size with KeysOnly
	// or if ReturnsSizes is not set
}

// Result is a special entry that includes an error, so that the client
// may be warned about internal errors. If Error is non-nil, Entry must be
// empty.
type Result struct {
	Entry

	Error error
}

// Results is a set of Query results. This is the interface for clients.
// Example:
//
//	qr, _ := myds.Query(q)
//	for r := range qr.Next() {
//		if r.Error != nil {
//			// handle.
//			break
//		}
//
//		fmt.Println(r.Entry.Key, r.Entry.Value)
//	}
//
// or, wait on all results at once:
//
//	qr, _ := myds.Query(q)
//	es, _ := qr.Rest()
//	for _, e := range es {
//		fmt.Println(e.Key, e.Value)
//	}
type Results interface {
	Query() Query             // the query these Results correspond to
	Next() <-chan Result      // returns a channel to wait for the next result
	NextSync() (Result, bool) // blocks and waits to return the next result, second parameter returns false when results are exhausted
	Rest() ([]Entry, error)   // waits till processing finishes, returns all entries at once.
	Close() error             // client may call Close to signal early exit

	// Process returns a goprocess.Process associated with these results.
	// most users will not need this function (Close is all they want),
	// but it's here in case you want to connect the results to other
	// goprocess-friendly things.
	Process() goprocess.Process
}

// results implements Results
type results struct {
	query Query
	proc  goprocess.Process
	res   <-chan Result
}

func (r *results) Next() <-chan Result {
	return r.res
}

func (r *results) NextSync() (Result, bool) {
	val, ok := <-r.res
	return val, ok
}

func (r *results) Rest() ([]Entry, error) {
	var es []Entry
	for e := range r.res {
		if e.Error != nil {
			return es, e.Error
		}
		es = append(es, e.Entry)
	}
	<-r.proc.Closed() // wait till the processing finishes.
	return es, nil
}

func (r *results) Process() goprocess.Process {
	return r.proc
}

func (r *results) Close() error {
	return r.proc.Close()
}

func (r *results) Query() Query {
	return r.query
}

// ResultBuilder is what implementors use to construct results.
// Implementors of datastores and their clients must respect the
// Process of the Request:
//
//   * clients must call r.Process().Close() on an early exit, so
//     implementations can reclaim resources.
//   * if the Entries are read to completion (channel closed), Process
//     should be closed automatically.
//   * datastores must respect <-Process.Closing(), which intermediates
//     an early close signal from the client.
type ResultBuilder struct {
	Query   Query
	Process goprocess.Process
	Output  chan Result
}

// Results returns a Results tied to this builder.
func (rb *ResultBuilder) Results() Results {
	return &results{
		query: rb.Query,
		proc:  rb.Process,
		res:   rb.Output,
	}
}

const NormalBufSize = 1
const KeysOnlyBufSize = 128

func NewResultBuilder(q Query) *ResultBuilder {
	bufSize := NormalBufSize
	if q.KeysOnly {
		bufSize = KeysOnlyBufSize
	}
	b := &ResultBuilder{
		Query:  q,
		Output: make(chan Result, bufSize),
	}
	b.Process = goprocess.WithTeardown(func() error {
		close(b.Output)
		return nil
	})
	return b
}

// ResultsWithChan returns a Results object from a channel
// of Result entries.
//
// DEPRECATED: This iterator is impossible to cancel correctly. Canceling it
// will leave anything trying to write to the result channel hanging.
func ResultsWithChan(q Query, res <-chan Result) Results {
	return ResultsWithProcess(q, func(worker goprocess.Process, out chan<- Result) {
		for {
			select {
			case <-worker.Closing(): // client told us to close early
				return
			case e, more := <-res:
				if !more {
					return
				}

				select {
				case out <- e:
				case <-worker.Closing(): // client told us to close early
					return
				}
			}
		}
	})
}

// ResultsWithProcess returns a Results object with the results generated by the
// passed subprocess.
func ResultsWithProcess(q Query, proc func(goprocess.Process, chan<- Result)) Results {
	b := NewResultBuilder(q)

	// go consume all the entries and add them to the results.
	b.Process.Go(func(worker goprocess.Process) {
		proc(worker, b.Output)
	})

	go b.Process.CloseAfterChildren() //nolint
	return b.Results()
}

// ResultsWithEntries returns a Results object from a list of entries
func ResultsWithEntries(q Query, res []Entry) Results {
	i := 0
	return ResultsFromIterator(q, Iterator{
		Next: func() (Result, bool) {
			if i >= len(res) {
				return Result{}, false
			}
			next := res[i]
			i++
			return Result{Entry: next}, true
		},
	})
}

func ResultsReplaceQuery(r Results, q Query) Results {
	switch r := r.(type) {
	case *results:
		// note: not using field names to make sure all fields are copied
		return &results{q, r.proc, r.res}
	case *resultsIter:
		// note: not using field names to make sure all fields are copied
		lr := r.legacyResults
		if lr != nil {
			lr = &results{q, lr.proc, lr.res}
		}
		return &resultsIter{q, r.next, r.close, lr}
	default:
		panic("unknown results type")
	}
}

// ResultsFromIterator provides an alternative way to construct
// results without the use of channels.
func ResultsFromIterator(q Query, iter Iterator) Results {
	if iter.Close == nil {
		iter.Close = noopClose
	}
	return &resultsIter{
		query: q,
		next:  iter.Next,
		close: iter.Close,
	}
}

func noopClose() error {
	return nil
}

type Iterator struct {
	Next  func() (Result, bool)
	Close func() error // note: might be called more than once
}

type resultsIter struct {
	query         Query
	next          func() (Result, bool)
	close         func() error
	legacyResults *results
}

func (r *resultsIter) Next() <-chan Result {
	r.useLegacyResults()
	return r.legacyResults.Next()
}

func (r *resultsIter) NextSync() (Result, bool) {
	if r.legacyResults != nil {
		return r.legacyResults.NextSync()
	} else {
		res, ok := r.next()
		if !ok {
			r.close()
		}
		return res, ok
	}
}

func (r *resultsIter) Rest() ([]Entry, error) {
	var es []Entry
	for {
		e, ok := r.NextSync()
		if !ok {
			break
		}
		if e.Error != nil {
			return es, e.Error
		}
		es = append(es, e.Entry)
	}
	return es, nil
}

func (r *resultsIter) Process() goprocess.Process {
	r.useLegacyResults()
	return r.legacyResults.Process()
}

func (r *resultsIter) Close() error {
	if r.legacyResults != nil {
		return r.legacyResults.Close()
	} else {
		return r.close()
	}
}

func (r *resultsIter) Query() Query {
	return r.query
}

func (r *resultsIter) useLegacyResults() {
	if r.legacyResults != nil {
		return
	}

	b := NewResultBuilder(r.query)

	// go consume all the entries and add them to the results.
	b.Process.Go(func(worker goprocess.Process) {
		defer r.close()
		for {
			e, ok := r.next()
			if !ok {
				break
			}
			select {
			case b.Output <- e:
			case <-worker.Closing(): // client told us to close early
				return
			}
		}
	})

	go b.Process.CloseAfterChildren() //nolint

	r.legacyResults = b.Results().(*results)
}
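For orientation, a hedged sketch (not part of the vendored file) of the client-side flow these types support: build a Query, wrap a static entry list with ResultsWithEntries, and drain it with Rest. The keys and values are illustrative only:

package main

import (
	"fmt"

	"github.com/ipfs/go-datastore/query"
)

func main() {
	q := query.Query{
		Prefix: "/license",
		Limit:  10,
	}

	// A datastore would normally produce these; here we fake a result
	// set from a static slice of entries.
	res := query.ResultsWithEntries(q, []query.Entry{
		{Key: "/license/id", Value: []byte("CHORUS-DEV-MULTI-001")},
		{Key: "/license/tier", Value: []byte("dev")},
	})
	defer res.Close()

	entries, err := res.Rest() // wait on all results at once
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		fmt.Println(e.Key, string(e.Value))
	}
	fmt.Println(q) // SELECT keys,vals FROM "/license" LIMIT 10
}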
vendor/github.com/ipfs/go-datastore/query/query_impl.go (new file, generated, vendored, 158 lines)
@@ -0,0 +1,158 @@

package query

import (
	"path"

	goprocess "github.com/jbenet/goprocess"
)

// NaiveFilter applies a filter to the results.
func NaiveFilter(qr Results, filter Filter) Results {
	return ResultsFromIterator(qr.Query(), Iterator{
		Next: func() (Result, bool) {
			for {
				e, ok := qr.NextSync()
				if !ok {
					return Result{}, false
				}
				if e.Error != nil || filter.Filter(e.Entry) {
					return e, true
				}
			}
		},
		Close: func() error {
			return qr.Close()
		},
	})
}

// NaiveLimit truncates the results to a given int limit
func NaiveLimit(qr Results, limit int) Results {
	if limit == 0 {
		// 0 means no limit
		return qr
	}
	closed := false
	return ResultsFromIterator(qr.Query(), Iterator{
		Next: func() (Result, bool) {
			if limit == 0 {
				if !closed {
					closed = true
					err := qr.Close()
					if err != nil {
						return Result{Error: err}, true
					}
				}
				return Result{}, false
			}
			limit--
			return qr.NextSync()
		},
		Close: func() error {
			if closed {
				return nil
			}
			closed = true
			return qr.Close()
		},
	})
}

// NaiveOffset skips a given number of results
func NaiveOffset(qr Results, offset int) Results {
	return ResultsFromIterator(qr.Query(), Iterator{
		Next: func() (Result, bool) {
			for ; offset > 0; offset-- {
				res, ok := qr.NextSync()
				if !ok || res.Error != nil {
					return res, ok
				}
			}
			return qr.NextSync()
		},
		Close: func() error {
			return qr.Close()
		},
	})
}

// NaiveOrder reorders results according to given orders.
// WARNING: this is the only non-stream friendly operation!
func NaiveOrder(qr Results, orders ...Order) Results {
	// Short circuit.
	if len(orders) == 0 {
		return qr
	}

	return ResultsWithProcess(qr.Query(), func(worker goprocess.Process, out chan<- Result) {
		defer qr.Close()
		var entries []Entry
	collect:
		for {
			select {
			case <-worker.Closing():
				return
			case e, ok := <-qr.Next():
				if !ok {
					break collect
				}
				if e.Error != nil {
					out <- e
					continue
				}
				entries = append(entries, e.Entry)
			}
		}

		Sort(orders, entries)

		for _, e := range entries {
			select {
			case <-worker.Closing():
				return
			case out <- Result{Entry: e}:
			}
		}
	})
}

func NaiveQueryApply(q Query, qr Results) Results {
	if q.Prefix != "" {
		// Clean the prefix as a key and append / so a prefix of /bar
		// only finds /bar/baz, not /barbaz.
		prefix := q.Prefix
		if len(prefix) == 0 {
			prefix = "/"
		} else {
			if prefix[0] != '/' {
				prefix = "/" + prefix
			}
			prefix = path.Clean(prefix)
		}
		// If the prefix is empty, ignore it.
		if prefix != "/" {
			qr = NaiveFilter(qr, FilterKeyPrefix{prefix + "/"})
		}
	}
	for _, f := range q.Filters {
		qr = NaiveFilter(qr, f)
	}
	if len(q.Orders) > 0 {
		qr = NaiveOrder(qr, q.Orders...)
	}
	if q.Offset != 0 {
		qr = NaiveOffset(qr, q.Offset)
	}
	if q.Limit != 0 {
		qr = NaiveLimit(qr, q.Limit)
	}
	return qr
}

func ResultEntriesFrom(keys []string, vals [][]byte) []Entry {
	re := make([]Entry, len(keys))
	for i, k := range keys {
		re[i] = Entry{Key: k, Size: len(vals[i]), Value: vals[i]}
	}
	return re
}
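Finally, a hedged sketch (again not part of the vendored code) of how the naive helpers compose: NaiveQueryApply layers prefix filtering, ordering, offset, and limit over a plain result stream. The entries are invented for illustration:

package main

import (
	"fmt"

	"github.com/ipfs/go-datastore/query"
)

func main() {
	// Raw, unordered entries as a datastore without native query
	// support might yield them.
	raw := query.ResultEntriesFrom(
		[]string{"/peers/b", "/peers/a", "/other/x", "/peers/c"},
		[][]byte{[]byte("2"), []byte("1"), []byte("9"), []byte("3")},
	)

	q := query.Query{
		Prefix: "/peers",
		Orders: []query.Order{query.OrderByKey{}},
		Limit:  2,
	}

	// Apply prefix, order, offset, and limit in-process.
	res := query.NaiveQueryApply(q, query.ResultsWithEntries(q, raw))
	entries, err := res.Rest()
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		fmt.Println(e.Key, string(e.Value)) // /peers/a 1, /peers/b 2
	}
}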